Diffstat (limited to 'drivers')
819 files changed, 29650 insertions, 9150 deletions
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index 9a70bee84125..b508df2ecada 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -21,7 +21,6 @@ #include <linux/module.h> #include <linux/atmdev.h> #include <linux/sonet.h> -#include <linux/atm_suni.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/firmware.h> @@ -100,8 +99,6 @@ static LIST_HEAD(fore200e_boards); MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen"); MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION); -MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E"); - static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { { BUFFER_S1_NBR, BUFFER_L1_NBR }, diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c index c920a8c52925..21e5acc766b8 100644 --- a/drivers/atm/suni.c +++ b/drivers/atm/suni.c @@ -21,7 +21,6 @@ #include <linux/timer.h> #include <linux/init.h> #include <linux/capability.h> -#include <linux/atm_suni.h> #include <linux/slab.h> #include <asm/param.h> #include <linux/uaccess.h> diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 18b82427d0cb..a46a7e30881b 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -325,22 +325,22 @@ static void rpm_put_suppliers(struct device *dev) static int __rpm_callback(int (*cb)(struct device *), struct device *dev) __releases(&dev->power.lock) __acquires(&dev->power.lock) { - bool use_links = dev->power.links_count > 0; - bool get = false; int retval, idx; - bool put; + bool use_links = dev->power.links_count > 0; if (dev->power.irq_safe) { spin_unlock(&dev->power.lock); - } else if (!use_links) { - spin_unlock_irq(&dev->power.lock); } else { - get = dev->power.runtime_status == RPM_RESUMING; - spin_unlock_irq(&dev->power.lock); - /* Resume suppliers if necessary. */ - if (get) { + /* + * Resume suppliers if necessary. + * + * The device's runtime PM status cannot change until this + * routine returns, so it is safe to read the status outside of + * the lock. + */ + if (use_links && dev->power.runtime_status == RPM_RESUMING) { idx = device_links_read_lock(); retval = rpm_get_suppliers(dev); @@ -355,36 +355,24 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev) if (dev->power.irq_safe) { spin_lock(&dev->power.lock); - return retval; - } - - spin_lock_irq(&dev->power.lock); - - if (!use_links) - return retval; - - /* - * If the device is suspending and the callback has returned success, - * drop the usage counters of the suppliers that have been reference - * counted on its resume. - * - * Do that if the resume fails too. - */ - put = dev->power.runtime_status == RPM_SUSPENDING && !retval; - if (put) - __update_runtime_status(dev, RPM_SUSPENDED); - else - put = get && retval; - - if (put) { - spin_unlock_irq(&dev->power.lock); - - idx = device_links_read_lock(); + } else { + /* + * If the device is suspending and the callback has returned + * success, drop the usage counters of the suppliers that have + * been reference counted on its resume. + * + * Do that if resume fails too. 
+ */ + if (use_links + && ((dev->power.runtime_status == RPM_SUSPENDING && !retval) + || (dev->power.runtime_status == RPM_RESUMING && retval))) { + idx = device_links_read_lock(); -fail: - rpm_put_suppliers(dev); + fail: + rpm_put_suppliers(dev); - device_links_read_unlock(idx); + device_links_read_unlock(idx); + } spin_lock_irq(&dev->power.lock); } diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 37179a8b1ceb..fa3719ef80e4 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -938,6 +938,9 @@ int software_node_register(const struct software_node *node) if (software_node_to_swnode(node)) return -EEXIST; + if (node->parent && !parent) + return -EINVAL; + return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0)); } EXPORT_SYMBOL_GPL(software_node_register); @@ -1002,25 +1005,33 @@ EXPORT_SYMBOL_GPL(fwnode_remove_software_node); /** * device_add_software_node - Assign software node to a device * @dev: The device the software node is meant for. - * @swnode: The software node. + * @node: The software node. * - * This function will register @swnode and make it the secondary firmware node - * pointer of @dev. If @dev has no primary node, then @swnode will become the primary - * node. + * This function will make @node the secondary firmware node pointer of @dev. If + * @dev has no primary node, then @node will become the primary node. The + * function will register @node automatically if it wasn't already registered. */ -int device_add_software_node(struct device *dev, const struct software_node *swnode) +int device_add_software_node(struct device *dev, const struct software_node *node) { + struct swnode *swnode; int ret; /* Only one software node per device. */ if (dev_to_swnode(dev)) return -EBUSY; - ret = software_node_register(swnode); - if (ret) - return ret; + swnode = software_node_to_swnode(node); + if (swnode) { + kobject_get(&swnode->kobj); + } else { + ret = software_node_register(node); + if (ret) + return ret; + + swnode = software_node_to_swnode(node); + } - set_secondary_fwnode(dev, software_node_fwnode(swnode)); + set_secondary_fwnode(dev, &swnode->fwnode); return 0; } diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 7d9cc433b758..5d9181382ce1 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1324,7 +1324,7 @@ struct bm_extent { * A followup commit may allow even bigger BIO sizes, * once we thought that through. */ #define DRBD_MAX_BIO_SIZE (1U << 20) -#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT) +#if DRBD_MAX_BIO_SIZE > (BIO_MAX_VECS << PAGE_SHIFT) #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE #endif #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */ diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 0b71292d9d5a..4aa9683ee0c1 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -5091,7 +5091,6 @@ module_param(floppy, charp, 0); module_param(FLOPPY_IRQ, int, 0); module_param(FLOPPY_DMA, int, 0); MODULE_AUTHOR("Alain L. 
Knaff"); -MODULE_SUPPORTED_DEVICE("fd"); MODULE_LICENSE("GPL"); /* This doesn't actually get used other than for module information */ diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 5ac1881396af..227e1be4c6f9 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -871,6 +871,7 @@ static int rsxx_pci_probe(struct pci_dev *dev, card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event"); if (!card->event_wq) { dev_err(CARD_TO_DEV(card), "Failed card event setup.\n"); + st = -ENOMEM; goto failed_event_handler; } diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 982732dbe82e..664280f23bee 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -877,6 +877,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) if (card->mm_pages[0].desc == NULL || card->mm_pages[1].desc == NULL) { dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n"); + ret = -ENOMEM; goto failed_alloc; } reset_page(&card->mm_pages[0]); @@ -888,8 +889,10 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) spin_lock_init(&card->lock); card->queue = blk_alloc_queue(NUMA_NO_NODE); - if (!card->queue) + if (!card->queue) { + ret = -ENOMEM; goto failed_alloc; + } tasklet_init(&card->tasklet, process_page, (unsigned long)card); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index a711a2e2a794..cf8deecc39ef 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -627,7 +627,7 @@ static ssize_t writeback_store(struct device *dev, struct bio_vec bio_vec; struct page *page; ssize_t ret = len; - int mode; + int mode, err; unsigned long blk_idx = 0; if (sysfs_streq(buf, "idle")) @@ -638,8 +638,8 @@ static ssize_t writeback_store(struct device *dev, if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1)) return -EINVAL; - ret = kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index); - if (ret || index >= nr_pages) + if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) || + index >= nr_pages) return -EINVAL; nr_pages = 1; @@ -663,7 +663,7 @@ static ssize_t writeback_store(struct device *dev, goto release_init_lock; } - while (nr_pages--) { + for (; nr_pages != 0; index++, nr_pages--) { struct bio_vec bvec; bvec.bv_page = page; @@ -728,12 +728,17 @@ static ssize_t writeback_store(struct device *dev, * XXX: A single page IO would be inefficient for write * but it would be not bad as starter. */ - ret = submit_bio_wait(&bio); - if (ret) { + err = submit_bio_wait(&bio); + if (err) { zram_slot_lock(zram, index); zram_clear_flag(zram, index, ZRAM_UNDER_WB); zram_clear_flag(zram, index, ZRAM_IDLE); zram_slot_unlock(zram, index); + /* + * Return last IO error unless every IO were + * not suceeded. 
+ */ + ret = err; continue; } diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c index 3951f7b23840..bea1595f6432 100644 --- a/drivers/bluetooth/btrsi.c +++ b/drivers/bluetooth/btrsi.c @@ -194,5 +194,4 @@ module_init(rsi_91x_bt_module_init); module_exit(rsi_91x_bt_module_exit); MODULE_AUTHOR("Redpine Signals Inc"); MODULE_DESCRIPTION("RSI BT driver"); -MODULE_SUPPORTED_DEVICE("RSI-BT"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index 14b2d8034c51..45ac7ab003ce 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -81,9 +81,6 @@ MODULE_DESCRIPTION("Driver for Applicom Profibus card"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(AC_MINOR); -MODULE_SUPPORTED_DEVICE("ac"); - - static struct applicom_board { unsigned long PhysIO; void __iomem *RamIO; diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c index aff0a8e44fff..776abbfd85d6 100644 --- a/drivers/char/toshiba.c +++ b/drivers/char/toshiba.c @@ -64,7 +64,6 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jonathan Buzzard <jonathan@buzzard.org.uk>"); MODULE_DESCRIPTION("Toshiba laptop SMM driver"); -MODULE_SUPPORTED_DEVICE("toshiba"); static DEFINE_MUTEX(tosh_mutex); static int tosh_fn; diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 42f13a2d1cc1..05ff3b0d233e 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -730,7 +730,8 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw, struct clk_rate_request parent_req = { }; struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw); struct clk_hw *xo, *p0, *p1, *p2; - unsigned long request, p0_rate; + unsigned long p0_rate; + u8 mux_div = cgfx->div; int ret; p0 = cgfx->hws[0]; @@ -750,14 +751,15 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw, return 0; } - request = req->rate; - if (cgfx->div > 1) - parent_req.rate = request = request * cgfx->div; + if (mux_div == 0) + mux_div = 1; + + parent_req.rate = req->rate * mux_div; /* This has to be a fixed rate PLL */ p0_rate = clk_hw_get_rate(p0); - if (request == p0_rate) { + if (parent_req.rate == p0_rate) { req->rate = req->best_parent_rate = p0_rate; req->best_parent_hw = p0; return 0; @@ -765,7 +767,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw, if (req->best_parent_hw == p0) { /* Are we going back to a previously used rate? 
*/ - if (clk_hw_get_rate(p2) == request) + if (clk_hw_get_rate(p2) == parent_req.rate) req->best_parent_hw = p2; else req->best_parent_hw = p1; @@ -780,8 +782,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw, return ret; req->rate = req->best_parent_rate = parent_req.rate; - if (cgfx->div > 1) - req->rate /= cgfx->div; + req->rate /= mux_div; return 0; } diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index 91dc390a583b..c623ce900406 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -510,9 +510,12 @@ static const struct clk_rpmh_desc clk_rpmh_sm8350 = { .num_clks = ARRAY_SIZE(sm8350_rpmh_clocks), }; +/* Resource name must match resource id present in cmd-db */ +DEFINE_CLK_RPMH_ARC(sc7280, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 4); + static struct clk_hw *sc7280_rpmh_clocks[] = { - [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw, - [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw, + [RPMH_CXO_CLK] = &sc7280_bi_tcxo.hw, + [RPMH_CXO_CLK_A] = &sc7280_bi_tcxo_ao.hw, [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw, [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw, [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw, diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c index 88e896abb663..da8b627ca156 100644 --- a/drivers/clk/qcom/gcc-sc7180.c +++ b/drivers/clk/qcom/gcc-sc7180.c @@ -620,7 +620,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = { .name = "gcc_sdcc1_apps_clk_src", .parent_data = gcc_parent_data_1, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -642,7 +642,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = { .name = "gcc_sdcc1_ice_core_clk_src", .parent_data = gcc_parent_data_0, .num_parents = 4, - .ops = &clk_rcg2_floor_ops, + .ops = &clk_rcg2_ops, }, }; diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c index ef2a974a2f10..75bc401fdd18 100644 --- a/drivers/counter/stm32-timer-cnt.c +++ b/drivers/counter/stm32-timer-cnt.c @@ -31,7 +31,7 @@ struct stm32_timer_cnt { struct counter_device counter; struct regmap *regmap; struct clk *clk; - u32 ceiling; + u32 max_arr; bool enabled; struct stm32_timer_regs bak; }; @@ -44,13 +44,14 @@ struct stm32_timer_cnt { * @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges */ enum stm32_count_function { - STM32_COUNT_SLAVE_MODE_DISABLED = -1, + STM32_COUNT_SLAVE_MODE_DISABLED, STM32_COUNT_ENCODER_MODE_1, STM32_COUNT_ENCODER_MODE_2, STM32_COUNT_ENCODER_MODE_3, }; static enum counter_count_function stm32_count_functions[] = { + [STM32_COUNT_SLAVE_MODE_DISABLED] = COUNTER_COUNT_FUNCTION_INCREASE, [STM32_COUNT_ENCODER_MODE_1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A, [STM32_COUNT_ENCODER_MODE_2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B, [STM32_COUNT_ENCODER_MODE_3] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4, @@ -73,8 +74,10 @@ static int stm32_count_write(struct counter_device *counter, const unsigned long val) { struct stm32_timer_cnt *const priv = counter->priv; + u32 ceiling; - if (val > priv->ceiling) + regmap_read(priv->regmap, TIM_ARR, &ceiling); + if (val > ceiling) return -EINVAL; return regmap_write(priv->regmap, TIM_CNT, val); @@ -90,6 +93,9 @@ static int stm32_count_function_get(struct counter_device *counter, regmap_read(priv->regmap, TIM_SMCR, &smcr); switch (smcr & TIM_SMCR_SMS) { + case 0: + *function = STM32_COUNT_SLAVE_MODE_DISABLED; + return 0; case 1: *function = STM32_COUNT_ENCODER_MODE_1; return 0; @@ -99,9 +105,9 @@ static int stm32_count_function_get(struct counter_device *counter, case 3: *function = 
STM32_COUNT_ENCODER_MODE_3; return 0; + default: + return -EINVAL; } - - return -EINVAL; } static int stm32_count_function_set(struct counter_device *counter, @@ -112,6 +118,9 @@ static int stm32_count_function_set(struct counter_device *counter, u32 cr1, sms; switch (function) { + case STM32_COUNT_SLAVE_MODE_DISABLED: + sms = 0; + break; case STM32_COUNT_ENCODER_MODE_1: sms = 1; break; @@ -122,8 +131,7 @@ static int stm32_count_function_set(struct counter_device *counter, sms = 3; break; default: - sms = 0; - break; + return -EINVAL; } /* Store enable status */ @@ -131,10 +139,6 @@ static int stm32_count_function_set(struct counter_device *counter, regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); - /* TIMx_ARR register shouldn't be buffered (ARPE=0) */ - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); - regmap_write(priv->regmap, TIM_ARR, priv->ceiling); - regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms); /* Make sure that registers are updated */ @@ -185,11 +189,13 @@ static ssize_t stm32_count_ceiling_write(struct counter_device *counter, if (ret) return ret; + if (ceiling > priv->max_arr) + return -ERANGE; + /* TIMx_ARR register shouldn't be buffered (ARPE=0) */ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); regmap_write(priv->regmap, TIM_ARR, ceiling); - priv->ceiling = ceiling; return len; } @@ -274,31 +280,36 @@ static int stm32_action_get(struct counter_device *counter, size_t function; int err; - /* Default action mode (e.g. STM32_COUNT_SLAVE_MODE_DISABLED) */ - *action = STM32_SYNAPSE_ACTION_NONE; - err = stm32_count_function_get(counter, count, &function); if (err) - return 0; + return err; switch (function) { + case STM32_COUNT_SLAVE_MODE_DISABLED: + /* counts on internal clock when CEN=1 */ + *action = STM32_SYNAPSE_ACTION_NONE; + return 0; case STM32_COUNT_ENCODER_MODE_1: /* counts up/down on TI1FP1 edge depending on TI2FP2 level */ if (synapse->signal->id == count->synapses[0].signal->id) *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; - break; + else + *action = STM32_SYNAPSE_ACTION_NONE; + return 0; case STM32_COUNT_ENCODER_MODE_2: /* counts up/down on TI2FP2 edge depending on TI1FP1 level */ if (synapse->signal->id == count->synapses[1].signal->id) *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; - break; + else + *action = STM32_SYNAPSE_ACTION_NONE; + return 0; case STM32_COUNT_ENCODER_MODE_3: /* counts up/down on both TI1FP1 and TI2FP2 edges */ *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; - break; + return 0; + default: + return -EINVAL; } - - return 0; } static const struct counter_ops stm32_timer_cnt_ops = { @@ -359,7 +370,7 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev) priv->regmap = ddata->regmap; priv->clk = ddata->clk; - priv->ceiling = ddata->max_arr; + priv->max_arr = ddata->max_arr; priv->counter.name = dev_name(dev); priv->counter.parent = dev; diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 3ba2f716fe97..5e07065ec22f 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -103,6 +103,8 @@ static const struct of_device_id whitelist[] __initconst = { static const struct of_device_id blacklist[] __initconst = { { .compatible = "allwinner,sun50i-h6", }, + { .compatible = "arm,vexpress", }, + { .compatible = "calxeda,highbank", }, { .compatible = "calxeda,ecx-2000", }, diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index d3c23447b892..f86859bf76f1 100644 --- 
a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -317,9 +317,9 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) } base = ioremap(res->start, resource_size(res)); - if (IS_ERR(base)) { + if (!base) { dev_err(dev, "failed to map resource %pR\n", res); - ret = PTR_ERR(base); + ret = -ENOMEM; goto release_region; } @@ -374,7 +374,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) error: kfree(data); unmap_base: - iounmap(data->base); + iounmap(base); release_region: release_mem_region(res->start, resource_size(res)); return ret; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index df3f9bcab581..4b7ee3fa9224 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -927,7 +927,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) } /* first try to find a slot in an existing linked list entry */ - for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) { + for (prsv = efi_memreserve_root->next; prsv; ) { rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB); index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); if (index < rsv->size) { @@ -937,6 +937,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) memunmap(rsv); return efi_mem_reserve_iomem(addr, size); } + prsv = rsv->next; memunmap(rsv); } diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index b69d63143e0d..7bf0a7acae5e 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -24,7 +24,7 @@ efi_status_t check_platform_features(void) return EFI_SUCCESS; tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf; - if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) { + if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) { if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) efi_err("This 64 KB granular kernel is not supported by your CPU\n"); else diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c index ec2f3985bef3..26e69788f27a 100644 --- a/drivers/firmware/efi/libstub/efi-stub.c +++ b/drivers/firmware/efi/libstub/efi-stub.c @@ -96,6 +96,18 @@ static void install_memreserve_table(void) efi_err("Failed to install memreserve config table!\n"); } +static u32 get_supported_rt_services(void) +{ + const efi_rt_properties_table_t *rt_prop_table; + u32 supported = EFI_RT_SUPPORTED_ALL; + + rt_prop_table = get_efi_config_table(EFI_RT_PROPERTIES_TABLE_GUID); + if (rt_prop_table) + supported &= rt_prop_table->runtime_services_supported; + + return supported; +} + /* * EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint * that is described in the PE/COFF header. 
Most of the code is the same @@ -250,6 +262,10 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, (prop_tbl->memory_protection_attribute & EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA); + /* force efi_novamap if SetVirtualAddressMap() is unsupported */ + efi_novamap |= !(get_supported_rt_services() & + EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP); + /* hibernation expects the runtime regions to stay in the same place */ if (!IS_ENABLED(CONFIG_HIBERNATION) && !efi_nokaslr && !flat_va_mapping) { /* diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index 41c1d00bf933..abdc8a6a3963 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -485,6 +485,10 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), } break; + case EFI_UNSUPPORTED: + err = -EOPNOTSUPP; + status = EFI_NOT_FOUND; + break; case EFI_NOT_FOUND: break; default: diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 7ec0822c0505..6367646dce83 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -571,6 +571,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, struct lock_class_key *lock_key, struct lock_class_key *request_key) { + struct fwnode_handle *fwnode = gc->parent ? dev_fwnode(gc->parent) : NULL; unsigned long flags; int ret = 0; unsigned i; @@ -594,6 +595,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, of_gpio_dev_init(gc, gdev); + /* + * Assign fwnode depending on the result of the previous calls, + * if none of them succeed, assign it to the parent's one. + */ + gdev->dev.fwnode = dev_fwnode(&gdev->dev) ?: fwnode; + gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL); if (gdev->id < 0) { ret = gdev->id; @@ -4256,7 +4263,8 @@ static int __init gpiolib_dev_init(void) return ret; } - if (driver_register(&gpio_stub_drv) < 0) { + ret = driver_register(&gpio_stub_drv); + if (ret < 0) { pr_err("gpiolib: could not register GPIO stub driver\n"); bus_unregister(&gpio_bus_type); return ret; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index e392a90ca687..85b79a7fee63 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -228,6 +228,7 @@ source "drivers/gpu/drm/arm/Kconfig" config DRM_RADEON tristate "ATI Radeon" depends on DRM && PCI && MMU + depends on AGP || !AGP select FW_LOADER select DRM_KMS_HELPER select DRM_TTM diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index b6879d97c9c9..49267eb64302 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -180,6 +180,7 @@ extern uint amdgpu_smu_memory_pool_size; extern uint amdgpu_dc_feature_mask; extern uint amdgpu_dc_debug_mask; extern uint amdgpu_dm_abm_level; +extern int amdgpu_backlight; extern struct amdgpu_mgpu_info mgpu_info; extern int amdgpu_ras_enable; extern uint amdgpu_ras_mask; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 36a741d63ddc..2e9b16fb3fcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -903,7 +903,7 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev) */ bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { -#if defined(CONFIG_AMD_PMC) +#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE) if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) { if (adev->flags & AMD_IS_APU) return true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 4575192d9b08..b26e2fd1c538 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -781,6 +781,10 @@ uint amdgpu_dm_abm_level; MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) "); module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444); +int amdgpu_backlight = -1; +MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))"); +module_param_named(backlight, amdgpu_backlight, bint, 0444); + /** * DOC: tmz (int) * Trusted Memory Zone (TMZ) is a method to protect data being written diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 51cd49c6f38f..24010cacf7d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, size = mode_cmd->pitches[0] * height; aligned_size = ALIGN(size, PAGE_SIZE); ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags, - ttm_bo_type_kernel, NULL, &gobj); + ttm_bo_type_device, NULL, &gobj); if (ret) { pr_err("failed to allocate framebuffer (%d)\n", aligned_size); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 3e1fd1e7d09f..573cf17262da 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2267,6 +2267,11 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps->ext_caps->bits.hdr_aux_backlight_control == 1) caps->aux_support = true; + if (amdgpu_backlight == 0) + caps->aux_support = false; + else if (amdgpu_backlight == 1) + caps->aux_support = true; + /* From the specification (CTA-861-G), for calculating the maximum * luminance we need to use: * Luminance = 50*2**(CV/32) @@ -3185,19 +3190,6 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm) #endif } -static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness) -{ - bool rc; - - if (!link) - return 1; - - rc = dc_link_set_backlight_level_nits(link, true, brightness, - AUX_BL_DEFAULT_TRANSITION_TIME_MS); - - return rc ? 0 : 1; -} - static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, unsigned *min, unsigned *max) { @@ -3260,9 +3252,10 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) brightness = convert_brightness_from_user(&caps, bd->props.brightness); // Change brightness based on AUX property if (caps.aux_support) - return set_backlight_via_aux(link, brightness); - - rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0); + rc = dc_link_set_backlight_level_nits(link, true, brightness, + AUX_BL_DEFAULT_TRANSITION_TIME_MS); + else + rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0); return rc ? 
0 : 1; } @@ -3270,11 +3263,27 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) { struct amdgpu_display_manager *dm = bl_get_data(bd); - int ret = dc_link_get_backlight_level(dm->backlight_link); + struct amdgpu_dm_backlight_caps caps; + + amdgpu_dm_update_backlight_caps(dm); + caps = dm->backlight_caps; + + if (caps.aux_support) { + struct dc_link *link = (struct dc_link *)dm->backlight_link; + u32 avg, peak; + bool rc; - if (ret == DC_ERROR_UNEXPECTED) - return bd->props.brightness; - return convert_brightness_to_user(&dm->backlight_caps, ret); + rc = dc_link_get_backlight_level_nits(link, &avg, &peak); + if (!rc) + return bd->props.brightness; + return convert_brightness_to_user(&caps, avg); + } else { + int ret = dc_link_get_backlight_level(dm->backlight_link); + + if (ret == DC_ERROR_UNEXPECTED) + return bd->props.brightness; + return convert_brightness_to_user(&caps, ret); + } } static const struct backlight_ops amdgpu_dm_backlight_ops = { @@ -4716,6 +4725,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, dc_plane_state->global_alpha_value = plane_info.global_alpha_value; dc_plane_state->dcc = plane_info.dcc; dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0 + dc_plane_state->flip_int_enabled = true; /* * Always set input transfer function, since plane state is refreshed diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index fa5059f71727..bd0101013ec8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2602,7 +2602,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link, if (pipe_ctx->plane_state == NULL) frame_ramp = 0; } else { - ASSERT(false); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 4eee3a55fa30..18ed0d3f247e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -887,6 +887,7 @@ struct dc_plane_state { int layer_index; union surface_update_flags update_flags; + bool flip_int_enabled; /* private to DC core */ struct dc_plane_status status; struct dc_context *ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 9e796dfeac20..714c71a5fbde 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1257,6 +1257,16 @@ void hubp1_soft_reset(struct hubp *hubp, bool reset) REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 
1 : 0); } +void hubp1_set_flip_int(struct hubp *hubp) +{ + struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); + + REG_UPDATE(DCSURF_SURFACE_FLIP_INTERRUPT, + SURFACE_FLIP_INT_MASK, 1); + + return; +} + void hubp1_init(struct hubp *hubp) { //do nothing @@ -1290,6 +1300,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = { .dmdata_load = NULL, .hubp_soft_reset = hubp1_soft_reset, .hubp_in_blank = hubp1_in_blank, + .hubp_set_flip_int = hubp1_set_flip_int, }; /*****************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index a9a6ed7f4f99..e2f2f6995935 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -74,6 +74,7 @@ SRI(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id),\ SRI(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id),\ SRI(DCSURF_SURFACE_CONTROL, HUBPREQ, id),\ + SRI(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id),\ SRI(HUBPRET_CONTROL, HUBPRET, id),\ SRI(DCN_EXPANSION_MODE, HUBPREQ, id),\ SRI(DCHUBP_REQ_SIZE_CONFIG, HUBP, id),\ @@ -183,6 +184,7 @@ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C; \ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C; \ uint32_t DCSURF_SURFACE_CONTROL; \ + uint32_t DCSURF_SURFACE_FLIP_INTERRUPT; \ uint32_t HUBPRET_CONTROL; \ uint32_t DCN_EXPANSION_MODE; \ uint32_t DCHUBP_REQ_SIZE_CONFIG; \ @@ -332,6 +334,7 @@ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\ @@ -531,6 +534,7 @@ type PRIMARY_SURFACE_DCC_IND_64B_BLK;\ type SECONDARY_SURFACE_DCC_EN;\ type SECONDARY_SURFACE_DCC_IND_64B_BLK;\ + type SURFACE_FLIP_INT_MASK;\ type DET_BUF_PLANE1_BASE_ADDRESS;\ type CROSSBAR_SRC_CB_B;\ type CROSSBAR_SRC_CR_R;\ @@ -777,4 +781,6 @@ void hubp1_read_state_common(struct hubp *hubp); bool hubp1_in_blank(struct hubp *hubp); void hubp1_soft_reset(struct hubp *hubp, bool reset); +void hubp1_set_flip_int(struct hubp *hubp); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 89912bb5014f..9ba5c624770d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2196,6 +2196,13 @@ static void dcn10_enable_plane( if (dc->debug.sanity_checks) { hws->funcs.verify_allow_pstate_change_high(dc); } + + if (!pipe_ctx->top_pipe + && pipe_ctx->plane_state + && pipe_ctx->plane_state->flip_int_enabled + && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) + pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp); + } void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 0df0da2e6a4d..bec7059f6d5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -1597,6 +1597,7 @@ static struct hubp_funcs dcn20_hubp_funcs = { .validate_dml_output = hubp2_validate_dml_output, .hubp_in_blank = 
hubp1_in_blank, .hubp_soft_reset = hubp1_soft_reset, + .hubp_set_flip_int = hubp1_set_flip_int, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 0726fb435e2a..aece1103331d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1146,6 +1146,12 @@ void dcn20_enable_plane( pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt); } + if (!pipe_ctx->top_pipe + && pipe_ctx->plane_state + && pipe_ctx->plane_state->flip_int_enabled + && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) + pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp); + // if (dc->debug.sanity_checks) { // dcn10_verify_allow_pstate_change_high(dc); // } @@ -1501,38 +1507,8 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || pipe_ctx->stream->update_flags.bits.gamut_remap || pipe_ctx->stream->update_flags.bits.out_csc) { - struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - - if (mpc->funcs->set_gamut_remap) { - int i; - int mpcc_id = hubp->inst; - struct mpc_grph_gamut_adjustment adjust; - bool enable_remap_dpp = false; - - memset(&adjust, 0, sizeof(adjust)); - adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; - - /* save the enablement of gamut remap for dpp */ - enable_remap_dpp = pipe_ctx->stream->gamut_remap_matrix.enable_remap; - - /* force bypass gamut remap for dpp/cm */ - pipe_ctx->stream->gamut_remap_matrix.enable_remap = false; - dc->hwss.program_gamut_remap(pipe_ctx); - - /* restore gamut remap flag and use this remap into mpc */ - pipe_ctx->stream->gamut_remap_matrix.enable_remap = enable_remap_dpp; - - /* build remap matrix for top plane if enabled */ - if (enable_remap_dpp && pipe_ctx->top_pipe == NULL) { - adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; - for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) - adjust.temperature_matrix[i] = - pipe_ctx->stream->gamut_remap_matrix.matrix[i]; - } - mpc->funcs->set_gamut_remap(mpc, mpcc_id, &adjust); - } else - /* dpp/cm gamut remap*/ - dc->hwss.program_gamut_remap(pipe_ctx); + /* dpp/cm gamut remap*/ + dc->hwss.program_gamut_remap(pipe_ctx); /*call the dcn2 method which uses mpc csc*/ dc->hwss.program_output_csc(dc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index f9045852728f..b0c9180b808f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -838,6 +838,7 @@ static struct hubp_funcs dcn21_hubp_funcs = { .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, .hubp_init = hubp21_init, .validate_dml_output = hubp21_validate_dml_output, + .hubp_set_flip_int = hubp1_set_flip_int, }; bool hubp21_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 072f8c880924..4a3df13c9e49 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .num_banks = 8, .num_chans = 4, .vmm_page_size_bytes = 4096, - .dram_clock_change_latency_us = 11.72, + .dram_clock_change_latency_us = 23.84, .return_bus_width_bytes = 64, .dispclk_dppclk_vco_speed_mhz = 3600, .xfc_bus_transport_time_us = 
4, @@ -1062,8 +1062,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s { int i; - DC_FP_START(); - if (dc->bb_overrides.sr_exit_time_ns) { for (i = 0; i < WM_SET_COUNT; i++) { dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us = @@ -1088,8 +1086,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; } } - - DC_FP_END(); } void dcn21_calculate_wm( @@ -1339,7 +1335,7 @@ static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc, int vlevel = 0; int pipe_split_from[MAX_PIPES]; int pipe_cnt = 0; - display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); DC_LOGGER_INIT(dc->ctx->logger); BW_VAL_TRACE_COUNT(); @@ -1599,6 +1595,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_soc.num_chans = bw_params->num_channels; ASSERT(clk_table->num_entries); + /* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */ + for (i = 0; i < dcn2_1_soc.num_states + 1; i++) { + clock_limits[i] = dcn2_1_soc.clock_limits[i]; + } + for (i = 0; i < clk_table->num_entries; i++) { /* loop backwards*/ for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c index 41a1d0e9b7e2..e0df9b0065f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c @@ -113,6 +113,7 @@ bool cm3_helper_translate_curve_to_hw_format( struct pwl_result_data *rgb_resulted; struct pwl_result_data *rgb; struct pwl_result_data *rgb_plus_1; + struct pwl_result_data *rgb_minus_1; struct fixed31_32 end_value; int32_t region_start, region_end; @@ -140,7 +141,7 @@ bool cm3_helper_translate_curve_to_hw_format( region_start = -MAX_LOW_POINT; region_end = NUMBER_REGIONS - MAX_LOW_POINT; } else { - /* 10 segments + /* 11 segments * segment is from 2^-10 to 2^0 * There are less than 256 points, for optimization */ @@ -154,9 +155,10 @@ bool cm3_helper_translate_curve_to_hw_format( seg_distr[7] = 4; seg_distr[8] = 4; seg_distr[9] = 4; + seg_distr[10] = 1; region_start = -10; - region_end = 0; + region_end = 1; } for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++) @@ -189,6 +191,10 @@ bool cm3_helper_translate_curve_to_hw_format( rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; + rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red; + rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green; + rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue; + // All 3 color channels have same x corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), dc_fixpt_from_int(region_start)); @@ -259,15 +265,18 @@ bool cm3_helper_translate_curve_to_hw_format( rgb = rgb_resulted; rgb_plus_1 = rgb_resulted + 1; + rgb_minus_1 = rgb; i = 1; while (i != hw_points + 1) { - if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) - rgb_plus_1->red = rgb->red; - if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) - rgb_plus_1->green = rgb->green; - if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) - rgb_plus_1->blue = rgb->blue; + if (i >= hw_points - 1) { + if 
(dc_fixpt_lt(rgb_plus_1->red, rgb->red)) + rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red); + if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) + rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green); + if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) + rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue); + } rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); @@ -283,6 +292,7 @@ bool cm3_helper_translate_curve_to_hw_format( } ++rgb_plus_1; + rgb_minus_1 = rgb; ++rgb; ++i; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index 88ffa9ff1ed1..f24612523248 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -511,6 +511,7 @@ static struct hubp_funcs dcn30_hubp_funcs = { .hubp_init = hubp3_init, .hubp_in_blank = hubp1_in_blank, .hubp_soft_reset = hubp1_soft_reset, + .hubp_set_flip_int = hubp1_set_flip_int, }; bool hubp3_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 8d0f663489ac..fb7f1dea3c46 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -2508,6 +2508,19 @@ static const struct resource_funcs dcn30_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, }; +#define CTX ctx + +#define REG(reg_name) \ + (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + +static uint32_t read_pipe_fuses(struct dc_context *ctx) +{ + uint32_t value = REG_READ(CC_DC_PIPE_DIS); + /* Support for max 6 pipes */ + value = value & 0x3f; + return value; +} + static bool dcn30_resource_construct( uint8_t num_virtual_links, struct dc *dc, @@ -2517,6 +2530,15 @@ static bool dcn30_resource_construct( struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; struct ddc_service_init_data ddc_init_data; + uint32_t pipe_fuses = read_pipe_fuses(ctx); + uint32_t num_pipes = 0; + + if (!(pipe_fuses == 0 || pipe_fuses == 0x3e)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: Unexpected fuse recipe for navi2x !\n"); + /* fault to single pipe */ + pipe_fuses = 0x3e; + } DC_FP_START(); @@ -2650,6 +2672,15 @@ static bool dcn30_resource_construct( /* PP Lib and SMU interfaces */ init_soc_bounding_box(dc, pool); + num_pipes = dcn3_0_ip.max_num_dpp; + + for (i = 0; i < dcn3_0_ip.max_num_dpp; i++) + if (pipe_fuses & 1 << i) + num_pipes--; + + dcn3_0_ip.max_num_dpp = num_pipes; + dcn3_0_ip.max_num_otg = num_pipes; + dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30); /* IRQ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 5d4b2c60192e..c494235016e0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1619,12 +1619,106 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); } +static void calculate_wm_set_for_vlevel( + int vlevel, + struct wm_range_table_entry *table_entry, + struct dcn_watermarks *wm_set, + struct display_mode_lib *dml, + display_e2e_pipe_params_st *pipes, + int pipe_cnt) +{ + double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; + 
+ ASSERT(vlevel < dml->soc.num_states); + /* only pipe 0 is read for voltage and dcf/soc clocks */ + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; + + dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; + dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; + dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; + + wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; + wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; + wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; + dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; + +} + +static void dcn301_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel_req) +{ + int i, pipe_idx; + int vlevel, vlevel_max; + struct wm_range_table_entry *table_entry; + struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; + + ASSERT(bw_params); + + vlevel_max = bw_params->clk_table.num_entries - 1; + + /* WM Set D */ + table_entry = &bw_params->wm_table.entries[WM_D]; + if (table_entry->wm_type == WM_TYPE_RETRAINING) + vlevel = 0; + else + vlevel = vlevel_max; + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set C */ + table_entry = &bw_params->wm_table.entries[WM_C]; + vlevel = min(max(vlevel_req, 2), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set B */ + table_entry = &bw_params->wm_table.entries[WM_B]; + vlevel = min(max(vlevel_req, 1), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, + &context->bw_ctx.dml, pipes, pipe_cnt); + + /* WM Set A */ + table_entry = &bw_params->wm_table.entries[WM_A]; + vlevel = min(vlevel_req, vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, + &context->bw_ctx.dml, pipes, pipe_cnt); + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); + pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + + if (dc->config.forced_clocks) { + pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; + pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + } + if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; + if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + 
pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; + + pipe_idx++; + } + + dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); +} + static struct resource_funcs dcn301_res_pool_funcs = { .destroy = dcn301_destroy_resource_pool, .link_enc_create = dcn301_link_encoder_create, .panel_cntl_create = dcn301_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, - .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, + .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 22f3f643ed1b..346dcd87dc10 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -191,6 +191,8 @@ struct hubp_funcs { bool (*hubp_in_blank)(struct hubp *hubp); void (*hubp_soft_reset)(struct hubp *hubp, bool reset); + void (*hubp_set_flip_int)(struct hubp *hubp); + }; #endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index c57dc9ae81f2..a2681fe875ed 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -5216,10 +5216,10 @@ static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, for (j = 0; j < dep_sclk_table->count; j++) { valid_entry = false; for (k = 0; k < watermarks->num_wm_sets; k++) { - if (dep_sclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz && - dep_sclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz && - dep_mclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz && - dep_mclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz) { + if (dep_sclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz / 10 && + dep_sclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz / 10 && + dep_mclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz / 10 && + dep_mclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz / 10) { valid_entry = true; table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id; break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 29c99642d22d..22b636e2b89b 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -1505,6 +1505,48 @@ static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr, return 0; } +static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t pcie_gen = 0, pcie_width = 0; + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + int i; + + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) + pcie_gen = 3; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + pcie_gen = 2; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) + pcie_gen = 1; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) + pcie_gen = 0; + + if 
(adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + pcie_width = 6; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) + pcie_width = 5; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) + pcie_width = 4; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) + pcie_width = 3; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) + pcie_width = 2; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) + pcie_width = 1; + + for (i = 0; i < NUM_LINK_LEVELS; i++) { + if (pp_table->PcieGenSpeed[i] > pcie_gen) + pp_table->PcieGenSpeed[i] = pcie_gen; + + if (pp_table->PcieLaneCount[i] > pcie_width) + pp_table->PcieLaneCount[i] = pcie_width; + } + + return 0; +} + static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr) { int result = -1; @@ -2556,6 +2598,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to initialize Link Level!", return result); + result = vega10_override_pcie_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to override pcie parameters!", + return result); + result = vega10_populate_all_graphic_levels(hwmgr); PP_ASSERT_WITH_CODE(!result, "Failed to initialize Graphics Level!", @@ -2922,6 +2969,7 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) return 0; } + static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable) { struct vega10_hwmgr *data = hwmgr->backend; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index c0753029a8e2..43e01d880f7c 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -481,6 +481,67 @@ static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state) dpm_state->hard_max_level = 0xffff; } +static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + struct vega12_hwmgr *data = + (struct vega12_hwmgr *)(hwmgr->backend); + uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg; + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + int i; + int ret; + + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) + pcie_gen = 3; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + pcie_gen = 2; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) + pcie_gen = 1; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) + pcie_gen = 0; + + if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + pcie_width = 6; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) + pcie_width = 5; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) + pcie_width = 4; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) + pcie_width = 3; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) + pcie_width = 2; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) + pcie_width = 1; + + /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1 + * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 + * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32 + */ + for (i = 0; i < NUM_LINK_LEVELS; i++) { + pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen : + pp_table->PcieGenSpeed[i]; + pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ?
pcie_width : + pp_table->PcieLaneCount[i]; + + if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg != + pp_table->PcieLaneCount[i]) { + smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg; + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, + NULL); + PP_ASSERT_WITH_CODE(!ret, + "[OverridePcieParameters] Attempt to override pcie params failed!", + return ret); + } + + /* update the pptable */ + pp_table->PcieGenSpeed[i] = pcie_gen_arg; + pp_table->PcieLaneCount[i] = pcie_width_arg; + } + + return 0; +} + static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, PPCLK_e clk_id, uint32_t *num_of_levels) { @@ -968,6 +1029,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "Failed to enable all smu features!", return result); + result = vega12_override_pcie_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to override pcie parameters!", + return result); + tmp_result = vega12_power_control_set_level(hwmgr); PP_ASSERT_WITH_CODE(!tmp_result, "Failed to power control set level!", diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 87811b005b85..f19964c69a00 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -831,7 +831,9 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); - uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg; + uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg; + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + int i; int ret; if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) @@ -860,17 +862,27 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32 */ - smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width; - ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, - NULL); - PP_ASSERT_WITH_CODE(!ret, - "[OverridePcieParameters] Attempt to override pcie params failed!", - return ret); + for (i = 0; i < NUM_LINK_LEVELS; i++) { + pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen : + pp_table->PcieGenSpeed[i]; + pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ?
pcie_width : + pp_table->PcieLaneCount[i]; + + if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg != + pp_table->PcieLaneCount[i]) { + smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg; + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, + NULL); + PP_ASSERT_WITH_CODE(!ret, + "[OverridePcieParameters] Attempt to override pcie params failed!", + return ret); + } - data->pcie_parameters_override = true; - data->pcie_gen_level1 = pcie_gen; - data->pcie_width_level1 = pcie_width; + /* update the pptable */ + pp_table->PcieGenSpeed[i] = pcie_gen_arg; + pp_table->PcieLaneCount[i] = pcie_width_arg; + } return 0; } @@ -3319,9 +3331,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, data->od8_settings.od8_settings_array; OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table); - struct phm_ppt_v3_information *pptable_information = - (struct phm_ppt_v3_information *)hwmgr->pptable; - PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable; + PPTable_t *pptable = &(data->smc_state_table.pp_table); struct pp_clock_levels_with_latency clocks; struct vega20_single_dpm_table *fclk_dpm_table = &(data->dpm_table.fclk_table); @@ -3420,13 +3430,9 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, current_lane_width = vega20_get_current_pcie_link_width_level(hwmgr); for (i = 0; i < NUM_LINK_LEVELS; i++) { - if (i == 1 && data->pcie_parameters_override) { - gen_speed = data->pcie_gen_level1; - lane_width = data->pcie_width_level1; - } else { - gen_speed = pptable->PcieGenSpeed[i]; - lane_width = pptable->PcieLaneCount[i]; - } + gen_speed = pptable->PcieGenSpeed[i]; + lane_width = pptable->PcieLaneCount[i]; + size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, (gen_speed == 0) ? "2.5GT/s," : (gen_speed == 1) ? 
"5.0GT/s," : diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index b9a616737c0e..f6baa2046124 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -2048,7 +2048,7 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper) if (shadow) vfree(shadow); - else + else if (fb_helper->buffer) drm_client_buffer_vunmap(fb_helper->buffer); drm_client_framebuffer_delete(fb_helper->buffer); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 9825c378dfa6..6d625cee7a6a 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -357,13 +357,14 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, if (--shmem->vmap_use_count > 0) return; - if (obj->import_attach) + if (obj->import_attach) { dma_buf_vunmap(obj->import_attach->dmabuf, map); - else + } else { vunmap(shmem->vaddr); + drm_gem_shmem_put_pages(shmem); + } shmem->vaddr = NULL; - drm_gem_shmem_put_pages(shmem); } /* @@ -525,14 +526,28 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); loff_t num_pages = obj->size >> PAGE_SHIFT; + vm_fault_t ret; struct page *page; + pgoff_t page_offset; - if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages)) - return VM_FAULT_SIGBUS; + /* We don't use vmf->pgoff since that has the fake offset */ + page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - page = shmem->pages[vmf->pgoff]; + mutex_lock(&shmem->pages_lock); - return vmf_insert_page(vma, vmf->address, page); + if (page_offset >= num_pages || + WARN_ON_ONCE(!shmem->pages) || + shmem->madv < 0) { + ret = VM_FAULT_SIGBUS; + } else { + page = shmem->pages[page_offset]; + + ret = vmf_insert_page(vma, vmf->address, page); + } + + mutex_unlock(&shmem->pages_lock); + + return ret; } static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) @@ -581,9 +596,6 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) struct drm_gem_shmem_object *shmem; int ret; - /* Remove the fake offset */ - vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); - if (obj->import_attach) { /* Drop the reference drm_gem_mmap_obj() acquired.*/ drm_gem_object_put(obj); diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index f86448ab1fe0..dc734d4828a1 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -99,6 +99,8 @@ static int compat_drm_version(struct file *file, unsigned int cmd, if (copy_from_user(&v32, (void __user *)arg, sizeof(v32))) return -EFAULT; + memset(&v, 0, sizeof(v)); + v = (struct drm_version) { .name_len = v32.name_len, .name = compat_ptr(v32.name), @@ -137,6 +139,9 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) return -EFAULT; + + memset(&uq, 0, sizeof(uq)); + uq = (struct drm_unique){ .unique_len = uq32.unique_len, .unique = compat_ptr(uq32.unique), @@ -265,6 +270,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd, if (copy_from_user(&c32, argp, sizeof(c32))) return -EFAULT; + memset(&client, 0, sizeof(client)); + client.idx = c32.idx; err = drm_ioctl_kernel(file, drm_getclient, &client, 0); @@ -852,6 +859,8 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; + memset(&req, 0, 
sizeof(req)); + req.request.type = req32.request.type; req.request.sequence = req32.request.sequence; req.request.signal = req32.request.signal; @@ -889,6 +898,8 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd, struct drm_mode_fb_cmd2 req64; int err; + memset(&req64, 0, sizeof(req64)); + if (copy_from_user(&req64, argp, offsetof(drm_mode_fb_cmd232_t, modifier))) return -EFAULT; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index fb1b1d096975..9cf555d6842b 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -713,9 +713,12 @@ static int engine_setup_common(struct intel_engine_cs *engine) goto err_status; } + err = intel_engine_init_cmd_parser(engine); + if (err) + goto err_cmd_parser; + intel_engine_init_active(engine, ENGINE_PHYSICAL); intel_engine_init_execlists(engine); - intel_engine_init_cmd_parser(engine); intel_engine_init__pm(engine); intel_engine_init_retire(engine); @@ -732,6 +735,8 @@ static int engine_setup_common(struct intel_engine_cs *engine) return 0; +err_cmd_parser: + intel_breadcrumbs_free(engine->breadcrumbs); err_status: cleanup_status_page(engine); return err; diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index ced9a96d7c34..5f86f5b2caf6 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -940,7 +940,7 @@ static void fini_hash_table(struct intel_engine_cs *engine) * struct intel_engine_cs based on whether the platform requires software * command parsing. */ -void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) +int intel_engine_init_cmd_parser(struct intel_engine_cs *engine) { const struct drm_i915_cmd_table *cmd_tables; int cmd_table_count; @@ -948,7 +948,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) && engine->class == COPY_ENGINE_CLASS)) - return; + return 0; switch (engine->class) { case RENDER_CLASS: @@ -1013,19 +1013,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) break; default: MISSING_CASE(engine->class); - return; + goto out; } if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) { drm_err(&engine->i915->drm, "%s: command descriptions are not sorted\n", engine->name); - return; + goto out; } if (!validate_regs_sorted(engine)) { drm_err(&engine->i915->drm, "%s: registers are not sorted\n", engine->name); - return; + goto out; } ret = init_hash_table(engine, cmd_tables, cmd_table_count); @@ -1033,10 +1033,17 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) drm_err(&engine->i915->drm, "%s: initialised failed!\n", engine->name); fini_hash_table(engine); - return; + goto out; } engine->flags |= I915_ENGINE_USING_CMD_PARSER; + +out: + if (intel_engine_requires_cmd_parser(engine) && + !intel_engine_using_cmd_parser(engine)) + return -EINVAL; + + return 0; } /** diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 26d69d06aa6d..cb62ddba2035 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1952,7 +1952,7 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type); /* i915_cmd_parser.c */ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); -void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); +int intel_engine_init_cmd_parser(struct intel_engine_cs *engine); void 
intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); int intel_engine_cmd_parser(struct intel_engine_cs *engine, struct i915_vma *batch, diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 112ba5f2ce90..e62ad69606f6 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -603,7 +603,6 @@ static int append_oa_sample(struct i915_perf_stream *stream, { int report_size = stream->oa_buffer.format_size; struct drm_i915_perf_record_header header; - u32 sample_flags = stream->sample_flags; header.type = DRM_I915_PERF_RECORD_SAMPLE; header.pad = 0; @@ -617,10 +616,8 @@ static int append_oa_sample(struct i915_perf_stream *stream, return -EFAULT; buf += sizeof(header); - if (sample_flags & SAMPLE_OA_REPORT) { - if (copy_to_user(buf, report, report_size)) - return -EFAULT; - } + if (copy_to_user(buf, report, report_size)) + return -EFAULT; (*offset) += header.size; @@ -2682,7 +2679,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream) stream->perf->ops.oa_enable(stream); - if (stream->periodic) + if (stream->sample_flags & SAMPLE_OA_REPORT) hrtimer_start(&stream->poll_check_timer, ns_to_ktime(stream->poll_oa_period), HRTIMER_MODE_REL_PINNED); @@ -2745,7 +2742,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream) { stream->perf->ops.oa_disable(stream); - if (stream->periodic) + if (stream->sample_flags & SAMPLE_OA_REPORT) hrtimer_cancel(&stream->poll_check_timer); } @@ -3028,7 +3025,7 @@ static ssize_t i915_perf_read(struct file *file, * disabled stream as an error. In particular it might otherwise lead * to a deadlock for blocking file descriptors... */ - if (!stream->enabled) + if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT)) return -EIO; if (!(file->f_flags & O_NONBLOCK)) { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7146cd0f3256..aaf1f0045b16 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3316,7 +3316,18 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000) #define ILK_FBCQ_DIS (1 << 22) -#define ILK_PABSTRETCH_DIS (1 << 21) +#define ILK_PABSTRETCH_DIS REG_BIT(21) +#define ILK_SABSTRETCH_DIS REG_BIT(20) +#define IVB_PRI_STRETCH_MAX_MASK REG_GENMASK(21, 20) +#define IVB_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 0) +#define IVB_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 1) +#define IVB_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 2) +#define IVB_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 3) +#define IVB_SPR_STRETCH_MAX_MASK REG_GENMASK(19, 18) +#define IVB_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 0) +#define IVB_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 1) +#define IVB_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 2) +#define IVB_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 3) /* @@ -8039,6 +8050,16 @@ enum { #define _CHICKEN_PIPESL_1_A 0x420b0 #define _CHICKEN_PIPESL_1_B 0x420b4 +#define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27) +#define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0) +#define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1) +#define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2) +#define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3) +#define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25) +#define 
HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0) +#define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1) +#define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2) +#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3) #define HSW_FBCQ_DIS (1 << 22) #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0c3e63f27c29..97b57acc02e2 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7245,11 +7245,16 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); - /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ for_each_pipe(dev_priv, pipe) { + /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) | BDW_DPRS_MASK_VBLANK_SRD); + + /* Undocumented but fixes async flip + VT-d corruption */ + if (intel_vtd_active()) + intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), + HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1); } /* WaVSRefCountFullforceMissDisable:bdw */ @@ -7285,11 +7290,20 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) { + enum pipe pipe; + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A), intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) | HSW_FBCQ_DIS); + for_each_pipe(dev_priv, pipe) { + /* Undocumented but fixes async flip + VT-d corruption */ + if (intel_vtd_active()) + intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), + HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1); + } + /* This is required by WaCatErrorRejectionIssue:hsw */ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 42c5d3246cfc..453d8b4c5763 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -482,6 +482,16 @@ static int meson_probe_remote(struct platform_device *pdev, return count; } +static void meson_drv_shutdown(struct platform_device *pdev) +{ + struct meson_drm *priv = dev_get_drvdata(&pdev->dev); + struct drm_device *drm = priv->drm; + + DRM_DEBUG_DRIVER("\n"); + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); +} + static int meson_drv_probe(struct platform_device *pdev) { struct component_match *match = NULL; @@ -553,6 +563,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = { static struct platform_driver meson_drm_platform_driver = { .probe = meson_drv_probe, + .shutdown = meson_drv_shutdown, .driver = { .name = "meson-drm", .of_match_table = dt_match, diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2375711877cf..f2720a006199 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -551,12 +551,17 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) if (!ttm_dma) return; + if (!ttm_dma->pages) { + NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); + return; + } /* Don't waste time looping if the object 
is coherent */ if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->num_pages; ++i) { + i = 0; + while (i < ttm_dma->num_pages) { struct page *p = ttm_dma->pages[i]; size_t num_pages = 1; @@ -582,12 +587,17 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) if (!ttm_dma) return; + if (!ttm_dma->pages) { + NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); + return; + } /* Don't waste time looping if the object is coherent */ if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->num_pages; ++i) { + i = 0; + while (i < ttm_dma->num_pages) { struct page *p = ttm_dma->pages[i]; size_t num_pages = 1; diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 8e11612f5fe1..b31d750c425a 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -2149,11 +2149,12 @@ static int dsi_vc_send_short(struct dsi_data *dsi, int vc, const struct mipi_dsi_msg *msg) { struct mipi_dsi_packet pkt; + int ret; u32 r; - r = mipi_dsi_create_packet(&pkt, msg); - if (r < 0) - return r; + ret = mipi_dsi_create_packet(&pkt, msg); + if (ret < 0) + return ret; WARN_ON(!dsi_bus_is_locked(dsi)); diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 012bce0cdb65..10738e04c09b 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -328,6 +328,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc, head.id = i; head.flags = 0; + head.surface_id = 0; oldcount = qdev->monitors_config->count; if (crtc->state->active) { struct drm_display_mode *mode = &crtc->mode; diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 0fcfc952d5e9..b372455e2729 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -321,7 +321,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, int type, struct qxl_release **release, struct qxl_bo **rbo) { - struct qxl_bo *bo; + struct qxl_bo *bo, *free_bo = NULL; int idr_ret; int ret = 0; union qxl_release_info *info; @@ -347,7 +347,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, mutex_lock(&qdev->release_mutex); if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) { - qxl_bo_unref(&qdev->current_release_bo[cur_idx]); + free_bo = qdev->current_release_bo[cur_idx]; qdev->current_release_bo_offset[cur_idx] = 0; qdev->current_release_bo[cur_idx] = NULL; } @@ -355,6 +355,10 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]); if (ret) { mutex_unlock(&qdev->release_mutex); + if (free_bo) { + qxl_bo_unpin(free_bo); + qxl_bo_unref(&free_bo); + } qxl_release_free(qdev, *release); return ret; } @@ -370,6 +374,10 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, *rbo = bo; mutex_unlock(&qdev->release_mutex); + if (free_bo) { + qxl_bo_unpin(free_bo); + qxl_bo_unref(&free_bo); + } ret = qxl_release_list_add(*release, bo); qxl_bo_unref(&bo); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index f09989bdce98..3effc8c71494 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -574,6 +574,8 @@ struct radeon_gem { struct list_head objects; }; +extern const struct drm_gem_object_funcs radeon_gem_object_funcs; + int radeon_gem_init(struct radeon_device *rdev); void radeon_gem_fini(struct radeon_device *rdev); int 
radeon_gem_object_create(struct radeon_device *rdev, unsigned long size, diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 941826923247..db14a82a2e4b 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -43,7 +43,7 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); int radeon_gem_prime_pin(struct drm_gem_object *obj); void radeon_gem_prime_unpin(struct drm_gem_object *obj); -static const struct drm_gem_object_funcs radeon_gem_object_funcs; +const struct drm_gem_object_funcs radeon_gem_object_funcs; static void radeon_gem_object_free(struct drm_gem_object *gobj) { @@ -227,7 +227,7 @@ static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r) return r; } -static const struct drm_gem_object_funcs radeon_gem_object_funcs = { +const struct drm_gem_object_funcs radeon_gem_object_funcs = { .free = radeon_gem_object_free, .open = radeon_gem_object_open, .close = radeon_gem_object_close, diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index ab29eb9e8667..42a87948e28c 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -56,6 +56,8 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, if (ret) return ERR_PTR(ret); + bo->tbo.base.funcs = &radeon_gem_object_funcs; + mutex_lock(&rdev->gem.mutex); list_add_tail(&bo->list, &rdev->gem.objects); mutex_unlock(&rdev->gem.mutex); diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index 33f65f4626e5..23866a54e3f9 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -83,6 +83,7 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)"); struct gm12u320_device { struct drm_device dev; + struct device *dmadev; struct drm_simple_display_pipe pipe; struct drm_connector conn; unsigned char *cmd_buf; @@ -601,6 +602,22 @@ static const uint64_t gm12u320_pipe_modifiers[] = { DRM_FORMAT_MOD_INVALID }; +/* + * FIXME: Dma-buf sharing requires DMA support by the importing device. + * This function is a workaround to make USB devices work as well. + * See todo.rst for how to fix the issue in the dma-buf framework. 
+ */ +static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct gm12u320_device *gm12u320 = to_gm12u320(dev); + + if (!gm12u320->dmadev) + return ERR_PTR(-ENODEV); + + return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev); +} + DEFINE_DRM_GEM_FOPS(gm12u320_fops); static const struct drm_driver gm12u320_drm_driver = { @@ -614,6 +631,7 @@ static const struct drm_driver gm12u320_drm_driver = { .fops = &gm12u320_fops, DRM_GEM_SHMEM_DRIVER_OPS, + .gem_prime_import = gm12u320_gem_prime_import, }; static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = { @@ -640,15 +658,18 @@ static int gm12u320_usb_probe(struct usb_interface *interface, struct gm12u320_device, dev); if (IS_ERR(gm12u320)) return PTR_ERR(gm12u320); + dev = &gm12u320->dev; + + gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); + if (!gm12u320->dmadev) + drm_warn(dev, "buffer sharing not supported"); /* not an error */ INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work); mutex_init(&gm12u320->fb_update.lock); - dev = &gm12u320->dev; - ret = drmm_mode_config_init(dev); if (ret) - return ret; + goto err_put_device; dev->mode_config.min_width = GM12U320_USER_WIDTH; dev->mode_config.max_width = GM12U320_USER_WIDTH; @@ -658,15 +679,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface, ret = gm12u320_usb_alloc(gm12u320); if (ret) - return ret; + goto err_put_device; ret = gm12u320_set_ecomode(gm12u320); if (ret) - return ret; + goto err_put_device; ret = gm12u320_conn_init(gm12u320); if (ret) - return ret; + goto err_put_device; ret = drm_simple_display_pipe_init(&gm12u320->dev, &gm12u320->pipe, @@ -676,24 +697,31 @@ static int gm12u320_usb_probe(struct usb_interface *interface, gm12u320_pipe_modifiers, &gm12u320->conn); if (ret) - return ret; + goto err_put_device; drm_mode_config_reset(dev); usb_set_intfdata(interface, dev); ret = drm_dev_register(dev, 0); if (ret) - return ret; + goto err_put_device; drm_fbdev_generic_setup(dev, 0); return 0; + +err_put_device: + put_device(gm12u320->dmadev); + return ret; } static void gm12u320_usb_disconnect(struct usb_interface *interface) { struct drm_device *dev = usb_get_intfdata(interface); + struct gm12u320_device *gm12u320 = to_gm12u320(dev); + put_device(gm12u320->dmadev); + gm12u320->dmadev = NULL; drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 20a25660b35b..101a68dc615b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -136,7 +136,8 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource_manager *man; - dma_resv_assert_held(bo->base.resv); + if (!bo->deleted) + dma_resv_assert_held(bo->base.resv); if (bo->pin_count) { ttm_bo_del_from_lru(bo); @@ -508,8 +509,11 @@ static void ttm_bo_release(struct kref *kref) * Make pinned bos immediately available to * shrinkers, now that they are queued for * destruction. + * + * FIXME: QXL is triggering this. Can be removed when the + * driver is fixed. 
*/ - if (WARN_ON(bo->pin_count)) { + if (WARN_ON_ONCE(bo->pin_count)) { bo->pin_count = 0; ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); } diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index 6e27cb1bf48b..4eb6efb8b8c0 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -268,13 +268,13 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool, /* Remove a pool_type from the global shrinker list and free all pages */ static void ttm_pool_type_fini(struct ttm_pool_type *pt) { - struct page *p, *tmp; + struct page *p; mutex_lock(&shrinker_lock); list_del(&pt->shrinker_list); mutex_unlock(&shrinker_lock); - list_for_each_entry_safe(p, tmp, &pt->pages, lru) + while ((p = ttm_pool_type_take(pt))) ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); } diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 9269092697d8..5703277c6f52 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -32,6 +32,22 @@ static int udl_usb_resume(struct usb_interface *interface) return drm_mode_config_helper_resume(dev); } +/* + * FIXME: Dma-buf sharing requires DMA support by the importing device. + * This function is a workaround to make USB devices work as well. + * See todo.rst for how to fix the issue in the dma-buf framework. + */ +static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct udl_device *udl = to_udl(dev); + + if (!udl->dmadev) + return ERR_PTR(-ENODEV); + + return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev); +} + DEFINE_DRM_GEM_FOPS(udl_driver_fops); static const struct drm_driver driver = { @@ -40,6 +56,7 @@ static const struct drm_driver driver = { /* GEM hooks */ .fops = &udl_driver_fops, DRM_GEM_SHMEM_DRIVER_OPS, + .gem_prime_import = udl_driver_gem_prime_import, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index 875e73551ae9..cc16a13316e4 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -50,6 +50,7 @@ struct urb_list { struct udl_device { struct drm_device drm; struct device *dev; + struct device *dmadev; struct drm_simple_display_pipe display_pipe; diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 0e2a376cb075..853f147036f6 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -315,6 +315,10 @@ int udl_init(struct udl_device *udl) DRM_DEBUG("\n"); + udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); + if (!udl->dmadev) + drm_warn(dev, "buffer sharing not supported"); /* not an error */ + mutex_init(&udl->gem_lock); if (!udl_parse_vendor_descriptor(udl)) { @@ -343,12 +347,18 @@ int udl_init(struct udl_device *udl) err: if (udl->urbs.count) udl_free_urb_list(dev); + put_device(udl->dmadev); DRM_ERROR("%d\n", ret); return ret; } int udl_drop_usb(struct drm_device *dev) { + struct udl_device *udl = to_udl(dev); + udl_free_urb_list(dev); + put_device(udl->dmadev); + udl->dmadev = NULL; + return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index 0a900afc66ff..45c9c6a7f1d6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -500,8 +500,6 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, vm_fault_t ret; pgoff_t fault_page_size; bool write = vmf->flags & FAULT_FLAG_WRITE; - bool 
is_cow_mapping = - (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; switch (pe_size) { case PE_SIZE_PMD: @@ -518,7 +516,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, } /* Always do write dirty-tracking and COW on PTE level. */ - if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping)) + if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping(vma->vm_flags))) return VM_FAULT_FALLBACK; ret = ttm_bo_vm_reserve(bo, vmf); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index 3c03b1746661..cb9975889e2f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c @@ -49,7 +49,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_ops = &vmw_vm_ops; /* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */ - if ((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE) + if (!is_cow_mapping(vma->vm_flags)) vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP; return 0; diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index bf7d22fa4be2..e0667c4b3c08 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -266,6 +266,8 @@ config ADI_AXI_ADC select IIO_BUFFER select IIO_BUFFER_HW_CONSUMER select IIO_BUFFER_DMAENGINE + depends on HAS_IOMEM + depends on OF help Say yes here to build support for Analog Devices Generic AXI ADC IP core. The IP core is used for interfacing with @@ -923,6 +925,7 @@ config STM32_ADC_CORE depends on ARCH_STM32 || COMPILE_TEST depends on OF depends on REGULATOR + depends on HAS_IOMEM select IIO_BUFFER select MFD_STM32_TIMERS select IIO_STM32_TIMER_TRIGGER diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c index 6f9a3e2d5533..7b5212ba5501 100644 --- a/drivers/iio/adc/ab8500-gpadc.c +++ b/drivers/iio/adc/ab8500-gpadc.c @@ -918,7 +918,7 @@ static int ab8500_gpadc_read_raw(struct iio_dev *indio_dev, return processed; /* Return millivolt or milliamps or millicentigrades */ - *val = processed * 1000; + *val = processed; return IIO_VAL_INT; } diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c index 5d597e5050f6..1b4b3203e428 100644 --- a/drivers/iio/adc/ad7949.c +++ b/drivers/iio/adc/ad7949.c @@ -91,7 +91,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val, int ret; int i; int bits_per_word = ad7949_adc->resolution; - int mask = GENMASK(ad7949_adc->resolution, 0); + int mask = GENMASK(ad7949_adc->resolution - 1, 0); struct spi_message msg; struct spi_transfer tx[] = { { diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c index 05ff948372b3..07b1a99381d9 100644 --- a/drivers/iio/adc/qcom-spmi-vadc.c +++ b/drivers/iio/adc/qcom-spmi-vadc.c @@ -597,7 +597,7 @@ static const struct vadc_channels vadc_chans[] = { VADC_CHAN_NO_SCALE(P_MUX16_1_3, 1) VADC_CHAN_NO_SCALE(LR_MUX1_BAT_THERM, 0) - VADC_CHAN_NO_SCALE(LR_MUX2_BAT_ID, 0) + VADC_CHAN_VOLT(LR_MUX2_BAT_ID, 0, SCALE_DEFAULT) VADC_CHAN_NO_SCALE(LR_MUX3_XO_THERM, 0) VADC_CHAN_NO_SCALE(LR_MUX4_AMUX_THM1, 0) VADC_CHAN_NO_SCALE(LR_MUX5_AMUX_THM2, 0) diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index dfa31a23500f..ac90be03332a 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ b/drivers/iio/gyro/mpu3050-core.c @@ -551,6 +551,8 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p) MPU3050_FIFO_R, &fifo_values[offset], toread); + if (ret) + goto out_trigger_unlock; dev_dbg(mpu3050->dev, "%04x %04x %04x %04x %04x\n", diff --git 
a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c index 52f605114ef7..d62705448ae2 100644 --- a/drivers/iio/humidity/hid-sensor-humidity.c +++ b/drivers/iio/humidity/hid-sensor-humidity.c @@ -15,7 +15,10 @@ struct hid_humidity_state { struct hid_sensor_common common_attributes; struct hid_sensor_hub_attribute_info humidity_attr; - s32 humidity_data; + struct { + s32 humidity_data; + u64 timestamp __aligned(8); + } scan; int scale_pre_decml; int scale_post_decml; int scale_precision; @@ -125,9 +128,8 @@ static int humidity_proc_event(struct hid_sensor_hub_device *hsdev, struct hid_humidity_state *humid_st = iio_priv(indio_dev); if (atomic_read(&humid_st->common_attributes.data_ready)) - iio_push_to_buffers_with_timestamp(indio_dev, - &humid_st->humidity_data, - iio_get_time_ns(indio_dev)); + iio_push_to_buffers_with_timestamp(indio_dev, &humid_st->scan, + iio_get_time_ns(indio_dev)); return 0; } @@ -142,7 +144,7 @@ static int humidity_capture_sample(struct hid_sensor_hub_device *hsdev, switch (usage_id) { case HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY: - humid_st->humidity_data = *(s32 *)raw_data; + humid_st->scan.humidity_data = *(s32 *)raw_data; return 0; default: diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c index 54af2ed664f6..785a4ce606d8 100644 --- a/drivers/iio/imu/adis16400.c +++ b/drivers/iio/imu/adis16400.c @@ -462,8 +462,7 @@ static int adis16400_initial_setup(struct iio_dev *indio_dev) if (ret) goto err_ret; - ret = sscanf(indio_dev->name, "adis%u\n", &device_id); - if (ret != 1) { + if (sscanf(indio_dev->name, "adis%u\n", &device_id) != 1) { ret = -EINVAL; goto err_ret; } diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c index 330cf359e0b8..e9e00ce0c6d4 100644 --- a/drivers/iio/light/hid-sensor-prox.c +++ b/drivers/iio/light/hid-sensor-prox.c @@ -23,6 +23,9 @@ struct prox_state { struct hid_sensor_common common_attributes; struct hid_sensor_hub_attribute_info prox_attr; u32 human_presence; + int scale_pre_decml; + int scale_post_decml; + int scale_precision; }; /* Channel definitions */ @@ -93,8 +96,9 @@ static int prox_read_raw(struct iio_dev *indio_dev, ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SCALE: - *val = prox_state->prox_attr.units; - ret_type = IIO_VAL_INT; + *val = prox_state->scale_pre_decml; + *val2 = prox_state->scale_post_decml; + ret_type = prox_state->scale_precision; break; case IIO_CHAN_INFO_OFFSET: *val = hid_sensor_convert_exponent( @@ -234,6 +238,11 @@ static int prox_parse_report(struct platform_device *pdev, HID_USAGE_SENSOR_HUMAN_PRESENCE, &st->common_attributes.sensitivity); + st->scale_precision = hid_sensor_format_scale( + hsdev->usage, + &st->prox_attr, + &st->scale_pre_decml, &st->scale_post_decml); + return ret; } diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c index 81688f1b932f..da9a247097fa 100644 --- a/drivers/iio/temperature/hid-sensor-temperature.c +++ b/drivers/iio/temperature/hid-sensor-temperature.c @@ -15,7 +15,10 @@ struct temperature_state { struct hid_sensor_common common_attributes; struct hid_sensor_hub_attribute_info temperature_attr; - s32 temperature_data; + struct { + s32 temperature_data; + u64 timestamp __aligned(8); + } scan; int scale_pre_decml; int scale_post_decml; int scale_precision; @@ -32,7 +35,7 @@ static const struct iio_chan_spec temperature_channels[] = { BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_HYSTERESIS), }, - IIO_CHAN_SOFT_TIMESTAMP(3), 
+ IIO_CHAN_SOFT_TIMESTAMP(1), }; /* Adjust channel real bits based on report descriptor */ @@ -123,9 +126,8 @@ static int temperature_proc_event(struct hid_sensor_hub_device *hsdev, struct temperature_state *temp_st = iio_priv(indio_dev); if (atomic_read(&temp_st->common_attributes.data_ready)) - iio_push_to_buffers_with_timestamp(indio_dev, - &temp_st->temperature_data, - iio_get_time_ns(indio_dev)); + iio_push_to_buffers_with_timestamp(indio_dev, &temp_st->scan, + iio_get_time_ns(indio_dev)); return 0; } @@ -140,7 +142,7 @@ static int temperature_capture_sample(struct hid_sensor_hub_device *hsdev, switch (usage_id) { case HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE: - temp_st->temperature_data = *(s32 *)raw_data; + temp_st->scan.temperature_data = *(s32 *)raw_data; return 0; default: return -EINVAL; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 8769e7aa097f..81903749d241 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -3610,13 +3610,13 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id) ep->com.local_addr.ss_family == AF_INET) { err = cxgb4_remove_server_filter( ep->com.dev->rdev.lldi.ports[0], ep->stid, - ep->com.dev->rdev.lldi.rxq_ids[0], 0); + ep->com.dev->rdev.lldi.rxq_ids[0], false); } else { struct sockaddr_in6 *sin6; c4iw_init_wr_wait(ep->com.wr_waitp); err = cxgb4_remove_server( ep->com.dev->rdev.lldi.ports[0], ep->stid, - ep->com.dev->rdev.lldi.rxq_ids[0], 0); + ep->com.dev->rdev.lldi.rxq_ids[0], true); if (err) goto done; err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index c3934abeb260..ce26f97b2ca2 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1194,8 +1194,10 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type) upper_32_bits(dma)); roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG, (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S); - roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0); + + /* Make sure to write tail first and then head */ roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0); + roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0); } else { roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma); roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG, diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index de3c2fc6f361..07b8350929cd 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -1116,7 +1116,7 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din, case MLX5_CMD_OP_CREATE_MKEY: MLX5_SET(destroy_mkey_in, din, opcode, MLX5_CMD_OP_DESTROY_MKEY); - MLX5_SET(destroy_mkey_in, in, mkey_index, *obj_id); + MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id); break; case MLX5_CMD_OP_CREATE_CQ: MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index ec4b3f6a8222..f5a52a6fae43 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1078,7 +1078,7 @@ static int _create_kernel_qp(struct mlx5_ib_dev *dev, qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); MLX5_SET(qpc, qpc, uar_page, uar_index); - MLX5_SET(qpc, qpc, ts_format, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); /* Set 
"fast registration enabled" for all kernel QPs */ @@ -1188,7 +1188,8 @@ static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq) } return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING; } - return MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT; + return fr_supported ? MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING : + MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT; } static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq) @@ -1206,7 +1207,8 @@ static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq) } return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING; } - return MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT; + return fr_supported ? MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING : + MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT; } static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq, @@ -1217,7 +1219,8 @@ static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq, MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING || MLX5_CAP_ROCE(dev->mdev, qp_ts_format) == MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME; - int ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT; + int ts_format = fr_supported ? MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING : + MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT; if (recv_cq && recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) @@ -1930,6 +1933,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (qp->flags & IB_QP_CREATE_MANAGED_RECV) MLX5_SET(qpc, qpc, cd_slave_receive, 1); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ); MLX5_SET(qpc, qpc, no_sq, 1); MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); @@ -4873,6 +4877,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, struct mlx5_ib_dev *dev; int has_net_offloads; __be64 *rq_pas0; + int ts_format; void *in; void *rqc; void *wq; @@ -4881,6 +4886,10 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, dev = to_mdev(pd->device); + ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq)); + if (ts_format < 0) + return ts_format; + inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas; in = kvzalloc(inlen, GFP_KERNEL); if (!in) @@ -4890,6 +4899,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE); + MLX5_SET(rqc, rqc, ts_format, ts_format); MLX5_SET(rqc, rqc, user_index, rwq->user_index); MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 430dc6975004..da8963a9f044 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -26,7 +26,6 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Joystick device interfaces"); -MODULE_SUPPORTED_DEVICE("input/js"); MODULE_LICENSE("GPL"); #define JOYDEV_MINOR_BASE 0 diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 9126efcbaf2c..321f5906e6ed 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -2714,7 +2714,6 @@ static int __init early_amd_iommu_init(void) struct acpi_table_header *ivrs_base; int i, remap_cache_sz, ret; acpi_status status; - u32 pci_id; if (!amd_iommu_detected) return -ENODEV; @@ -2804,16 +2803,6 @@ static int __init early_amd_iommu_init(void) if (ret) goto out; - /* Disable IOMMU if there's Stoney Ridge graphics */ - for (i = 0; i < 32; i++) { - pci_id = read_pci_config(0, 
i, 0, 0); - if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) { - pr_info("Disable IOMMU on Stoney Ridge\n"); - amd_iommu_disabled = true; - break; - } - } - /* Disable any previously enabled IOMMUs */ if (!is_kdump_kernel() || amd_iommu_disabled) disable_iommus(); @@ -2880,6 +2869,7 @@ static bool detect_ivrs(void) { struct acpi_table_header *ivrs_base; acpi_status status; + int i; status = acpi_get_table("IVRS", 0, &ivrs_base); if (status == AE_NOT_FOUND) @@ -2892,6 +2882,17 @@ static bool detect_ivrs(void) acpi_put_table(ivrs_base); + /* Don't use IOMMU if there is Stoney Ridge graphics */ + for (i = 0; i < 32; i++) { + u32 pci_id; + + pci_id = read_pci_config(0, i, 0, 0); + if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) { + pr_info("Disable IOMMU on Stoney Ridge\n"); + return false; + } + } + /* Make sure ACS will be enabled during PCI probe */ pci_request_acs(); @@ -2918,12 +2919,12 @@ static int __init state_next(void) } break; case IOMMU_IVRS_DETECTED: - ret = early_amd_iommu_init(); - init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; - if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) { - pr_info("AMD IOMMU disabled\n"); + if (amd_iommu_disabled) { init_state = IOMMU_CMDLINE_DISABLED; ret = -EINVAL; + } else { + ret = early_amd_iommu_init(); + init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; } break; case IOMMU_ACPI_FINISHED: @@ -3001,8 +3002,11 @@ int __init amd_iommu_prepare(void) amd_iommu_irq_remap = true; ret = iommu_go_to_state(IOMMU_ACPI_FINISHED); - if (ret) + if (ret) { + amd_iommu_irq_remap = false; return ret; + } + return amd_iommu_irq_remap ? 0 : -ENODEV; } diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 97eb62f667d2..602aab98c079 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -849,12 +849,11 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev) smmu = tegra_smmu_find(args.np); if (smmu) { err = tegra_smmu_configure(smmu, dev, &args); - of_node_put(args.np); - if (err < 0) + if (err < 0) { + of_node_put(args.np); return ERR_PTR(err); - - break; + } } of_node_put(args.np); diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index e74fa206240a..15536e321df5 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -8,7 +8,6 @@ config IRQCHIP config ARM_GIC bool select IRQ_DOMAIN_HIERARCHY - select GENERIC_IRQ_MULTI_HANDLER select GENERIC_IRQ_EFFECTIVE_AFF_MASK config ARM_GIC_PM @@ -33,7 +32,6 @@ config GIC_NON_BANKED config ARM_GIC_V3 bool - select GENERIC_IRQ_MULTI_HANDLER select IRQ_DOMAIN_HIERARCHY select PARTITION_PERCPU select GENERIC_IRQ_EFFECTIVE_AFF_MASK @@ -64,7 +62,6 @@ config ARM_NVIC config ARM_VIC bool select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER config ARM_VIC_NR int @@ -99,14 +96,12 @@ config ATMEL_AIC_IRQ bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config ATMEL_AIC5_IRQ bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config I8259 @@ -153,7 +148,6 @@ config DW_APB_ICTL config FARADAY_FTINTC010 bool select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config HISILICON_IRQ_MBIGEN @@ -169,7 +163,6 @@ config IMGPDC_IRQ config IXP4XX_IRQ bool select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config MADERA_IRQ @@ -186,7 +179,6 @@ config CLPS711X_IRQCHIP bool depends on ARCH_CLPS711X select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ 
default y @@ -205,7 +197,6 @@ config OMAP_IRQCHIP config ORION_IRQCHIP bool select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER config PIC32_EVIC bool diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c index 7a7222d4c19c..b938d1d04d96 100644 --- a/drivers/irqchip/irq-ingenic-tcu.c +++ b/drivers/irqchip/irq-ingenic-tcu.c @@ -179,5 +179,6 @@ err_free_tcu: } IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init); +IRQCHIP_DECLARE(jz4760_tcu_irq, "ingenic,jz4760-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init); diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index b61a8901ef72..ea36bb00be80 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -155,6 +155,7 @@ static int __init intc_2chip_of_init(struct device_node *node, { return ingenic_intc_of_init(node, 2); } +IRQCHIP_DECLARE(jz4760_intc, "ingenic,jz4760-intc", intc_2chip_of_init); IRQCHIP_DECLARE(jz4770_intc, "ingenic,jz4770-intc", intc_2chip_of_init); IRQCHIP_DECLARE(jz4775_intc, "ingenic,jz4775-intc", intc_2chip_of_init); IRQCHIP_DECLARE(jz4780_intc, "ingenic,jz4780-intc", intc_2chip_of_init); diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 7168778fbbe1..cb0afe897162 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -721,7 +721,7 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb) * Return value: CAPI result code */ -u16 capi20_get_manufacturer(u32 contr, u8 *buf) +u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]) { struct capi_ctr *ctr; u16 ret; @@ -787,7 +787,7 @@ u16 capi20_get_version(u32 contr, struct capi_version *verp) * Return value: CAPI result code */ -u16 capi20_get_serial(u32 contr, u8 *serial) +u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]) { struct capi_ctr *ctr; u16 ret; diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 7013a3f08429..14092152b786 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -2748,8 +2748,6 @@ hfcmulti_interrupt(int intno, void *dev_id) if (hc->ctype != HFC_TYPE_E1) ph_state_irq(hc, r_irq_statech); } - if (status & V_EXT_IRQSTA) - ; /* external IRQ */ if (status & V_LOST_STA) { /* LOST IRQ */ HFC_outb(hc, R_INC_RES_FIFO, V_RES_LOST); /* clear irq! 
*/ diff --git a/drivers/isdn/hardware/mISDN/iohelper.h b/drivers/isdn/hardware/mISDN/iohelper.h index b2b2bde8edba..c81f7aba4b57 100644 --- a/drivers/isdn/hardware/mISDN/iohelper.h +++ b/drivers/isdn/hardware/mISDN/iohelper.h @@ -13,14 +13,14 @@ #ifndef _IOHELPER_H #define _IOHELPER_H -typedef u8 (read_reg_func)(void *hwp, u8 offset); - typedef void (write_reg_func)(void *hwp, u8 offset, u8 value); - typedef void (fifo_func)(void *hwp, u8 offset, u8 *datap, int size); +typedef u8 (read_reg_func)(void *hwp, u8 offset); +typedef void (write_reg_func)(void *hwp, u8 offset, u8 value); +typedef void (fifo_func)(void *hwp, u8 offset, u8 *datap, int size); - struct _ioport { - u32 port; - u32 ale; - }; +struct _ioport { + u32 port; + u32 ale; +}; #define IOFUNC_IO(name, hws, ap) \ static u8 Read##name##_IO(void *p, u8 off) { \ diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index ec475087fbf9..39f841b42488 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -694,7 +694,7 @@ isac_release(struct isac_hw *isac) { if (isac->type & IPAC_TYPE_ISACX) WriteISAC(isac, ISACX_MASK, 0xff); - else + else if (isac->type != 0) WriteISAC(isac, ISAC_MASK, 0xff); if (isac->dch.timer.function != NULL) { del_timer(&isac->dch.timer); diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 038e72a84b33..4946ea14bf74 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -953,7 +953,6 @@ dsp_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct dsp *dsp = container_of(ch, struct dsp, ch); u_long flags; - int err = 0; if (debug & DEBUG_DSP_CTRL) printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd); @@ -998,7 +997,7 @@ dsp_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) module_put(THIS_MODULE); break; } - return err; + return 0; } static void diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index b57dcb834594..facbd886ee1c 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -200,7 +200,7 @@ The complete socket opening and closing is done by a thread. When the thread opened a socket, the hc->socket descriptor is set. Whenever a - packet shall be sent to the socket, the hc->socket must be checked wheter not + packet shall be sent to the socket, the hc->socket must be checked whether not NULL. To prevent change in socket descriptor, the hc->socket_lock must be used. To change the socket, a recall of l1oip_socket_open() will safely kill the socket process and create a new one. 
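The mISDNipac hunk above makes isac_release() skip the ISAC_MASK write when isac->type is still zero, that is, when the ISAC was never probed, so teardown no longer touches registers on hardware that was never set up. Below is a minimal stand-alone sketch of that guard pattern; the struct, the write_reg() helper, and the flag and register values are illustrative stand-ins invented for this note, not the kernel's real mISDN API.

#include <stdio.h>

#define IPAC_TYPE_ISACX 0x0010   /* illustrative type flag */
#define ISACX_MASK      0x60     /* illustrative register offsets */
#define ISAC_MASK       0x20

struct isac_hw_demo {
	unsigned int type;       /* 0 means the device was never probed */
};

static void write_reg(struct isac_hw_demo *isac, int off, unsigned char val)
{
	/* stands in for the driver's WriteISAC() register accessor */
	printf("write reg 0x%02x = 0x%02x\n", off, val);
}

static void isac_release_demo(struct isac_hw_demo *isac)
{
	if (isac->type & IPAC_TYPE_ISACX)
		write_reg(isac, ISACX_MASK, 0xff);
	else if (isac->type != 0)    /* the new guard: skip unprobed hardware */
		write_reg(isac, ISAC_MASK, 0xff);
}

int main(void)
{
	struct isac_hw_demo probed = { .type = 0x0001 };
	struct isac_hw_demo unprobed = { .type = 0 };

	isac_release_demo(&probed);    /* masks the interrupt register */
	isac_release_demo(&unprobed);  /* no register access at all */
	return 0;
}

With type left at zero, release degrades to a no-op on the register side instead of masking a device that was never initialised, which is the same effect the one-line `else if` in the patch achieves.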
diff --git a/drivers/leds/trigger/ledtrig-tty.c b/drivers/leds/trigger/ledtrig-tty.c index d2ab6ab080ac..f62db7e520b5 100644 --- a/drivers/leds/trigger/ledtrig-tty.c +++ b/drivers/leds/trigger/ledtrig-tty.c @@ -51,10 +51,8 @@ static ssize_t ttyname_store(struct device *dev, if (size) { ttyname = kmemdup_nul(buf, size, GFP_KERNEL); - if (!ttyname) { - ret = -ENOMEM; - goto out_unlock; - } + if (!ttyname) + return -ENOMEM; } else { ttyname = NULL; } @@ -69,7 +67,6 @@ static ssize_t ttyname_store(struct device *dev, trigger_data->ttyname = ttyname; -out_unlock: mutex_unlock(&trigger_data->mutex); if (ttyname && !running) @@ -125,12 +122,12 @@ static void ledtrig_tty_work(struct work_struct *work) if (icount.rx != trigger_data->rx || icount.tx != trigger_data->tx) { - led_set_brightness(trigger_data->led_cdev, LED_ON); + led_set_brightness_sync(trigger_data->led_cdev, LED_ON); trigger_data->rx = icount.rx; trigger_data->tx = icount.tx; } else { - led_set_brightness(trigger_data->led_cdev, LED_OFF); + led_set_brightness_sync(trigger_data->led_cdev, LED_OFF); } out: diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 71691f32959b..03e1fe4de53d 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -965,7 +965,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, q->limits.max_hw_sectors = UINT_MAX; q->limits.max_sectors = UINT_MAX; q->limits.max_segment_size = UINT_MAX; - q->limits.max_segments = BIO_MAX_PAGES; + q->limits.max_segments = BIO_MAX_VECS; blk_queue_max_discard_sectors(q, UINT_MAX); q->limits.discard_granularity = 512; q->limits.io_min = block_size; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 11c105ecd165..b0ab080f2567 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -229,7 +229,7 @@ static DEFINE_SPINLOCK(dm_crypt_clients_lock); static unsigned dm_crypt_clients_n = 0; static volatile unsigned long dm_crypt_pages_per_client; #define DM_CRYPT_MEMORY_PERCENT 2 -#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_PAGES * 16) +#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16) static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); @@ -3246,7 +3246,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size, ARCH_KMALLOC_MINALIGN); - ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc); + ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc); if (ret) { ti->error = "Cannot allocate page mempool"; goto bad; @@ -3373,9 +3373,9 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) /* * Check if bio is too large, split as needed. 
*/ - if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) && + if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) && (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size)) - dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT)); + dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT)); /* * Ensure that bio is a multiple of internal sector encryption size diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 844c4be11768..4f72b6f66c3a 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -1892,10 +1892,10 @@ restart: list_add(&g->lru, &wbl.list); wbl.size++; g->write_in_progress = true; - g->wc_list_contiguous = BIO_MAX_PAGES; + g->wc_list_contiguous = BIO_MAX_VECS; f = g; e->wc_list_contiguous++; - if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) { + if (unlikely(e->wc_list_contiguous == BIO_MAX_VECS)) { if (unlikely(wc->writeback_all)) { next_node = rb_next(&f->rb_node); if (likely(next_node)) diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 4337ae0e6af2..0b5dcaabbc15 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -735,7 +735,7 @@ static void r5l_submit_current_io(struct r5l_log *log) static struct bio *r5l_bio_alloc(struct r5l_log *log) { - struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, &log->bs); + struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, &log->bs); bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_dev(bio, log->rdev->bdev); @@ -1634,7 +1634,7 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log, { struct page *page; - ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, &log->bs); + ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_VECS, &log->bs); if (!ctx->ra_bio) return -ENOMEM; diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index e8c118e05dfd..3ddc2aa0b530 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -496,7 +496,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) { struct bio *prev = bio; - bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, + bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, &ppl_conf->bs); bio->bi_opf = prev->bi_opf; bio->bi_write_hint = prev->bi_write_hint; diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c index 8a8585261bb8..5f6e97a8d1c0 100644 --- a/drivers/media/firewire/firedtv-fw.c +++ b/drivers/media/firewire/firedtv-fw.c @@ -430,4 +430,3 @@ MODULE_AUTHOR("Andreas Monitzer <andy@monitzer.com>"); MODULE_AUTHOR("Ben Backx <ben@bbackx.com>"); MODULE_DESCRIPTION("FireDTV DVB Driver"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("FireDTV DVB"); diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c index 692b95a685d1..9a82e68303b6 100644 --- a/drivers/media/pci/cx18/cx18-alsa-main.c +++ b/drivers/media/pci/cx18/cx18-alsa-main.c @@ -41,7 +41,6 @@ MODULE_PARM_DESC(debug, MODULE_AUTHOR("Andy Walls"); MODULE_DESCRIPTION("CX23418 ALSA Interface"); -MODULE_SUPPORTED_DEVICE("CX23418 MPEG2 encoder"); MODULE_LICENSE("GPL"); MODULE_VERSION(CX18_VERSION); diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c index 95aed00f353b..f2440eb38820 100644 --- a/drivers/media/pci/cx18/cx18-driver.c +++ b/drivers/media/pci/cx18/cx18-driver.c @@ -232,7 +232,6 @@ MODULE_PARM_DESC(cx18_first_minor, MODULE_AUTHOR("Hans Verkuil"); MODULE_DESCRIPTION("CX23418 driver"); 
-MODULE_SUPPORTED_DEVICE("CX23418 MPEG2 encoder"); MODULE_LICENSE("GPL"); MODULE_VERSION(CX18_VERSION); diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c index 608fbaf0f659..8797d85a6b0a 100644 --- a/drivers/media/pci/cx25821/cx25821-alsa.c +++ b/drivers/media/pci/cx25821/cx25821-alsa.c @@ -104,7 +104,6 @@ MODULE_PARM_DESC(index, "Index value for cx25821 capture interface(s)."); MODULE_DESCRIPTION("ALSA driver module for cx25821 based capture cards"); MODULE_AUTHOR("Hiep Huynh"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("{{Conexant,25821}"); /* "{{Conexant,23881}," */ static unsigned int debug; module_param(debug, int, 0644); diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c index 95e0cbb1277d..c83814c052d3 100644 --- a/drivers/media/pci/cx88/cx88-alsa.c +++ b/drivers/media/pci/cx88/cx88-alsa.c @@ -98,7 +98,6 @@ MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(CX88_VERSION); -MODULE_SUPPORTED_DEVICE("{{Conexant,23881},{{Conexant,23882},{{Conexant,23883}"); static unsigned int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c index 39029b8e12c9..4cefdb2e4d40 100644 --- a/drivers/media/pci/ivtv/ivtv-alsa-main.c +++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c @@ -38,7 +38,6 @@ MODULE_PARM_DESC(index, MODULE_AUTHOR("Andy Walls"); MODULE_DESCRIPTION("CX23415/CX23416 ALSA Interface"); -MODULE_SUPPORTED_DEVICE("CX23415/CX23416 MPEG2 encoder"); MODULE_LICENSE("GPL"); MODULE_VERSION(IVTV_VERSION); diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index 6e448cb3b51c..942b8c266f50 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c @@ -275,9 +275,6 @@ MODULE_PARM_DESC(ivtv_first_minor, "Set device node number assigned to first car MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil"); MODULE_DESCRIPTION("CX23415/CX23416 driver"); -MODULE_SUPPORTED_DEVICE - ("CX23415/CX23416 MPEG2 encoder (WinTV PVR-150/250/350/500,\n" - "\t\t\tYuan MPG series and similar)"); MODULE_LICENSE("GPL"); MODULE_VERSION(IVTV_VERSION); diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c index 336df65c8af1..524912f20d9f 100644 --- a/drivers/media/pci/sta2x11/sta2x11_vip.c +++ b/drivers/media/pci/sta2x11/sta2x11_vip.c @@ -1269,6 +1269,5 @@ late_initcall_sync(sta2x11_vip_init_module); MODULE_DESCRIPTION("STA2X11 Video Input Port driver"); MODULE_AUTHOR("Wind River"); MODULE_LICENSE("GPL v2"); -MODULE_SUPPORTED_DEVICE("sta2x11 video input"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, sta2x11_vip_pci_tbl); diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c index 0514be6153df..e392b3efe363 100644 --- a/drivers/media/platform/atmel/atmel-isi.c +++ b/drivers/media/platform/atmel/atmel-isi.c @@ -1363,4 +1363,3 @@ module_platform_driver(atmel_isi_driver); MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>"); MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("video"); diff --git a/drivers/media/platform/atmel/atmel-sama5d2-isc.c b/drivers/media/platform/atmel/atmel-sama5d2-isc.c index 0b78fecfd2a8..61d9885765f4 100644 --- a/drivers/media/platform/atmel/atmel-sama5d2-isc.c +++ b/drivers/media/platform/atmel/atmel-sama5d2-isc.c @@ -330,4 +330,3 
@@ module_platform_driver(atmel_isc_driver); MODULE_AUTHOR("Songjun Wu"); MODULE_DESCRIPTION("The V4L2 driver for Atmel-ISC"); MODULE_LICENSE("GPL v2"); -MODULE_SUPPORTED_DEVICE("video"); diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c index 9c94a8b58b7c..baac86f3d153 100644 --- a/drivers/media/platform/marvell-ccic/cafe-driver.c +++ b/drivers/media/platform/marvell-ccic/cafe-driver.c @@ -44,10 +44,6 @@ MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>"); MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("Video"); - - - struct cafe_camera { int registered; /* Fully initialized? */ diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c index aa5f45749543..a60c302ef267 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c @@ -1288,7 +1288,6 @@ static void rkisp1_params_config_parameter(struct rkisp1_params *params) memset(hst.hist_weight, 0x01, sizeof(hst.hist_weight)); rkisp1_hst_config(params, &hst); rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP, - ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK | rkisp1_hst_params_default_config.mode); /* set the range */ diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c index bbcc2254fa2e..d9b4ad0abf0c 100644 --- a/drivers/media/platform/stm32/stm32-dcmi.c +++ b/drivers/media/platform/stm32/stm32-dcmi.c @@ -2149,4 +2149,3 @@ MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>"); MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>"); MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface driver"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("video"); diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index 86d5e3f4b1ff..06f74d410973 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -245,7 +245,7 @@ static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1, brx = &vsp1->bru->entity; else if (pipe->brx && !drm_pipe->force_brx_release) brx = pipe->brx; - else if (!vsp1->bru->entity.pipe) + else if (vsp1_feature(vsp1, VSP1_HAS_BRU) && !vsp1->bru->entity.pipe) brx = &vsp1->bru->entity; else brx = &vsp1->brs->entity; @@ -462,9 +462,9 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1, * make sure it is present in the pipeline's list of entities if it * wasn't already. 
*/ - if (!use_uif) { + if (drm_pipe->uif && !use_uif) { drm_pipe->uif->pipe = NULL; - } else if (!drm_pipe->uif->pipe) { + } else if (drm_pipe->uif && !drm_pipe->uif->pipe) { drm_pipe->uif->pipe = pipe; list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities); } diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile index 5bb2932ab119..ff6a8fc4c38e 100644 --- a/drivers/media/rc/Makefile +++ b/drivers/media/rc/Makefile @@ -5,6 +5,7 @@ obj-y += keymaps/ obj-$(CONFIG_RC_CORE) += rc-core.o rc-core-y := rc-main.o rc-ir-raw.o rc-core-$(CONFIG_LIRC) += lirc_dev.o +rc-core-$(CONFIG_MEDIA_CEC_RC) += keymaps/rc-cec.o rc-core-$(CONFIG_BPF_LIRC_MODE2) += bpf-lirc.o obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile index b252a1d2ebd6..cc6662e1903f 100644 --- a/drivers/media/rc/keymaps/Makefile +++ b/drivers/media/rc/keymaps/Makefile @@ -21,7 +21,6 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ rc-behold.o \ rc-behold-columbus.o \ rc-budget-ci-old.o \ - rc-cec.o \ rc-cinergy-1400.o \ rc-cinergy.o \ rc-d680-dmb.o \ diff --git a/drivers/media/rc/keymaps/rc-cec.c b/drivers/media/rc/keymaps/rc-cec.c index 3e3bd11092b4..068e22aeac8c 100644 --- a/drivers/media/rc/keymaps/rc-cec.c +++ b/drivers/media/rc/keymaps/rc-cec.c @@ -1,6 +1,16 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* Keytable for the CEC remote control * + * This keymap is unusual in that it can't be built as a module, + * instead it is registered directly in rc-main.c if CONFIG_MEDIA_CEC_RC + * is set. This is because it can be called from drm_dp_cec_set_edid() via + * cec_register_adapter() in an asynchronous context, and it is not + * allowed to use request_module() to load rc-cec.ko in that case. + * + * Since this keymap is only used if CONFIG_MEDIA_CEC_RC is set, we + * just compile this keymap into the rc-core module and never as a + * separate module. 
+ * * Copyright (c) 2015 by Kamil Debski */ @@ -152,7 +162,7 @@ static struct rc_map_table cec[] = { /* 0x77-0xff: Reserved */ }; -static struct rc_map_list cec_map = { +struct rc_map_list cec_map = { .map = { .scan = cec, .size = ARRAY_SIZE(cec), @@ -160,19 +170,3 @@ static struct rc_map_list cec_map = { .name = RC_MAP_CEC, } }; - -static int __init init_rc_map_cec(void) -{ - return rc_map_register(&cec_map); -} - -static void __exit exit_rc_map_cec(void) -{ - rc_map_unregister(&cec_map); -} - -module_init(init_rc_map_cec); -module_exit(exit_rc_map_cec); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Kamil Debski"); diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 1fd62c1dac76..8e88dc8ea6c5 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -2069,6 +2069,9 @@ static int __init rc_core_init(void) led_trigger_register_simple("rc-feedback", &led_feedback); rc_map_register(&empty_map); +#ifdef CONFIG_MEDIA_CEC_RC + rc_map_register(&cec_map); +#endif return 0; } @@ -2078,6 +2081,9 @@ static void __exit rc_core_exit(void) lirc_dev_exit(); class_unregister(&rc_class); led_trigger_unregister_simple(led_feedback); +#ifdef CONFIG_MEDIA_CEC_RC + rc_map_unregister(&cec_map); +#endif rc_map_unregister(&empty_map); } diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c index e488e7870f42..69d5c628a797 100644 --- a/drivers/media/usb/cpia2/cpia2_v4l.c +++ b/drivers/media/usb/cpia2/cpia2_v4l.c @@ -56,7 +56,6 @@ MODULE_PARM_DESC(flicker_mode, "Flicker frequency (0 (disabled), " __stringify(5 MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>"); MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras"); -MODULE_SUPPORTED_DEVICE("video"); MODULE_LICENSE("GPL"); MODULE_VERSION(CPIA_VERSION); diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c index 3a2df36ef1db..a19a46770c2b 100644 --- a/drivers/media/usb/tm6000/tm6000-alsa.c +++ b/drivers/media/usb/tm6000/tm6000-alsa.c @@ -51,7 +51,6 @@ MODULE_PARM_DESC(index, "Index value for tm6000x capture interface(s)."); MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards"); MODULE_AUTHOR("Mauro Carvalho Chehab"); MODULE_LICENSE("GPL v2"); -MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},{{Trident,tm6000},{{Trident,tm6010}"); static unsigned int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c index 293a460f4616..4990fa886d7a 100644 --- a/drivers/media/usb/tm6000/tm6000-dvb.c +++ b/drivers/media/usb/tm6000/tm6000-dvb.c @@ -23,8 +23,6 @@ MODULE_DESCRIPTION("DVB driver extension module for tm5600/6000/6010 based TV ca MODULE_AUTHOR("Mauro Carvalho Chehab"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},{{Trident, tm6000},{{Trident, tm6010}"); - static int debug; module_param(debug, int, 0644); diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c index b57e94fb1977..333bd305a4f9 100644 --- a/drivers/media/usb/usbtv/usbtv-audio.c +++ b/drivers/media/usb/usbtv/usbtv-audio.c @@ -371,7 +371,7 @@ void usbtv_audio_free(struct usbtv *usbtv) cancel_work_sync(&usbtv->snd_trigger); if (usbtv->snd && usbtv->udev) { - snd_card_free(usbtv->snd); + snd_card_free_when_closed(usbtv->snd); usbtv->snd = NULL; } } diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c index 
fe8ca945f367..b67cb0a3ab05 100644 --- a/drivers/mfd/intel_quark_i2c_gpio.c +++ b/drivers/mfd/intel_quark_i2c_gpio.c @@ -72,7 +72,8 @@ static const struct dmi_system_id dmi_platform_info[] = { {} }; -static const struct resource intel_quark_i2c_res[] = { +/* This is used as a place holder and will be modified at run-time */ +static struct resource intel_quark_i2c_res[] = { [INTEL_QUARK_IORES_MEM] = { .flags = IORESOURCE_MEM, }, @@ -85,7 +86,8 @@ static struct mfd_cell_acpi_match intel_quark_acpi_match_i2c = { .adr = MFD_ACPI_MATCH_I2C, }; -static const struct resource intel_quark_gpio_res[] = { +/* This is used as a place holder and will be modified at run-time */ +static struct resource intel_quark_gpio_res[] = { [INTEL_QUARK_IORES_MEM] = { .flags = IORESOURCE_MEM, }, diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index f12e909034ac..beda610e6b30 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -950,6 +950,11 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, if (!fl->cctx->rpdev) return -EPIPE; + if (handle == FASTRPC_INIT_HANDLE && !kernel) { + dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle); + return -EPERM; + } + ctx = fastrpc_context_alloc(fl, kernel, sc, args); if (IS_ERR(ctx)) return PTR_ERR(ctx); diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index df847a6d19f4..9f19bee7b592 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -992,7 +992,6 @@ void hl_debugfs_add_device(struct hl_device *hdev) struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; int count = ARRAY_SIZE(hl_debugfs_list); struct hl_debugfs_entry *entry; - struct dentry *ent; int i; dev_entry->hdev = hdev; @@ -1105,13 +1104,11 @@ void hl_debugfs_add_device(struct hl_device *hdev) &hl_security_violations_fops); for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) { - - ent = debugfs_create_file(hl_debugfs_list[i].name, + debugfs_create_file(hl_debugfs_list[i].name, 0444, dev_entry->root, entry, &hl_debugfs_fops); - entry->dent = ent; entry->info_ent = &hl_debugfs_list[i]; entry->dev_entry = dev_entry; } diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 15fcb5c31c4b..334009e83823 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -93,12 +93,19 @@ void hl_hpriv_put(struct hl_fpriv *hpriv) static int hl_device_release(struct inode *inode, struct file *filp) { struct hl_fpriv *hpriv = filp->private_data; + struct hl_device *hdev = hpriv->hdev; + + filp->private_data = NULL; + + if (!hdev) { + pr_crit("Closing FD after device was removed. 
Memory leak will occur and it is advised to reboot.\n"); + put_pid(hpriv->taskpid); + return 0; + } hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr); hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr); - filp->private_data = NULL; - hl_hpriv_put(hpriv); return 0; @@ -107,15 +114,20 @@ static int hl_device_release(struct inode *inode, struct file *filp) static int hl_device_release_ctrl(struct inode *inode, struct file *filp) { struct hl_fpriv *hpriv = filp->private_data; - struct hl_device *hdev; + struct hl_device *hdev = hpriv->hdev; filp->private_data = NULL; - hdev = hpriv->hdev; + if (!hdev) { + pr_err("Closing FD after device was removed\n"); + goto out; + } mutex_lock(&hdev->fpriv_list_lock); list_del(&hpriv->dev_node); mutex_unlock(&hdev->fpriv_list_lock); +out: + put_pid(hpriv->taskpid); kfree(hpriv); @@ -134,8 +146,14 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp) static int hl_mmap(struct file *filp, struct vm_area_struct *vma) { struct hl_fpriv *hpriv = filp->private_data; + struct hl_device *hdev = hpriv->hdev; unsigned long vm_pgoff; + if (!hdev) { + pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n"); + return -ENODEV; + } + vm_pgoff = vma->vm_pgoff; vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff); @@ -883,6 +901,16 @@ wait_for_processes: return -EBUSY; } +static void device_disable_open_processes(struct hl_device *hdev) +{ + struct hl_fpriv *hpriv; + + mutex_lock(&hdev->fpriv_list_lock); + list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) + hpriv->hdev = NULL; + mutex_unlock(&hdev->fpriv_list_lock); +} + /* * hl_device_reset - reset the device * @@ -1556,8 +1584,10 @@ void hl_device_fini(struct hl_device *hdev) HL_PENDING_RESET_LONG_SEC); rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC); - if (rc) + if (rc) { dev_crit(hdev->dev, "Failed to kill all open processes\n"); + device_disable_open_processes(hdev); + } hl_cb_pool_fini(hdev); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index d933878b24d1..4b321e4f8059 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -1465,12 +1465,10 @@ struct hl_info_list { /** * struct hl_debugfs_entry - debugfs dentry wrapper. - * @dent: base debugfs entry structure. * @info_ent: dentry realted ops. * @dev_entry: ASIC specific debugfs manager. */ struct hl_debugfs_entry { - struct dentry *dent; const struct hl_info_list *info_ent; struct hl_dbg_device_entry *dev_entry; }; diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index 03af61cecd37..083a30969c5f 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -5,6 +5,8 @@ * All Rights Reserved. */ +#define pr_fmt(fmt) "habanalabs: " fmt + #include <uapi/misc/habanalabs.h> #include "habanalabs.h" @@ -682,6 +684,11 @@ long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) const struct hl_ioctl_desc *ioctl = NULL; unsigned int nr = _IOC_NR(cmd); + if (!hdev) { + pr_err_ratelimited("Sending ioctl after device was removed! 
Please close FD\n"); + return -ENODEV; + } + if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) { ioctl = &hl_ioctls[nr]; } else { @@ -700,6 +707,11 @@ long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg) const struct hl_ioctl_desc *ioctl = NULL; unsigned int nr = _IOC_NR(cmd); + if (!hdev) { + pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n"); + return -ENODEV; + } + if (nr == _IOC_NR(HL_IOCTL_INFO)) { ioctl = &hl_ioctls_control[nr]; } else { diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c index de53fb5f978a..44a0522b59b9 100644 --- a/drivers/misc/habanalabs/common/irq.c +++ b/drivers/misc/habanalabs/common/irq.c @@ -47,7 +47,7 @@ inline u32 hl_cq_inc_ptr(u32 ptr) * Increment ptr by 1. If it reaches the number of event queue * entries, set it to 0 */ -inline u32 hl_eq_inc_ptr(u32 ptr) +static inline u32 hl_eq_inc_ptr(u32 ptr) { ptr++; if (unlikely(ptr == HL_EQ_LENGTH)) diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c index 71703a32350f..93c9e5f587e1 100644 --- a/drivers/misc/habanalabs/common/mmu/mmu.c +++ b/drivers/misc/habanalabs/common/mmu/mmu.c @@ -499,18 +499,32 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr, else /* HL_VA_RANGE_TYPE_DRAM */ p = &prop->dmmu; - /* - * find the correct hop shift field in hl_mmu_properties structure - * in order to determine the right maks for the page offset. - */ - hop0_shift_off = offsetof(struct hl_mmu_properties, hop0_shift); - p = (char *)p + hop0_shift_off; - p = (char *)p + ((hops->used_hops - 1) * sizeof(u64)); - hop_shift = *(u64 *)p; - offset_mask = (1ull << hop_shift) - 1; - addr_mask = ~(offset_mask); - *phys_addr = (tmp_phys_addr & addr_mask) | - (virt_addr & offset_mask); + if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) && + !is_power_of_2(prop->dram_page_size)) { + u32 bit; + u64 page_offset_mask; + u64 phys_addr_mask; + + bit = __ffs64((u64)prop->dram_page_size); + page_offset_mask = ((1ull << bit) - 1); + phys_addr_mask = ~page_offset_mask; + *phys_addr = (tmp_phys_addr & phys_addr_mask) | + (virt_addr & page_offset_mask); + } else { + /* + * find the correct hop shift field in hl_mmu_properties + * structure in order to determine the right masks + * for the page offset. 
+ */ + hop0_shift_off = offsetof(struct hl_mmu_properties, hop0_shift); + p = (char *)p + hop0_shift_off; + p = (char *)p + ((hops->used_hops - 1) * sizeof(u64)); + hop_shift = *(u64 *)p; + offset_mask = (1ull << hop_shift) - 1; + addr_mask = ~(offset_mask); + *phys_addr = (tmp_phys_addr & addr_mask) | + (virt_addr & offset_mask); + } } int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr) diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c index 9f350e05ef68..f1655f5ca016 100644 --- a/drivers/misc/pvpanic.c +++ b/drivers/misc/pvpanic.c @@ -140,6 +140,7 @@ static const struct of_device_id pvpanic_mmio_match[] = { { .compatible = "qemu,pvpanic-mmio", }, {} }; +MODULE_DEVICE_TABLE(of, pvpanic_mmio_match); static const struct acpi_device_id pvpanic_device_ids[] = { { "QEMU0001", 0 }, diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index c2e70b757dd1..4383c262b3f5 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -399,11 +399,6 @@ void mmc_remove_card(struct mmc_card *card) mmc_remove_card_debugfs(card); #endif - if (host->cqe_enabled) { - host->cqe_ops->cqe_disable(host); - host->cqe_enabled = false; - } - if (mmc_card_present(card)) { if (mmc_host_is_spi(card->host)) { pr_info("%s: SPI card removed\n", @@ -416,6 +411,10 @@ void mmc_remove_card(struct mmc_card *card) of_node_put(card->dev.of_node); } + if (host->cqe_enabled) { + host->cqe_ops->cqe_disable(host); + host->cqe_enabled = false; + } + put_device(&card->dev); } - diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 0d80b72ddde8..8741271d3971 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -423,10 +423,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) /* EXT_CSD value is in units of 10ms, but we store in ms */ card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME]; - /* Some eMMC set the value too low so set a minimum */ - if (card->ext_csd.part_time && - card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME) - card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME; /* Sleep / awake timeout in 100ns units */ if (sa_shift > 0 && sa_shift <= 0x17) @@ -616,6 +612,17 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) card->ext_csd.data_sector_size = 512; } + /* + * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined + * when accessing a specific field", so use it here if there is no + * PARTITION_SWITCH_TIME. 
+ */ + if (!card->ext_csd.part_time) + card->ext_csd.part_time = card->ext_csd.generic_cmd6_time; + /* Some eMMC set the value too low so set a minimum */ + if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME) + card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME; + /* eMMC v5 or later */ if (card->ext_csd.rev >= 7) { memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION], diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 17dbc81c221e..984d35055156 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1242,7 +1242,11 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) if (!cmd->busy_timeout) cmd->busy_timeout = 10 * MSEC_PER_SEC; - clks = (unsigned long long)cmd->busy_timeout * host->cclk; + if (cmd->busy_timeout > host->mmc->max_busy_timeout) + clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk; + else + clks = (unsigned long long)cmd->busy_timeout * host->cclk; + do_div(clks, MSEC_PER_SEC); writel_relaxed(clks, host->base + MMCIDATATIMER); } @@ -2151,6 +2155,10 @@ static int mmci_probe(struct amba_device *dev, mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; } + /* Variants with mandatory busy timeout in HW needs R1B responses. */ + if (variant->busy_timeout) + mmc->caps |= MMC_CAP_NEED_RSP_BUSY; + /* Prepare a CMD12 - needed to clear the DPSM on some variants. */ host->stop_abort.opcode = MMC_STOP_TRANSMISSION; host->stop_abort.arg = 0; diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c index eb7258293256..f9cfb084c029 100644 --- a/drivers/mtd/maps/sun_uflash.c +++ b/drivers/mtd/maps/sun_uflash.c @@ -32,7 +32,6 @@ MODULE_AUTHOR("Eric Brower <ebrower@usa.net>"); MODULE_DESCRIPTION("User-programmable flash device on Sun Microsystems boardsets"); -MODULE_SUPPORTED_DEVICE(DRIVER_NAME); MODULE_LICENSE("GPL"); MODULE_VERSION("2.1"); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index bcd31f458d1a..5895905b6aa1 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -579,6 +579,7 @@ config NETDEVSIM depends on DEBUG_FS depends on INET depends on IPV6 || IPV6=n + depends on PSAMPLE || PSAMPLE=n select NET_DEVLINK help This driver is a developer testing tool and software model that can diff --git a/drivers/net/Makefile b/drivers/net/Makefile index f4990ff32fa4..040e20b81317 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -45,7 +45,7 @@ obj-$(CONFIG_ARCNET) += arcnet/ obj-$(CONFIG_DEV_APPLETALK) += appletalk/ obj-$(CONFIG_CAIF) += caif/ obj-$(CONFIG_CAN) += can/ -obj-y += dsa/ +obj-$(CONFIG_NET_DSA) += dsa/ obj-$(CONFIG_ETHERNET) += ethernet/ obj-$(CONFIG_FDDI) += fddi/ obj-$(CONFIG_HIPPI) += hippi/ diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 8bdc44b7e09a..3c8f665c1558 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -127,6 +127,8 @@ static int com20020pci_probe(struct pci_dev *pdev, int i, ioaddr, ret; struct resource *r; + ret = 0; + if (pci_enable_device(pdev)) return -EIO; @@ -139,6 +141,8 @@ static int com20020pci_probe(struct pci_dev *pdev, priv->ci = ci; mm = &ci->misc_map; + pci_set_drvdata(pdev, priv); + INIT_LIST_HEAD(&priv->list_dev); if (mm->size) { @@ -161,7 +165,7 @@ static int com20020pci_probe(struct pci_dev *pdev, dev = alloc_arcdev(device); if (!dev) { ret = -ENOMEM; - goto out_port; + break; } dev->dev_port = i; @@ -178,7 +182,7 @@ static int com20020pci_probe(struct pci_dev *pdev, pr_err("IO region %xh-%xh already allocated\n", ioaddr, ioaddr + cm->size - 1); 
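/*
 * For reference, the shape of the rework in this hunk: the old out_port
 * label called com20020pci_remove(), which only tears down devices
 * already on priv->list_dev, so an arcdev whose setup had just failed
 * was leaked. The new flow frees that half-initialized arcdev itself
 * and runs the full remove path only for devices that made it onto the
 * list. Reduced control flow, with a hypothetical setup_one() standing
 * in for the per-device body:
 *
 *	for (i = 0; i < ci->devcount; i++) {
 *		dev = alloc_arcdev(device);
 *		if (!dev) {
 *			ret = -ENOMEM;
 *			break;
 *		}
 *		ret = setup_one(dev);
 *		if (ret) {
 *			free_arcdev(dev);
 *			break;
 *		}
 *		list_add(&card->list, &priv->list_dev);
 *	}
 *	if (ret)
 *		com20020pci_remove(pdev);
 *	return ret;
 */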
ret = -EBUSY; - goto out_port; + goto err_free_arcdev; } /* Dummy access after Reset @@ -216,18 +220,18 @@ static int com20020pci_probe(struct pci_dev *pdev, if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) { pr_err("IO address %Xh is empty!\n", ioaddr); ret = -EIO; - goto out_port; + goto err_free_arcdev; } if (com20020_check(dev)) { ret = -EIO; - goto out_port; + goto err_free_arcdev; } card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev), GFP_KERNEL); if (!card) { ret = -ENOMEM; - goto out_port; + goto err_free_arcdev; } card->index = i; @@ -253,29 +257,29 @@ static int com20020pci_probe(struct pci_dev *pdev, ret = devm_led_classdev_register(&pdev->dev, &card->tx_led); if (ret) - goto out_port; + goto err_free_arcdev; ret = devm_led_classdev_register(&pdev->dev, &card->recon_led); if (ret) - goto out_port; + goto err_free_arcdev; dev_set_drvdata(&dev->dev, card); ret = com20020_found(dev, IRQF_SHARED); if (ret) - goto out_port; + goto err_free_arcdev; devm_arcnet_led_init(dev, dev->dev_id, i); list_add(&card->list, &priv->list_dev); - } + continue; - pci_set_drvdata(pdev, priv); - - return 0; - -out_port: - com20020pci_remove(pdev); +err_free_arcdev: + free_arcdev(dev); + break; + } + if (ret) + com20020pci_remove(pdev); return ret; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 456315bef3a8..74cbbb22470b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3978,15 +3978,11 @@ static int bond_neigh_init(struct neighbour *n) rcu_read_lock(); slave = bond_first_slave_rcu(bond); - if (!slave) { - ret = -EINVAL; + if (!slave) goto out; - } slave_ops = slave->dev->netdev_ops; - if (!slave_ops->ndo_neigh_setup) { - ret = -EINVAL; + if (!slave_ops->ndo_neigh_setup) goto out; - } /* TODO: find another way [1] to implement this. 
* Passing a zeroed structure is fragile, diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 77d7c38bd435..c9d3604ae129 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -640,6 +640,15 @@ static void bond_opt_error_interpret(struct bonding *bond, netdev_err(bond->dev, "option %s: unable to set because the bond device is up\n", opt->name); break; + case -ENODEV: + if (val && val->string) { + p = strchr(val->string, '\n'); + if (p) + *p = '\0'; + netdev_err(bond->dev, "option %s: interface %s does not exist!\n", + opt->name, val->string); + } + break; default: break; } diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index ef474bae47a1..6958830cb983 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = { .brp_inc = 1, }; -static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv) -{ - if (priv->device) - pm_runtime_enable(priv->device); -} - -static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv) -{ - if (priv->device) - pm_runtime_disable(priv->device); -} - static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv) { if (priv->device) @@ -1335,7 +1323,6 @@ static const struct net_device_ops c_can_netdev_ops = { int register_c_can_dev(struct net_device *dev) { - struct c_can_priv *priv = netdev_priv(dev); int err; /* Deactivate pins to prevent DRA7 DCAN IP from being @@ -1345,28 +1332,19 @@ int register_c_can_dev(struct net_device *dev) */ pinctrl_pm_select_sleep_state(dev->dev.parent); - c_can_pm_runtime_enable(priv); - dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = &c_can_netdev_ops; err = register_candev(dev); - if (err) - c_can_pm_runtime_disable(priv); - else + if (!err) devm_can_led_init(dev); - return err; } EXPORT_SYMBOL_GPL(register_c_can_dev); void unregister_c_can_dev(struct net_device *dev) { - struct c_can_priv *priv = netdev_priv(dev); - unregister_candev(dev); - - c_can_pm_runtime_disable(priv); } EXPORT_SYMBOL_GPL(unregister_c_can_dev); diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index 406b4847e5dc..7efb60b50876 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct c_can_priv *priv = netdev_priv(dev); + void __iomem *addr = priv->base; unregister_c_can_dev(dev); free_c_can_dev(dev); - pci_iounmap(pdev, priv->base); + pci_iounmap(pdev, addr); pci_disable_msi(pdev); pci_clear_master(pdev); pci_release_regions(pdev); diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 05f425ceb53a..47b251b1607c 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -29,6 +29,7 @@ #include <linux/list.h> #include <linux/io.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/clk.h> #include <linux/of.h> #include <linux/of_device.h> @@ -386,6 +387,7 @@ static int c_can_plat_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); + pm_runtime_enable(priv->device); ret = register_c_can_dev(dev); if (ret) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", @@ -398,6 +400,7 @@ static int c_can_plat_probe(struct platform_device *pdev) return 0; 
exit_free_device: + pm_runtime_disable(priv->device); free_c_can_dev(dev); exit: dev_err(&pdev->dev, "probe failed\n"); @@ -408,9 +411,10 @@ exit: static int c_can_plat_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); + struct c_can_priv *priv = netdev_priv(dev); unregister_c_can_dev(dev); - + pm_runtime_disable(priv->device); free_c_can_dev(dev); return 0; diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index 867f6be31230..f5d79e6e5483 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -355,6 +355,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head) struct rtnl_link_ops can_link_ops __read_mostly = { .kind = "can", + .netns_refund = true, .maxtype = IFLA_CAN_MAX, .policy = can_policy, .setup = can_setup, diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 134c05757a3b..57f3635ad8d7 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -697,9 +697,15 @@ static int flexcan_chip_disable(struct flexcan_priv *priv) static int flexcan_chip_freeze(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate; + unsigned int timeout; + u32 bitrate = priv->can.bittiming.bitrate; u32 reg; + if (bitrate) + timeout = 1000 * 1000 * 10 / bitrate; + else + timeout = FLEXCAN_TIMEOUT_US / 10; + reg = priv->read(®s->mcr); reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT; priv->write(reg, ®s->mcr); diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 37e05010ca91..74d9899fc904 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -57,6 +57,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_KCAN_STAT_REG 0x418 #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 +#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430 /* Loopback control register */ @@ -949,6 +950,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0); + /* Disable Bus load reporting */ + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG); + tx_npackets = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) & diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 3752520a7d4b..0c8d36bc668c 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -501,9 +501,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) } while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { - if (rxfs & RXFS_RFL) - netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); - m_can_read_fifo(dev, rxfs); quota--; @@ -876,7 +873,7 @@ static int m_can_rx_peripheral(struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); - m_can_rx_handler(dev, 1); + m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT); m_can_enable_all_interrupts(cdev); diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index 0df1cdfa6835..1df3c4b54f03 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -21,7 +21,6 @@ MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); MODULE_DESCRIPTION("Socket-CAN driver for PEAK 
PCAN PCIe/M.2 FD family cards"); -MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards"); MODULE_LICENSE("GPL v2"); #define PCIEFD_DRV_NAME "peak_pciefd" diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c index 6f88c9932920..4ab91759a5c6 100644 --- a/drivers/net/can/sja1000/ems_pci.c +++ b/drivers/net/can/sja1000/ems_pci.c @@ -21,7 +21,6 @@ MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>"); MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards"); -MODULE_SUPPORTED_DEVICE("EMS CPC-PCI/PCIe/104P CAN card"); MODULE_LICENSE("GPL v2"); #define EMS_PCI_V1_MAX_CHAN 2 diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c index 770304eaef95..e21b169c14c0 100644 --- a/drivers/net/can/sja1000/ems_pcmcia.c +++ b/drivers/net/can/sja1000/ems_pcmcia.c @@ -21,7 +21,6 @@ MODULE_AUTHOR("Markus Plessing <plessing@ems-wuensche.com>"); MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-CARD cards"); -MODULE_SUPPORTED_DEVICE("EMS CPC-CARD CAN card"); MODULE_LICENSE("GPL v2"); #define EMS_PCMCIA_MAX_CHAN 2 diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c index 0ea6b711c07b..95fe9ee1ce32 100644 --- a/drivers/net/can/sja1000/kvaser_pci.c +++ b/drivers/net/can/sja1000/kvaser_pci.c @@ -33,7 +33,6 @@ MODULE_AUTHOR("Per Dalen <per.dalen@cnw.se>"); MODULE_DESCRIPTION("Socket-CAN driver for KVASER PCAN PCI cards"); -MODULE_SUPPORTED_DEVICE("KVASER PCAN PCI CAN card"); MODULE_LICENSE("GPL v2"); #define MAX_NO_OF_CHANNELS 4 /* max no of channels on a single card */ diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 4713921bd511..84eac8cb8686 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -24,8 +24,6 @@ MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards"); -MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards"); -MODULE_SUPPORTED_DEVICE("PEAK PCAN miniPCIe/cPCI PC/104+ PCI/104e CAN Cards"); MODULE_LICENSE("GPL v2"); #define DRV_NAME "peak_pci" diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c index cf951a783078..131a084c3535 100644 --- a/drivers/net/can/sja1000/peak_pcmcia.c +++ b/drivers/net/can/sja1000/peak_pcmcia.c @@ -22,7 +22,6 @@ MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards"); MODULE_LICENSE("GPL v2"); -MODULE_SUPPORTED_DEVICE("PEAK PCAN-PC Card"); /* PEAK-System PCMCIA driver name */ #define PCC_NAME "peak_pcmcia" diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 85679588ef73..5de1ebb0c6f0 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -25,18 +25,6 @@ MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>"); MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with " "the SJA1000 chips"); -MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, " - "Adlink PCI-7841/cPCI-7841 SE, " - "Marathon CAN-bus-PCI, " - "Marathon CAN-bus-PCIe, " - "TEWS TECHNOLOGIES TPMC810, " - "esd CAN-PCI/CPCI/PCI104/200, " - "esd CAN-PCI/PMC/266, " - "esd CAN-PCIe/2000, " - "Connect Tech Inc. 
CANpro/104-Plus Opto (CRG001), " - "IXXAT PC-I 04/PCI, " - "ELCUS CAN-200-PCI, " - "ASEM DUAL CAN-RAW") MODULE_LICENSE("GPL v2"); #define PLX_PCI_MAX_CHAN 2 diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index c1e5d5b570b6..538f4d9adb91 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -73,6 +73,7 @@ config CAN_KVASER_USB - Kvaser Memorator Pro 5xHS - Kvaser USBcan Light 4xHS - Kvaser USBcan Pro 2xHS v2 + - Kvaser USBcan Pro 4xHS - Kvaser USBcan Pro 5xHS - Kvaser U100 - Kvaser U100P diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 2b7efd296758..4e97da8434ab 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -86,8 +86,9 @@ #define USB_U100_PRODUCT_ID 273 #define USB_U100P_PRODUCT_ID 274 #define USB_U100S_PRODUCT_ID 275 +#define USB_USBCAN_PRO_4HS_PRODUCT_ID 276 #define USB_HYDRA_PRODUCT_ID_END \ - USB_U100S_PRODUCT_ID + USB_USBCAN_PRO_4HS_PRODUCT_ID static inline bool kvaser_is_leaf(const struct usb_device_id *id) { @@ -193,6 +194,7 @@ static const struct usb_device_id kvaser_usb_table[] = { { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) }, { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) }, { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) }, { } }; MODULE_DEVICE_TABLE(usb, kvaser_usb_table); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index e6c1e5d33924..e393e8457d77 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -18,8 +18,6 @@ #include "pcan_usb_core.h" -MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter"); - /* PCAN-USB Endpoints */ #define PCAN_USB_EP_CMDOUT 1 #define PCAN_USB_EP_CMDIN (PCAN_USB_EP_CMDOUT | USB_DIR_IN) diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index f347ecc79aef..bae078579c0d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -16,9 +16,6 @@ #include "pcan_usb_core.h" #include "pcan_usb_pro.h" -MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB FD adapter"); -MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter"); - #define PCAN_USBPROFD_CHANNEL_COUNT 2 #define PCAN_USBFD_CHANNEL_COUNT 1 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 275087c39602..18fa180ecc81 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -17,8 +17,6 @@ #include "pcan_usb_core.h" #include "pcan_usb_pro.h" -MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter"); - #define PCAN_USBPRO_CHANNEL_COUNT 2 /* PCAN-USB Pro adapter internal clock (MHz) */ diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 3af373e90806..a5f1aa911fe2 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -1,12 +1,12 @@ # SPDX-License-Identifier: GPL-2.0-only menu "Distributed Switch Architecture drivers" - depends on HAVE_NET_DSA + depends on NET_DSA source "drivers/net/dsa/b53/Kconfig" config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" - depends on HAS_IOMEM && NET_DSA + depends on HAS_IOMEM select NET_DSA_TAG_BRCM select FIXED_PHY select BCM7XXX_PHY @@ -18,7 +18,6 @@ config NET_DSA_BCM_SF2 config NET_DSA_LOOP tristate "DSA mock-up Ethernet switch chip 
support" - depends on NET_DSA select FIXED_PHY help This enables support for a fake mock-up switch chip which @@ -28,7 +27,7 @@ source "drivers/net/dsa/hirschmann/Kconfig" config NET_DSA_LANTIQ_GSWIP tristate "Lantiq / Intel GSWIP" - depends on HAS_IOMEM && NET_DSA + depends on HAS_IOMEM select NET_DSA_TAG_GSWIP help This enables support for the Lantiq / Intel GSWIP 2.1 found in @@ -36,7 +35,6 @@ config NET_DSA_LANTIQ_GSWIP config NET_DSA_MT7530 tristate "MediaTek MT753x and MT7621 Ethernet switch support" - depends on NET_DSA select NET_DSA_TAG_MTK help This enables support for the MediaTek MT7530, MT7531, and MT7621 @@ -44,7 +42,6 @@ config NET_DSA_MT7530 config NET_DSA_MV88E6060 tristate "Marvell 88E6060 ethernet switch chip support" - depends on NET_DSA select NET_DSA_TAG_TRAILER help This enables support for the Marvell 88E6060 ethernet switch @@ -64,7 +61,6 @@ source "drivers/net/dsa/xrs700x/Kconfig" config NET_DSA_QCA8K tristate "Qualcomm Atheros QCA8K Ethernet switch family support" - depends on NET_DSA select NET_DSA_TAG_QCA select REGMAP help @@ -73,7 +69,6 @@ config NET_DSA_QCA8K config NET_DSA_REALTEK_SMI tristate "Realtek SMI Ethernet switch family support" - depends on NET_DSA select NET_DSA_TAG_RTL4_A select FIXED_PHY select IRQ_DOMAIN @@ -93,7 +88,7 @@ config NET_DSA_SMSC_LAN9303 config NET_DSA_SMSC_LAN9303_I2C tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in I2C managed mode" - depends on NET_DSA && I2C + depends on I2C select NET_DSA_SMSC_LAN9303 select REGMAP_I2C help @@ -102,7 +97,6 @@ config NET_DSA_SMSC_LAN9303_I2C config NET_DSA_SMSC_LAN9303_MDIO tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode" - depends on NET_DSA select NET_DSA_SMSC_LAN9303 help Enable access functions if the SMSC/Microchip LAN9303 is configured @@ -110,7 +104,6 @@ config NET_DSA_SMSC_LAN9303_MDIO config NET_DSA_VITESSE_VSC73XX tristate - depends on NET_DSA select FIXED_PHY select VITESSE_PHY select GPIOLIB @@ -120,7 +113,6 @@ config NET_DSA_VITESSE_VSC73XX config NET_DSA_VITESSE_VSC73XX_SPI tristate "Vitesse VSC7385/7388/7395/7398 SPI mode support" - depends on NET_DSA depends on SPI select NET_DSA_VITESSE_VSC73XX help @@ -129,7 +121,6 @@ config NET_DSA_VITESSE_VSC73XX_SPI config NET_DSA_VITESSE_VSC73XX_PLATFORM tristate "Vitesse VSC7385/7388/7395/7398 Platform mode support" - depends on NET_DSA depends on HAS_IOMEM select NET_DSA_VITESSE_VSC73XX help diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig index f9891a81c808..90b525160b71 100644 --- a/drivers/net/dsa/b53/Kconfig +++ b/drivers/net/dsa/b53/Kconfig @@ -3,6 +3,7 @@ menuconfig B53 tristate "Broadcom BCM53xx managed switch support" depends on NET_DSA select NET_DSA_TAG_BRCM + select NET_DSA_TAG_BRCM_LEGACY select NET_DSA_TAG_BRCM_PREPEND help This driver adds support for Broadcom managed switch chips. 
It supports diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index a162499bcafc..3ca6b394dd5f 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -349,7 +349,7 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); } -static void b53_enable_vlan(struct b53_device *dev, bool enable, +static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, bool enable_filtering) { u8 mgmt, vc0, vc1, vc4 = 0, vc5; @@ -431,6 +431,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable, b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); dev->vlan_enabled = enable; + + dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n", + port, enable, enable_filtering); } static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) @@ -743,7 +746,7 @@ int b53_configure_vlan(struct dsa_switch *ds) b53_do_vlan_op(dev, VTA_CMD_CLEAR); } - b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering); + b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering); b53_for_each_port(dev, i) b53_write16(dev, B53_VLAN_PAGE, @@ -1105,13 +1108,6 @@ static int b53_setup(struct dsa_switch *ds) b53_disable_port(ds, port); } - /* Let DSA handle the case were multiple bridges span the same switch - * device and different VLAN awareness settings are requested, which - * would be breaking filtering semantics for any of the other bridge - * devices. (not hardware supported) - */ - ds->vlan_filtering_is_global = true; - return b53_setup_devlink_resources(ds); } @@ -1429,7 +1425,7 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, { struct b53_device *dev = ds->priv; - b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering); + b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering); return 0; } @@ -1454,7 +1450,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port, if (vlan->vid >= dev->num_vlans) return -ERANGE; - b53_enable_vlan(dev, true, ds->vlan_filtering); + b53_enable_vlan(dev, port, true, ds->vlan_filtering); return 0; } @@ -2052,15 +2048,17 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port, { struct b53_device *dev = ds->priv; - /* Older models (5325, 5365) support a different tag format that we do - * not support in net/dsa/tag_brcm.c yet. - */ - if (is5325(dev) || is5365(dev) || - !b53_can_enable_brcm_tags(ds, port, mprot)) { + if (!b53_can_enable_brcm_tags(ds, port, mprot)) { dev->tag_protocol = DSA_TAG_PROTO_NONE; goto out; } + /* Older models require a different 6 byte tag */ + if (is5325(dev) || is5365(dev) || is63xx(dev)) { + dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY; + goto out; + } + /* Broadcom BCM58xx chips have a flow accelerator on Port 8 * which requires us to use the prepended Broadcom tag type */ @@ -2664,6 +2662,13 @@ struct b53_device *b53_switch_alloc(struct device *base, ds->ops = &b53_switch_ops; ds->untag_bridge_pvid = true; dev->vlan_enabled = true; + /* Let DSA handle the case were multiple bridges span the same switch + * device and different VLAN awareness settings are requested, which + * would be breaking filtering semantics for any of the other bridge + * devices. 
(not hardware supported) + */ + ds->vlan_filtering_is_global = true; + mutex_init(&dev->reg_mutex); mutex_init(&dev->stats_mutex); diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index c628d0980c0b..82680e083cc2 100644 --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c @@ -16,6 +16,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#include <linux/bits.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> @@ -228,11 +229,65 @@ static const struct b53_io_ops b53_mmap_ops = { .write64 = b53_mmap_write64, }; +static int b53_mmap_probe_of(struct platform_device *pdev, + struct b53_platform_data **ppdata) +{ + struct device_node *np = pdev->dev.of_node; + struct device_node *of_ports, *of_port; + struct device *dev = &pdev->dev; + struct b53_platform_data *pdata; + void __iomem *mem; + + mem = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mem)) + return PTR_ERR(mem); + + pdata = devm_kzalloc(dev, sizeof(struct b53_platform_data), + GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + pdata->regs = mem; + pdata->chip_id = BCM63XX_DEVICE_ID; + pdata->big_endian = of_property_read_bool(np, "big-endian"); + + of_ports = of_get_child_by_name(np, "ports"); + if (!of_ports) { + dev_err(dev, "no ports child node found\n"); + return -EINVAL; + } + + for_each_available_child_of_node(of_ports, of_port) { + u32 reg; + + if (of_property_read_u32(of_port, "reg", ®)) + continue; + + if (reg < B53_CPU_PORT) + pdata->enabled_ports |= BIT(reg); + } + + of_node_put(of_ports); + *ppdata = pdata; + + return 0; +} + static int b53_mmap_probe(struct platform_device *pdev) { + struct device_node *np = pdev->dev.of_node; struct b53_platform_data *pdata = pdev->dev.platform_data; struct b53_mmap_priv *priv; struct b53_device *dev; + int ret; + + if (!pdata && np) { + ret = b53_mmap_probe_of(pdev, &pdata); + if (ret) { + dev_err(&pdev->dev, "OF probe error\n"); + return ret; + } + } if (!pdata) return -EINVAL; diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 8419bb7f4505..82700a5714c1 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -186,11 +186,7 @@ static inline int is531x5(struct b53_device *dev) static inline int is63xx(struct b53_device *dev) { -#ifdef CONFIG_BCM63XX return dev->chip_id == BCM63XX_DEVICE_ID; -#else - return 0; -#endif } static inline int is5301x(struct b53_device *dev) diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c index 7abec8dab8ba..413158275db8 100644 --- a/drivers/net/dsa/b53/b53_spi.c +++ b/drivers/net/dsa/b53/b53_spi.c @@ -324,9 +324,22 @@ static int b53_spi_remove(struct spi_device *spi) return 0; } +static const struct of_device_id b53_spi_of_match[] = { + { .compatible = "brcm,bcm5325" }, + { .compatible = "brcm,bcm5365" }, + { .compatible = "brcm,bcm5395" }, + { .compatible = "brcm,bcm5397" }, + { .compatible = "brcm,bcm5398" }, + { .compatible = "brcm,bcm53115" }, + { .compatible = "brcm,bcm53125" }, + { .compatible = "brcm,bcm53128" }, + { /* sentinel */ } +}; + static struct spi_driver b53_spi_driver = { .driver = { .name = "b53-switch", + .of_match_table = b53_spi_of_match, }, .probe = b53_spi_probe, .remove = b53_spi_remove, diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index f277df922fcd..9150038b60cb 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -32,6 +32,36 @@ #include "b53/b53_priv.h" #include "b53/b53_regs.h" +static u16 
bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port) +{ + switch (priv->type) { + case BCM4908_DEVICE_ID: + switch (port) { + case 7: + return REG_RGMII_11_CNTRL; + default: + break; + } + break; + default: + switch (port) { + case 0: + return REG_RGMII_0_CNTRL; + case 1: + return REG_RGMII_1_CNTRL; + case 2: + return REG_RGMII_2_CNTRL; + default: + break; + } + } + + WARN_ONCE(1, "Unsupported port %d\n", port); + + /* RO fallback reg */ + return REG_SWITCH_STATUS; +} + /* Return the number of active ports, not counting the IMP (CPU) port */ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds) { @@ -114,7 +144,10 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) /* Force link status for IMP port */ reg = core_readl(priv, offset); reg |= (MII_SW_OR | LINK_STS); - reg &= ~GMII_SPEED_UP_2G; + if (priv->type == BCM4908_DEVICE_ID) + reg |= GMII_SPEED_UP_2G; + else + reg &= ~GMII_SPEED_UP_2G; core_writel(priv, reg, offset); /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ @@ -432,6 +465,44 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) return 0; } +static void bcm_sf2_crossbar_setup(struct bcm_sf2_priv *priv) +{ + struct device *dev = priv->dev->ds->dev; + int shift; + u32 mask; + u32 reg; + int i; + + mask = BIT(priv->num_crossbar_int_ports) - 1; + + reg = reg_readl(priv, REG_CROSSBAR); + switch (priv->type) { + case BCM4908_DEVICE_ID: + shift = CROSSBAR_BCM4908_INT_P7 * priv->num_crossbar_int_ports; + reg &= ~(mask << shift); + if (0) /* FIXME */ + reg |= CROSSBAR_BCM4908_EXT_SERDES << shift; + else if (priv->int_phy_mask & BIT(7)) + reg |= CROSSBAR_BCM4908_EXT_GPHY4 << shift; + else if (phy_interface_mode_is_rgmii(priv->port_sts[7].mode)) + reg |= CROSSBAR_BCM4908_EXT_RGMII << shift; + else if (WARN(1, "Invalid port mode\n")) + return; + break; + default: + return; + } + reg_writel(priv, reg, REG_CROSSBAR); + + reg = reg_readl(priv, REG_CROSSBAR); + for (i = 0; i < priv->num_crossbar_int_ports; i++) { + shift = i * priv->num_crossbar_int_ports; + + dev_dbg(dev, "crossbar int port #%d - ext port #%d\n", i, + (reg >> shift) & mask); + } +} + static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv) { intrl2_0_mask_set(priv, 0xffffffff); @@ -443,10 +514,11 @@ static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv) static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, struct device_node *dn) { + struct device *dev = priv->dev->ds->dev; + struct bcm_sf2_port_status *port_st; struct device_node *port; unsigned int port_num; struct property *prop; - phy_interface_t mode; int err; priv->moca_port = -1; @@ -455,19 +527,26 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, if (of_property_read_u32(port, "reg", &port_num)) continue; + if (port_num >= DSA_MAX_PORTS) { + dev_err(dev, "Invalid port number %d\n", port_num); + continue; + } + + port_st = &priv->port_sts[port_num]; + /* Internal PHYs get assigned a specific 'phy-mode' property * value: "internal" to help flag them before MDIO probing * has completed, since they might be turned off at that * time */ - err = of_get_phy_mode(port, &mode); + err = of_get_phy_mode(port, &port_st->mode); if (err) continue; - if (mode == PHY_INTERFACE_MODE_INTERNAL) + if (port_st->mode == PHY_INTERFACE_MODE_INTERNAL) priv->int_phy_mask |= 1 << port_num; - if (mode == PHY_INTERFACE_MODE_MOCA) + if (port_st->mode == PHY_INTERFACE_MODE_MOCA) priv->moca_port = port_num; if (of_property_read_bool(port, "brcm,use-bcm-hdr")) @@ -585,8 +664,10 @@ static u32 
bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) * in bits 15:8 and the patch level in bits 7:0 which is exactly what * the REG_PHY_REVISION register layout is. */ - - return priv->hw_params.gphy_rev; + if (priv->int_phy_mask & BIT(port)) + return priv->hw_params.gphy_rev; + else + return 0; } static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, @@ -642,6 +723,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 id_mode_dis = 0, port_mode; + u32 reg_rgmii_ctrl; u32 reg; if (port == core_readl(priv, CORE_IMP0_PRT_ID)) @@ -665,10 +747,12 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, return; } + reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port); + /* Clear id_mode_dis bit, and the existing port mode, let * RGMII_MODE_EN bet set by mac_link_{up,down} */ - reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); + reg = reg_readl(priv, reg_rgmii_ctrl); reg &= ~ID_MODE_DIS; reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT); @@ -676,13 +760,14 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, if (id_mode_dis) reg |= ID_MODE_DIS; - reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); + reg_writel(priv, reg, reg_rgmii_ctrl); } static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port, phy_interface_t interface, bool link) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + u32 reg_rgmii_ctrl; u32 reg; if (!phy_interface_mode_is_rgmii(interface) && @@ -690,13 +775,15 @@ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port, interface != PHY_INTERFACE_MODE_REVMII) return; + reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port); + /* If the link is down, just disable the interface to conserve power */ - reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); + reg = reg_readl(priv, reg_rgmii_ctrl); if (link) reg |= RGMII_MODE_EN; else reg &= ~RGMII_MODE_EN; - reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); + reg_writel(priv, reg, reg_rgmii_ctrl); } static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port, @@ -730,11 +817,15 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->dev->ports[port].eee; - u32 reg, offset; bcm_sf2_sw_mac_link_set(ds, port, interface, true); if (port != core_readl(priv, CORE_IMP0_PRT_ID)) { + u32 reg_rgmii_ctrl; + u32 reg, offset; + + reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port); + if (priv->type == BCM4908_DEVICE_ID || priv->type == BCM7445_DEVICE_ID) offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); @@ -745,7 +836,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, interface == PHY_INTERFACE_MODE_RGMII_TXID || interface == PHY_INTERFACE_MODE_MII || interface == PHY_INTERFACE_MODE_REVMII) { - reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); + reg = reg_readl(priv, reg_rgmii_ctrl); reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN); if (tx_pause) @@ -753,7 +844,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, if (rx_pause) reg |= RX_PAUSE_EN; - reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); + reg_writel(priv, reg, reg_rgmii_ctrl); } reg = SW_OVERRIDE | LINK_STS; @@ -856,6 +947,8 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) return ret; } + bcm_sf2_crossbar_setup(priv); + ret = bcm_sf2_cfp_resume(ds); if (ret) return ret; @@ -1128,6 +1221,7 @@ struct bcm_sf2_of_data { const u16 *reg_offsets; unsigned int core_reg_align; unsigned int num_cfp_rules; + unsigned int num_crossbar_int_ports; }; 
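/*
 * Worked example of the register layout implied by
 * bcm_sf2_crossbar_setup() above, assuming BCM4908 with
 * num_crossbar_int_ports = 2: each internal port owns a select field of
 * num_crossbar_int_ports bits, so mask = BIT(2) - 1 = 0x3, port 7
 * (index CROSSBAR_BCM4908_INT_P7 = 0) occupies bits 1:0 and the runner
 * (index 1) bits 3:2. Routing external GPHY4 to internal port 7 then
 * reduces to:
 *
 *	shift = CROSSBAR_BCM4908_INT_P7 * 2;		(field at bit 0)
 *	reg &= ~(0x3 << shift);				(clear old selection)
 *	reg |= CROSSBAR_BCM4908_EXT_GPHY4 << shift;	(select GPHY4 = 1)
 */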
static const u16 bcm_sf2_4908_reg_offsets[] = { @@ -1139,9 +1233,7 @@ static const u16 bcm_sf2_4908_reg_offsets[] = { [REG_PHY_REVISION] = 0x14, [REG_SPHY_CNTRL] = 0x24, [REG_CROSSBAR] = 0xc8, - [REG_RGMII_0_CNTRL] = 0xe0, - [REG_RGMII_1_CNTRL] = 0xec, - [REG_RGMII_2_CNTRL] = 0xf8, + [REG_RGMII_11_CNTRL] = 0x014c, [REG_LED_0_CNTRL] = 0x40, [REG_LED_1_CNTRL] = 0x4c, [REG_LED_2_CNTRL] = 0x58, @@ -1151,7 +1243,8 @@ static const struct bcm_sf2_of_data bcm_sf2_4908_data = { .type = BCM4908_DEVICE_ID, .core_reg_align = 0, .reg_offsets = bcm_sf2_4908_reg_offsets, - .num_cfp_rules = 0, /* FIXME */ + .num_cfp_rules = 256, + .num_crossbar_int_ports = 2, }; /* Register offsets for the SWITCH_REG_* block */ @@ -1262,6 +1355,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) priv->reg_offsets = data->reg_offsets; priv->core_reg_align = data->core_reg_align; priv->num_cfp_rules = data->num_cfp_rules; + priv->num_crossbar_int_ports = data->num_crossbar_int_ports; priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev, "switch"); @@ -1335,6 +1429,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) goto out_clk_mdiv; } + bcm_sf2_crossbar_setup(priv); + bcm_sf2_gphy_enable_set(priv->dev->ds, true); ret = bcm_sf2_mdio_register(ds); diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 1ed901a68536..0d48402068d3 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -44,6 +44,7 @@ struct bcm_sf2_hw_params { #define BCM_SF2_REGS_NUM 6 struct bcm_sf2_port_status { + phy_interface_t mode; unsigned int link; bool enabled; }; @@ -73,6 +74,7 @@ struct bcm_sf2_priv { const u16 *reg_offsets; unsigned int core_reg_align; unsigned int num_cfp_rules; + unsigned int num_crossbar_int_ports; /* spinlock protecting access to the indirect registers */ spinlock_t indir_lock; diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 1d2d55c9f8aa..7bffc80f241f 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -21,6 +21,7 @@ enum bcm_sf2_reg_offs { REG_RGMII_0_CNTRL, REG_RGMII_1_CNTRL, REG_RGMII_2_CNTRL, + REG_RGMII_11_CNTRL, REG_LED_0_CNTRL, REG_LED_1_CNTRL, REG_LED_2_CNTRL, @@ -48,7 +49,12 @@ enum bcm_sf2_reg_offs { #define PHY_PHYAD_SHIFT 8 #define PHY_PHYAD_MASK 0x1F -#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_CNTRL + (x)) +/* Relative to REG_CROSSBAR */ +#define CROSSBAR_BCM4908_INT_P7 0 +#define CROSSBAR_BCM4908_INT_RUNNER 1 +#define CROSSBAR_BCM4908_EXT_SERDES 0 +#define CROSSBAR_BCM4908_EXT_GPHY4 1 +#define CROSSBAR_BCM4908_EXT_RGMII 2 /* Relative to REG_RGMII_CNTRL */ #define RGMII_MODE_EN (1 << 0) diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 463137c39db2..918be7eb626f 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -433,7 +433,7 @@ static void hellcreek_unapply_vlan(struct hellcreek *hellcreek, int port, mutex_lock(&hellcreek->reg_lock); - hellcreek_select_vlan(hellcreek, vid, 0); + hellcreek_select_vlan(hellcreek, vid, false); /* Setup port vlan membership */ hellcreek_select_vlan_params(hellcreek, port, &shift, &mask); @@ -596,6 +596,83 @@ static void hellcreek_setup_vlan_membership(struct dsa_switch *ds, int port, hellcreek_unapply_vlan(hellcreek, upstream, vid); } +static void hellcreek_port_set_ucast_flood(struct hellcreek *hellcreek, + int port, bool enable) +{ + struct hellcreek_port *hellcreek_port; + u16 val; + + hellcreek_port = &hellcreek->ports[port]; + + 
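+	/* Note the inverted sense of the hardware bit used below:
+	 * HR_PTCFG_UUC_FLT is an unknown-unicast *filter*, so flooding is
+	 * enabled by clearing the bit and disabled by setting it.
+	 */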
dev_dbg(hellcreek->dev, "%s unicast flooding on port %d\n", + enable ? "Enable" : "Disable", port); + + mutex_lock(&hellcreek->reg_lock); + + hellcreek_select_port(hellcreek, port); + val = hellcreek_port->ptcfg; + if (enable) + val &= ~HR_PTCFG_UUC_FLT; + else + val |= HR_PTCFG_UUC_FLT; + hellcreek_write(hellcreek, val, HR_PTCFG); + hellcreek_port->ptcfg = val; + + mutex_unlock(&hellcreek->reg_lock); +} + +static void hellcreek_port_set_mcast_flood(struct hellcreek *hellcreek, + int port, bool enable) +{ + struct hellcreek_port *hellcreek_port; + u16 val; + + hellcreek_port = &hellcreek->ports[port]; + + dev_dbg(hellcreek->dev, "%s multicast flooding on port %d\n", + enable ? "Enable" : "Disable", port); + + mutex_lock(&hellcreek->reg_lock); + + hellcreek_select_port(hellcreek, port); + val = hellcreek_port->ptcfg; + if (enable) + val &= ~HR_PTCFG_UMC_FLT; + else + val |= HR_PTCFG_UMC_FLT; + hellcreek_write(hellcreek, val, HR_PTCFG); + hellcreek_port->ptcfg = val; + + mutex_unlock(&hellcreek->reg_lock); +} + +static int hellcreek_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD)) + return -EINVAL; + + return 0; +} + +static int hellcreek_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct hellcreek *hellcreek = ds->priv; + + if (flags.mask & BR_FLOOD) + hellcreek_port_set_ucast_flood(hellcreek, port, + !!(flags.val & BR_FLOOD)); + + if (flags.mask & BR_MCAST_FLOOD) + hellcreek_port_set_mcast_flood(hellcreek, port, + !!(flags.val & BR_MCAST_FLOOD)); + + return 0; +} + static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) { @@ -670,6 +747,40 @@ static int __hellcreek_fdb_del(struct hellcreek *hellcreek, return hellcreek_wait_fdb_ready(hellcreek); } +static void hellcreek_populate_fdb_entry(struct hellcreek *hellcreek, + struct hellcreek_fdb_entry *entry, + size_t idx) +{ + unsigned char addr[ETH_ALEN]; + u16 meta, mac; + + /* Read values */ + meta = hellcreek_read(hellcreek, HR_FDBMDRD); + mac = hellcreek_read(hellcreek, HR_FDBRDL); + addr[5] = mac & 0xff; + addr[4] = (mac & 0xff00) >> 8; + mac = hellcreek_read(hellcreek, HR_FDBRDM); + addr[3] = mac & 0xff; + addr[2] = (mac & 0xff00) >> 8; + mac = hellcreek_read(hellcreek, HR_FDBRDH); + addr[1] = mac & 0xff; + addr[0] = (mac & 0xff00) >> 8; + + /* Populate @entry */ + memcpy(entry->mac, addr, sizeof(addr)); + entry->idx = idx; + entry->portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >> + HR_FDBMDRD_PORTMASK_SHIFT; + entry->age = (meta & HR_FDBMDRD_AGE_MASK) >> + HR_FDBMDRD_AGE_SHIFT; + entry->is_obt = !!(meta & HR_FDBMDRD_OBT); + entry->pass_blocked = !!(meta & HR_FDBMDRD_PASS_BLOCKED); + entry->is_static = !!(meta & HR_FDBMDRD_STATIC); + entry->reprio_tc = (meta & HR_FDBMDRD_REPRIO_TC_MASK) >> + HR_FDBMDRD_REPRIO_TC_SHIFT; + entry->reprio_en = !!(meta & HR_FDBMDRD_REPRIO_EN); +} + /* Retrieve the index of a FDB entry by mac address. Currently we search through * the complete table in hardware. If that's too slow, we might have to cache * the complete FDB table in software. @@ -691,39 +802,19 @@ static int hellcreek_fdb_get(struct hellcreek *hellcreek, * enter new entries anywhere. 
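 * Each iteration below reads one entry via hellcreek_populate_fdb_entry()
 * and then writes HR_FDBRDH to force the read pointer to the next entry.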
*/ for (i = 0; i < hellcreek->fdb_entries; ++i) { - unsigned char addr[ETH_ALEN]; - u16 meta, mac; - - meta = hellcreek_read(hellcreek, HR_FDBMDRD); - mac = hellcreek_read(hellcreek, HR_FDBRDL); - addr[5] = mac & 0xff; - addr[4] = (mac & 0xff00) >> 8; - mac = hellcreek_read(hellcreek, HR_FDBRDM); - addr[3] = mac & 0xff; - addr[2] = (mac & 0xff00) >> 8; - mac = hellcreek_read(hellcreek, HR_FDBRDH); - addr[1] = mac & 0xff; - addr[0] = (mac & 0xff00) >> 8; + struct hellcreek_fdb_entry tmp = { 0 }; + + /* Read entry */ + hellcreek_populate_fdb_entry(hellcreek, &tmp, i); /* Force next entry */ hellcreek_write(hellcreek, 0x00, HR_FDBRDH); - if (memcmp(addr, dest, ETH_ALEN)) + if (memcmp(tmp.mac, dest, ETH_ALEN)) continue; /* Match found */ - entry->idx = i; - entry->portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >> - HR_FDBMDRD_PORTMASK_SHIFT; - entry->age = (meta & HR_FDBMDRD_AGE_MASK) >> - HR_FDBMDRD_AGE_SHIFT; - entry->is_obt = !!(meta & HR_FDBMDRD_OBT); - entry->pass_blocked = !!(meta & HR_FDBMDRD_PASS_BLOCKED); - entry->is_static = !!(meta & HR_FDBMDRD_STATIC); - entry->reprio_tc = (meta & HR_FDBMDRD_REPRIO_TC_MASK) >> - HR_FDBMDRD_REPRIO_TC_SHIFT; - entry->reprio_en = !!(meta & HR_FDBMDRD_REPRIO_EN); - memcpy(entry->mac, addr, sizeof(addr)); + memcpy(entry, &tmp, sizeof(*entry)); return 0; } @@ -838,18 +929,9 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port, for (i = 0; i < hellcreek->fdb_entries; ++i) { unsigned char null_addr[ETH_ALEN] = { 0 }; struct hellcreek_fdb_entry entry = { 0 }; - u16 meta, mac; - - meta = hellcreek_read(hellcreek, HR_FDBMDRD); - mac = hellcreek_read(hellcreek, HR_FDBRDL); - entry.mac[5] = mac & 0xff; - entry.mac[4] = (mac & 0xff00) >> 8; - mac = hellcreek_read(hellcreek, HR_FDBRDM); - entry.mac[3] = mac & 0xff; - entry.mac[2] = (mac & 0xff00) >> 8; - mac = hellcreek_read(hellcreek, HR_FDBRDH); - entry.mac[1] = mac & 0xff; - entry.mac[0] = (mac & 0xff00) >> 8; + + /* Read entry */ + hellcreek_populate_fdb_entry(hellcreek, &entry, i); /* Force next entry */ hellcreek_write(hellcreek, 0x00, HR_FDBRDH); @@ -858,10 +940,6 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port, if (!memcmp(entry.mac, null_addr, ETH_ALEN)) continue; - entry.portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >> - HR_FDBMDRD_PORTMASK_SHIFT; - entry.is_static = !!(meta & HR_FDBMDRD_STATIC); - /* Check port mask */ if (!(entry.portmask & BIT(port))) continue; @@ -1004,6 +1082,22 @@ out: return ret; } +static int hellcreek_devlink_info_get(struct dsa_switch *ds, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct hellcreek *hellcreek = ds->priv; + int ret; + + ret = devlink_info_driver_name_put(req, "hellcreek"); + if (ret) + return ret; + + return devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, + hellcreek->pdata->name); +} + static u64 hellcreek_devlink_vlan_table_get(void *priv) { struct hellcreek *hellcreek = priv; @@ -1082,6 +1176,129 @@ out: return err; } +static int hellcreek_devlink_region_vlan_snapshot(struct devlink *dl, + const struct devlink_region_ops *ops, + struct netlink_ext_ack *extack, + u8 **data) +{ + struct hellcreek_devlink_vlan_entry *table, *entry; + struct dsa_switch *ds = dsa_devlink_to_ds(dl); + struct hellcreek *hellcreek = ds->priv; + int i; + + table = kcalloc(VLAN_N_VID, sizeof(*entry), GFP_KERNEL); + if (!table) + return -ENOMEM; + + entry = table; + + mutex_lock(&hellcreek->reg_lock); + for (i = 0; i < VLAN_N_VID; ++i, ++entry) { + entry->member = hellcreek->vidmbrcfg[i]; + entry->vid = 
i; + } + mutex_unlock(&hellcreek->reg_lock); + + *data = (u8 *)table; + + return 0; +} + +static int hellcreek_devlink_region_fdb_snapshot(struct devlink *dl, + const struct devlink_region_ops *ops, + struct netlink_ext_ack *extack, + u8 **data) +{ + struct dsa_switch *ds = dsa_devlink_to_ds(dl); + struct hellcreek_fdb_entry *table, *entry; + struct hellcreek *hellcreek = ds->priv; + size_t i; + + table = kcalloc(hellcreek->fdb_entries, sizeof(*entry), GFP_KERNEL); + if (!table) + return -ENOMEM; + + entry = table; + + mutex_lock(&hellcreek->reg_lock); + + /* Start table read */ + hellcreek_read(hellcreek, HR_FDBMAX); + hellcreek_write(hellcreek, 0x00, HR_FDBMAX); + + for (i = 0; i < hellcreek->fdb_entries; ++i, ++entry) { + /* Read current entry */ + hellcreek_populate_fdb_entry(hellcreek, entry, i); + + /* Advance read pointer */ + hellcreek_write(hellcreek, 0x00, HR_FDBRDH); + } + + mutex_unlock(&hellcreek->reg_lock); + + *data = (u8 *)table; + + return 0; +} + +static struct devlink_region_ops hellcreek_region_vlan_ops = { + .name = "vlan", + .snapshot = hellcreek_devlink_region_vlan_snapshot, + .destructor = kfree, +}; + +static struct devlink_region_ops hellcreek_region_fdb_ops = { + .name = "fdb", + .snapshot = hellcreek_devlink_region_fdb_snapshot, + .destructor = kfree, +}; + +static int hellcreek_setup_devlink_regions(struct dsa_switch *ds) +{ + struct hellcreek *hellcreek = ds->priv; + struct devlink_region_ops *ops; + struct devlink_region *region; + u64 size; + int ret; + + /* VLAN table */ + size = VLAN_N_VID * sizeof(struct hellcreek_devlink_vlan_entry); + ops = &hellcreek_region_vlan_ops; + + region = dsa_devlink_region_create(ds, ops, 1, size); + if (IS_ERR(region)) + return PTR_ERR(region); + + hellcreek->vlan_region = region; + + /* FDB table */ + size = hellcreek->fdb_entries * sizeof(struct hellcreek_fdb_entry); + ops = &hellcreek_region_fdb_ops; + + region = dsa_devlink_region_create(ds, ops, 1, size); + if (IS_ERR(region)) { + ret = PTR_ERR(region); + goto err_fdb; + } + + hellcreek->fdb_region = region; + + return 0; + +err_fdb: + dsa_devlink_region_destroy(hellcreek->vlan_region); + + return ret; +} + +static void hellcreek_teardown_devlink_regions(struct dsa_switch *ds) +{ + struct hellcreek *hellcreek = ds->priv; + + dsa_devlink_region_destroy(hellcreek->fdb_region); + dsa_devlink_region_destroy(hellcreek->vlan_region); +} + static int hellcreek_setup(struct dsa_switch *ds) { struct hellcreek *hellcreek = ds->priv; @@ -1143,11 +1360,24 @@ static int hellcreek_setup(struct dsa_switch *ds) return ret; } + ret = hellcreek_setup_devlink_regions(ds); + if (ret) { + dev_err(hellcreek->dev, + "Failed to setup devlink regions!\n"); + goto err_regions; + } + return 0; + +err_regions: + dsa_devlink_resources_unregister(ds); + + return ret; } static void hellcreek_teardown(struct dsa_switch *ds) { + hellcreek_teardown_devlink_regions(ds); dsa_devlink_resources_unregister(ds); } @@ -1518,31 +1748,34 @@ static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port, } static const struct dsa_switch_ops hellcreek_ds_ops = { - .get_ethtool_stats = hellcreek_get_ethtool_stats, - .get_sset_count = hellcreek_get_sset_count, - .get_strings = hellcreek_get_strings, - .get_tag_protocol = hellcreek_get_tag_protocol, - .get_ts_info = hellcreek_get_ts_info, - .phylink_validate = hellcreek_phylink_validate, - .port_bridge_join = hellcreek_port_bridge_join, - .port_bridge_leave = hellcreek_port_bridge_leave, - .port_disable = hellcreek_port_disable, - .port_enable = 
hellcreek_port_enable, - .port_fdb_add = hellcreek_fdb_add, - .port_fdb_del = hellcreek_fdb_del, - .port_fdb_dump = hellcreek_fdb_dump, - .port_hwtstamp_set = hellcreek_port_hwtstamp_set, - .port_hwtstamp_get = hellcreek_port_hwtstamp_get, - .port_prechangeupper = hellcreek_port_prechangeupper, - .port_rxtstamp = hellcreek_port_rxtstamp, - .port_setup_tc = hellcreek_port_setup_tc, - .port_stp_state_set = hellcreek_port_stp_state_set, - .port_txtstamp = hellcreek_port_txtstamp, - .port_vlan_add = hellcreek_vlan_add, - .port_vlan_del = hellcreek_vlan_del, - .port_vlan_filtering = hellcreek_vlan_filtering, - .setup = hellcreek_setup, - .teardown = hellcreek_teardown, + .devlink_info_get = hellcreek_devlink_info_get, + .get_ethtool_stats = hellcreek_get_ethtool_stats, + .get_sset_count = hellcreek_get_sset_count, + .get_strings = hellcreek_get_strings, + .get_tag_protocol = hellcreek_get_tag_protocol, + .get_ts_info = hellcreek_get_ts_info, + .phylink_validate = hellcreek_phylink_validate, + .port_bridge_flags = hellcreek_bridge_flags, + .port_bridge_join = hellcreek_port_bridge_join, + .port_bridge_leave = hellcreek_port_bridge_leave, + .port_disable = hellcreek_port_disable, + .port_enable = hellcreek_port_enable, + .port_fdb_add = hellcreek_fdb_add, + .port_fdb_del = hellcreek_fdb_del, + .port_fdb_dump = hellcreek_fdb_dump, + .port_hwtstamp_set = hellcreek_port_hwtstamp_set, + .port_hwtstamp_get = hellcreek_port_hwtstamp_get, + .port_pre_bridge_flags = hellcreek_pre_bridge_flags, + .port_prechangeupper = hellcreek_port_prechangeupper, + .port_rxtstamp = hellcreek_port_rxtstamp, + .port_setup_tc = hellcreek_port_setup_tc, + .port_stp_state_set = hellcreek_port_stp_state_set, + .port_txtstamp = hellcreek_port_txtstamp, + .port_vlan_add = hellcreek_vlan_add, + .port_vlan_del = hellcreek_vlan_del, + .port_vlan_filtering = hellcreek_vlan_filtering, + .setup = hellcreek_setup, + .teardown = hellcreek_teardown, }; static int hellcreek_probe(struct platform_device *pdev) @@ -1693,6 +1926,7 @@ static int hellcreek_remove(struct platform_device *pdev) } static const struct hellcreek_platform_data de1soc_r1_pdata = { + .name = "r4c30", .num_ports = 4, .is_100_mbits = 1, .qbv_support = 1, diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h index 305e76dab34d..9e303b8ab13c 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.h +++ b/drivers/net/dsa/hirschmann/hellcreek.h @@ -278,6 +278,8 @@ struct hellcreek { struct mutex reg_lock; /* Switch IP register lock */ struct mutex vlan_lock; /* VLAN bitmaps lock */ struct mutex ptp_lock; /* PTP IP register lock */ + struct devlink_region *vlan_region; + struct devlink_region *fdb_region; void __iomem *base; void __iomem *ptp_base; u16 swcfg; /* swcfg shadow */ @@ -304,4 +306,9 @@ enum hellcreek_devlink_resource_id { HELLCREEK_DEVLINK_PARAM_ID_FDB_TABLE, }; +struct hellcreek_devlink_vlan_entry { + u16 vid; + u16 member; +}; + #endif /* _HELLCREEK_H_ */ diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 52e865a3912c..26d0e3bb5dea 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Lantiq / Intel GSWIP switch driver for VRX200 SoCs + * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs * * Copyright (C) 2010 Lantiq Deutschland * Copyright (C) 2012 John Crispin <john@phrozen.org> @@ -100,6 +100,7 @@ #define GSWIP_MII_CFG_MODE_RMIIP 0x2 #define GSWIP_MII_CFG_MODE_RMIIM 0x3 #define 
GSWIP_MII_CFG_MODE_RGMII 0x4 +#define GSWIP_MII_CFG_MODE_GMII 0x9 #define GSWIP_MII_CFG_MODE_MASK 0xf #define GSWIP_MII_CFG_RATE_M2P5 0x00 #define GSWIP_MII_CFG_RATE_M25 0x10 @@ -220,6 +221,7 @@ struct gswip_hw_info { int max_ports; int cpu_port; + const struct dsa_switch_ops *ops; }; struct xway_gphy_match_data { @@ -1384,12 +1386,42 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, return 0; } -static void gswip_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) +static void gswip_phylink_set_capab(unsigned long *supported, + struct phylink_link_state *state) { __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + /* With the exclusion of MII, Reverse MII and Reduced MII, we + * support Gigabit, including Half duplex + */ + if (state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII && + state->interface != PHY_INTERFACE_MODE_RMII) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ switch (port) { case 0: case 1: @@ -1416,38 +1448,54 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port, return; } - /* Allow all the expected bits */ - phylink_set(mask, Autoneg); - phylink_set_port_modes(mask); - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); + gswip_phylink_set_capab(supported, state); - /* With the exclusion of MII, Reverse MII and Reduced MII, we - * support Gigabit, including Half duplex - */ - if (state->interface != PHY_INTERFACE_MODE_MII && - state->interface != PHY_INTERFACE_MODE_REVMII && - state->interface != PHY_INTERFACE_MODE_RMII) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseT_Half); + return; + +unsupported: + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", + phy_modes(state->interface), port); +} + +static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + switch (port) { + case 0: + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_GMII && + state->interface != PHY_INTERFACE_MODE_RMII) + goto unsupported; + break; + case 1: + case 2: + case 3: + case 4: + if (state->interface != PHY_INTERFACE_MODE_INTERNAL) + goto unsupported; + break; + case 5: + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_INTERNAL && + state->interface != PHY_INTERFACE_MODE_RMII) + goto unsupported; + break; + default: + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + dev_err(ds->dev, "Unsupported port: %i\n", port); + return; } - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); + gswip_phylink_set_capab(supported, state); - bitmap_and(supported, 
supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); return; unsupported: bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", phy_modes(state->interface), port); - return; } static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, @@ -1476,6 +1524,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, case PHY_INTERFACE_MODE_RGMII_TXID: miicfg |= GSWIP_MII_CFG_MODE_RGMII; break; + case PHY_INTERFACE_MODE_GMII: + miicfg |= GSWIP_MII_CFG_MODE_GMII; + break; default: dev_err(ds->dev, "Unsupported interface: %d\n", state->interface); @@ -1588,7 +1639,7 @@ static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset) return ARRAY_SIZE(gswip_rmon_cnt); } -static const struct dsa_switch_ops gswip_switch_ops = { +static const struct dsa_switch_ops gswip_xrx200_switch_ops = { .get_tag_protocol = gswip_get_tag_protocol, .setup = gswip_setup, .port_enable = gswip_port_enable, @@ -1603,7 +1654,31 @@ static const struct dsa_switch_ops gswip_switch_ops = { .port_fdb_add = gswip_port_fdb_add, .port_fdb_del = gswip_port_fdb_del, .port_fdb_dump = gswip_port_fdb_dump, - .phylink_validate = gswip_phylink_validate, + .phylink_validate = gswip_xrx200_phylink_validate, + .phylink_mac_config = gswip_phylink_mac_config, + .phylink_mac_link_down = gswip_phylink_mac_link_down, + .phylink_mac_link_up = gswip_phylink_mac_link_up, + .get_strings = gswip_get_strings, + .get_ethtool_stats = gswip_get_ethtool_stats, + .get_sset_count = gswip_get_sset_count, +}; + +static const struct dsa_switch_ops gswip_xrx300_switch_ops = { + .get_tag_protocol = gswip_get_tag_protocol, + .setup = gswip_setup, + .port_enable = gswip_port_enable, + .port_disable = gswip_port_disable, + .port_bridge_join = gswip_port_bridge_join, + .port_bridge_leave = gswip_port_bridge_leave, + .port_fast_age = gswip_port_fast_age, + .port_vlan_filtering = gswip_port_vlan_filtering, + .port_vlan_add = gswip_port_vlan_add, + .port_vlan_del = gswip_port_vlan_del, + .port_stp_state_set = gswip_port_stp_state_set, + .port_fdb_add = gswip_port_fdb_add, + .port_fdb_del = gswip_port_fdb_del, + .port_fdb_dump = gswip_port_fdb_dump, + .phylink_validate = gswip_xrx300_phylink_validate, .phylink_mac_config = gswip_phylink_mac_config, .phylink_mac_link_down = gswip_phylink_mac_link_down, .phylink_mac_link_up = gswip_phylink_mac_link_up, @@ -1832,7 +1907,7 @@ remove_gphy: static int gswip_probe(struct platform_device *pdev) { struct gswip_priv *priv; - struct device_node *mdio_np, *gphy_fw_np; + struct device_node *np, *mdio_np, *gphy_fw_np; struct device *dev = &pdev->dev; int err; int i; @@ -1865,10 +1940,28 @@ static int gswip_probe(struct platform_device *pdev) priv->ds->dev = dev; priv->ds->num_ports = priv->hw_info->max_ports; priv->ds->priv = priv; - priv->ds->ops = &gswip_switch_ops; + priv->ds->ops = priv->hw_info->ops; priv->dev = dev; version = gswip_switch_r(priv, GSWIP_VERSION); + np = dev->of_node; + switch (version) { + case GSWIP_VERSION_2_0: + case GSWIP_VERSION_2_1: + if (!of_device_is_compatible(np, "lantiq,xrx200-gswip")) + return -EINVAL; + break; + case GSWIP_VERSION_2_2: + case GSWIP_VERSION_2_2_ETC: + if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") && + !of_device_is_compatible(np, "lantiq,xrx330-gswip")) + return -EINVAL; + break; + default: + dev_err(dev, "unknown GSWIP version: 0x%x", version); + return -ENOENT; + } + /* bring up 
the mdio bus */ gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw"); if (gphy_fw_np) { @@ -1946,10 +2039,19 @@ static int gswip_remove(struct platform_device *pdev) static const struct gswip_hw_info gswip_xrx200 = { .max_ports = 7, .cpu_port = 6, + .ops = &gswip_xrx200_switch_ops, +}; + +static const struct gswip_hw_info gswip_xrx300 = { + .max_ports = 7, + .cpu_port = 6, + .ops = &gswip_xrx300_switch_ops, }; static const struct of_device_id gswip_of_match[] = { { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 }, + { .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 }, + { .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 }, {}, }; MODULE_DEVICE_TABLE(of, gswip_of_match); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index f06f5fa2f898..c442a5885fca 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -436,34 +436,32 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) TD_DM_DRVP(8) | TD_DM_DRVN(8)); /* Setup core clock for MT7530 */ - if (!trgint) { - /* Disable MT7530 core clock */ - core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); - - /* Disable PLL, since phy_device has not yet been created - * provided for phy_[read,write]_mmd_indirect is called, we - * provide our own core_write_mmd_indirect to complete this - * function. - */ - core_write_mmd_indirect(priv, - CORE_GSWPLL_GRP1, - MDIO_MMD_VEND2, - 0); - - /* Set core clock into 500Mhz */ - core_write(priv, CORE_GSWPLL_GRP2, - RG_GSWPLL_POSDIV_500M(1) | - RG_GSWPLL_FBKDIV_500M(25)); - - /* Enable PLL */ - core_write(priv, CORE_GSWPLL_GRP1, - RG_GSWPLL_EN_PRE | - RG_GSWPLL_POSDIV_200M(2) | - RG_GSWPLL_FBKDIV_200M(32)); + /* Disable MT7530 core clock */ + core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); - /* Enable MT7530 core clock */ - core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); - } + /* Disable PLL, since phy_device has not yet been created + * provided for phy_[read,write]_mmd_indirect is called, we + * provide our own core_write_mmd_indirect to complete this + * function. 
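+	 * (Below: the PLL is first stopped, the 500 MHz dividers are
+	 * programmed into CORE_GSWPLL_GRP2, the PLL is re-enabled, and the
+	 * core clock is finally ungated.)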
+ */ + core_write_mmd_indirect(priv, + CORE_GSWPLL_GRP1, + MDIO_MMD_VEND2, + 0); + + /* Set core clock into 500Mhz */ + core_write(priv, CORE_GSWPLL_GRP2, + RG_GSWPLL_POSDIV_500M(1) | + RG_GSWPLL_FBKDIV_500M(25)); + + /* Enable PLL */ + core_write(priv, CORE_GSWPLL_GRP1, + RG_GSWPLL_EN_PRE | + RG_GSWPLL_POSDIV_200M(2) | + RG_GSWPLL_FBKDIV_200M(32)); + + /* Enable MT7530 core clock */ + core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); /* Setup the MT7530 TRGMII Tx Clock */ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); @@ -999,8 +997,9 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port) mt7530_write(priv, MT7530_PVC_P(port), PORT_SPEC_TAG); - /* Unknown multicast frame forwarding to the cpu port */ - mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port))); + /* Disable flooding by default */ + mt7530_rmw(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | UNU_FFP_MASK, + BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | UNU_FFP(BIT(port))); /* Set CPU port number */ if (priv->id == ID_MT7621) @@ -1138,6 +1137,56 @@ mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state) } static int +mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | + BR_BCAST_FLOOD)) + return -EINVAL; + + return 0; +} + +static int +mt7530_port_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct mt7530_priv *priv = ds->priv; + + if (flags.mask & BR_LEARNING) + mt7530_rmw(priv, MT7530_PSC_P(port), SA_DIS, + flags.val & BR_LEARNING ? 0 : SA_DIS); + + if (flags.mask & BR_FLOOD) + mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)), + flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0); + + if (flags.mask & BR_MCAST_FLOOD) + mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)), + flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0); + + if (flags.mask & BR_BCAST_FLOOD) + mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)), + flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0); + + return 0; +} + +static int +mt7530_port_set_mrouter(struct dsa_switch *ds, int port, bool mrouter, + struct netlink_ext_ack *extack) +{ + struct mt7530_priv *priv = ds->priv; + + mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)), + mrouter ? 
UNM_FFP(BIT(port)) : 0); + + return 0; +} + +static int mt7530_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *bridge) { @@ -1349,6 +1398,59 @@ err: } static int +mt7530_port_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct mt7530_priv *priv = ds->priv; + const u8 *addr = mdb->addr; + u16 vid = mdb->vid; + u8 port_mask = 0; + int ret; + + mutex_lock(&priv->reg_mutex); + + mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP); + if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL)) + port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP) + & PORT_MAP_MASK; + + port_mask |= BIT(port); + mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); + ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); + + mutex_unlock(&priv->reg_mutex); + + return ret; +} + +static int +mt7530_port_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct mt7530_priv *priv = ds->priv; + const u8 *addr = mdb->addr; + u16 vid = mdb->vid; + u8 port_mask = 0; + int ret; + + mutex_lock(&priv->reg_mutex); + + mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP); + if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL)) + port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP) + & PORT_MAP_MASK; + + port_mask &= ~BIT(port); + mt7530_fdb_write(priv, vid, port_mask, addr, -1, + port_mask ? STATIC_ENT : STATIC_EMP); + ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); + + mutex_unlock(&priv->reg_mutex); + + return ret; +} + +static int mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid) { struct mt7530_dummy_poll p; @@ -1820,9 +1922,12 @@ mt7530_setup(struct dsa_switch *ds) ret = mt753x_cpu_port_enable(ds, i); if (ret) return ret; - } else + } else { mt7530_port_disable(ds, i); + /* Disable learning by default on all user ports */ + mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); + } /* Enable consistent egress tag */ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); @@ -1984,9 +2089,13 @@ mt7531_setup(struct dsa_switch *ds) ret = mt753x_cpu_port_enable(ds, i); if (ret) return ret; - } else + } else { mt7530_port_disable(ds, i); + /* Disable learning by default on all user ports */ + mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); + } + /* Enable consistent egress tag */ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); @@ -2708,11 +2817,16 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .port_change_mtu = mt7530_port_change_mtu, .port_max_mtu = mt7530_port_max_mtu, .port_stp_state_set = mt7530_stp_state_set, + .port_pre_bridge_flags = mt7530_port_pre_bridge_flags, + .port_bridge_flags = mt7530_port_bridge_flags, + .port_set_mrouter = mt7530_port_set_mrouter, .port_bridge_join = mt7530_port_bridge_join, .port_bridge_leave = mt7530_port_bridge_leave, .port_fdb_add = mt7530_port_fdb_add, .port_fdb_del = mt7530_port_fdb_del, .port_fdb_dump = mt7530_port_fdb_dump, + .port_mdb_add = mt7530_port_mdb_add, + .port_mdb_del = mt7530_port_mdb_del, .port_vlan_filtering = mt7530_port_vlan_filtering, .port_vlan_add = mt7530_port_vlan_add, .port_vlan_del = mt7530_port_vlan_del, diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 64a9bb377e15..ec36ea5dfd57 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -34,6 +34,7 @@ enum mt753x_id { /* Registers to mac forward control for unknown frames */ #define MT7530_MFC 0x10 #define BC_FFP(x) (((x) & 0xff) << 24) +#define BC_FFP_MASK BC_FFP(~0) 
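+/* BC_FFP(~0) expands to 0xff << 24, i.e. the mask covering the whole
+ * broadcast flooding-port field, matching the existing UNM_FFP_MASK and
+ * UNU_FFP_MASK pattern below.
+ */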
#define UNM_FFP(x) (((x) & 0xff) << 16) #define UNM_FFP_MASK UNM_FFP(~0) #define UNU_FFP(x) (((x) & 0xff) << 8) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 903d619e08ed..95f07fcd4f85 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -485,12 +485,12 @@ static int mv88e6xxx_serdes_pcs_get_state(struct dsa_switch *ds, int port, struct phylink_link_state *state) { struct mv88e6xxx_chip *chip = ds->priv; - u8 lane; + int lane; int err; mv88e6xxx_reg_lock(chip); lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane && chip->info->ops->serdes_pcs_get_state) + if (lane >= 0 && chip->info->ops->serdes_pcs_get_state) err = chip->info->ops->serdes_pcs_get_state(chip, port, lane, state); else @@ -506,11 +506,11 @@ static int mv88e6xxx_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, const unsigned long *advertise) { const struct mv88e6xxx_ops *ops = chip->info->ops; - u8 lane; + int lane; if (ops->serdes_pcs_config) { lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane) + if (lane >= 0) return ops->serdes_pcs_config(chip, port, lane, mode, interface, advertise); } @@ -523,14 +523,14 @@ static void mv88e6xxx_serdes_pcs_an_restart(struct dsa_switch *ds, int port) struct mv88e6xxx_chip *chip = ds->priv; const struct mv88e6xxx_ops *ops; int err = 0; - u8 lane; + int lane; ops = chip->info->ops; if (ops->serdes_pcs_an_restart) { mv88e6xxx_reg_lock(chip); lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane) + if (lane >= 0) err = ops->serdes_pcs_an_restart(chip, port, lane); mv88e6xxx_reg_unlock(chip); @@ -544,11 +544,11 @@ static int mv88e6xxx_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, int speed, int duplex) { const struct mv88e6xxx_ops *ops = chip->info->ops; - u8 lane; + int lane; if (!phylink_autoneg_inband(mode) && ops->serdes_pcs_link_up) { lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane) + if (lane >= 0) return ops->serdes_pcs_link_up(chip, port, lane, speed, duplex); } @@ -635,6 +635,29 @@ static void mv88e6390x_phylink_validate(struct mv88e6xxx_chip *chip, int port, mv88e6390_phylink_validate(chip, port, mask, state); } +static void mv88e6393x_phylink_validate(struct mv88e6xxx_chip *chip, int port, + unsigned long *mask, + struct phylink_link_state *state) +{ + if (port == 0 || port == 9 || port == 10) { + phylink_set(mask, 10000baseT_Full); + phylink_set(mask, 10000baseKR_Full); + phylink_set(mask, 10000baseCR_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseLR_Full); + phylink_set(mask, 10000baseLRM_Full); + phylink_set(mask, 10000baseER_Full); + phylink_set(mask, 5000baseT_Full); + phylink_set(mask, 2500baseX_Full); + phylink_set(mask, 2500baseT_Full); + } + + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + + mv88e6065_phylink_validate(chip, port, mask, state); +} + static void mv88e6xxx_validate(struct dsa_switch *ds, int port, unsigned long *supported, struct phylink_link_state *state) @@ -1456,6 +1479,13 @@ static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port) struct mv88e6xxx_chip *chip = ds->priv; int err; + if (dsa_to_port(ds, port)->lag_dev) + /* Hardware is incapable of fast-aging a LAG through a + * regular ATU move operation. Until we have something + * more fancy in place this is a no-op. 
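+	 * (ATU entries learned on a LAG are keyed on the LAG ID rather
+	 * than the physical port, so a per-port move cannot reach them.)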
+ */ + return; + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_g1_atu_remove(chip, 0, port, false); mv88e6xxx_reg_unlock(chip); @@ -1472,13 +1502,54 @@ static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_vtu_flush(chip); } -static int mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry) +static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, + struct mv88e6xxx_vtu_entry *entry) +{ + int err; + + if (!chip->info->ops->vtu_getnext) + return -EOPNOTSUPP; + + entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip); + entry->valid = false; + + err = chip->info->ops->vtu_getnext(chip, entry); + + if (entry->vid != vid) + entry->valid = false; + + return err; +} + +static int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip, + int (*cb)(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *entry, + void *priv), + void *priv) { + struct mv88e6xxx_vtu_entry entry = { + .vid = mv88e6xxx_max_vid(chip), + .valid = false, + }; + int err; + if (!chip->info->ops->vtu_getnext) return -EOPNOTSUPP; - return chip->info->ops->vtu_getnext(chip, entry); + do { + err = chip->info->ops->vtu_getnext(chip, &entry); + if (err) + return err; + + if (!entry.valid) + break; + + err = cb(chip, &entry, priv); + if (err) + return err; + } while (entry.vid < mv88e6xxx_max_vid(chip)); + + return 0; } static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, @@ -1490,9 +1561,18 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, return chip->info->ops->vtu_loadpurge(chip, entry); } +static int mv88e6xxx_fid_map_vlan(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *entry, + void *_fid_bitmap) +{ + unsigned long *fid_bitmap = _fid_bitmap; + + set_bit(entry->fid, fid_bitmap); + return 0; +} + int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap) { - struct mv88e6xxx_vtu_entry vlan; int i, err; u16 fid; @@ -1508,21 +1588,7 @@ int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap) } /* Set every FID bit used by the VLAN entries */ - vlan.vid = mv88e6xxx_max_vid(chip); - vlan.valid = false; - - do { - err = mv88e6xxx_vtu_getnext(chip, &vlan); - if (err) - return err; - - if (!vlan.valid) - break; - - set_bit(vlan.fid, fid_bitmap); - } while (vlan.vid < mv88e6xxx_max_vid(chip)); - - return 0; + return mv88e6xxx_vtu_walk(chip, mv88e6xxx_fid_map_vlan, fid_bitmap); } static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) @@ -1559,19 +1625,13 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) return 0; - vlan.vid = vid - 1; - vlan.valid = false; - - err = mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_get(chip, vid, &vlan); if (err) return err; if (!vlan.valid) return 0; - if (vlan.vid != vid) - return 0; - for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i)) continue; @@ -1653,15 +1713,12 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, if (err) return err; } else { - vlan.vid = vid - 1; - vlan.valid = false; - - err = mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_get(chip, vid, &vlan); if (err) return err; /* switchdev expects -EOPNOTSUPP to honor software VLANs */ - if (vlan.vid != vid || !vlan.valid) + if (!vlan.valid) return -EOPNOTSUPP; fid = vlan.fid; @@ -1911,8 +1968,10 @@ static int mv88e6xxx_set_rxnfc(struct dsa_switch *ds, int port, static int 
mv88e6xxx_port_add_broadcast(struct mv88e6xxx_chip *chip, int port, u16 vid) { - const char broadcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; u8 state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC; + u8 broadcast[ETH_ALEN]; + + eth_broadcast_addr(broadcast); return mv88e6xxx_port_db_load_purge(chip, port, broadcast, vid, state); } @@ -1923,6 +1982,19 @@ static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid) int err; for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + struct dsa_port *dp = dsa_to_port(chip->ds, port); + struct net_device *brport; + + if (dsa_is_unused_port(chip->ds, port)) + continue; + + brport = dsa_port_to_bridge_port(dp); + if (brport && !br_port_flag_is_set(brport, BR_BCAST_FLOOD)) + /* Skip bridged user ports where broadcast + * flooding is disabled. + */ + continue; + err = mv88e6xxx_port_add_broadcast(chip, port, vid); if (err) return err; @@ -1931,6 +2003,53 @@ static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid) return 0; } +struct mv88e6xxx_port_broadcast_sync_ctx { + int port; + bool flood; +}; + +static int +mv88e6xxx_port_broadcast_sync_vlan(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *vlan, + void *_ctx) +{ + struct mv88e6xxx_port_broadcast_sync_ctx *ctx = _ctx; + u8 broadcast[ETH_ALEN]; + u8 state; + + if (ctx->flood) + state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC; + else + state = MV88E6XXX_G1_ATU_DATA_STATE_MC_UNUSED; + + eth_broadcast_addr(broadcast); + + return mv88e6xxx_port_db_load_purge(chip, ctx->port, broadcast, + vlan->vid, state); +} + +static int mv88e6xxx_port_broadcast_sync(struct mv88e6xxx_chip *chip, int port, + bool flood) +{ + struct mv88e6xxx_port_broadcast_sync_ctx ctx = { + .port = port, + .flood = flood, + }; + struct mv88e6xxx_vtu_entry vid0 = { + .vid = 0, + }; + int err; + + /* Update the port's private database... */ + err = mv88e6xxx_port_broadcast_sync_vlan(chip, &vid0, &ctx); + if (err) + return err; + + /* ...and the database for all VLANs. */ + return mv88e6xxx_vtu_walk(chip, mv88e6xxx_port_broadcast_sync_vlan, + &ctx); +} + static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port, u16 vid, u8 member, bool warn) { @@ -1938,14 +2057,11 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port, struct mv88e6xxx_vtu_entry vlan; int i, err; - vlan.vid = vid - 1; - vlan.valid = false; - - err = mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_get(chip, vid, &vlan); if (err) return err; - if (vlan.vid != vid || !vlan.valid) { + if (!vlan.valid) { memset(&vlan, 0, sizeof(vlan)); err = mv88e6xxx_atu_new(chip, &vlan.fid); @@ -2041,17 +2157,14 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip, if (!vid) return -EOPNOTSUPP; - vlan.vid = vid - 1; - vlan.valid = false; - - err = mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_get(chip, vid, &vlan); if (err) return err; /* If the VLAN doesn't exist in hardware or the port isn't a member, * tell switchdev that this VLAN is likely handled in software. 
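 * Returning -EOPNOTSUPP is not treated as an error by the caller: it
 * just means this VLAN is not offloaded to the hardware.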
*/ - if (vlan.vid != vid || !vlan.valid || + if (!vlan.valid || vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) return -EOPNOTSUPP; @@ -2168,10 +2281,30 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, return err; } +struct mv88e6xxx_port_db_dump_vlan_ctx { + int port; + dsa_fdb_dump_cb_t *cb; + void *data; +}; + +static int mv88e6xxx_port_db_dump_vlan(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *entry, + void *_data) +{ + struct mv88e6xxx_port_db_dump_vlan_ctx *ctx = _data; + + return mv88e6xxx_port_db_dump_fid(chip, entry->fid, entry->vid, + ctx->port, ctx->cb, ctx->data); +} + static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, dsa_fdb_dump_cb_t *cb, void *data) { - struct mv88e6xxx_vtu_entry vlan; + struct mv88e6xxx_port_db_dump_vlan_ctx ctx = { + .port = port, + .cb = cb, + .data = data, + }; u16 fid; int err; @@ -2184,25 +2317,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, if (err) return err; - /* Dump VLANs' Filtering Information Databases */ - vlan.vid = mv88e6xxx_max_vid(chip); - vlan.valid = false; - - do { - err = mv88e6xxx_vtu_getnext(chip, &vlan); - if (err) - return err; - - if (!vlan.valid) - break; - - err = mv88e6xxx_port_db_dump_fid(chip, vlan.fid, vlan.vid, port, - cb, data); - if (err) - return err; - } while (vlan.vid < mv88e6xxx_max_vid(chip)); - - return err; + return mv88e6xxx_vtu_walk(chip, mv88e6xxx_port_db_dump_vlan, &ctx); } static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, @@ -2434,19 +2549,15 @@ static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port) static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port) { - struct dsa_switch *ds = chip->ds; - bool flood; int err; - /* Upstream ports flood frames with unknown unicast or multicast DA */ - flood = dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port); if (chip->info->ops->port_set_ucast_flood) { - err = chip->info->ops->port_set_ucast_flood(chip, port, flood); + err = chip->info->ops->port_set_ucast_flood(chip, port, true); if (err) return err; } if (chip->info->ops->port_set_mcast_flood) { - err = chip->info->ops->port_set_mcast_flood(chip, port, flood); + err = chip->info->ops->port_set_mcast_flood(chip, port, true); if (err) return err; } @@ -2460,11 +2571,11 @@ static irqreturn_t mv88e6xxx_serdes_irq_thread_fn(int irq, void *dev_id) struct mv88e6xxx_chip *chip = mvp->chip; irqreturn_t ret = IRQ_NONE; int port = mvp->port; - u8 lane; + int lane; mv88e6xxx_reg_lock(chip); lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane) + if (lane >= 0) ret = mv88e6xxx_serdes_irq_status(chip, port, lane); mv88e6xxx_reg_unlock(chip); @@ -2472,7 +2583,7 @@ static irqreturn_t mv88e6xxx_serdes_irq_thread_fn(int irq, void *dev_id) } static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { struct mv88e6xxx_port *dev_id = &chip->ports[port]; unsigned int irq; @@ -2501,7 +2612,7 @@ static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port, } static int mv88e6xxx_serdes_irq_free(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { struct mv88e6xxx_port *dev_id = &chip->ports[port]; unsigned int irq = dev_id->serdes_irq; @@ -2526,11 +2637,11 @@ static int mv88e6xxx_serdes_irq_free(struct mv88e6xxx_chip *chip, int port, static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) { - u8 lane; + int lane; int err; lane = mv88e6xxx_serdes_get_lane(chip, 
port); - if (!lane) + if (lane < 0) return 0; if (on) { @@ -2550,6 +2661,27 @@ static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port, return err; } +static int mv88e6xxx_set_egress_port(struct mv88e6xxx_chip *chip, + enum mv88e6xxx_egress_direction direction, + int port) +{ + int err; + + if (!chip->info->ops->set_egress_port) + return -EOPNOTSUPP; + + err = chip->info->ops->set_egress_port(chip, direction, port); + if (err) + return err; + + if (direction == MV88E6XXX_EGRESS_DIR_INGRESS) + chip->ingress_dest_port = port; + else + chip->egress_dest_port = port; + + return 0; +} + static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port) { struct dsa_switch *ds = chip->ds; @@ -2572,19 +2704,17 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port) return err; } - if (chip->info->ops->set_egress_port) { - err = chip->info->ops->set_egress_port(chip, + err = mv88e6xxx_set_egress_port(chip, MV88E6XXX_EGRESS_DIR_INGRESS, upstream_port); - if (err) - return err; + if (err && err != -EOPNOTSUPP) + return err; - err = chip->info->ops->set_egress_port(chip, + err = mv88e6xxx_set_egress_port(chip, MV88E6XXX_EGRESS_DIR_EGRESS, upstream_port); - if (err) - return err; - } + if (err && err != -EOPNOTSUPP) + return err; } return 0; @@ -2670,15 +2800,20 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) return err; } - /* Port Association Vector: when learning source addresses - * of packets, add the address to the address database using - * a port bitmap that has only the bit for this port set and - * the other bits clear. + /* Port Association Vector: disable automatic address learning + * on all user ports since they start out in standalone + * mode. When joining a bridge, learning will be configured to + * match the bridge port settings. Enable learning on all + * DSA/CPU ports. NOTE: FROM_CPU frames always bypass the + * learning process. + * + * Disable HoldAt1, IntOnAgeOut, LockedPort, IgnoreWrongData, + * and RefreshLocked. I.e. setup standard automatic learning. 
*/ - reg = 1 << port; - /* Disable learning for CPU port */ - if (dsa_is_cpu_port(ds, port)) + if (dsa_is_user_port(ds, port)) reg = 0; + else + reg = 1 << port; err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, reg); @@ -4570,6 +4705,70 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .phylink_validate = mv88e6390x_phylink_validate, }; +static const struct mv88e6xxx_ops mv88e6393x_ops = { + /* MV88E6XXX_FAMILY_6393 */ + .setup_errata = mv88e6393x_serdes_setup_errata, + .irl_init_all = mv88e6390_g2_irl_init_all, + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, + .set_switch_mac = mv88e6xxx_g2_set_switch_mac, + .phy_read = mv88e6xxx_g2_smi_phy_read, + .phy_write = mv88e6xxx_g2_smi_phy_write, + .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, + .port_set_speed_duplex = mv88e6393x_port_set_speed_duplex, + .port_max_speed_mode = mv88e6393x_port_max_speed_mode, + .port_tag_remap = mv88e6390_port_tag_remap, + .port_set_policy = mv88e6393x_port_set_policy, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, + .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, + .port_set_ether_type = mv88e6393x_port_set_ether_type, + .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, + .port_pause_limit = mv88e6390_port_pause_limit, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6393x_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, + .port_set_upstream_port = mv88e6393x_port_set_upstream_port, + .stats_snapshot = mv88e6390_g1_stats_snapshot, + .stats_set_histogram = mv88e6390_g1_stats_set_histogram, + .stats_get_sset_count = mv88e6320_stats_get_sset_count, + .stats_get_strings = mv88e6320_stats_get_strings, + .stats_get_stats = mv88e6390_stats_get_stats, + /* .set_cpu_port is missing because this family does not support a global + * CPU port, only per port CPU port which is set via + * .port_set_upstream_port method. 
+ */ + .set_egress_port = mv88e6393x_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, + .mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, + .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, + .vtu_getnext = mv88e6390_g1_vtu_getnext, + .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, + .serdes_power = mv88e6393x_serdes_power, + .serdes_get_lane = mv88e6393x_serdes_get_lane, + .serdes_pcs_get_state = mv88e6393x_serdes_pcs_get_state, + .serdes_pcs_config = mv88e6390_serdes_pcs_config, + .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart, + .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up, + .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, + .serdes_irq_enable = mv88e6393x_serdes_irq_enable, + .serdes_irq_status = mv88e6393x_serdes_irq_status, + /* TODO: serdes stats */ + .gpio_ops = &mv88e6352_gpio_ops, + .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, + .phylink_validate = mv88e6393x_phylink_validate, +}; + static const struct mv88e6xxx_info mv88e6xxx_table[] = { [MV88E6085] = { .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6085, @@ -4941,6 +5140,52 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .ops = &mv88e6191_ops, }, + [MV88E6191X] = { + .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6191X, + .family = MV88E6XXX_FAMILY_6393, + .name = "Marvell 88E6191X", + .num_databases = 4096, + .num_ports = 11, /* 10 + Z80 */ + .num_internal_phys = 9, + .max_vid = 8191, + .port_base_addr = 0x0, + .phy_base_addr = 0x0, + .global1_addr = 0x1b, + .global2_addr = 0x1c, + .age_time_coeff = 3750, + .g1_irqs = 10, + .g2_irqs = 14, + .atu_move_port_mask = 0x1f, + .pvt = true, + .multi_chip = true, + .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, + .ops = &mv88e6393x_ops, + }, + + [MV88E6193X] = { + .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6193X, + .family = MV88E6XXX_FAMILY_6393, + .name = "Marvell 88E6193X", + .num_databases = 4096, + .num_ports = 11, /* 10 + Z80 */ + .num_internal_phys = 9, + .max_vid = 8191, + .port_base_addr = 0x0, + .phy_base_addr = 0x0, + .global1_addr = 0x1b, + .global2_addr = 0x1c, + .age_time_coeff = 3750, + .g1_irqs = 10, + .g2_irqs = 14, + .atu_move_port_mask = 0x1f, + .pvt = true, + .multi_chip = true, + .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, + .ops = &mv88e6393x_ops, + }, + [MV88E6220] = { .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6220, .family = MV88E6XXX_FAMILY_6250, @@ -5231,6 +5476,29 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .ptp_support = true, .ops = &mv88e6390x_ops, }, + + [MV88E6393X] = { + .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6393X, + .family = MV88E6XXX_FAMILY_6393, + .name = "Marvell 88E6393X", + .num_databases = 4096, + .num_ports = 11, /* 10 + Z80 */ + .num_internal_phys = 9, + .max_vid = 8191, + .port_base_addr = 0x0, + .phy_base_addr = 0x0, + .global1_addr = 0x1b, + .global2_addr = 0x1c, + .age_time_coeff = 3750, + .g1_irqs = 10, + .g2_irqs = 14, + .atu_move_port_mask = 0x1f, + .pvt = true, + .multi_chip = true, + .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, + .ops = &mv88e6393x_ops, + }, }; static const struct mv88e6xxx_info *mv88e6xxx_lookup_info(unsigned int prod_num) @@ -5338,9 +5606,6 @@ static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port, int i; int err; - if (!chip->info->ops->set_egress_port) - return -EOPNOTSUPP; - mutex_lock(&chip->reg_lock); if ((ingress ? 
chip->ingress_dest_port : chip->egress_dest_port) != mirror->to_local_port) { @@ -5355,9 +5620,8 @@ static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port, goto out; } - err = chip->info->ops->set_egress_port(chip, - direction, - mirror->to_local_port); + err = mv88e6xxx_set_egress_port(chip, direction, + mirror->to_local_port); if (err) goto out; } @@ -5390,10 +5654,8 @@ static void mv88e6xxx_port_mirror_del(struct dsa_switch *ds, int port, /* Reset egress port when no other mirror is active */ if (!other_mirrors) { - if (chip->info->ops->set_egress_port(chip, - direction, - dsa_upstream_port(ds, - port))) + if (mv88e6xxx_set_egress_port(chip, direction, + dsa_upstream_port(ds, port))) dev_err(ds->dev, "failed to set egress port\n"); } @@ -5407,7 +5669,8 @@ static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port, struct mv88e6xxx_chip *chip = ds->priv; const struct mv88e6xxx_ops *ops; - if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD)) + if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | + BR_BCAST_FLOOD)) return -EINVAL; ops = chip->info->ops; @@ -5426,10 +5689,23 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port, struct netlink_ext_ack *extack) { struct mv88e6xxx_chip *chip = ds->priv; + bool do_fast_age = false; int err = -EOPNOTSUPP; mv88e6xxx_reg_lock(chip); + if (flags.mask & BR_LEARNING) { + bool learning = !!(flags.val & BR_LEARNING); + u16 pav = learning ? (1 << port) : 0; + + err = mv88e6xxx_port_set_assoc_vector(chip, port, pav); + if (err) + goto out; + + if (!learning) + do_fast_age = true; + } + if (flags.mask & BR_FLOOD) { bool unicast = !!(flags.val & BR_FLOOD); @@ -5448,9 +5724,20 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port, goto out; } + if (flags.mask & BR_BCAST_FLOOD) { + bool broadcast = !!(flags.val & BR_BCAST_FLOOD); + + err = mv88e6xxx_port_broadcast_sync(chip, port, broadcast); + if (err) + goto out; + } + out: mv88e6xxx_reg_unlock(chip); + if (do_fast_age) + mv88e6xxx_port_fast_age(ds, port); + return err; } diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index a57c8886f3ac..bce6e0dc8535 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -63,6 +63,8 @@ enum mv88e6xxx_model { MV88E6190, MV88E6190X, MV88E6191, + MV88E6191X, + MV88E6193X, MV88E6220, MV88E6240, MV88E6250, @@ -75,6 +77,7 @@ enum mv88e6xxx_model { MV88E6352, MV88E6390, MV88E6390X, + MV88E6393X, }; enum mv88e6xxx_family { @@ -90,6 +93,7 @@ enum mv88e6xxx_family { MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */ MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */ MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */ + MV88E6XXX_FAMILY_6393, /* 6191X 6193X 6393X */ }; struct mv88e6xxx_ops; @@ -513,30 +517,30 @@ struct mv88e6xxx_ops { int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip); /* Power on/off a SERDES interface */ - int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, u8 lane, + int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, int lane, bool up); /* SERDES lane mapping */ - u8 (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port); + int (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port); int (*serdes_pcs_get_state)(struct mv88e6xxx_chip *chip, int port, - u8 lane, struct phylink_link_state *state); + int lane, struct phylink_link_state *state); int (*serdes_pcs_config)(struct mv88e6xxx_chip *chip, int port, - u8 lane, unsigned int mode, + int lane, unsigned int mode, phy_interface_t interface, const 
unsigned long *advertise); int (*serdes_pcs_an_restart)(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); int (*serdes_pcs_link_up)(struct mv88e6xxx_chip *chip, int port, - u8 lane, int speed, int duplex); + int lane, int speed, int duplex); /* SERDES interrupt handling */ unsigned int (*serdes_irq_mapping)(struct mv88e6xxx_chip *chip, int port); - int (*serdes_irq_enable)(struct mv88e6xxx_chip *chip, int port, u8 lane, + int (*serdes_irq_enable)(struct mv88e6xxx_chip *chip, int port, int lane, bool enable); irqreturn_t (*serdes_irq_status)(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); /* Statistics from the SERDES interface */ int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port); diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 33d443a37efc..815b0f681d69 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -315,7 +315,6 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, enum mv88e6xxx_egress_direction direction, int port) { - int *dest_port_chip; u16 reg; int err; @@ -325,13 +324,11 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, switch (direction) { case MV88E6XXX_EGRESS_DIR_INGRESS: - dest_port_chip = &chip->ingress_dest_port; reg &= ~MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK; reg |= port << __bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK); break; case MV88E6XXX_EGRESS_DIR_EGRESS: - dest_port_chip = &chip->egress_dest_port; reg &= ~MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK; reg |= port << __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK); @@ -340,11 +337,7 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, return -EINVAL; } - err = mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg); - if (!err) - *dest_port_chip = port; - - return err; + return mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg); } /* Older generations also call this the ARP destination. 
It has been @@ -380,28 +373,20 @@ int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, enum mv88e6xxx_egress_direction direction, int port) { - int *dest_port_chip; u16 ptr; - int err; switch (direction) { case MV88E6XXX_EGRESS_DIR_INGRESS: - dest_port_chip = &chip->ingress_dest_port; ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST; break; case MV88E6XXX_EGRESS_DIR_EGRESS: - dest_port_chip = &chip->egress_dest_port; ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST; break; default: return -EINVAL; } - err = mv88e6390_g1_monitor_write(chip, ptr, port); - if (!err) - *dest_port_chip = port; - - return err; + return mv88e6390_g1_monitor_write(chip, ptr, port); } int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port) diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 7c396964d0b2..4f3dbb015f77 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -22,6 +22,7 @@ #define MV88E6185_G1_STS_PPU_STATE_DISABLED 0x8000 #define MV88E6185_G1_STS_PPU_STATE_POLLING 0xc000 #define MV88E6XXX_G1_STS_INIT_READY 0x0800 +#define MV88E6393X_G1_STS_IRQ_DEVICE_2 9 #define MV88E6XXX_G1_STS_IRQ_AVB 8 #define MV88E6XXX_G1_STS_IRQ_DEVICE 7 #define MV88E6XXX_G1_STS_IRQ_STATS 6 @@ -59,6 +60,7 @@ #define MV88E6185_G1_CTL1_SCHED_PRIO 0x0800 #define MV88E6185_G1_CTL1_MAX_FRAME_1632 0x0400 #define MV88E6185_G1_CTL1_RELOAD_EEPROM 0x0200 +#define MV88E6393X_G1_CTL1_DEVICE2_EN 0x0200 #define MV88E6XXX_G1_CTL1_DEVICE_EN 0x0080 #define MV88E6XXX_G1_CTL1_STATS_DONE_EN 0x0040 #define MV88E6XXX_G1_CTL1_VTU_PROBLEM_EN 0x0020 diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 4127f82275ad..c78769cdbb59 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -38,9 +38,15 @@ /* Offset 0x02: MGMT Enable Register 2x */ #define MV88E6XXX_G2_MGMT_EN_2X 0x02 +/* Offset 0x02: MAC LINK change IRQ Register for MV88E6393X */ +#define MV88E6393X_G2_MACLINK_INT_SRC 0x02 + /* Offset 0x03: MGMT Enable Register 0x */ #define MV88E6XXX_G2_MGMT_EN_0X 0x03 +/* Offset 0x03: MAC LINK change IRQ Mask Register for MV88E6393X */ +#define MV88E6393X_G2_MACLINK_INT_MASK 0x03 + /* Offset 0x04: Flow Control Delay Register */ #define MV88E6XXX_G2_FLOW_CTL 0x04 @@ -52,6 +58,8 @@ #define MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI 0x0080 #define MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU 0x0008 +#define MV88E6393X_G2_EGRESS_MONITOR_DEST 0x05 + /* Offset 0x06: Device Mapping Table Register */ #define MV88E6XXX_G2_DEVICE_MAPPING 0x06 #define MV88E6XXX_G2_DEVICE_MAPPING_UPDATE 0x8000 diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c index 7c2c67405322..eda710062933 100644 --- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c +++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c @@ -42,7 +42,7 @@ static int mv88e6xxx_g2_scratch_write(struct mv88e6xxx_chip *chip, int reg, } /** - * mv88e6xxx_g2_scratch_gpio_get_bit - get a bit + * mv88e6xxx_g2_scratch_get_bit - get a bit * @chip: chip private data * @base_reg: base of scratch bits * @offset: index of bit within the register @@ -67,7 +67,7 @@ static int mv88e6xxx_g2_scratch_get_bit(struct mv88e6xxx_chip *chip, } /** - * mv88e6xxx_g2_scratch_gpio_set_bit - set (or clear) a bit + * mv88e6xxx_g2_scratch_set_bit - set (or clear) a bit * @chip: chip private data * @base_reg: base of scratch bits * @offset: index of bit within the register @@ -240,7 +240,7 @@ const struct mv88e6xxx_gpio_ops 
mv88e6352_gpio_ops = { }; /** - * mv88e6xxx_g2_gpio_set_smi - set gpio muxing for external smi + * mv88e6xxx_g2_scratch_gpio_set_smi - set gpio muxing for external smi * @chip: chip private data * @external: set mux for external smi, or free for gpio usage * diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 4561f289ab76..f77e2ee64a60 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -14,6 +14,7 @@ #include <linux/phylink.h> #include "chip.h" +#include "global2.h" #include "port.h" #include "serdes.h" @@ -25,6 +26,14 @@ int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, return mv88e6xxx_read(chip, addr, reg, val); } +int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg, + int bit, int val) +{ + int addr = chip->info->port_base_addr + port; + + return mv88e6xxx_wait_bit(chip, addr, reg, bit, val); +} + int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, u16 val) { @@ -426,11 +435,111 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port) return PHY_INTERFACE_MODE_NA; } +/* Support 10, 100, 200, 1000, 2500, 5000, 10000 Mbps (e.g. 88E6393X) + * Function mv88e6xxx_port_set_speed_duplex() can't be used as the register + * values for speeds 2500 & 5000 conflict. + */ +int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, + int speed, int duplex) +{ + u16 reg, ctrl; + int err; + + if (speed == SPEED_MAX) + speed = (port > 0 && port < 9) ? 1000 : 10000; + + if (speed == 200 && port != 0) + return -EOPNOTSUPP; + + if (speed >= 2500 && port > 0 && port < 9) + return -EOPNOTSUPP; + + switch (speed) { + case 10: + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_10; + break; + case 100: + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100; + break; + case 200: + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100 | + MV88E6390_PORT_MAC_CTL_ALTSPEED; + break; + case 1000: + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000; + break; + case 2500: + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000 | + MV88E6390_PORT_MAC_CTL_ALTSPEED; + break; + case 5000: + ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 | + MV88E6390_PORT_MAC_CTL_ALTSPEED; + break; + case 10000: + case SPEED_UNFORCED: + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_UNFORCED; + break; + default: + return -EOPNOTSUPP; + } + + switch (duplex) { + case DUPLEX_HALF: + ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX; + break; + case DUPLEX_FULL: + ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX | + MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL; + break; + case DUPLEX_UNFORCED: + /* normal duplex detection */ + break; + default: + return -EOPNOTSUPP; + } + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg); + if (err) + return err; + + reg &= ~(MV88E6XXX_PORT_MAC_CTL_SPEED_MASK | + MV88E6390_PORT_MAC_CTL_ALTSPEED | + MV88E6390_PORT_MAC_CTL_FORCE_SPEED); + + if (speed != SPEED_UNFORCED) + reg |= MV88E6390_PORT_MAC_CTL_FORCE_SPEED; + + reg |= ctrl; + + err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg); + if (err) + return err; + + if (speed) + dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed); + else + dev_dbg(chip->dev, "p%d: Speed unforced\n", port); + dev_dbg(chip->dev, "p%d: %s %s duplex\n", port, + reg & MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX ? "Force" : "Unforce", + reg & MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL ?
"full" : "half"); + + return 0; +} + +phy_interface_t mv88e6393x_port_max_speed_mode(int port) +{ + if (port == 0 || port == 9 || port == 10) + return PHY_INTERFACE_MODE_10GBASER; + + return PHY_INTERFACE_MODE_NA; +} + static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode, bool force) { - u8 lane; u16 cmode; + int lane; u16 reg; int err; @@ -450,6 +559,9 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, case PHY_INTERFACE_MODE_2500BASEX: cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX; break; + case PHY_INTERFACE_MODE_5GBASER: + cmode = MV88E6393X_PORT_STS_CMODE_5GBASER; + break; case PHY_INTERFACE_MODE_XGMII: case PHY_INTERFACE_MODE_XAUI: cmode = MV88E6XXX_PORT_STS_CMODE_XAUI; @@ -457,6 +569,9 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, case PHY_INTERFACE_MODE_RXAUI: cmode = MV88E6XXX_PORT_STS_CMODE_RXAUI; break; + case PHY_INTERFACE_MODE_10GBASER: + cmode = MV88E6393X_PORT_STS_CMODE_10GBASER; + break; default: cmode = 0; } @@ -466,7 +581,7 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, return 0; lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane) { + if (lane >= 0) { if (chip->ports[port].serdes_irq) { err = mv88e6xxx_serdes_irq_disable(chip, port, lane); if (err) @@ -495,8 +610,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, chip->ports[port].cmode = cmode; lane = mv88e6xxx_serdes_get_lane(chip, port); - if (!lane) - return -ENODEV; + if (lane < 0) + return lane; err = mv88e6xxx_serdes_power_up(chip, port, lane); if (err) @@ -541,6 +656,29 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, return mv88e6xxx_port_set_cmode(chip, port, mode, false); } +int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode) +{ + int err; + u16 reg; + + if (port != 0 && port != 9 && port != 10) + return -EOPNOTSUPP; + + /* mv88e6393x errata 4.5: EEE should be disabled on SERDES ports */ + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg); + if (err) + return err; + + reg &= ~MV88E6XXX_PORT_MAC_CTL_EEE; + reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_EEE; + err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg); + if (err) + return err; + + return mv88e6xxx_port_set_cmode(chip, port, mode, false); +} + static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip, int port) { @@ -1171,6 +1309,27 @@ int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port) 0x0001); } +/* Offset 0x0B: Port Association Vector */ + +int mv88e6xxx_port_set_assoc_vector(struct mv88e6xxx_chip *chip, int port, + u16 pav) +{ + u16 reg, mask; + int err; + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, + &reg); + if (err) + return err; + + mask = mv88e6xxx_port_mask(chip); + reg &= ~mask; + reg |= pav & mask; + + return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, + reg); +} + /* Offset 0x0C: Port ATU Control */ int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port) @@ -1185,6 +1344,156 @@ int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port) return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_PRI_OVERRIDE, 0); } +/* Offset 0x0E: Policy & MGMT Control Register for FAMILY 6191X 6193X 6393X */ + +static int mv88e6393x_port_policy_read(struct mv88e6xxx_chip *chip, int port, + u16 pointer, u8 *data) +{ + u16 reg; + int err; + + err = mv88e6xxx_port_write(chip, port, 
MV88E6393X_PORT_POLICY_MGMT_CTL, + pointer); + if (err) + return err; + + err = mv88e6xxx_port_read(chip, port, MV88E6393X_PORT_POLICY_MGMT_CTL, + &reg); + if (err) + return err; + + *data = reg; + + return 0; +} + +static int mv88e6393x_port_policy_write(struct mv88e6xxx_chip *chip, int port, + u16 pointer, u8 data) +{ + u16 reg; + + reg = MV88E6393X_PORT_POLICY_MGMT_CTL_UPDATE | pointer | data; + + return mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_POLICY_MGMT_CTL, + reg); +} + +static int mv88e6393x_port_policy_write_all(struct mv88e6xxx_chip *chip, + u16 pointer, u8 data) +{ + int err, port; + + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + if (dsa_is_unused_port(chip->ds, port)) + continue; + + err = mv88e6393x_port_policy_write(chip, port, pointer, data); + if (err) + return err; + } + + return 0; +} + +int mv88e6393x_set_egress_port(struct mv88e6xxx_chip *chip, + enum mv88e6xxx_egress_direction direction, + int port) +{ + u16 ptr; + int err; + + switch (direction) { + case MV88E6XXX_EGRESS_DIR_INGRESS: + ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_INGRESS_DEST; + err = mv88e6393x_port_policy_write_all(chip, ptr, port); + if (err) + return err; + break; + case MV88E6XXX_EGRESS_DIR_EGRESS: + ptr = MV88E6393X_G2_EGRESS_MONITOR_DEST; + err = mv88e6xxx_g2_write(chip, ptr, port); + if (err) + return err; + break; + } + + return 0; +} + +int mv88e6393x_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, + int upstream_port) +{ + u16 ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_CPU_DEST; + u8 data = MV88E6393X_PORT_POLICY_MGMT_CTL_CPU_DEST_MGMTPRI | + upstream_port; + + return mv88e6393x_port_policy_write(chip, port, ptr, data); +} + +int mv88e6393x_port_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +{ + u16 ptr; + int err; + + /* Consider the frames with reserved multicast destination + * addresses matching 01:80:c2:00:00:00 and + * 01:80:c2:00:00:02 as MGMT. 
+ */ + ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XLO; + err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff); + if (err) + return err; + + ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XHI; + err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff); + if (err) + return err; + + ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XLO; + err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff); + if (err) + return err; + + ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XHI; + err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff); + if (err) + return err; + + return 0; +} + +/* Offset 0x10 & 0x11: EPC */ + +static int mv88e6393x_port_epc_wait_ready(struct mv88e6xxx_chip *chip, int port) +{ + int bit = __bf_shf(MV88E6393X_PORT_EPC_CMD_BUSY); + + return mv88e6xxx_port_wait_bit(chip, port, MV88E6393X_PORT_EPC_CMD, bit, 0); +} + +/* Port Ether type for 6393X family */ + +int mv88e6393x_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, + u16 etype) +{ + u16 val; + int err; + + err = mv88e6393x_port_epc_wait_ready(chip, port); + if (err) + return err; + + err = mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_EPC_DATA, etype); + if (err) + return err; + + val = MV88E6393X_PORT_EPC_CMD_BUSY | + MV88E6393X_PORT_EPC_CMD_WRITE | + MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE; + + return mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_EPC_CMD, val); +} + /* Offset 0x0f: Port Ether type */ int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, @@ -1259,46 +1568,43 @@ int mv88e6390_port_tag_remap(struct mv88e6xxx_chip *chip, int port) /* Offset 0x0E: Policy Control Register */ -int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, - enum mv88e6xxx_policy_mapping mapping, - enum mv88e6xxx_policy_action action) +static int +mv88e6xxx_port_policy_mapping_get_pos(enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action, + u16 *mask, u16 *val, int *shift) { - u16 reg, mask, val; - int shift; - int err; - switch (mapping) { case MV88E6XXX_POLICY_MAPPING_DA: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_DA_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_DA_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_DA_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_DA_MASK; break; case MV88E6XXX_POLICY_MAPPING_SA: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_SA_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_SA_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_SA_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_SA_MASK; break; case MV88E6XXX_POLICY_MAPPING_VTU: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VTU_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_VTU_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VTU_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_VTU_MASK; break; case MV88E6XXX_POLICY_MAPPING_ETYPE: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK; break; case MV88E6XXX_POLICY_MAPPING_PPPOE: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK; break; case MV88E6XXX_POLICY_MAPPING_VBAS: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK; break; case MV88E6XXX_POLICY_MAPPING_OPT82: - 
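The indirect accesses above all go through the single 16-bit Policy & MGMT Control register: bit 15 flags the write as an update, bits 13:8 select one of the 8-bit policy/MGMT registers, and bits 7:0 carry the data. A minimal sketch of that packing, assuming only the MV88E6393X_PORT_POLICY_MGMT_CTL_* constants this series adds to port.h; the helper name is hypothetical, and mv88e6393x_port_policy_write() above trusts its callers to pass in-range values and so skips the masking shown here:

static u16 mv88e6393x_policy_mgmt_ctl_val(u16 pointer, u8 data)
{
	/* pointer should already be one of the *_PTR_* values (bits 13:8) */
	return MV88E6393X_PORT_POLICY_MGMT_CTL_UPDATE |
	       (pointer & MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_MASK) |
	       (data & MV88E6393X_PORT_POLICY_MGMT_CTL_DATA_MASK);
}
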
shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK; break; case MV88E6XXX_POLICY_MAPPING_UDP: - shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_UDP_MASK); - mask = MV88E6XXX_PORT_POLICY_CTL_UDP_MASK; + *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_UDP_MASK); + *mask = MV88E6XXX_PORT_POLICY_CTL_UDP_MASK; break; default: return -EOPNOTSUPP; @@ -1306,21 +1612,37 @@ int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, switch (action) { case MV88E6XXX_POLICY_ACTION_NORMAL: - val = MV88E6XXX_PORT_POLICY_CTL_NORMAL; + *val = MV88E6XXX_PORT_POLICY_CTL_NORMAL; break; case MV88E6XXX_POLICY_ACTION_MIRROR: - val = MV88E6XXX_PORT_POLICY_CTL_MIRROR; + *val = MV88E6XXX_PORT_POLICY_CTL_MIRROR; break; case MV88E6XXX_POLICY_ACTION_TRAP: - val = MV88E6XXX_PORT_POLICY_CTL_TRAP; + *val = MV88E6XXX_PORT_POLICY_CTL_TRAP; break; case MV88E6XXX_POLICY_ACTION_DISCARD: - val = MV88E6XXX_PORT_POLICY_CTL_DISCARD; + *val = MV88E6XXX_PORT_POLICY_CTL_DISCARD; break; default: return -EOPNOTSUPP; } + return 0; +} + +int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action) +{ + u16 reg, mask, val; + int shift; + int err; + + err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask, + &val, &shift); + if (err) + return err; + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_POLICY_CTL, &reg); if (err) return err; @@ -1330,3 +1652,37 @@ int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_POLICY_CTL, reg); } + +int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action) +{ + u16 mask, val; + int shift; + int err; + u16 ptr; + u8 reg; + + err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask, + &val, &shift); + if (err) + return err; + + /* On the 6393x, the 16-bit Port Policy CTL register of the older + * chips is replaced by Port Policy MGMT CTL, which can access more + * data, but indirectly. The original 16-bit value is divided into + * two 8-bit registers. 
+ */ + ptr = shift / 8; + shift %= 8; + mask >>= ptr * 8; + + err = mv88e6393x_port_policy_read(chip, port, ptr, &reg); + if (err) + return err; + + reg &= ~mask; + reg |= (val << shift) & mask; + + return mv88e6393x_port_policy_write(chip, port, ptr, reg); +} diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index e6d0eaa6aa1d..b10e5aebacf6 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -49,6 +49,9 @@ #define MV88E6XXX_PORT_STS_CMODE_2500BASEX 0x000b #define MV88E6XXX_PORT_STS_CMODE_XAUI 0x000c #define MV88E6XXX_PORT_STS_CMODE_RXAUI 0x000d +#define MV88E6393X_PORT_STS_CMODE_5GBASER 0x000c +#define MV88E6393X_PORT_STS_CMODE_10GBASER 0x000d +#define MV88E6393X_PORT_STS_CMODE_USXGMII 0x000e #define MV88E6185_PORT_STS_CDUPLEX 0x0008 #define MV88E6185_PORT_STS_CMODE_MASK 0x0007 #define MV88E6185_PORT_STS_CMODE_GMII_FD 0x0000 @@ -68,6 +71,8 @@ #define MV88E6390_PORT_MAC_CTL_FORCE_SPEED 0x2000 #define MV88E6390_PORT_MAC_CTL_ALTSPEED 0x1000 #define MV88E6352_PORT_MAC_CTL_200BASE 0x1000 +#define MV88E6XXX_PORT_MAC_CTL_EEE 0x0200 +#define MV88E6XXX_PORT_MAC_CTL_FORCE_EEE 0x0100 #define MV88E6185_PORT_MAC_CTL_AN_EN 0x0400 #define MV88E6185_PORT_MAC_CTL_AN_RESTART 0x0200 #define MV88E6185_PORT_MAC_CTL_AN_DONE 0x0100 @@ -117,6 +122,8 @@ #define MV88E6XXX_PORT_SWITCH_ID_PROD_6176 0x1760 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6190 0x1900 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6191 0x1910 +#define MV88E6XXX_PORT_SWITCH_ID_PROD_6191X 0x1920 +#define MV88E6XXX_PORT_SWITCH_ID_PROD_6193X 0x1930 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6185 0x1a70 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6220 0x2200 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6240 0x2400 @@ -129,6 +136,7 @@ #define MV88E6XXX_PORT_SWITCH_ID_PROD_6350 0x3710 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6351 0x3750 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6390 0x3900 +#define MV88E6XXX_PORT_SWITCH_ID_PROD_6393X 0x3930 #define MV88E6XXX_PORT_SWITCH_ID_REV_MASK 0x000f /* Offset 0x04: Port Control Register */ @@ -236,6 +244,19 @@ #define MV88E6XXX_PORT_POLICY_CTL_TRAP 0x0002 #define MV88E6XXX_PORT_POLICY_CTL_DISCARD 0x0003 +/* Offset 0x0E: Policy & MGMT Control Register (FAMILY_6393X) */ +#define MV88E6393X_PORT_POLICY_MGMT_CTL 0x0e +#define MV88E6393X_PORT_POLICY_MGMT_CTL_UPDATE 0x8000 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_MASK 0x3f00 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_DATA_MASK 0x00ff +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XLO 0x2000 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XHI 0x2100 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XLO 0x2400 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XHI 0x2500 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_INGRESS_DEST 0x3000 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_CPU_DEST 0x3800 +#define MV88E6393X_PORT_POLICY_MGMT_CTL_CPU_DEST_MGMTPRI 0x00e0 + /* Offset 0x0F: Port Special Ether Type */ #define MV88E6XXX_PORT_ETH_TYPE 0x0f #define MV88E6XXX_PORT_ETH_TYPE_DEFAULT 0x9100 @@ -243,6 +264,15 @@ /* Offset 0x10: InDiscards Low Counter */ #define MV88E6XXX_PORT_IN_DISCARD_LO 0x10 +/* Offset 0x10: Extended Port Control Command */ +#define MV88E6393X_PORT_EPC_CMD 0x10 +#define MV88E6393X_PORT_EPC_CMD_BUSY 0x8000 +#define MV88E6393X_PORT_EPC_CMD_WRITE 0x0300 +#define MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE 0x02 + +/* Offset 0x11: Extended Port Control Data */ +#define MV88E6393X_PORT_EPC_DATA 0x11 + /* Offset 0x11: InDiscards High Counter */ #define MV88E6XXX_PORT_IN_DISCARD_HI 0x11 @@ -288,6 +318,8 @@ int 
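To make the remapping arithmetic in mv88e6393x_port_set_policy() above concrete, a worked example may help: a field at bit 12 of the legacy 16-bit Policy CTL register (mask 0x3000) maps to pointer 1 (the high byte), an in-byte shift of 4, and an 8-bit mask of 0x30. The same computation as a standalone sketch; the helper name is hypothetical:

static void policy_pos_to_indirect(u16 mask, int shift,
				   u16 *pointer, u16 *mask8, int *shift8)
{
	*pointer = shift / 8;            /* 0 = low byte, 1 = high byte */
	*shift8 = shift % 8;             /* bit offset within that byte */
	*mask8 = mask >> (*pointer * 8); /* e.g. 0x3000 >> 8 == 0x30 */
}
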
mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, u16 *val); int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, u16 val); +int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg, + int bit, int val); int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port, int pause); @@ -315,10 +347,13 @@ int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, int speed, int duplex); int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, int speed, int duplex); +int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, + int speed, int duplex); phy_interface_t mv88e6341_port_max_speed_mode(int port); phy_interface_t mv88e6390_port_max_speed_mode(int port); phy_interface_t mv88e6390x_port_max_speed_mode(int port); +phy_interface_t mv88e6393x_port_max_speed_mode(int port); int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state); @@ -351,8 +386,19 @@ int mv88e6352_port_set_mcast_flood(struct mv88e6xxx_chip *chip, int port, int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, enum mv88e6xxx_policy_mapping mapping, enum mv88e6xxx_policy_action action); +int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action); int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, u16 etype); +int mv88e6393x_set_egress_port(struct mv88e6xxx_chip *chip, + enum mv88e6xxx_egress_direction direction, + int port); +int mv88e6393x_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, + int upstream_port); +int mv88e6393x_port_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); +int mv88e6393x_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, + u16 etype); int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port, bool message_port); int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port, @@ -361,6 +407,8 @@ int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port, size_t size); int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port); int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port); +int mv88e6xxx_port_set_assoc_vector(struct mv88e6xxx_chip *chip, int port, + u16 pav); int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, u8 out); int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, @@ -371,6 +419,8 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); +int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode); int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port); diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 3195936dc5be..470856bcd2f3 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -95,7 +95,7 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, return 0; } -int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool up) { u16 val, new_val; @@ -117,7 +117,7 @@ int 
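The u8-to-int conversion running through these serdes prototypes changes the "no lane" sentinel from the magic value 0 to a negative errno, which frees lane address 0 to be valid; on the 6393x, port 0 really does use lane 0x00 (MV88E6393X_PORT0_LANE). Callers move to the usual kernel error pattern; a minimal sketch built on the inline wrappers from serdes.h, with the surrounding function being illustrative only:

static int serdes_power_up_if_present(struct mv88e6xxx_chip *chip, int port)
{
	int lane = mv88e6xxx_serdes_get_lane(chip, port);

	if (lane < 0)	/* -ENODEV or -EOPNOTSUPP: nothing to power up */
		return lane;

	return mv88e6xxx_serdes_power_up(chip, port, lane); /* lane 0 is valid */
}
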
mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, } int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, - u8 lane, unsigned int mode, + int lane, unsigned int mode, phy_interface_t interface, const unsigned long *advertise) { @@ -166,7 +166,7 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, } int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - u8 lane, struct phylink_link_state *state) + int lane, struct phylink_link_state *state) { u16 lpa, status; int err; @@ -187,7 +187,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, } int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { u16 bmcr; int err; @@ -200,7 +200,7 @@ int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, } int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, - u8 lane, int speed, int duplex) + int lane, int speed, int duplex) { u16 val, bmcr; int err; @@ -230,10 +230,10 @@ int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, return mv88e6352_serdes_write(chip, MII_BMCR, bmcr); } -u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode = chip->ports[port].cmode; - u8 lane = 0; + int lane = -ENODEV; if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASEX) || (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX) || @@ -245,7 +245,7 @@ u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port) { - if (mv88e6xxx_serdes_get_lane(chip, port)) + if (mv88e6xxx_serdes_get_lane(chip, port) >= 0) return true; return false; @@ -354,7 +354,7 @@ static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port) } irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { irqreturn_t ret = IRQ_NONE; u16 status; @@ -372,7 +372,7 @@ irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, return ret; } -int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, bool enable) { u16 val = 0; @@ -413,10 +413,10 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p) } } -u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode = chip->ports[port].cmode; - u8 lane = 0; + int lane = -ENODEV; switch (port) { case 5: @@ -430,7 +430,7 @@ u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) return lane; } -int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool up) { /* The serdes power can't be controlled on this switch chip but we need @@ -440,7 +440,7 @@ int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, return 0; } -u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { /* There are no configurable serdes lanes on this switch chip but we * need to return non-zero so that callers of @@ -456,7 +456,7 @@ u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) } int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - u8 lane, 
struct phylink_link_state *state) + int lane, struct phylink_link_state *state) { int err; u16 status; @@ -492,7 +492,7 @@ int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, return 0; } -int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, bool enable) { u8 cmode = chip->ports[port].cmode; @@ -525,7 +525,7 @@ static void mv88e6097_serdes_irq_link(struct mv88e6xxx_chip *chip, int port) } irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { u8 cmode = chip->ports[port].cmode; @@ -539,10 +539,10 @@ irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, return IRQ_NONE; } -u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode = chip->ports[port].cmode; - u8 lane = 0; + int lane = -ENODEV; switch (port) { case 9: @@ -562,12 +562,12 @@ u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) return lane; } -u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode_port = chip->ports[port].cmode; u8 cmode_port10 = chip->ports[10].cmode; u8 cmode_port9 = chip->ports[9].cmode; - u8 lane = 0; + int lane = -ENODEV; switch (port) { case 2: @@ -637,8 +637,29 @@ u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) return lane; } +/* Only Ports 0, 9 and 10 have SERDES lanes. Return the SERDES lane address + * a port is using else Returns -ENODEV. + */ +int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +{ + u8 cmode = chip->ports[port].cmode; + int lane = -ENODEV; + + if (port != 0 && port != 9 && port != 10) + return -EOPNOTSUPP; + + if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX || + cmode == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX || + cmode == MV88E6393X_PORT_STS_CMODE_5GBASER || + cmode == MV88E6393X_PORT_STS_CMODE_10GBASER) + lane = port; + + return lane; +} + /* Set power up/down for 10GBASE-R and 10GBASE-X4/X2 */ -static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, u8 lane, +static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane, bool up) { u16 val, new_val; @@ -665,7 +686,7 @@ static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, u8 lane, } /* Set power up/down for SGMII and 1000Base-X */ -static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, u8 lane, +static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane, bool up) { u16 val, new_val; @@ -701,7 +722,7 @@ static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = { int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port) { - if (mv88e6390_serdes_get_lane(chip, port) == 0) + if (mv88e6390_serdes_get_lane(chip, port) < 0) return 0; return ARRAY_SIZE(mv88e6390_serdes_hw_stats); @@ -713,7 +734,7 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip, struct mv88e6390_serdes_hw_stat *stat; int i; - if (mv88e6390_serdes_get_lane(chip, port) == 0) + if (mv88e6390_serdes_get_lane(chip, port) < 0) return 0; for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) { @@ -750,7 +771,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, int i; lane = mv88e6390_serdes_get_lane(chip, port); - if (lane == 0) + if (lane < 0) return 0; for (i = 
0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) { @@ -761,7 +782,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, return ARRAY_SIZE(mv88e6390_serdes_hw_stats); } -static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, u8 lane) +static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, int lane) { u16 reg; int err; @@ -776,7 +797,7 @@ static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, u8 lane) MV88E6390_PG_CONTROL, reg); } -int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool up) { u8 cmode = chip->ports[port].cmode; @@ -801,7 +822,7 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, } int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, - u8 lane, unsigned int mode, + int lane, unsigned int mode, phy_interface_t interface, const unsigned long *advertise) { @@ -860,7 +881,7 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, } static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, - int port, u8 lane, struct phylink_link_state *state) + int port, int lane, struct phylink_link_state *state) { u16 lpa, status; int err; @@ -883,7 +904,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, } static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, - int port, u8 lane, struct phylink_link_state *state) + int port, int lane, struct phylink_link_state *state) { u16 status; int err; @@ -902,8 +923,32 @@ static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, return 0; } +static int mv88e6393x_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, + int port, int lane, + struct phylink_link_state *state) +{ + u16 status; + int err; + + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_10G_STAT1, &status); + if (err) + return err; + + state->link = !!(status & MDIO_STAT1_LSTATUS); + if (state->link) { + if (state->interface == PHY_INTERFACE_MODE_5GBASER) + state->speed = SPEED_5000; + else + state->speed = SPEED_10000; + state->duplex = DUPLEX_FULL; + } + + return 0; +} + int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - u8 lane, struct phylink_link_state *state) + int lane, struct phylink_link_state *state) { switch (state->interface) { case PHY_INTERFACE_MODE_SGMII: @@ -921,8 +966,27 @@ int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, } } +int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, + int lane, struct phylink_link_state *state) +{ + switch (state->interface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane, + state); + case PHY_INTERFACE_MODE_5GBASER: + case PHY_INTERFACE_MODE_10GBASER: + return mv88e6393x_serdes_pcs_get_state_10g(chip, port, lane, + state); + + default: + return -EOPNOTSUPP; + } +} + int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { u16 bmcr; int err; @@ -938,7 +1002,7 @@ int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, } int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, - u8 lane, int speed, int duplex) + int lane, int speed, int duplex) { u16 val, bmcr; int err; @@ -972,7 +1036,7 @@ int mv88e6390_serdes_pcs_link_up(struct 
mv88e6xxx_chip *chip, int port, } static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip, - int port, u8 lane) + int port, int lane) { u16 bmsr; int err; @@ -988,8 +1052,25 @@ static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip, dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS)); } +static void mv88e6393x_serdes_irq_link_10g(struct mv88e6xxx_chip *chip, + int port, u8 lane) +{ + u16 status; + int err; + + /* If the link has dropped, we want to know about it. */ + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_10G_STAT1, &status); + if (err) { + dev_err(chip->dev, "can't read Serdes STAT1: %d\n", err); + return; + } + + dsa_port_phylink_mac_change(chip->ds, port, !!(status & MDIO_STAT1_LSTATUS)); +} + static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip, - u8 lane, bool enable) + int lane, bool enable) { u16 val = 0; @@ -1001,7 +1082,7 @@ static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip, MV88E6390_SGMII_INT_ENABLE, val); } -int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, bool enable) { u8 cmode = chip->ports[port].cmode; @@ -1017,7 +1098,7 @@ int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, } static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip, - u8 lane, u16 *status) + int lane, u16 *status) { int err; @@ -1027,8 +1108,85 @@ static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip, return err; } +static int mv88e6393x_serdes_irq_enable_10g(struct mv88e6xxx_chip *chip, + u8 lane, bool enable) +{ + u16 val = 0; + + if (enable) + val |= MV88E6393X_10G_INT_LINK_CHANGE; + + return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_10G_INT_ENABLE, val); +} + +int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, + int lane, bool enable) +{ + u8 cmode = chip->ports[port].cmode; + + switch (cmode) { + case MV88E6XXX_PORT_STS_CMODE_SGMII: + case MV88E6XXX_PORT_STS_CMODE_1000BASEX: + case MV88E6XXX_PORT_STS_CMODE_2500BASEX: + return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable); + case MV88E6393X_PORT_STS_CMODE_5GBASER: + case MV88E6393X_PORT_STS_CMODE_10GBASER: + return mv88e6393x_serdes_irq_enable_10g(chip, lane, enable); + } + + return 0; +} + +static int mv88e6393x_serdes_irq_status_10g(struct mv88e6xxx_chip *chip, + u8 lane, u16 *status) +{ + int err; + + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_10G_INT_STATUS, status); + + return err; +} + +irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, + int lane) +{ + u8 cmode = chip->ports[port].cmode; + irqreturn_t ret = IRQ_NONE; + u16 status; + int err; + + switch (cmode) { + case MV88E6XXX_PORT_STS_CMODE_SGMII: + case MV88E6XXX_PORT_STS_CMODE_1000BASEX: + case MV88E6XXX_PORT_STS_CMODE_2500BASEX: + err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status); + if (err) + return ret; + if (status & (MV88E6390_SGMII_INT_LINK_DOWN | + MV88E6390_SGMII_INT_LINK_UP)) { + ret = IRQ_HANDLED; + mv88e6390_serdes_irq_link_sgmii(chip, port, lane); + } + break; + case MV88E6393X_PORT_STS_CMODE_5GBASER: + case MV88E6393X_PORT_STS_CMODE_10GBASER: + err = mv88e6393x_serdes_irq_status_10g(chip, lane, &status); + if (err) + return err; + if (status & MV88E6393X_10G_INT_LINK_CHANGE) { + ret = IRQ_HANDLED; + mv88e6393x_serdes_irq_link_10g(chip, port, lane); + } + break; + } + + 
return ret; +} + irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - u8 lane) + int lane) { u8 cmode = chip->ports[port].cmode; irqreturn_t ret = IRQ_NONE; @@ -1087,7 +1245,7 @@ static const u16 mv88e6390_serdes_regs[] = { int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port) { - if (mv88e6xxx_serdes_get_lane(chip, port) == 0) + if (mv88e6xxx_serdes_get_lane(chip, port) < 0) return 0; return ARRAY_SIZE(mv88e6390_serdes_regs) * sizeof(u16); @@ -1102,7 +1260,7 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p) int i; lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane == 0) + if (lane < 0) return; for (i = 0 ; i < ARRAY_SIZE(mv88e6390_serdes_regs); i++) { @@ -1112,3 +1270,101 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p) p[i] = reg; } } + +static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) +{ + u16 reg, pcs; + int err; + + /* mv88e6393x family errata 4.6: + * Cannot clear PwrDn bit on SERDES on port 0 if device is configured + * CPU_MGD mode or P0_mode is configured for [x]MII. + * Workaround: Set Port0 SERDES register 4.F002 bit 5=0 and bit 15=1. + * + * It seems that after this workaround the SERDES is automatically + * powered up (the bit is cleared), so power it down. + */ + if (lane == MV88E6393X_PORT0_LANE) { + err = mv88e6390_serdes_read(chip, MV88E6393X_PORT0_LANE, + MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, &reg); + if (err) + return err; + + reg &= ~MV88E6393X_SERDES_POC_PDOWN; + reg |= MV88E6393X_SERDES_POC_RESET; + + err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, reg); + if (err) + return err; + + err = mv88e6390_serdes_power_sgmii(chip, lane, false); + if (err) + return err; + } + + /* mv88e6393x family errata 4.8: + * When a SERDES port is operating in 1000BASE-X or SGMII mode, the + * link may not come up after hardware reset or software reset of the + * SERDES core. + * Workaround is to write SERDES register 4.F074.14=1 for only those + * modes and 0 in all other modes. 
+ */ + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, &pcs); + if (err) + return err; + + pcs &= MV88E6393X_SERDES_POC_PCS_MASK; + + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_ERRATA_4_8_REG, &reg); + if (err) + return err; + + if (pcs == MV88E6393X_SERDES_POC_PCS_1000BASEX || + pcs == MV88E6393X_SERDES_POC_PCS_SGMII_PHY || + pcs == MV88E6393X_SERDES_POC_PCS_SGMII_MAC) + reg |= MV88E6393X_ERRATA_4_8_BIT; + else + reg &= ~MV88E6393X_ERRATA_4_8_BIT; + + return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_ERRATA_4_8_REG, reg); +} + +int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip) +{ + int err; + + err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE); + if (err) + return err; + + err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE); + if (err) + return err; + + return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE); +} + +int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, + bool on) +{ + u8 cmode = chip->ports[port].cmode; + + if (port != 0 && port != 9 && port != 10) + return -EOPNOTSUPP; + + switch (cmode) { + case MV88E6XXX_PORT_STS_CMODE_SGMII: + case MV88E6XXX_PORT_STS_CMODE_1000BASEX: + case MV88E6XXX_PORT_STS_CMODE_2500BASEX: + return mv88e6390_serdes_power_sgmii(chip, lane, on); + case MV88E6393X_PORT_STS_CMODE_5GBASER: + case MV88E6393X_PORT_STS_CMODE_10GBASER: + return mv88e6390_serdes_power_10g(chip, lane, on); + } + + return 0; +} diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index 93822ef9bab8..cbb3ba30caea 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -42,6 +42,9 @@ /* 10GBASE-R and 10GBASE-X4/X2 */ #define MV88E6390_10G_CTRL1 (0x1000 + MDIO_CTRL1) #define MV88E6390_10G_STAT1 (0x1000 + MDIO_STAT1) +#define MV88E6393X_10G_INT_ENABLE 0x9000 +#define MV88E6393X_10G_INT_LINK_CHANGE BIT(2) +#define MV88E6393X_10G_INT_STATUS 0x9001 /* 1000BASE-X and SGMII */ #define MV88E6390_SGMII_BMCR (0x2000 + MII_BMCR) @@ -73,55 +76,86 @@ #define MV88E6390_PG_CONTROL 0xf010 #define MV88E6390_PG_CONTROL_ENABLE_PC BIT(0) -u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); -u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); -u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); -u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); -u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +#define MV88E6393X_PORT0_LANE 0x00 +#define MV88E6393X_PORT9_LANE 0x09 +#define MV88E6393X_PORT10_LANE 0x0a + +/* Port Operational Configuration */ +#define MV88E6393X_SERDES_POC 0xf002 +#define MV88E6393X_SERDES_POC_PCS_1000BASEX 0x0000 +#define MV88E6393X_SERDES_POC_PCS_2500BASEX 0x0001 +#define MV88E6393X_SERDES_POC_PCS_SGMII_PHY 0x0002 +#define MV88E6393X_SERDES_POC_PCS_SGMII_MAC 0x0003 +#define MV88E6393X_SERDES_POC_PCS_5GBASER 0x0004 +#define MV88E6393X_SERDES_POC_PCS_10GBASER 0x0005 +#define MV88E6393X_SERDES_POC_PCS_USXGMII_PHY 0x0006 +#define MV88E6393X_SERDES_POC_PCS_USXGMII_MAC 0x0007 +#define MV88E6393X_SERDES_POC_PCS_MASK 0x0007 +#define MV88E6393X_SERDES_POC_RESET BIT(15) +#define MV88E6393X_SERDES_POC_PDOWN BIT(5) + +#define MV88E6393X_ERRATA_4_8_REG 0xF074 +#define MV88E6393X_ERRATA_4_8_BIT BIT(14) + +int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +int mv88e6352_serdes_get_lane(struct 
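In the errata 4.8 handling above, the datasheet notation 4.F074.14 decodes as MDIO device (MMD) 4, that is MDIO_MMD_PHYXS, register 0xF074, bit 14, which is exactly what MV88E6393X_ERRATA_4_8_REG and MV88E6393X_ERRATA_4_8_BIT encode in serdes.h. The mode test then reduces to a predicate on the POC PCS field; a standalone sketch under those definitions, with a hypothetical helper name:

static bool mv88e6393x_errata_4_8_applies(u16 poc)
{
	u16 pcs = poc & MV88E6393X_SERDES_POC_PCS_MASK;

	/* only 1000BASE-X and SGMII (PHY or MAC role) need the bit set */
	return pcs == MV88E6393X_SERDES_POC_PCS_1000BASEX ||
	       pcs == MV88E6393X_SERDES_POC_PCS_SGMII_PHY ||
	       pcs == MV88E6393X_SERDES_POC_PCS_SGMII_MAC;
}
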
mv88e6xxx_chip *chip, int port); +int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, - u8 lane, unsigned int mode, + int lane, unsigned int mode, phy_interface_t interface, const unsigned long *advertise); int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, - u8 lane, unsigned int mode, + int lane, unsigned int mode, phy_interface_t interface, const unsigned long *advertise); int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - u8 lane, struct phylink_link_state *state); + int lane, struct phylink_link_state *state); int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - u8 lane, struct phylink_link_state *state); + int lane, struct phylink_link_state *state); int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - u8 lane, struct phylink_link_state *state); + int lane, struct phylink_link_state *state); +int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, + int lane, struct phylink_link_state *state); int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, - u8 lane, int speed, int duplex); + int lane, int speed, int duplex); int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, - u8 lane, int speed, int duplex); + int lane, int speed, int duplex); unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port); unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port); -int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool up); -int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool on); -int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool on); -int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, + bool on); +int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip); +int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, bool enable); -int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, bool enable); -int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, +int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, bool enable); +int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, + int lane, bool enable); irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - u8 lane); + int lane); +irqreturn_t mv88e6393x_serdes_irq_status(struct 
mv88e6xxx_chip *chip, int port, + int lane); int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port, uint8_t *data); @@ -138,18 +172,18 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p); int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port); void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p); -/* Return the (first) SERDES lane address a port is using, 0 otherwise. */ -static inline u8 mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip, - int port) +/* Return the (first) SERDES lane address a port is using, -errno otherwise. */ +static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip, + int port) { if (!chip->info->ops->serdes_get_lane) - return 0; + return -EOPNOTSUPP; return chip->info->ops->serdes_get_lane(chip, port); } static inline int mv88e6xxx_serdes_power_up(struct mv88e6xxx_chip *chip, - int port, u8 lane) + int port, int lane) { if (!chip->info->ops->serdes_power) return -EOPNOTSUPP; @@ -158,7 +192,7 @@ static inline int mv88e6xxx_serdes_power_up(struct mv88e6xxx_chip *chip, } static inline int mv88e6xxx_serdes_power_down(struct mv88e6xxx_chip *chip, - int port, u8 lane) + int port, int lane) { if (!chip->info->ops->serdes_power) return -EOPNOTSUPP; @@ -176,7 +210,7 @@ mv88e6xxx_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port) } static inline int mv88e6xxx_serdes_irq_enable(struct mv88e6xxx_chip *chip, - int port, u8 lane) + int port, int lane) { if (!chip->info->ops->serdes_irq_enable) return -EOPNOTSUPP; @@ -185,7 +219,7 @@ static inline int mv88e6xxx_serdes_irq_enable(struct mv88e6xxx_chip *chip, } static inline int mv88e6xxx_serdes_irq_disable(struct mv88e6xxx_chip *chip, - int port, u8 lane) + int port, int lane) { if (!chip->info->ops->serdes_irq_enable) return -EOPNOTSUPP; @@ -194,7 +228,7 @@ static inline int mv88e6xxx_serdes_irq_disable(struct mv88e6xxx_chip *chip, } static inline irqreturn_t -mv88e6xxx_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, u8 lane) +mv88e6xxx_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, int lane) { if (!chip->info->ops->serdes_irq_status) return IRQ_NONE; diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 628afb47b579..6b5442be0230 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -719,7 +719,9 @@ static int felix_bridge_join(struct dsa_switch *ds, int port, { struct ocelot *ocelot = ds->priv; - return ocelot_port_bridge_join(ocelot, port, br); + ocelot_port_bridge_join(ocelot, port, br); + + return 0; } static void felix_bridge_leave(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c index 12e76020bea3..973761132fc3 100644 --- a/drivers/net/dsa/sja1105/sja1105_flower.c +++ b/drivers/net/dsa/sja1105/sja1105_flower.c @@ -317,11 +317,16 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port, if (rc) return rc; - rc = -EOPNOTSUPP; - flow_action_for_each(i, act, &rule->action) { switch (act->id) { case FLOW_ACTION_POLICE: + if (act->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + rc = -EOPNOTSUPP; + goto out; + } + rc = sja1105_flower_policer(priv, port, extack, cookie, &key, act->police.rate_bytes_ps, diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 51ea104c63bb..d9c198ca0197 
100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -3049,21 +3049,6 @@ static void sja1105_teardown(struct dsa_switch *ds) } } -static int sja1105_port_enable(struct dsa_switch *ds, int port, - struct phy_device *phy) -{ - struct net_device *slave; - - if (!dsa_is_user_port(ds, port)) - return 0; - - slave = dsa_to_port(ds, port)->slave; - - slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; - - return 0; -} - static void sja1105_port_disable(struct dsa_switch *ds, int port) { struct sja1105_private *priv = ds->priv; @@ -3491,7 +3476,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .get_ethtool_stats = sja1105_get_ethtool_stats, .get_sset_count = sja1105_get_sset_count, .get_ts_info = sja1105_get_ts_info, - .port_enable = sja1105_port_enable, .port_disable = sja1105_port_disable, .port_fdb_dump = sja1105_fdb_dump, .port_fdb_add = sja1105_fdb_add, diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 53e1f7e07959..96cc5fc36eb5 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -1051,6 +1051,7 @@ el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_link_ksettings *cmd) break; case 3: cmd->base.port = PORT_BNC; + break; default: break; } diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index ad04660b97b8..4b85f2b74872 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -19,6 +19,7 @@ config SUNGEM_PHY tristate source "drivers/net/ethernet/3com/Kconfig" +source "drivers/net/ethernet/actions/Kconfig" source "drivers/net/ethernet/adaptec/Kconfig" source "drivers/net/ethernet/aeroflex/Kconfig" source "drivers/net/ethernet/agere/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 1e7dc8a7762d..9394493e8187 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ obj-$(CONFIG_NET_VENDOR_8390) += 8390/ +obj-$(CONFIG_NET_VENDOR_ACTIONS) += actions/ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/ obj-$(CONFIG_GRETH) += aeroflex/ obj-$(CONFIG_NET_VENDOR_AGERE) += agere/ diff --git a/drivers/net/ethernet/actions/Kconfig b/drivers/net/ethernet/actions/Kconfig new file mode 100644 index 000000000000..ccad6a3f4d6f --- /dev/null +++ b/drivers/net/ethernet/actions/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config NET_VENDOR_ACTIONS + bool "Actions Semi devices" + default y + depends on ARCH_ACTIONS + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all the + questions about Actions Semi devices. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_ACTIONS + +config OWL_EMAC + tristate "Actions Semi Owl Ethernet MAC support" + select PHYLIB + help + This driver supports the Actions Semi Ethernet Media Access + Controller (EMAC) found on the S500 and S900 SoCs. The controller + is compliant with the IEEE 802.3 CSMA/CD standard and supports + both half-duplex and full-duplex operation modes at 10/100 Mb/s. 
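Building the new driver requires both the vendor guard and the driver symbol, and NET_VENDOR_ACTIONS in turn depends on ARCH_ACTIONS, as the Kconfig hunk above shows. A typical configuration fragment, as a sketch only:

CONFIG_ARCH_ACTIONS=y
CONFIG_NET_VENDOR_ACTIONS=y
CONFIG_OWL_EMAC=m
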
+ +endif # NET_VENDOR_ACTIONS diff --git a/drivers/net/ethernet/actions/Makefile b/drivers/net/ethernet/actions/Makefile new file mode 100644 index 000000000000..fde8001d538a --- /dev/null +++ b/drivers/net/ethernet/actions/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Actions Semi Owl SoCs built-in Ethernet MACs +# + +obj-$(CONFIG_OWL_EMAC) += owl-emac.o diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c new file mode 100644 index 000000000000..b8e771c2bc40 --- /dev/null +++ b/drivers/net/ethernet/actions/owl-emac.c @@ -0,0 +1,1625 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Actions Semi Owl SoCs Ethernet MAC driver + * + * Copyright (c) 2012 Actions Semi Inc. + * Copyright (c) 2021 Cristian Ciocaltea <cristian.ciocaltea@gmail.com> + */ + +#include <linux/circ_buf.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/reset.h> + +#include "owl-emac.h" + +#define OWL_EMAC_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +static u32 owl_emac_reg_read(struct owl_emac_priv *priv, u32 reg) +{ + return readl(priv->base + reg); +} + +static void owl_emac_reg_write(struct owl_emac_priv *priv, u32 reg, u32 data) +{ + writel(data, priv->base + reg); +} + +static u32 owl_emac_reg_update(struct owl_emac_priv *priv, + u32 reg, u32 mask, u32 val) +{ + u32 data, old_val; + + data = owl_emac_reg_read(priv, reg); + old_val = data & mask; + + data &= ~mask; + data |= val & mask; + + owl_emac_reg_write(priv, reg, data); + + return old_val; +} + +static void owl_emac_reg_set(struct owl_emac_priv *priv, u32 reg, u32 bits) +{ + owl_emac_reg_update(priv, reg, bits, bits); +} + +static void owl_emac_reg_clear(struct owl_emac_priv *priv, u32 reg, u32 bits) +{ + owl_emac_reg_update(priv, reg, bits, 0); +} + +static struct device *owl_emac_get_dev(struct owl_emac_priv *priv) +{ + return priv->netdev->dev.parent; +} + +static void owl_emac_irq_enable(struct owl_emac_priv *priv) +{ + /* Enable all interrupts except TU. + * + * Note the NIE and AIE bits shall also be set in order to actually + * enable the selected interrupts. + */ + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7, + OWL_EMAC_BIT_MAC_CSR7_NIE | + OWL_EMAC_BIT_MAC_CSR7_AIE | + OWL_EMAC_BIT_MAC_CSR7_ALL_NOT_TUE); +} + +static void owl_emac_irq_disable(struct owl_emac_priv *priv) +{ + /* Disable all interrupts. + * + * WARNING: Unset only the NIE and AIE bits in CSR7 to work around an + * unexpected side effect (MAC hardware bug?!) where some bits in the + * status register (CSR5) are cleared automatically before being able + * to read them via owl_emac_irq_clear(). + */ + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7, + OWL_EMAC_BIT_MAC_CSR7_ALL_NOT_TUE); +} + +static u32 owl_emac_irq_status(struct owl_emac_priv *priv) +{ + return owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR5); +} + +static u32 owl_emac_irq_clear(struct owl_emac_priv *priv) +{ + u32 val = owl_emac_irq_status(priv); + + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR5, val); + + return val; +} + +static dma_addr_t owl_emac_dma_map_rx(struct owl_emac_priv *priv, + struct sk_buff *skb) +{ + struct device *dev = owl_emac_get_dev(priv); + + /* Buffer pointer for the RX DMA descriptor must be word aligned.
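+ * This holds because owl_emac_alloc_skb() below reserves headroom until
+ * skb->data sits on a 4-byte boundary, and the mapping is done before
+ * any data is pushed, so skb_tail_pointer() inherits that alignment.
+ * Purely as an illustration (not part of the driver), a debug build
+ * could assert the invariant before mapping:
+ *
+ *	WARN_ON_ONCE((uintptr_t)skb_tail_pointer(skb) &
+ *		     (OWL_EMAC_SKB_ALIGN - 1));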
*/ + return dma_map_single(dev, skb_tail_pointer(skb), + skb_tailroom(skb), DMA_FROM_DEVICE); +} + +static void owl_emac_dma_unmap_rx(struct owl_emac_priv *priv, + struct sk_buff *skb, dma_addr_t dma_addr) +{ + struct device *dev = owl_emac_get_dev(priv); + + dma_unmap_single(dev, dma_addr, skb_tailroom(skb), DMA_FROM_DEVICE); +} + +static dma_addr_t owl_emac_dma_map_tx(struct owl_emac_priv *priv, + struct sk_buff *skb) +{ + struct device *dev = owl_emac_get_dev(priv); + + return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); +} + +static void owl_emac_dma_unmap_tx(struct owl_emac_priv *priv, + struct sk_buff *skb, dma_addr_t dma_addr) +{ + struct device *dev = owl_emac_get_dev(priv); + + dma_unmap_single(dev, dma_addr, skb_headlen(skb), DMA_TO_DEVICE); +} + +static unsigned int owl_emac_ring_num_unused(struct owl_emac_ring *ring) +{ + return CIRC_SPACE(ring->head, ring->tail, ring->size); +} + +static unsigned int owl_emac_ring_get_next(struct owl_emac_ring *ring, + unsigned int cur) +{ + return (cur + 1) & (ring->size - 1); +} + +static void owl_emac_ring_push_head(struct owl_emac_ring *ring) +{ + ring->head = owl_emac_ring_get_next(ring, ring->head); +} + +static void owl_emac_ring_pop_tail(struct owl_emac_ring *ring) +{ + ring->tail = owl_emac_ring_get_next(ring, ring->tail); +} + +static struct sk_buff *owl_emac_alloc_skb(struct net_device *netdev) +{ + struct sk_buff *skb; + int offset; + + skb = netdev_alloc_skb(netdev, OWL_EMAC_RX_FRAME_MAX_LEN + + OWL_EMAC_SKB_RESERVE); + if (unlikely(!skb)) + return NULL; + + /* Ensure 4-byte DMA alignment. */ + offset = ((uintptr_t)skb->data) & (OWL_EMAC_SKB_ALIGN - 1); + if (unlikely(offset)) + skb_reserve(skb, OWL_EMAC_SKB_ALIGN - offset); + + return skb; +} + +static int owl_emac_ring_prepare_rx(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->rx_ring; + struct device *dev = owl_emac_get_dev(priv); + struct net_device *netdev = priv->netdev; + struct owl_emac_ring_desc *desc; + struct sk_buff *skb; + dma_addr_t dma_addr; + int i; + + for (i = 0; i < ring->size; i++) { + skb = owl_emac_alloc_skb(netdev); + if (!skb) + return -ENOMEM; + + dma_addr = owl_emac_dma_map_rx(priv, skb); + if (dma_mapping_error(dev, dma_addr)) { + dev_kfree_skb(skb); + return -ENOMEM; + } + + desc = &ring->descs[i]; + desc->status = OWL_EMAC_BIT_RDES0_OWN; + desc->control = skb_tailroom(skb) & OWL_EMAC_MSK_RDES1_RBS1; + desc->buf_addr = dma_addr; + desc->reserved = 0; + + ring->skbs[i] = skb; + ring->skbs_dma[i] = dma_addr; + } + + desc->control |= OWL_EMAC_BIT_RDES1_RER; + + ring->head = 0; + ring->tail = 0; + + return 0; +} + +static void owl_emac_ring_prepare_tx(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + struct owl_emac_ring_desc *desc; + int i; + + for (i = 0; i < ring->size; i++) { + desc = &ring->descs[i]; + + desc->status = 0; + desc->control = OWL_EMAC_BIT_TDES1_IC; + desc->buf_addr = 0; + desc->reserved = 0; + } + + desc->control |= OWL_EMAC_BIT_TDES1_TER; + + memset(ring->skbs_dma, 0, sizeof(dma_addr_t) * ring->size); + + ring->head = 0; + ring->tail = 0; +} + +static void owl_emac_ring_unprepare_rx(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->rx_ring; + int i; + + for (i = 0; i < ring->size; i++) { + ring->descs[i].status = 0; + + if (!ring->skbs_dma[i]) + continue; + + owl_emac_dma_unmap_rx(priv, ring->skbs[i], ring->skbs_dma[i]); + ring->skbs_dma[i] = 0; + + dev_kfree_skb(ring->skbs[i]); + ring->skbs[i] = NULL; + } +} + +static void
owl_emac_ring_unprepare_tx(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + int i; + + for (i = 0; i < ring->size; i++) { + ring->descs[i].status = 0; + + if (!ring->skbs_dma[i]) + continue; + + owl_emac_dma_unmap_tx(priv, ring->skbs[i], ring->skbs_dma[i]); + ring->skbs_dma[i] = 0; + + dev_kfree_skb(ring->skbs[i]); + ring->skbs[i] = NULL; + } +} + +static int owl_emac_ring_alloc(struct device *dev, struct owl_emac_ring *ring, + unsigned int size) +{ + ring->descs = dmam_alloc_coherent(dev, + sizeof(struct owl_emac_ring_desc) * size, + &ring->descs_dma, GFP_KERNEL); + if (!ring->descs) + return -ENOMEM; + + ring->skbs = devm_kcalloc(dev, size, sizeof(struct sk_buff *), + GFP_KERNEL); + if (!ring->skbs) + return -ENOMEM; + + ring->skbs_dma = devm_kcalloc(dev, size, sizeof(dma_addr_t), + GFP_KERNEL); + if (!ring->skbs_dma) + return -ENOMEM; + + ring->size = size; + + return 0; +} + +static void owl_emac_dma_cmd_resume_rx(struct owl_emac_priv *priv) +{ + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR2, + OWL_EMAC_VAL_MAC_CSR2_RPD); +} + +static void owl_emac_dma_cmd_resume_tx(struct owl_emac_priv *priv) +{ + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR1, + OWL_EMAC_VAL_MAC_CSR1_TPD); +} + +static u32 owl_emac_dma_cmd_set_tx(struct owl_emac_priv *priv, u32 status) +{ + return owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6, + OWL_EMAC_BIT_MAC_CSR6_ST, status); +} + +static u32 owl_emac_dma_cmd_start_tx(struct owl_emac_priv *priv) +{ + return owl_emac_dma_cmd_set_tx(priv, ~0); +} + +static u32 owl_emac_dma_cmd_set(struct owl_emac_priv *priv, u32 status) +{ + return owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6, + OWL_EMAC_MSK_MAC_CSR6_STSR, status); +} + +static u32 owl_emac_dma_cmd_start(struct owl_emac_priv *priv) +{ + return owl_emac_dma_cmd_set(priv, ~0); +} + +static u32 owl_emac_dma_cmd_stop(struct owl_emac_priv *priv) +{ + return owl_emac_dma_cmd_set(priv, 0); +} + +static void owl_emac_set_hw_mac_addr(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + u8 *mac_addr = netdev->dev_addr; + u32 addr_high, addr_low; + + addr_high = mac_addr[0] << 8 | mac_addr[1]; + addr_low = mac_addr[2] << 24 | mac_addr[3] << 16 | + mac_addr[4] << 8 | mac_addr[5]; + + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR17, addr_high); + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR16, addr_low); +} + +static void owl_emac_update_link_state(struct owl_emac_priv *priv) +{ + u32 val, status; + + if (priv->pause) { + val = OWL_EMAC_BIT_MAC_CSR20_FCE | OWL_EMAC_BIT_MAC_CSR20_TUE; + val |= OWL_EMAC_BIT_MAC_CSR20_TPE | OWL_EMAC_BIT_MAC_CSR20_RPE; + val |= OWL_EMAC_BIT_MAC_CSR20_BPE; + } else { + val = 0; + } + + /* Update flow control. */ + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR20, val); + + val = (priv->speed == SPEED_100) ? OWL_EMAC_VAL_MAC_CSR6_SPEED_100M : + OWL_EMAC_VAL_MAC_CSR6_SPEED_10M; + val <<= OWL_EMAC_OFF_MAC_CSR6_SPEED; + + if (priv->duplex == DUPLEX_FULL) + val |= OWL_EMAC_BIT_MAC_CSR6_FD; + + spin_lock_bh(&priv->lock); + + /* Temporarily stop DMA TX & RX. */ + status = owl_emac_dma_cmd_stop(priv); + + /* Update operation modes. */ + owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6, + OWL_EMAC_MSK_MAC_CSR6_SPEED | + OWL_EMAC_BIT_MAC_CSR6_FD, val); + + /* Restore DMA TX & RX status. 
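+ * owl_emac_dma_cmd_stop() above returned the previous ST/SR bits, so
+ * this restore resumes only the DMA engines that were actually running
+ * when the update began, rather than unconditionally starting both.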
*/ + owl_emac_dma_cmd_set(priv, status); + + spin_unlock_bh(&priv->lock); +} + +static void owl_emac_adjust_link(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + bool state_changed = false; + + if (phydev->link) { + if (!priv->link) { + priv->link = phydev->link; + state_changed = true; + } + + if (priv->speed != phydev->speed) { + priv->speed = phydev->speed; + state_changed = true; + } + + if (priv->duplex != phydev->duplex) { + priv->duplex = phydev->duplex; + state_changed = true; + } + + if (priv->pause != phydev->pause) { + priv->pause = phydev->pause; + state_changed = true; + } + } else { + if (priv->link) { + priv->link = phydev->link; + state_changed = true; + } + } + + if (state_changed) { + if (phydev->link) + owl_emac_update_link_state(priv); + + if (netif_msg_link(priv)) + phy_print_status(phydev); + } +} + +static irqreturn_t owl_emac_handle_irq(int irq, void *data) +{ + struct net_device *netdev = data; + struct owl_emac_priv *priv = netdev_priv(netdev); + + if (netif_running(netdev)) { + owl_emac_irq_disable(priv); + napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + +static void owl_emac_ether_addr_push(u8 **dst, const u8 *src) +{ + u32 *a = (u32 *)(*dst); + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; + + *dst += 12; +} + +static void +owl_emac_setup_frame_prepare(struct owl_emac_priv *priv, struct sk_buff *skb) +{ + const u8 bcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + const u8 *mac_addr = priv->netdev->dev_addr; + u8 *frame; + int i; + + skb_put(skb, OWL_EMAC_SETUP_FRAME_LEN); + + frame = skb->data; + memset(frame, 0, skb->len); + + owl_emac_ether_addr_push(&frame, mac_addr); + owl_emac_ether_addr_push(&frame, bcast_addr); + + /* Fill multicast addresses. */ + WARN_ON(priv->mcaddr_list.count >= OWL_EMAC_MAX_MULTICAST_ADDRS); + for (i = 0; i < priv->mcaddr_list.count; i++) { + mac_addr = priv->mcaddr_list.addrs[i]; + owl_emac_ether_addr_push(&frame, mac_addr); + } +} + +/* The setup frame is a special descriptor which is used to provide physical + * addresses (i.e. mac, broadcast and multicast) to the MAC hardware for + * filtering purposes. To be recognized as a setup frame, the TDES1_SET bit + * must be set in the TX descriptor control field. + */ +static int owl_emac_setup_frame_xmit(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + struct net_device *netdev = priv->netdev; + struct owl_emac_ring_desc *desc; + struct sk_buff *skb; + unsigned int tx_head; + u32 status, control; + dma_addr_t dma_addr; + int ret; + + skb = owl_emac_alloc_skb(netdev); + if (!skb) + return -ENOMEM; + + owl_emac_setup_frame_prepare(priv, skb); + + dma_addr = owl_emac_dma_map_tx(priv, skb); + if (dma_mapping_error(owl_emac_get_dev(priv), dma_addr)) { + ret = -ENOMEM; + goto err_free_skb; + } + + spin_lock_bh(&priv->lock); + + tx_head = ring->head; + desc = &ring->descs[tx_head]; + + status = READ_ONCE(desc->status); + control = READ_ONCE(desc->control); + dma_rmb(); /* Ensure data has been read before used. 
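+ * The read barrier orders the status/control reads above before any
+ * further use of the descriptor contents; its counterpart is the
+ * dma_wmb() below, which publishes all descriptor writes to the MAC
+ * before ownership is handed over via OWL_EMAC_BIT_TDES0_OWN.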
*/ + + if (unlikely(status & OWL_EMAC_BIT_TDES0_OWN) || + !owl_emac_ring_num_unused(ring)) { + spin_unlock_bh(&priv->lock); + owl_emac_dma_unmap_tx(priv, skb, dma_addr); + ret = -EBUSY; + goto err_free_skb; + } + + ring->skbs[tx_head] = skb; + ring->skbs_dma[tx_head] = dma_addr; + + control &= OWL_EMAC_BIT_TDES1_IC | OWL_EMAC_BIT_TDES1_TER; /* Maintain bits */ + control |= OWL_EMAC_BIT_TDES1_SET; + control |= OWL_EMAC_MSK_TDES1_TBS1 & skb->len; + + WRITE_ONCE(desc->control, control); + WRITE_ONCE(desc->buf_addr, dma_addr); + dma_wmb(); /* Flush descriptor before changing ownership. */ + WRITE_ONCE(desc->status, OWL_EMAC_BIT_TDES0_OWN); + + owl_emac_ring_push_head(ring); + + /* Temporarily enable DMA TX. */ + status = owl_emac_dma_cmd_start_tx(priv); + + /* Trigger setup frame processing. */ + owl_emac_dma_cmd_resume_tx(priv); + + /* Restore DMA TX status. */ + owl_emac_dma_cmd_set_tx(priv, status); + + /* Stop regular TX until setup frame is processed. */ + netif_stop_queue(netdev); + + spin_unlock_bh(&priv->lock); + + return 0; + +err_free_skb: + dev_kfree_skb(skb); + return ret; +} + +static netdev_tx_t owl_emac_ndo_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct device *dev = owl_emac_get_dev(priv); + struct owl_emac_ring *ring = &priv->tx_ring; + struct owl_emac_ring_desc *desc; + unsigned int tx_head; + u32 status, control; + dma_addr_t dma_addr; + + dma_addr = owl_emac_dma_map_tx(priv, skb); + if (dma_mapping_error(dev, dma_addr)) { + dev_err_ratelimited(&netdev->dev, "TX DMA mapping failed\n"); + dev_kfree_skb(skb); + netdev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + spin_lock_bh(&priv->lock); + + tx_head = ring->head; + desc = &ring->descs[tx_head]; + + status = READ_ONCE(desc->status); + control = READ_ONCE(desc->control); + dma_rmb(); /* Ensure data has been read before used. */ + + if (!owl_emac_ring_num_unused(ring) || + unlikely(status & OWL_EMAC_BIT_TDES0_OWN)) { + netif_stop_queue(netdev); + spin_unlock_bh(&priv->lock); + + dev_dbg_ratelimited(&netdev->dev, "TX buffer full, status=0x%08x\n", + owl_emac_irq_status(priv)); + owl_emac_dma_unmap_tx(priv, skb, dma_addr); + netdev->stats.tx_dropped++; + return NETDEV_TX_BUSY; + } + + ring->skbs[tx_head] = skb; + ring->skbs_dma[tx_head] = dma_addr; + + control &= OWL_EMAC_BIT_TDES1_IC | OWL_EMAC_BIT_TDES1_TER; /* Maintain bits */ + control |= OWL_EMAC_BIT_TDES1_FS | OWL_EMAC_BIT_TDES1_LS; + control |= OWL_EMAC_MSK_TDES1_TBS1 & skb->len; + + WRITE_ONCE(desc->control, control); + WRITE_ONCE(desc->buf_addr, dma_addr); + dma_wmb(); /* Flush descriptor before changing ownership. */ + WRITE_ONCE(desc->status, OWL_EMAC_BIT_TDES0_OWN); + + owl_emac_dma_cmd_resume_tx(priv); + owl_emac_ring_push_head(ring); + + /* FIXME: The transmission is currently restricted to a single frame + * at a time as a workaround for a MAC hardware bug that causes random + * freeze of the TX queue processor. + */ + netif_stop_queue(netdev); + + spin_unlock_bh(&priv->lock); + + return NETDEV_TX_OK; +} + +static bool owl_emac_tx_complete_tail(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + struct net_device *netdev = priv->netdev; + struct owl_emac_ring_desc *desc; + struct sk_buff *skb; + unsigned int tx_tail; + u32 status; + + tx_tail = ring->tail; + desc = &ring->descs[tx_tail]; + + status = READ_ONCE(desc->status); + dma_rmb(); /* Ensure data has been read before used. 
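+ * While OWN is still set the MAC has not released the tail descriptor,
+ * so completion processing below stops at the first frame that is still
+ * in flight.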
*/ + + if (status & OWL_EMAC_BIT_TDES0_OWN) + return false; + + /* Check for errors. */ + if (status & OWL_EMAC_BIT_TDES0_ES) { + dev_dbg_ratelimited(&netdev->dev, + "TX complete error status: 0x%08x\n", + status); + + netdev->stats.tx_errors++; + + if (status & OWL_EMAC_BIT_TDES0_UF) + netdev->stats.tx_fifo_errors++; + + if (status & OWL_EMAC_BIT_TDES0_EC) + netdev->stats.tx_aborted_errors++; + + if (status & OWL_EMAC_BIT_TDES0_LC) + netdev->stats.tx_window_errors++; + + if (status & OWL_EMAC_BIT_TDES0_NC) + netdev->stats.tx_heartbeat_errors++; + + if (status & OWL_EMAC_BIT_TDES0_LO) + netdev->stats.tx_carrier_errors++; + } else { + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += ring->skbs[tx_tail]->len; + } + + /* Some collisions occurred, but pkt has been transmitted. */ + if (status & OWL_EMAC_BIT_TDES0_DE) + netdev->stats.collisions++; + + skb = ring->skbs[tx_tail]; + owl_emac_dma_unmap_tx(priv, skb, ring->skbs_dma[tx_tail]); + dev_kfree_skb(skb); + + ring->skbs[tx_tail] = NULL; + ring->skbs_dma[tx_tail] = 0; + + owl_emac_ring_pop_tail(ring); + + if (unlikely(netif_queue_stopped(netdev))) + netif_wake_queue(netdev); + + return true; +} + +static void owl_emac_tx_complete(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + struct net_device *netdev = priv->netdev; + unsigned int tx_next; + u32 status; + + spin_lock(&priv->lock); + + while (ring->tail != ring->head) { + if (!owl_emac_tx_complete_tail(priv)) + break; + } + + /* FIXME: This is a workaround for a MAC hardware bug not clearing + * (sometimes) the OWN bit for a transmitted frame descriptor. + * + * At this point, when TX queue is full, the tail descriptor has the + * OWN bit set, which normally means the frame has not been processed + * or transmitted yet. But if there is at least one descriptor in the + * queue having the OWN bit cleared, we can safely assume the tail + * frame has been also processed by the MAC hardware. + * + * If that's the case, let's force the frame completion by manually + * clearing the OWN bit. + */ + if (unlikely(!owl_emac_ring_num_unused(ring))) { + tx_next = ring->tail; + + while ((tx_next = owl_emac_ring_get_next(ring, tx_next)) != ring->head) { + status = READ_ONCE(ring->descs[tx_next].status); + dma_rmb(); /* Ensure data has been read before used. */ + + if (status & OWL_EMAC_BIT_TDES0_OWN) + continue; + + netdev_dbg(netdev, "Found uncleared TX desc OWN bit\n"); + + status = READ_ONCE(ring->descs[ring->tail].status); + dma_rmb(); /* Ensure data has been read before used. */ + status &= ~OWL_EMAC_BIT_TDES0_OWN; + WRITE_ONCE(ring->descs[ring->tail].status, status); + + owl_emac_tx_complete_tail(priv); + break; + } + } + + spin_unlock(&priv->lock); +} + +static int owl_emac_rx_process(struct owl_emac_priv *priv, int budget) +{ + struct owl_emac_ring *ring = &priv->rx_ring; + struct device *dev = owl_emac_get_dev(priv); + struct net_device *netdev = priv->netdev; + struct owl_emac_ring_desc *desc; + struct sk_buff *curr_skb, *new_skb; + dma_addr_t curr_dma, new_dma; + unsigned int rx_tail, len; + u32 status; + int recv = 0; + + while (recv < budget) { + spin_lock(&priv->lock); + + rx_tail = ring->tail; + desc = &ring->descs[rx_tail]; + + status = READ_ONCE(desc->status); + dma_rmb(); /* Ensure data has been read before used. 
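+ * OWN still set means the MAC owns this slot and no more completed
+ * frames are pending, so the loop exits here and reports the frames
+ * reaped so far against the NAPI budget.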
*/ + + if (status & OWL_EMAC_BIT_RDES0_OWN) { + spin_unlock(&priv->lock); + break; + } + + curr_skb = ring->skbs[rx_tail]; + curr_dma = ring->skbs_dma[rx_tail]; + owl_emac_ring_pop_tail(ring); + + spin_unlock(&priv->lock); + + if (status & (OWL_EMAC_BIT_RDES0_DE | OWL_EMAC_BIT_RDES0_RF | + OWL_EMAC_BIT_RDES0_TL | OWL_EMAC_BIT_RDES0_CS | + OWL_EMAC_BIT_RDES0_DB | OWL_EMAC_BIT_RDES0_CE | + OWL_EMAC_BIT_RDES0_ZERO)) { + dev_dbg_ratelimited(&netdev->dev, + "RX desc error status: 0x%08x\n", + status); + + if (status & OWL_EMAC_BIT_RDES0_DE) + netdev->stats.rx_over_errors++; + + if (status & (OWL_EMAC_BIT_RDES0_RF | OWL_EMAC_BIT_RDES0_DB)) + netdev->stats.rx_frame_errors++; + + if (status & OWL_EMAC_BIT_RDES0_TL) + netdev->stats.rx_length_errors++; + + if (status & OWL_EMAC_BIT_RDES0_CS) + netdev->stats.collisions++; + + if (status & OWL_EMAC_BIT_RDES0_CE) + netdev->stats.rx_crc_errors++; + + if (status & OWL_EMAC_BIT_RDES0_ZERO) + netdev->stats.rx_fifo_errors++; + + goto drop_skb; + } + + len = (status & OWL_EMAC_MSK_RDES0_FL) >> OWL_EMAC_OFF_RDES0_FL; + if (unlikely(len > OWL_EMAC_RX_FRAME_MAX_LEN)) { + netdev->stats.rx_length_errors++; + netdev_err(netdev, "invalid RX frame len: %u\n", len); + goto drop_skb; + } + + /* Prepare new skb before receiving the current one. */ + new_skb = owl_emac_alloc_skb(netdev); + if (unlikely(!new_skb)) + goto drop_skb; + + new_dma = owl_emac_dma_map_rx(priv, new_skb); + if (dma_mapping_error(dev, new_dma)) { + dev_kfree_skb(new_skb); + netdev_err(netdev, "RX DMA mapping failed\n"); + goto drop_skb; + } + + owl_emac_dma_unmap_rx(priv, curr_skb, curr_dma); + + skb_put(curr_skb, len - ETH_FCS_LEN); + curr_skb->ip_summed = CHECKSUM_NONE; + curr_skb->protocol = eth_type_trans(curr_skb, netdev); + curr_skb->dev = netdev; + + netif_receive_skb(curr_skb); + + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += len; + recv++; + goto push_skb; + +drop_skb: + netdev->stats.rx_dropped++; + netdev->stats.rx_errors++; + /* Reuse the current skb. */ + new_skb = curr_skb; + new_dma = curr_dma; + +push_skb: + spin_lock(&priv->lock); + + ring->skbs[ring->head] = new_skb; + ring->skbs_dma[ring->head] = new_dma; + + WRITE_ONCE(desc->buf_addr, new_dma); + dma_wmb(); /* Flush descriptor before changing ownership. */ + WRITE_ONCE(desc->status, OWL_EMAC_BIT_RDES0_OWN); + + owl_emac_ring_push_head(ring); + + spin_unlock(&priv->lock); + } + + return recv; +} + +static int owl_emac_poll(struct napi_struct *napi, int budget) +{ + int work_done = 0, ru_cnt = 0, recv; + static int tx_err_cnt, rx_err_cnt; + struct owl_emac_priv *priv; + u32 status, proc_status; + + priv = container_of(napi, struct owl_emac_priv, napi); + + while ((status = owl_emac_irq_clear(priv)) & + (OWL_EMAC_BIT_MAC_CSR5_NIS | OWL_EMAC_BIT_MAC_CSR5_AIS)) { + recv = 0; + + /* TX setup frame raises ETI instead of TI. */ + if (status & (OWL_EMAC_BIT_MAC_CSR5_TI | OWL_EMAC_BIT_MAC_CSR5_ETI)) { + owl_emac_tx_complete(priv); + tx_err_cnt = 0; + + /* Count MAC internal RX errors. */ + proc_status = status & OWL_EMAC_MSK_MAC_CSR5_RS; + proc_status >>= OWL_EMAC_OFF_MAC_CSR5_RS; + if (proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_DATA || + proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_CDES || + proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_FDES) + rx_err_cnt++; + } + + if (status & OWL_EMAC_BIT_MAC_CSR5_RI) { + recv = owl_emac_rx_process(priv, budget - work_done); + rx_err_cnt = 0; + + /* Count MAC internal TX errors. 
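+ * Note the cross-direction check: the RX interrupt samples the transmit
+ * process state here, just as the TX path above samples the receive
+ * state, so a wedged engine is still detected as long as the opposite
+ * direction keeps interrupting. Either counter passing the threshold
+ * further down schedules the MAC reset task.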
*/ + proc_status = status & OWL_EMAC_MSK_MAC_CSR5_TS; + proc_status >>= OWL_EMAC_OFF_MAC_CSR5_TS; + if (proc_status == OWL_EMAC_VAL_MAC_CSR5_TS_DATA || + proc_status == OWL_EMAC_VAL_MAC_CSR5_TS_CDES) + tx_err_cnt++; + } else if (status & OWL_EMAC_BIT_MAC_CSR5_RU) { + /* MAC AHB is in suspended state, will return to RX + * descriptor processing when the host changes ownership + * of the descriptor and either an RX poll demand CMD is + * issued or a new frame is recognized by the MAC AHB. + */ + if (++ru_cnt == 2) + owl_emac_dma_cmd_resume_rx(priv); + + recv = owl_emac_rx_process(priv, budget - work_done); + + /* Guard against too many RU interrupts. */ + if (ru_cnt > 3) + break; + } + + work_done += recv; + if (work_done >= budget) + break; + } + + if (work_done < budget) { + napi_complete_done(napi, work_done); + owl_emac_irq_enable(priv); + } + + /* Reset MAC when getting too many internal TX or RX errors. */ + if (tx_err_cnt > 10 || rx_err_cnt > 10) { + netdev_dbg(priv->netdev, "%s error status: 0x%08x\n", + tx_err_cnt > 10 ? "TX" : "RX", status); + rx_err_cnt = 0; + tx_err_cnt = 0; + schedule_work(&priv->mac_reset_task); + } + + return work_done; +} + +static void owl_emac_mdio_clock_enable(struct owl_emac_priv *priv) +{ + u32 val; + + /* Enable MDC clock generation by adjusting CLKDIV according to + * the vendor implementation of the original driver. + */ + val = owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR10); + val &= OWL_EMAC_MSK_MAC_CSR10_CLKDIV; + val |= OWL_EMAC_VAL_MAC_CSR10_CLKDIV_128 << OWL_EMAC_OFF_MAC_CSR10_CLKDIV; + + val |= OWL_EMAC_BIT_MAC_CSR10_SB; + val |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_CDS << OWL_EMAC_OFF_MAC_CSR10_OPCODE; + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, val); +} + +static void owl_emac_core_hw_reset(struct owl_emac_priv *priv) +{ + /* Trigger hardware reset. */ + reset_control_assert(priv->reset); + usleep_range(10, 20); + reset_control_deassert(priv->reset); + usleep_range(100, 200); +} + +static int owl_emac_core_sw_reset(struct owl_emac_priv *priv) +{ + u32 val; + int ret; + + /* Trigger software reset. */ + owl_emac_reg_set(priv, OWL_EMAC_REG_MAC_CSR0, OWL_EMAC_BIT_MAC_CSR0_SWR); + ret = readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR0, + val, !(val & OWL_EMAC_BIT_MAC_CSR0_SWR), + OWL_EMAC_POLL_DELAY_USEC, + OWL_EMAC_RESET_POLL_TIMEOUT_USEC); + if (ret) + return ret; + + if (priv->phy_mode == PHY_INTERFACE_MODE_RMII) { + /* Enable RMII and use the 50MHz rmii clk as output to PHY. */ + val = 0; + } else { + /* Enable SMII and use the 125MHz rmii clk as output to PHY. + * Additionally set SMII SYNC delay to 4 half cycles. + */ + val = 0x04 << OWL_EMAC_OFF_MAC_CTRL_SSDC; + val |= OWL_EMAC_BIT_MAC_CTRL_RSIS; + } + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CTRL, val); + + /* MDC is disabled after reset. */ + owl_emac_mdio_clock_enable(priv); + + /* Set FIFO pause & restart threshold levels. */ + val = 0x40 << OWL_EMAC_OFF_MAC_CSR19_FPTL; + val |= 0x10 << OWL_EMAC_OFF_MAC_CSR19_FRTL; + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR19, val); + + /* Set flow control pause quanta time to ~100 ms. */ + val = 0x4FFF << OWL_EMAC_OFF_MAC_CSR18_PQT; + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR18, val); + + /* Setup interrupt mitigation. */ + val = 7 << OWL_EMAC_OFF_MAC_CSR11_NRP; + val |= 4 << OWL_EMAC_OFF_MAC_CSR11_RT; + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR11, val); + + /* Set RX/TX rings base addresses.
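+ * The (u32) casts below are safe: probe() restricts the device to a
+ * 32-bit DMA mask via dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)),
+ * so descs_dma always fits the register. Purely as an illustration (not
+ * part of the driver), a defensive variant could assert that:
+ *
+ *	WARN_ON(upper_32_bits(priv->rx_ring.descs_dma) ||
+ *		upper_32_bits(priv->tx_ring.descs_dma));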
*/ + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR3, + (u32)(priv->rx_ring.descs_dma)); + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR4, + (u32)(priv->tx_ring.descs_dma)); + + /* Setup initial operation mode. */ + val = OWL_EMAC_VAL_MAC_CSR6_SPEED_100M << OWL_EMAC_OFF_MAC_CSR6_SPEED; + val |= OWL_EMAC_BIT_MAC_CSR6_FD; + owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6, + OWL_EMAC_MSK_MAC_CSR6_SPEED | + OWL_EMAC_BIT_MAC_CSR6_FD, val); + owl_emac_reg_clear(priv, OWL_EMAC_REG_MAC_CSR6, + OWL_EMAC_BIT_MAC_CSR6_PR | OWL_EMAC_BIT_MAC_CSR6_PM); + + priv->link = 0; + priv->speed = SPEED_UNKNOWN; + priv->duplex = DUPLEX_UNKNOWN; + priv->pause = 0; + priv->mcaddr_list.count = 0; + + return 0; +} + +static int owl_emac_enable(struct net_device *netdev, bool start_phy) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + int ret; + + owl_emac_dma_cmd_stop(priv); + owl_emac_irq_disable(priv); + owl_emac_irq_clear(priv); + + owl_emac_ring_prepare_tx(priv); + ret = owl_emac_ring_prepare_rx(priv); + if (ret) + goto err_unprep; + + ret = owl_emac_core_sw_reset(priv); + if (ret) { + netdev_err(netdev, "failed to soft reset MAC core: %d\n", ret); + goto err_unprep; + } + + owl_emac_set_hw_mac_addr(netdev); + owl_emac_setup_frame_xmit(priv); + + netdev_reset_queue(netdev); + napi_enable(&priv->napi); + + owl_emac_irq_enable(priv); + owl_emac_dma_cmd_start(priv); + + if (start_phy) + phy_start(netdev->phydev); + + netif_start_queue(netdev); + + return 0; + +err_unprep: + owl_emac_ring_unprepare_rx(priv); + owl_emac_ring_unprepare_tx(priv); + + return ret; +} + +static void owl_emac_disable(struct net_device *netdev, bool stop_phy) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + + owl_emac_dma_cmd_stop(priv); + owl_emac_irq_disable(priv); + + netif_stop_queue(netdev); + napi_disable(&priv->napi); + + if (stop_phy) + phy_stop(netdev->phydev); + + owl_emac_ring_unprepare_rx(priv); + owl_emac_ring_unprepare_tx(priv); +} + +static int owl_emac_ndo_open(struct net_device *netdev) +{ + return owl_emac_enable(netdev, true); +} + +static int owl_emac_ndo_stop(struct net_device *netdev) +{ + owl_emac_disable(netdev, true); + + return 0; +} + +static void owl_emac_set_multicast(struct net_device *netdev, int count) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct netdev_hw_addr *ha; + int index = 0; + + if (count <= 0) { + priv->mcaddr_list.count = 0; + return; + } + + netdev_for_each_mc_addr(ha, netdev) { + if (!is_multicast_ether_addr(ha->addr)) + continue; + + WARN_ON(index >= OWL_EMAC_MAX_MULTICAST_ADDRS); + ether_addr_copy(priv->mcaddr_list.addrs[index++], ha->addr); + } + + priv->mcaddr_list.count = index; + + owl_emac_setup_frame_xmit(priv); +} + +static void owl_emac_ndo_set_rx_mode(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + u32 status, val = 0; + int mcast_count = 0; + + if (netdev->flags & IFF_PROMISC) { + val = OWL_EMAC_BIT_MAC_CSR6_PR; + } else if (netdev->flags & IFF_ALLMULTI) { + val = OWL_EMAC_BIT_MAC_CSR6_PM; + } else if (netdev->flags & IFF_MULTICAST) { + mcast_count = netdev_mc_count(netdev); + + if (mcast_count > OWL_EMAC_MAX_MULTICAST_ADDRS) { + val = OWL_EMAC_BIT_MAC_CSR6_PM; + mcast_count = 0; + } + } + + spin_lock_bh(&priv->lock); + + /* Temporarily stop DMA TX & RX. */ + status = owl_emac_dma_cmd_stop(priv); + + /* Update operation modes. */ + owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6, + OWL_EMAC_BIT_MAC_CSR6_PR | OWL_EMAC_BIT_MAC_CSR6_PM, + val); + + /* Restore DMA TX & RX status. 
*/ + owl_emac_dma_cmd_set(priv, status); + + spin_unlock_bh(&priv->lock); + + /* Set/reset multicast addr list. */ + owl_emac_set_multicast(netdev, mcast_count); +} + +static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr) +{ + struct sockaddr *skaddr = addr; + + if (!is_valid_ether_addr(skaddr->sa_data)) + return -EADDRNOTAVAIL; + + if (netif_running(netdev)) + return -EBUSY; + + memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len); + owl_emac_set_hw_mac_addr(netdev); + + return owl_emac_setup_frame_xmit(netdev_priv(netdev)); +} + +static int owl_emac_ndo_do_ioctl(struct net_device *netdev, + struct ifreq *req, int cmd) +{ + if (!netif_running(netdev)) + return -EINVAL; + + return phy_mii_ioctl(netdev->phydev, req, cmd); +} + +static void owl_emac_ndo_tx_timeout(struct net_device *netdev, + unsigned int txqueue) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + + schedule_work(&priv->mac_reset_task); +} + +static void owl_emac_reset_task(struct work_struct *work) +{ + struct owl_emac_priv *priv; + + priv = container_of(work, struct owl_emac_priv, mac_reset_task); + + netdev_dbg(priv->netdev, "resetting MAC\n"); + owl_emac_disable(priv->netdev, false); + owl_emac_enable(priv->netdev, false); +} + +static struct net_device_stats * +owl_emac_ndo_get_stats(struct net_device *netdev) +{ + /* FIXME: If possible, try to get stats from MAC hardware registers + * instead of tracking them manually in the driver. + */ + + return &netdev->stats; +} + +static const struct net_device_ops owl_emac_netdev_ops = { + .ndo_open = owl_emac_ndo_open, + .ndo_stop = owl_emac_ndo_stop, + .ndo_start_xmit = owl_emac_ndo_start_xmit, + .ndo_set_rx_mode = owl_emac_ndo_set_rx_mode, + .ndo_set_mac_address = owl_emac_ndo_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = owl_emac_ndo_do_ioctl, + .ndo_tx_timeout = owl_emac_ndo_tx_timeout, + .ndo_get_stats = owl_emac_ndo_get_stats, +}; + +static void owl_emac_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, OWL_EMAC_DRVNAME, sizeof(info->driver)); +} + +static u32 owl_emac_ethtool_get_msglevel(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + + return priv->msg_enable; +} + +static void owl_emac_ethtool_set_msglevel(struct net_device *ndev, u32 val) +{ + struct owl_emac_priv *priv = netdev_priv(ndev); + + priv->msg_enable = val; +} + +static const struct ethtool_ops owl_emac_ethtool_ops = { + .get_drvinfo = owl_emac_ethtool_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_msglevel = owl_emac_ethtool_get_msglevel, + .set_msglevel = owl_emac_ethtool_set_msglevel, +}; + +static int owl_emac_mdio_wait(struct owl_emac_priv *priv) +{ + u32 val; + + /* Wait while data transfer is in progress. 
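+ * CSR10.SB doubles as a start/busy flag: software sets it to launch a
+ * transfer and the hardware clears it on completion, so the poll below
+ * just waits for the bit to drop (every OWL_EMAC_POLL_DELAY_USEC, up to
+ * OWL_EMAC_MDIO_POLL_TIMEOUT_USEC).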
*/ + return readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR10, + val, !(val & OWL_EMAC_BIT_MAC_CSR10_SB), + OWL_EMAC_POLL_DELAY_USEC, + OWL_EMAC_MDIO_POLL_TIMEOUT_USEC); +} + +static int owl_emac_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + struct owl_emac_priv *priv = bus->priv; + u32 data, tmp; + int ret; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + data = OWL_EMAC_BIT_MAC_CSR10_SB; + data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_RD << OWL_EMAC_OFF_MAC_CSR10_OPCODE; + + tmp = addr << OWL_EMAC_OFF_MAC_CSR10_PHYADD; + data |= tmp & OWL_EMAC_MSK_MAC_CSR10_PHYADD; + + tmp = regnum << OWL_EMAC_OFF_MAC_CSR10_REGADD; + data |= tmp & OWL_EMAC_MSK_MAC_CSR10_REGADD; + + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, data); + + ret = owl_emac_mdio_wait(priv); + if (ret) + return ret; + + data = owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR10); + data &= OWL_EMAC_MSK_MAC_CSR10_DATA; + + return data; +} + +static int +owl_emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + struct owl_emac_priv *priv = bus->priv; + u32 data, tmp; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + data = OWL_EMAC_BIT_MAC_CSR10_SB; + data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR << OWL_EMAC_OFF_MAC_CSR10_OPCODE; + + tmp = addr << OWL_EMAC_OFF_MAC_CSR10_PHYADD; + data |= tmp & OWL_EMAC_MSK_MAC_CSR10_PHYADD; + + tmp = regnum << OWL_EMAC_OFF_MAC_CSR10_REGADD; + data |= tmp & OWL_EMAC_MSK_MAC_CSR10_REGADD; + + data |= val & OWL_EMAC_MSK_MAC_CSR10_DATA; + + owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, data); + + return owl_emac_mdio_wait(priv); +} + +static int owl_emac_mdio_init(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct device *dev = owl_emac_get_dev(priv); + struct device_node *mdio_node; + int ret; + + mdio_node = of_get_child_by_name(dev->of_node, "mdio"); + if (!mdio_node) + return -ENODEV; + + if (!of_device_is_available(mdio_node)) { + ret = -ENODEV; + goto err_put_node; + } + + priv->mii = devm_mdiobus_alloc(dev); + if (!priv->mii) { + ret = -ENOMEM; + goto err_put_node; + } + + snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + priv->mii->name = "owl-emac-mdio"; + priv->mii->parent = dev; + priv->mii->read = owl_emac_mdio_read; + priv->mii->write = owl_emac_mdio_write; + priv->mii->phy_mask = ~0; /* Mask out all PHYs from auto probing. 
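+ * Probing is not needed here: the PHY is attached explicitly later via
+ * of_phy_get_and_connect() using the DT node, so scanning the bus at
+ * registration time would serve no purpose.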
*/ + priv->mii->priv = priv; + + ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node); + +err_put_node: + of_node_put(mdio_node); + return ret; +} + +static int owl_emac_phy_init(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct device *dev = owl_emac_get_dev(priv); + struct phy_device *phy; + + phy = of_phy_get_and_connect(netdev, dev->of_node, + owl_emac_adjust_link); + if (!phy) + return -ENODEV; + + phy_set_sym_pause(phy, true, true, true); + + if (netif_msg_link(priv)) + phy_attached_info(phy); + + return 0; +} + +static void owl_emac_get_mac_addr(struct net_device *netdev) +{ + struct device *dev = netdev->dev.parent; + int ret; + + ret = eth_platform_get_mac_address(dev, netdev->dev_addr); + if (!ret && is_valid_ether_addr(netdev->dev_addr)) + return; + + eth_hw_addr_random(netdev); + dev_warn(dev, "using random MAC address %pM\n", netdev->dev_addr); +} + +static __maybe_unused int owl_emac_suspend(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct owl_emac_priv *priv = netdev_priv(netdev); + + disable_irq(netdev->irq); + + if (netif_running(netdev)) { + owl_emac_disable(netdev, true); + netif_device_detach(netdev); + } + + clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks); + + return 0; +} + +static __maybe_unused int owl_emac_resume(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct owl_emac_priv *priv = netdev_priv(netdev); + int ret; + + ret = clk_bulk_prepare_enable(OWL_EMAC_NCLKS, priv->clks); + if (ret) + return ret; + + if (netif_running(netdev)) { + owl_emac_core_hw_reset(priv); + owl_emac_core_sw_reset(priv); + + ret = owl_emac_enable(netdev, true); + if (ret) { + clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks); + return ret; + } + + netif_device_attach(netdev); + } + + enable_irq(netdev->irq); + + return 0; +} + +static void owl_emac_clk_disable_unprepare(void *data) +{ + struct owl_emac_priv *priv = data; + + clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks); +} + +static int owl_emac_clk_set_rate(struct owl_emac_priv *priv) +{ + struct device *dev = owl_emac_get_dev(priv); + unsigned long rate; + int ret; + + switch (priv->phy_mode) { + case PHY_INTERFACE_MODE_RMII: + rate = 50000000; + break; + + case PHY_INTERFACE_MODE_SMII: + rate = 125000000; + break; + + default: + dev_err(dev, "unsupported phy interface mode %d\n", + priv->phy_mode); + return -EOPNOTSUPP; + } + + ret = clk_set_rate(priv->clks[OWL_EMAC_CLK_RMII].clk, rate); + if (ret) + dev_err(dev, "failed to set RMII clock rate: %d\n", ret); + + return ret; +} + +static int owl_emac_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct net_device *netdev; + struct owl_emac_priv *priv; + int ret, i; + + netdev = devm_alloc_etherdev(dev, sizeof(*priv)); + if (!netdev) + return -ENOMEM; + + platform_set_drvdata(pdev, netdev); + SET_NETDEV_DEV(netdev, dev); + + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->msg_enable = netif_msg_init(-1, OWL_EMAC_DEFAULT_MSG_ENABLE); + + ret = of_get_phy_mode(dev->of_node, &priv->phy_mode); + if (ret) { + dev_err(dev, "failed to get phy mode: %d\n", ret); + return ret; + } + + spin_lock_init(&priv->lock); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "unsupported DMA mask\n"); + return ret; + } + + ret = owl_emac_ring_alloc(dev, &priv->rx_ring, OWL_EMAC_RX_RING_SIZE); + if (ret) + return ret; + + ret = owl_emac_ring_alloc(dev, &priv->tx_ring, OWL_EMAC_TX_RING_SIZE); + if 
(ret) + return ret; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + netdev->irq = platform_get_irq(pdev, 0); + if (netdev->irq < 0) + return netdev->irq; + + ret = devm_request_irq(dev, netdev->irq, owl_emac_handle_irq, + IRQF_SHARED, netdev->name, netdev); + if (ret) { + dev_err(dev, "failed to request irq: %d\n", netdev->irq); + return ret; + } + + for (i = 0; i < OWL_EMAC_NCLKS; i++) + priv->clks[i].id = owl_emac_clk_names[i]; + + ret = devm_clk_bulk_get(dev, OWL_EMAC_NCLKS, priv->clks); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(OWL_EMAC_NCLKS, priv->clks); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, owl_emac_clk_disable_unprepare, priv); + if (ret) + return ret; + + ret = owl_emac_clk_set_rate(priv); + if (ret) + return ret; + + priv->reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(priv->reset)) + return dev_err_probe(dev, PTR_ERR(priv->reset), + "failed to get reset control"); + + owl_emac_get_mac_addr(netdev); + + owl_emac_core_hw_reset(priv); + owl_emac_mdio_clock_enable(priv); + + ret = owl_emac_mdio_init(netdev); + if (ret) { + dev_err(dev, "failed to initialize MDIO bus\n"); + return ret; + } + + ret = owl_emac_phy_init(netdev); + if (ret) { + dev_err(dev, "failed to initialize PHY\n"); + return ret; + } + + INIT_WORK(&priv->mac_reset_task, owl_emac_reset_task); + + netdev->min_mtu = OWL_EMAC_MTU_MIN; + netdev->max_mtu = OWL_EMAC_MTU_MAX; + netdev->watchdog_timeo = OWL_EMAC_TX_TIMEOUT; + netdev->netdev_ops = &owl_emac_netdev_ops; + netdev->ethtool_ops = &owl_emac_ethtool_ops; + netif_napi_add(netdev, &priv->napi, owl_emac_poll, NAPI_POLL_WEIGHT); + + ret = devm_register_netdev(dev, netdev); + if (ret) { + netif_napi_del(&priv->napi); + phy_disconnect(netdev->phydev); + return ret; + } + + return 0; +} + +static int owl_emac_remove(struct platform_device *pdev) +{ + struct owl_emac_priv *priv = platform_get_drvdata(pdev); + + netif_napi_del(&priv->napi); + phy_disconnect(priv->netdev->phydev); + cancel_work_sync(&priv->mac_reset_task); + + return 0; +} + +static const struct of_device_id owl_emac_of_match[] = { + { .compatible = "actions,owl-emac", }, + { } +}; +MODULE_DEVICE_TABLE(of, owl_emac_of_match); + +static SIMPLE_DEV_PM_OPS(owl_emac_pm_ops, + owl_emac_suspend, owl_emac_resume); + +static struct platform_driver owl_emac_driver = { + .driver = { + .name = OWL_EMAC_DRVNAME, + .of_match_table = owl_emac_of_match, + .pm = &owl_emac_pm_ops, + }, + .probe = owl_emac_probe, + .remove = owl_emac_remove, +}; +module_platform_driver(owl_emac_driver); + +MODULE_DESCRIPTION("Actions Semi Owl SoCs Ethernet MAC Driver"); +MODULE_AUTHOR("Actions Semi Inc."); +MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/actions/owl-emac.h b/drivers/net/ethernet/actions/owl-emac.h new file mode 100644 index 000000000000..9eb0d1a30242 --- /dev/null +++ b/drivers/net/ethernet/actions/owl-emac.h @@ -0,0 +1,280 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Actions Semi Owl SoCs Ethernet MAC driver + * + * Copyright (c) 2012 Actions Semi Inc. 
+ * Copyright (c) 2021 Cristian Ciocaltea <cristian.ciocaltea@gmail.com> + */ + +#ifndef __OWL_EMAC_H__ +#define __OWL_EMAC_H__ + +#define OWL_EMAC_DRVNAME "owl-emac" + +#define OWL_EMAC_POLL_DELAY_USEC 5 +#define OWL_EMAC_MDIO_POLL_TIMEOUT_USEC 1000 +#define OWL_EMAC_RESET_POLL_TIMEOUT_USEC 2000 +#define OWL_EMAC_TX_TIMEOUT (2 * HZ) + +#define OWL_EMAC_MTU_MIN ETH_MIN_MTU +#define OWL_EMAC_MTU_MAX ETH_DATA_LEN +#define OWL_EMAC_RX_FRAME_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN) +#define OWL_EMAC_SKB_ALIGN 4 +#define OWL_EMAC_SKB_RESERVE 18 + +#define OWL_EMAC_MAX_MULTICAST_ADDRS 14 +#define OWL_EMAC_SETUP_FRAME_LEN 192 + +#define OWL_EMAC_RX_RING_SIZE 64 +#define OWL_EMAC_TX_RING_SIZE 32 + +/* Bus mode register */ +#define OWL_EMAC_REG_MAC_CSR0 0x0000 +#define OWL_EMAC_BIT_MAC_CSR0_SWR BIT(0) /* Software reset */ + +/* Transmit/receive poll demand registers */ +#define OWL_EMAC_REG_MAC_CSR1 0x0008 +#define OWL_EMAC_VAL_MAC_CSR1_TPD 0x01 +#define OWL_EMAC_REG_MAC_CSR2 0x0010 +#define OWL_EMAC_VAL_MAC_CSR2_RPD 0x01 + +/* Receive/transmit descriptor list base address registers */ +#define OWL_EMAC_REG_MAC_CSR3 0x0018 +#define OWL_EMAC_REG_MAC_CSR4 0x0020 + +/* Status register */ +#define OWL_EMAC_REG_MAC_CSR5 0x0028 +#define OWL_EMAC_MSK_MAC_CSR5_TS GENMASK(22, 20) /* Transmit process state */ +#define OWL_EMAC_OFF_MAC_CSR5_TS 20 +#define OWL_EMAC_VAL_MAC_CSR5_TS_DATA 0x03 /* Transferring data HOST -> FIFO */ +#define OWL_EMAC_VAL_MAC_CSR5_TS_CDES 0x07 /* Closing transmit descriptor */ +#define OWL_EMAC_MSK_MAC_CSR5_RS GENMASK(19, 17) /* Receive process state */ +#define OWL_EMAC_OFF_MAC_CSR5_RS 17 +#define OWL_EMAC_VAL_MAC_CSR5_RS_FDES 0x01 /* Fetching receive descriptor */ +#define OWL_EMAC_VAL_MAC_CSR5_RS_CDES 0x05 /* Closing receive descriptor */ +#define OWL_EMAC_VAL_MAC_CSR5_RS_DATA 0x07 /* Transferring data FIFO -> HOST */ +#define OWL_EMAC_BIT_MAC_CSR5_NIS BIT(16) /* Normal interrupt summary */ +#define OWL_EMAC_BIT_MAC_CSR5_AIS BIT(15) /* Abnormal interrupt summary */ +#define OWL_EMAC_BIT_MAC_CSR5_ERI BIT(14) /* Early receive interrupt */ +#define OWL_EMAC_BIT_MAC_CSR5_GTE BIT(11) /* General-purpose timer expiration */ +#define OWL_EMAC_BIT_MAC_CSR5_ETI BIT(10) /* Early transmit interrupt */ +#define OWL_EMAC_BIT_MAC_CSR5_RPS BIT(8) /* Receive process stopped */ +#define OWL_EMAC_BIT_MAC_CSR5_RU BIT(7) /* Receive buffer unavailable */ +#define OWL_EMAC_BIT_MAC_CSR5_RI BIT(6) /* Receive interrupt */ +#define OWL_EMAC_BIT_MAC_CSR5_UNF BIT(5) /* Transmit underflow */ +#define OWL_EMAC_BIT_MAC_CSR5_LCIS BIT(4) /* Link change status */ +#define OWL_EMAC_BIT_MAC_CSR5_LCIQ BIT(3) /* Link change interrupt */ +#define OWL_EMAC_BIT_MAC_CSR5_TU BIT(2) /* Transmit buffer unavailable */ +#define OWL_EMAC_BIT_MAC_CSR5_TPS BIT(1) /* Transmit process stopped */ +#define OWL_EMAC_BIT_MAC_CSR5_TI BIT(0) /* Transmit interrupt */ + +/* Operation mode register */ +#define OWL_EMAC_REG_MAC_CSR6 0x0030 +#define OWL_EMAC_BIT_MAC_CSR6_RA BIT(30) /* Receive all */ +#define OWL_EMAC_BIT_MAC_CSR6_TTM BIT(22) /* Transmit threshold mode */ +#define OWL_EMAC_BIT_MAC_CSR6_SF BIT(21) /* Store and forward */ +#define OWL_EMAC_MSK_MAC_CSR6_SPEED GENMASK(17, 16) /* Eth speed selection */ +#define OWL_EMAC_OFF_MAC_CSR6_SPEED 16 +#define OWL_EMAC_VAL_MAC_CSR6_SPEED_100M 0x00 +#define OWL_EMAC_VAL_MAC_CSR6_SPEED_10M 0x02 +#define OWL_EMAC_BIT_MAC_CSR6_ST BIT(13) /* Start/stop transmit command */ +#define OWL_EMAC_BIT_MAC_CSR6_LP BIT(10) /* Loopback mode */ +#define OWL_EMAC_BIT_MAC_CSR6_FD BIT(9) /* Full duplex mode */ 
+#define OWL_EMAC_BIT_MAC_CSR6_PM BIT(7) /* Pass all multicast */ +#define OWL_EMAC_BIT_MAC_CSR6_PR BIT(6) /* Promiscuous mode */ +#define OWL_EMAC_BIT_MAC_CSR6_IF BIT(4) /* Inverse filtering */ +#define OWL_EMAC_BIT_MAC_CSR6_PB BIT(3) /* Pass bad frames */ +#define OWL_EMAC_BIT_MAC_CSR6_HO BIT(2) /* Hash only filtering mode */ +#define OWL_EMAC_BIT_MAC_CSR6_SR BIT(1) /* Start/stop receive command */ +#define OWL_EMAC_BIT_MAC_CSR6_HP BIT(0) /* Hash/perfect receive filtering mode */ +#define OWL_EMAC_MSK_MAC_CSR6_STSR (OWL_EMAC_BIT_MAC_CSR6_ST | \ + OWL_EMAC_BIT_MAC_CSR6_SR) + +/* Interrupt enable register */ +#define OWL_EMAC_REG_MAC_CSR7 0x0038 +#define OWL_EMAC_BIT_MAC_CSR7_NIE BIT(16) /* Normal interrupt summary enable */ +#define OWL_EMAC_BIT_MAC_CSR7_AIE BIT(15) /* Abnormal interrupt summary enable */ +#define OWL_EMAC_BIT_MAC_CSR7_ERE BIT(14) /* Early receive interrupt enable */ +#define OWL_EMAC_BIT_MAC_CSR7_GTE BIT(11) /* General-purpose timer overflow */ +#define OWL_EMAC_BIT_MAC_CSR7_ETE BIT(10) /* Early transmit interrupt enable */ +#define OWL_EMAC_BIT_MAC_CSR7_RSE BIT(8) /* Receive stopped enable */ +#define OWL_EMAC_BIT_MAC_CSR7_RUE BIT(7) /* Receive buffer unavailable enable */ +#define OWL_EMAC_BIT_MAC_CSR7_RIE BIT(6) /* Receive interrupt enable */ +#define OWL_EMAC_BIT_MAC_CSR7_UNE BIT(5) /* Underflow interrupt enable */ +#define OWL_EMAC_BIT_MAC_CSR7_TUE BIT(2) /* Transmit buffer unavailable enable */ +#define OWL_EMAC_BIT_MAC_CSR7_TSE BIT(1) /* Transmit stopped enable */ +#define OWL_EMAC_BIT_MAC_CSR7_TIE BIT(0) /* Transmit interrupt enable */ +#define OWL_EMAC_BIT_MAC_CSR7_ALL_NOT_TUE (OWL_EMAC_BIT_MAC_CSR7_ERE | \ + OWL_EMAC_BIT_MAC_CSR7_GTE | \ + OWL_EMAC_BIT_MAC_CSR7_ETE | \ + OWL_EMAC_BIT_MAC_CSR7_RSE | \ + OWL_EMAC_BIT_MAC_CSR7_RUE | \ + OWL_EMAC_BIT_MAC_CSR7_RIE | \ + OWL_EMAC_BIT_MAC_CSR7_UNE | \ + OWL_EMAC_BIT_MAC_CSR7_TSE | \ + OWL_EMAC_BIT_MAC_CSR7_TIE) + +/* Missed frames and overflow counter register */ +#define OWL_EMAC_REG_MAC_CSR8 0x0040 +/* MII management and serial ROM register */ +#define OWL_EMAC_REG_MAC_CSR9 0x0048 + +/* MII serial management register */ +#define OWL_EMAC_REG_MAC_CSR10 0x0050 +#define OWL_EMAC_BIT_MAC_CSR10_SB BIT(31) /* Start transfer or busy */ +#define OWL_EMAC_MSK_MAC_CSR10_CLKDIV GENMASK(30, 28) /* Clock divider */ +#define OWL_EMAC_OFF_MAC_CSR10_CLKDIV 28 +#define OWL_EMAC_VAL_MAC_CSR10_CLKDIV_128 0x04 +#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR 0x01 /* Register write command */ +#define OWL_EMAC_OFF_MAC_CSR10_OPCODE 26 /* Operation mode */ +#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_DCG 0x00 /* Disable clock generation */ +#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR 0x01 /* Register write command */ +#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_RD 0x02 /* Register read command */ +#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_CDS 0x03 /* Clock divider set */ +#define OWL_EMAC_MSK_MAC_CSR10_PHYADD GENMASK(25, 21) /* Physical layer address */ +#define OWL_EMAC_OFF_MAC_CSR10_PHYADD 21 +#define OWL_EMAC_MSK_MAC_CSR10_REGADD GENMASK(20, 16) /* Register address */ +#define OWL_EMAC_OFF_MAC_CSR10_REGADD 16 +#define OWL_EMAC_MSK_MAC_CSR10_DATA GENMASK(15, 0) /* Register data */ + +/* General-purpose timer and interrupt mitigation control register */ +#define OWL_EMAC_REG_MAC_CSR11 0x0058 +#define OWL_EMAC_OFF_MAC_CSR11_TT 27 /* Transmit timer */ +#define OWL_EMAC_OFF_MAC_CSR11_NTP 24 /* No. of transmit packets */ +#define OWL_EMAC_OFF_MAC_CSR11_RT 20 /* Receive timer */ +#define OWL_EMAC_OFF_MAC_CSR11_NRP 17 /* No. 
of receive packets */ + +/* MAC address low/high registers */ +#define OWL_EMAC_REG_MAC_CSR16 0x0080 +#define OWL_EMAC_REG_MAC_CSR17 0x0088 + +/* Pause time & cache thresholds register */ +#define OWL_EMAC_REG_MAC_CSR18 0x0090 +#define OWL_EMAC_OFF_MAC_CSR18_CPTL 24 /* Cache pause threshold level */ +#define OWL_EMAC_OFF_MAC_CSR18_CRTL 16 /* Cache restart threshold level */ +#define OWL_EMAC_OFF_MAC_CSR18_PQT 0 /* Flow control pause quanta time */ + +/* FIFO pause & restart threshold register */ +#define OWL_EMAC_REG_MAC_CSR19 0x0098 +#define OWL_EMAC_OFF_MAC_CSR19_FPTL 16 /* FIFO pause threshold level */ +#define OWL_EMAC_OFF_MAC_CSR19_FRTL 0 /* FIFO restart threshold level */ + +/* Flow control setup & status register */ +#define OWL_EMAC_REG_MAC_CSR20 0x00A0 +#define OWL_EMAC_BIT_MAC_CSR20_FCE BIT(31) /* Flow Control Enable */ +#define OWL_EMAC_BIT_MAC_CSR20_TUE BIT(30) /* Transmit Un-pause frames Enable */ +#define OWL_EMAC_BIT_MAC_CSR20_TPE BIT(29) /* Transmit Pause frames Enable */ +#define OWL_EMAC_BIT_MAC_CSR20_RPE BIT(28) /* Receive Pause frames Enable */ +#define OWL_EMAC_BIT_MAC_CSR20_BPE BIT(27) /* Back pressure (half-duplex) Enable */ + +/* MII control register */ +#define OWL_EMAC_REG_MAC_CTRL 0x00B0 +#define OWL_EMAC_BIT_MAC_CTRL_RRSB BIT(8) /* RMII_REFCLK select bit */ +#define OWL_EMAC_OFF_MAC_CTRL_SSDC 4 /* SMII SYNC delay cycle */ +#define OWL_EMAC_BIT_MAC_CTRL_RCPS BIT(1) /* REF_CLK phase select */ +#define OWL_EMAC_BIT_MAC_CTRL_RSIS BIT(0) /* RMII/SMII interface select */ + +/* Receive descriptor status field */ +#define OWL_EMAC_BIT_RDES0_OWN BIT(31) /* Ownership bit */ +#define OWL_EMAC_BIT_RDES0_FF BIT(30) /* Filtering fail */ +#define OWL_EMAC_MSK_RDES0_FL GENMASK(29, 16) /* Frame length */ +#define OWL_EMAC_OFF_RDES0_FL 16 +#define OWL_EMAC_BIT_RDES0_ES BIT(15) /* Error summary */ +#define OWL_EMAC_BIT_RDES0_DE BIT(14) /* Descriptor error */ +#define OWL_EMAC_BIT_RDES0_RF BIT(11) /* Runt frame */ +#define OWL_EMAC_BIT_RDES0_MF BIT(10) /* Multicast frame */ +#define OWL_EMAC_BIT_RDES0_FS BIT(9) /* First descriptor */ +#define OWL_EMAC_BIT_RDES0_LS BIT(8) /* Last descriptor */ +#define OWL_EMAC_BIT_RDES0_TL BIT(7) /* Frame too long */ +#define OWL_EMAC_BIT_RDES0_CS BIT(6) /* Collision seen */ +#define OWL_EMAC_BIT_RDES0_FT BIT(5) /* Frame type */ +#define OWL_EMAC_BIT_RDES0_RE BIT(3) /* Report on MII error */ +#define OWL_EMAC_BIT_RDES0_DB BIT(2) /* Dribbling bit */ +#define OWL_EMAC_BIT_RDES0_CE BIT(1) /* CRC error */ +#define OWL_EMAC_BIT_RDES0_ZERO BIT(0) /* Legal frame length indicator */ + +/* Receive descriptor control and count field */ +#define OWL_EMAC_BIT_RDES1_RER BIT(25) /* Receive end of ring */ +#define OWL_EMAC_MSK_RDES1_RBS1 GENMASK(10, 0) /* Buffer 1 size */ + +/* Transmit descriptor status field */ +#define OWL_EMAC_BIT_TDES0_OWN BIT(31) /* Ownership bit */ +#define OWL_EMAC_BIT_TDES0_ES BIT(15) /* Error summary */ +#define OWL_EMAC_BIT_TDES0_LO BIT(11) /* Loss of carrier */ +#define OWL_EMAC_BIT_TDES0_NC BIT(10) /* No carrier */ +#define OWL_EMAC_BIT_TDES0_LC BIT(9) /* Late collision */ +#define OWL_EMAC_BIT_TDES0_EC BIT(8) /* Excessive collisions */ +#define OWL_EMAC_MSK_TDES0_CC GENMASK(6, 3) /* Collision count */ +#define OWL_EMAC_BIT_TDES0_UF BIT(1) /* Underflow error */ +#define OWL_EMAC_BIT_TDES0_DE BIT(0) /* Deferred */ + +/* Transmit descriptor control and count field */ +#define OWL_EMAC_BIT_TDES1_IC BIT(31) /* Interrupt on completion */ +#define OWL_EMAC_BIT_TDES1_LS BIT(30) /* Last descriptor */ +#define OWL_EMAC_BIT_TDES1_FS BIT(29) /* 
First descriptor */ +#define OWL_EMAC_BIT_TDES1_FT1 BIT(28) /* Filtering type */ +#define OWL_EMAC_BIT_TDES1_SET BIT(27) /* Setup packet */ +#define OWL_EMAC_BIT_TDES1_AC BIT(26) /* Add CRC disable */ +#define OWL_EMAC_BIT_TDES1_TER BIT(25) /* Transmit end of ring */ +#define OWL_EMAC_BIT_TDES1_DPD BIT(23) /* Disabled padding */ +#define OWL_EMAC_BIT_TDES1_FT0 BIT(22) /* Filtering type */ +#define OWL_EMAC_MSK_TDES1_TBS1 GENMASK(10, 0) /* Buffer 1 size */ + +static const char *const owl_emac_clk_names[] = { "eth", "rmii" }; +#define OWL_EMAC_NCLKS ARRAY_SIZE(owl_emac_clk_names) + +enum owl_emac_clk_map { + OWL_EMAC_CLK_ETH = 0, + OWL_EMAC_CLK_RMII +}; + +struct owl_emac_addr_list { + u8 addrs[OWL_EMAC_MAX_MULTICAST_ADDRS][ETH_ALEN]; + int count; +}; + +/* TX/RX descriptors */ +struct owl_emac_ring_desc { + u32 status; + u32 control; + u32 buf_addr; + u32 reserved; /* 2nd buffer address is not used */ +}; + +struct owl_emac_ring { + struct owl_emac_ring_desc *descs; + dma_addr_t descs_dma; + struct sk_buff **skbs; + dma_addr_t *skbs_dma; + unsigned int size; + unsigned int head; + unsigned int tail; +}; + +struct owl_emac_priv { + struct net_device *netdev; + void __iomem *base; + + struct clk_bulk_data clks[OWL_EMAC_NCLKS]; + struct reset_control *reset; + + struct owl_emac_ring rx_ring; + struct owl_emac_ring tx_ring; + + struct mii_bus *mii; + struct napi_struct napi; + + phy_interface_t phy_mode; + unsigned int link; + int speed; + int duplex; + int pause; + struct owl_emac_addr_list mcaddr_list; + + struct work_struct mac_reset_task; + + u32 msg_enable; /* Debug message level */ + spinlock_t lock; /* Sync concurrent ring access */ +}; + +#endif /* __OWL_EMAC_H__ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 343caf41e709..73b03ce59412 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -124,7 +124,7 @@ struct ena_com_io_cq { /* holds the number of cdesc of the current packet */ u16 cur_rx_pkt_cdesc_count; - /* save the firt cdesc idx of the current packet */ + /* save the first cdesc idx of the current packet */ u16 cur_rx_pkt_cdesc_start_idx; u16 q_depth; diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index d6cc7aa612b7..2fe7ccee55b2 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -251,10 +251,10 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data) for (j = 0; j < ENA_STATS_ARRAY_TX; j++) { ena_stats = &ena_stats_tx_strings[j]; - snprintf(*data, ETH_GSTRING_LEN, - "queue_%u_%s_%s", i, - is_xdp ? "xdp_tx" : "tx", ena_stats->name); - (*data) += ETH_GSTRING_LEN; + ethtool_sprintf(data, + "queue_%u_%s_%s", i, + is_xdp ? 
"xdp_tx" : "tx", + ena_stats->name); } if (!is_xdp) { @@ -264,9 +264,9 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data) for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { ena_stats = &ena_stats_rx_strings[j]; - snprintf(*data, ETH_GSTRING_LEN, - "queue_%u_rx_%s", i, ena_stats->name); - (*data) += ETH_GSTRING_LEN; + ethtool_sprintf(data, + "queue_%u_rx_%s", i, + ena_stats->name); } } } @@ -280,9 +280,8 @@ static void ena_com_dev_strings(u8 **data) for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) { ena_stats = &ena_stats_ena_com_strings[i]; - snprintf(*data, ETH_GSTRING_LEN, - "ena_admin_q_%s", ena_stats->name); - (*data) += ETH_GSTRING_LEN; + ethtool_sprintf(data, + "ena_admin_q_%s", ena_stats->name); } } @@ -295,15 +294,13 @@ static void ena_get_strings(struct ena_adapter *adapter, for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { ena_stats = &ena_stats_global_strings[i]; - memcpy(data, ena_stats->name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; + ethtool_sprintf(&data, ena_stats->name); } if (eni_stats_needed) { for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) { ena_stats = &ena_stats_eni_strings[i]; - memcpy(data, ena_stats->name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; + ethtool_sprintf(&data, ena_stats->name); } } diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 3f65f2b370c5..d54375b255dc 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -47,7 +47,7 @@ static void atl1c_down(struct atl1c_adapter *adapter); static int atl1c_reset_mac(struct atl1c_hw *hw); static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter); static int atl1c_configure(struct atl1c_adapter *adapter); -static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter); +static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode); static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | @@ -470,7 +470,7 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? 
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; - head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) + + head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); adapter->rx_frag_size = roundup_pow_of_two(head_size); } @@ -1434,7 +1434,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter) atl1c_set_multi(netdev); atl1c_restore_vlan(adapter); - num = atl1c_alloc_rx_buffer(adapter); + num = atl1c_alloc_rx_buffer(adapter, false); if (unlikely(num == 0)) return -ENOMEM; @@ -1650,14 +1650,20 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter, skb_checksum_none_assert(skb); } -static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) +static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter, + bool napi_mode) { struct sk_buff *skb; struct page *page; - if (adapter->rx_frag_size > PAGE_SIZE) - return netdev_alloc_skb(adapter->netdev, - adapter->rx_buffer_len); + if (adapter->rx_frag_size > PAGE_SIZE) { + if (likely(napi_mode)) + return napi_alloc_skb(&adapter->napi, + adapter->rx_buffer_len); + else + return netdev_alloc_skb_ip_align(adapter->netdev, + adapter->rx_buffer_len); + } page = adapter->rx_page; if (!page) { @@ -1670,7 +1676,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) skb = build_skb(page_address(page) + adapter->rx_page_offset, adapter->rx_frag_size); if (likely(skb)) { - skb_reserve(skb, NET_SKB_PAD); + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); adapter->rx_page_offset += adapter->rx_frag_size; if (adapter->rx_page_offset >= PAGE_SIZE) adapter->rx_page = NULL; @@ -1680,7 +1686,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) return skb; } -static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) +static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode) { struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; struct pci_dev *pdev = adapter->pdev; @@ -1701,7 +1707,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) while (next_info->flags & ATL1C_BUFFER_FREE) { rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); - skb = atl1c_alloc_skb(adapter); + skb = atl1c_alloc_skb(adapter, napi_mode); if (unlikely(!skb)) { if (netif_msg_rx_err(adapter)) dev_warn(&pdev->dev, "alloc rx buffer failed\n"); @@ -1851,13 +1857,13 @@ rrs_checked: vlan = le16_to_cpu(vlan); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); } - netif_receive_skb(skb); + napi_gro_receive(&adapter->napi, skb); (*work_done)++; count++; } if (count) - atl1c_alloc_rx_buffer(adapter); + atl1c_alloc_rx_buffer(adapter, true); } /** diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index f8a168b73307..cb88ffb8f12f 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -54,7 +54,7 @@ config B44_PCI config BCM4908_ENET tristate "Broadcom BCM4908 internal mac support" depends on ARCH_BCM4908 || COMPILE_TEST - default y + default y if ARCH_BCM4908 help This driver supports Ethernet controller integrated into Broadcom BCM4908 family SoCs. 
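The ena_ethtool.c hunks a little earlier (and the bnad_ethtool.c conversion later in this series) replace the open-coded snprintf()-plus-cursor idiom with ethtool_sprintf(), which formats the stat name into the string table and advances the cursor by ETH_GSTRING_LEN in a single call, so the truncation bound and the pointer arithmetic can no longer drift apart. A minimal sketch of the resulting .get_strings pattern; the stat names and demo_get_strings() are hypothetical, not taken from any hunk in this series:

/* Sketch only: demo_stat_names and demo_get_strings are illustrative. */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>

static const char * const demo_stat_names[] = { "rx_ok", "rx_err", "tx_ok" };

static void demo_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(demo_stat_names); i++)
		/* Writes the formatted name, then steps data by ETH_GSTRING_LEN. */
		ethtool_sprintf(&data, "queue_0_%s", demo_stat_names[i]);
}
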
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 98cf82dea3e4..cbfed1d1477b 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -9,6 +9,7 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_net.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/string.h> @@ -53,6 +54,7 @@ struct bcm4908_enet_dma_ring { int length; u16 cfg_block; u16 st_ram_block; + struct napi_struct napi; union { void *cpu_addr; @@ -66,8 +68,8 @@ struct bcm4908_enet_dma_ring { struct bcm4908_enet { struct device *dev; struct net_device *netdev; - struct napi_struct napi; void __iomem *base; + int irq_tx; struct bcm4908_enet_dma_ring tx_ring; struct bcm4908_enet_dma_ring rx_ring; @@ -122,24 +124,31 @@ static void enet_umac_set(struct bcm4908_enet *enet, u16 offset, u32 set) * Helpers */ -static void bcm4908_enet_intrs_on(struct bcm4908_enet *enet) +static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu) { - enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS); + enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD); } -static void bcm4908_enet_intrs_off(struct bcm4908_enet *enet) +/*** + * DMA ring ops + */ + +static void bcm4908_enet_dma_ring_intrs_on(struct bcm4908_enet *enet, + struct bcm4908_enet_dma_ring *ring) { - enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, 0); + enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS); } -static void bcm4908_enet_intrs_ack(struct bcm4908_enet *enet) +static void bcm4908_enet_dma_ring_intrs_off(struct bcm4908_enet *enet, + struct bcm4908_enet_dma_ring *ring) { - enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS); + enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0); } -static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu) +static void bcm4908_enet_dma_ring_intrs_ack(struct bcm4908_enet *enet, + struct bcm4908_enet_dma_ring *ring) { - enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD); + enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS); } /*** @@ -413,11 +422,14 @@ static void bcm4908_enet_gmac_init(struct bcm4908_enet *enet) static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id) { struct bcm4908_enet *enet = dev_id; + struct bcm4908_enet_dma_ring *ring; - bcm4908_enet_intrs_off(enet); - bcm4908_enet_intrs_ack(enet); + ring = (irq == enet->irq_tx) ? 
&enet->tx_ring : &enet->rx_ring; - napi_schedule(&enet->napi); + bcm4908_enet_dma_ring_intrs_off(enet, ring); + bcm4908_enet_dma_ring_intrs_ack(enet, ring); + + napi_schedule(&ring->napi); return IRQ_HANDLED; } @@ -425,6 +437,8 @@ static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id) static int bcm4908_enet_open(struct net_device *netdev) { struct bcm4908_enet *enet = netdev_priv(netdev); + struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring; + struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring; struct device *dev = enet->dev; int err; @@ -434,6 +448,17 @@ static int bcm4908_enet_open(struct net_device *netdev) return err; } + if (enet->irq_tx > 0) { + err = request_irq(enet->irq_tx, bcm4908_enet_irq_handler, 0, + "tx", enet); + if (err) { + dev_err(dev, "Failed to request IRQ %d: %d\n", + enet->irq_tx, err); + free_irq(netdev->irq, enet); + return err; + } + } + bcm4908_enet_gmac_init(enet); bcm4908_enet_dma_reset(enet); bcm4908_enet_dma_init(enet); @@ -442,14 +467,19 @@ static int bcm4908_enet_open(struct net_device *netdev) enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN); enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0); - bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring); - napi_enable(&enet->napi); + if (enet->irq_tx > 0) { + napi_enable(&tx_ring->napi); + bcm4908_enet_dma_ring_intrs_ack(enet, tx_ring); + bcm4908_enet_dma_ring_intrs_on(enet, tx_ring); + } + + bcm4908_enet_dma_rx_ring_enable(enet, rx_ring); + napi_enable(&rx_ring->napi); netif_carrier_on(netdev); netif_start_queue(netdev); - - bcm4908_enet_intrs_ack(enet); - bcm4908_enet_intrs_on(enet); + bcm4908_enet_dma_ring_intrs_ack(enet, rx_ring); + bcm4908_enet_dma_ring_intrs_on(enet, rx_ring); return 0; } @@ -457,16 +487,20 @@ static int bcm4908_enet_open(struct net_device *netdev) static int bcm4908_enet_stop(struct net_device *netdev) { struct bcm4908_enet *enet = netdev_priv(netdev); + struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring; + struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring; netif_stop_queue(netdev); netif_carrier_off(netdev); - napi_disable(&enet->napi); + napi_disable(&rx_ring->napi); + napi_disable(&tx_ring->napi); bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring); bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring); bcm4908_enet_dma_uninit(enet); + free_irq(enet->irq_tx, enet); free_irq(enet->netdev->irq, enet); return 0; @@ -483,25 +517,19 @@ static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netde u32 tmp; /* Free transmitted skbs */ - while (ring->read_idx != ring->write_idx) { - buf_desc = &ring->buf_desc[ring->read_idx]; - if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN) - break; - slot = &ring->slots[ring->read_idx]; - - dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE); - dev_kfree_skb(slot->skb); - if (++ring->read_idx == ring->length) - ring->read_idx = 0; - } + if (enet->irq_tx < 0 && + !(le32_to_cpu(ring->buf_desc[ring->read_idx].ctl) & DMA_CTL_STATUS_OWN)) + napi_schedule(&enet->tx_ring.napi); /* Don't use the last empty buf descriptor */ if (ring->read_idx <= ring->write_idx) free_buf_descs = ring->read_idx - ring->write_idx + ring->length; else free_buf_descs = ring->read_idx - ring->write_idx; - if (free_buf_descs < 2) + if (free_buf_descs < 2) { + netif_stop_queue(netdev); return NETDEV_TX_BUSY; + } /* Hardware removes OWN bit after sending data */ buf_desc = &ring->buf_desc[ring->write_idx]; @@ -538,9 +566,10 @@ static int bcm4908_enet_start_xmit(struct 
sk_buff *skb, struct net_device *netde return NETDEV_TX_OK; } -static int bcm4908_enet_poll(struct napi_struct *napi, int weight) +static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight) { - struct bcm4908_enet *enet = container_of(napi, struct bcm4908_enet, napi); + struct bcm4908_enet_dma_ring *rx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi); + struct bcm4908_enet *enet = container_of(rx_ring, struct bcm4908_enet, rx_ring); struct device *dev = enet->dev; int handled = 0; @@ -589,7 +618,7 @@ static int bcm4908_enet_poll(struct napi_struct *napi, int weight) if (handled < weight) { napi_complete_done(napi, handled); - bcm4908_enet_intrs_on(enet); + bcm4908_enet_dma_ring_intrs_on(enet, rx_ring); } /* Hardware could disable ring if it run out of descriptors */ @@ -598,6 +627,42 @@ static int bcm4908_enet_poll(struct napi_struct *napi, int weight) return handled; } +static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight) +{ + struct bcm4908_enet_dma_ring *tx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi); + struct bcm4908_enet *enet = container_of(tx_ring, struct bcm4908_enet, tx_ring); + struct bcm4908_enet_dma_ring_bd *buf_desc; + struct bcm4908_enet_dma_ring_slot *slot; + struct device *dev = enet->dev; + unsigned int bytes = 0; + int handled = 0; + + while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) { + buf_desc = &tx_ring->buf_desc[tx_ring->read_idx]; + if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN) + break; + slot = &tx_ring->slots[tx_ring->read_idx]; + + dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE); + dev_kfree_skb(slot->skb); + bytes += slot->len; + if (++tx_ring->read_idx == tx_ring->length) + tx_ring->read_idx = 0; + + handled++; + } + + if (handled < weight) { + napi_complete_done(napi, handled); + bcm4908_enet_dma_ring_intrs_on(enet, tx_ring); + } + + if (netif_queue_stopped(enet->netdev)) + netif_wake_queue(enet->netdev); + + return handled; +} + static int bcm4908_enet_change_mtu(struct net_device *netdev, int new_mtu) { struct bcm4908_enet *enet = netdev_priv(netdev); @@ -620,6 +685,7 @@ static int bcm4908_enet_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct net_device *netdev; struct bcm4908_enet *enet; + const u8 *mac; int err; netdev = devm_alloc_etherdev(dev, sizeof(*enet)); @@ -640,6 +706,8 @@ static int bcm4908_enet_probe(struct platform_device *pdev) if (netdev->irq < 0) return netdev->irq; + enet->irq_tx = platform_get_irq_byname(pdev, "tx"); + dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); err = bcm4908_enet_dma_alloc(enet); @@ -647,12 +715,17 @@ static int bcm4908_enet_probe(struct platform_device *pdev) return err; SET_NETDEV_DEV(netdev, &pdev->dev); - eth_hw_addr_random(netdev); + mac = of_get_mac_address(dev->of_node); + if (!IS_ERR(mac)) + ether_addr_copy(netdev->dev_addr, mac); + else + eth_hw_addr_random(netdev); netdev->netdev_ops = &bcm4908_enet_netdev_ops; netdev->min_mtu = ETH_ZLEN; netdev->mtu = ETH_DATA_LEN; netdev->max_mtu = ENET_MTU_MAX; - netif_napi_add(netdev, &enet->napi, bcm4908_enet_poll, 64); + netif_tx_napi_add(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx, NAPI_POLL_WEIGHT); + netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, NAPI_POLL_WEIGHT); err = register_netdev(netdev); if (err) { @@ -670,7 +743,8 @@ static int bcm4908_enet_remove(struct platform_device *pdev) struct bcm4908_enet *enet = platform_get_drvdata(pdev); unregister_netdev(enet->netdev); - netif_napi_del(&enet->napi); + 
netif_napi_del(&enet->rx_ring.napi); + netif_napi_del(&enet->tx_ring.napi); bcm4908_enet_dma_free(enet); return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b652ed72a621..56801387591d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -1395,7 +1395,6 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) u32 op_gen_command = 0; u32 comp_addr = BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); - int ret = 0; if (REG_RD(bp, comp_addr)) { BNX2X_ERR("Cleanup complete was not 0 before sending\n"); @@ -1420,7 +1419,7 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) /* Zero completion for next FLR */ REG_WR(bp, comp_addr, 0); - return ret; + return 0; } u8 bnx2x_is_pcie_pending(struct pci_dev *dev) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b53a0d87371a..6f13642121c4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4470,7 +4470,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, writel(1, bp->bar0 + doorbell_offset); if (!pci_is_enabled(bp->pdev)) - return 0; + return -ENODEV; if (!timeout) timeout = DFLT_HWRM_CMD_TIMEOUT; @@ -4500,12 +4500,15 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) return -EBUSY; /* on first few passes, just barely sleep */ - if (i < HWRM_SHORT_TIMEOUT_COUNTER) + if (i < HWRM_SHORT_TIMEOUT_COUNTER) { usleep_range(HWRM_SHORT_MIN_TIMEOUT, HWRM_SHORT_MAX_TIMEOUT); - else + } else { + if (HWRM_WAIT_MUST_ABORT(bp, req)) + break; usleep_range(HWRM_MIN_TIMEOUT, HWRM_MAX_TIMEOUT); + } } if (bp->hwrm_intr_seq_id != (u16)~seq_id) { @@ -4530,15 +4533,19 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, if (len) break; /* on first few passes, just barely sleep */ - if (i < HWRM_SHORT_TIMEOUT_COUNTER) + if (i < HWRM_SHORT_TIMEOUT_COUNTER) { usleep_range(HWRM_SHORT_MIN_TIMEOUT, HWRM_SHORT_MAX_TIMEOUT); - else + } else { + if (HWRM_WAIT_MUST_ABORT(bp, req)) + goto timeout_abort; usleep_range(HWRM_MIN_TIMEOUT, HWRM_MAX_TIMEOUT); + } } if (i >= tmo_count) { +timeout_abort: if (!silent) netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", HWRM_TOTAL_TIMEOUT(i), @@ -7540,6 +7547,32 @@ static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) BNXT_FW_HEALTH_WIN_MAP_OFF); } +bool bnxt_is_fw_healthy(struct bnxt *bp) +{ + if (bp->fw_health && bp->fw_health->status_reliable) { + u32 fw_status; + + fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status)) + return false; + } + + return true; +} + +static void bnxt_inv_fw_health_reg(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 reg_type; + + if (!fw_health || !fw_health->status_reliable) + return; + + reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); + if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) + fw_health->status_reliable = false; +} + static void bnxt_try_map_fw_health_reg(struct bnxt *bp) { void __iomem *hs; @@ -7547,6 +7580,9 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp) u32 reg_type; u32 sig; + if (bp->fw_health) + bp->fw_health->status_reliable = false; + __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); hs = bp->bar0 + 
BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); @@ -7558,11 +7594,9 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp) BNXT_FW_HEALTH_WIN_BASE + BNXT_GRC_REG_CHIP_NUM); } - if (!BNXT_CHIP_P5(bp)) { - if (bp->fw_health) - bp->fw_health->status_reliable = false; + if (!BNXT_CHIP_P5(bp)) return; - } + status_loc = BNXT_GRC_REG_STATUS_P5 | BNXT_FW_HEALTH_REG_TYPE_BAR0; } else { @@ -7592,6 +7626,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp) u32 reg_base = 0xffffffff; int i; + bp->fw_health->status_reliable = false; /* Only pre-map the monitoring GRC registers using window 3 */ for (i = 0; i < 4; i++) { u32 reg = fw_health->regs[i]; @@ -7604,6 +7639,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp) return -ERANGE; fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); } + bp->fw_health->status_reliable = true; if (reg_base == 0xffffffff) return 0; @@ -9494,9 +9530,10 @@ static int bnxt_try_recover_fw(struct bnxt *bp) mutex_lock(&bp->hwrm_cmd_lock); do { - rc = __bnxt_hwrm_ver_get(bp, true); sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); - if (!sts || !BNXT_FW_IS_BOOTING(sts)) + rc = __bnxt_hwrm_ver_get(bp, true); + if (!sts || (!BNXT_FW_IS_BOOTING(sts) && + !BNXT_FW_IS_RECOVERING(sts))) break; retry++; } while (rc == -EBUSY && retry < BNXT_FW_RETRY); @@ -9556,13 +9593,17 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) if (rc) return rc; - if (!up) + if (!up) { + bnxt_inv_fw_health_reg(bp); return 0; + } if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) resc_reinit = true; if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) fw_reset = true; + else if (bp->fw_health && !bp->fw_health->status_reliable) + bnxt_try_map_fw_health_reg(bp); if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); @@ -9571,6 +9612,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) } if (resc_reinit || fw_reset) { if (fw_reset) { + set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) bnxt_ulp_stop(bp); bnxt_free_ctx_mem(bp); @@ -9579,21 +9621,25 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) bnxt_dcb_free(bp); rc = bnxt_fw_init_one(bp); if (rc) { + clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); set_bit(BNXT_STATE_ABORT_ERR, &bp->state); return rc; } bnxt_clear_int_mode(bp); rc = bnxt_init_int_mode(bp); if (rc) { + clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); netdev_err(bp->dev, "init int mode failed\n"); return rc; } - set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); } if (BNXT_NEW_RM(bp)) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; rc = bnxt_hwrm_func_resc_qcaps(bp, true); + if (rc) + netdev_err(bp->dev, "resc_qcaps failed\n"); + hw_resc->resv_cp_rings = 0; hw_resc->resv_stat_ctxs = 0; hw_resc->resv_irqs = 0; @@ -9607,7 +9653,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) } } } - return 0; + return rc; } static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) @@ -11640,7 +11686,7 @@ static void bnxt_reset_all(struct bnxt *bp) req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) + if (rc != -ENODEV) netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); } bp->fw_reset_timestamp = jiffies; @@ -11723,28 +11769,20 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); return; case BNXT_FW_RESET_STATE_ENABLE_DEV: - if 
(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { - u32 val; - - if (!bp->fw_reset_min_dsecs) { - u16 val; - - pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, - &val); - if (val == 0xffff) { - if (bnxt_fw_reset_timeout(bp)) { - netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); - goto fw_reset_abort; - } - bnxt_queue_fw_reset_work(bp, HZ / 1000); - return; + bnxt_inv_fw_health_reg(bp); + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && + !bp->fw_reset_min_dsecs) { + u16 val; + + pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); + if (val == 0xffff) { + if (bnxt_fw_reset_timeout(bp)) { + netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); + goto fw_reset_abort; } + bnxt_queue_fw_reset_work(bp, HZ / 1000); + return; } - val = bnxt_fw_health_readl(bp, - BNXT_FW_RESET_INPROG_REG); - if (val) - netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n", - val); } clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); if (pci_enable_device(bp->pdev)) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 1259e68cba2a..29061c577baa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -671,6 +671,10 @@ struct nqe_cn { #define HWRM_MIN_TIMEOUT 25 #define HWRM_MAX_TIMEOUT 40 +#define HWRM_WAIT_MUST_ABORT(bp, req) \ + (le16_to_cpu((req)->req_type) != HWRM_VER_GET && \ + !bnxt_is_fw_healthy(bp)) + #define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? \ ((n) * HWRM_SHORT_MIN_TIMEOUT) : \ (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ @@ -1560,6 +1564,7 @@ struct bnxt_fw_reporter_ctx { #define BNXT_FW_STATUS_HEALTH_MSK 0xffff #define BNXT_FW_STATUS_HEALTHY 0x8000 #define BNXT_FW_STATUS_SHUTDOWN 0x100000 +#define BNXT_FW_STATUS_RECOVERING 0x400000 #define BNXT_FW_IS_HEALTHY(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) ==\ BNXT_FW_STATUS_HEALTHY) @@ -1570,6 +1575,9 @@ struct bnxt_fw_reporter_ctx { #define BNXT_FW_IS_ERR(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \ BNXT_FW_STATUS_HEALTHY) +#define BNXT_FW_IS_RECOVERING(sts) (BNXT_FW_IS_ERR(sts) && \ + ((sts) & BNXT_FW_STATUS_RECOVERING)) + #define BNXT_FW_RETRY 5 #define BNXT_FW_IF_RETRY 10 @@ -2228,6 +2236,7 @@ int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp); int bnxt_hwrm_free_wol_fltr(struct bnxt *bp); int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all); +bool bnxt_is_fw_healthy(struct bnxt *bp); int bnxt_hwrm_fw_set_time(struct bnxt *); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 588c4804d10a..265c2fa6bbe0 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -524,6 +524,68 @@ bnad_set_pauseparam(struct net_device *netdev, return 0; } +static void bnad_get_txf_strings(u8 **string, int f_num) +{ + ethtool_sprintf(string, "txf%d_ucast_octets", f_num); + ethtool_sprintf(string, "txf%d_ucast", f_num); + ethtool_sprintf(string, "txf%d_ucast_vlan", f_num); + ethtool_sprintf(string, "txf%d_mcast_octets", f_num); + ethtool_sprintf(string, "txf%d_mcast", f_num); + ethtool_sprintf(string, "txf%d_mcast_vlan", f_num); + ethtool_sprintf(string, "txf%d_bcast_octets", f_num); + ethtool_sprintf(string, "txf%d_bcast", f_num); + ethtool_sprintf(string, "txf%d_bcast_vlan", f_num); + ethtool_sprintf(string, 
"txf%d_errors", f_num); + ethtool_sprintf(string, "txf%d_filter_vlan", f_num); + ethtool_sprintf(string, "txf%d_filter_mac_sa", f_num); +} + +static void bnad_get_rxf_strings(u8 **string, int f_num) +{ + ethtool_sprintf(string, "rxf%d_ucast_octets", f_num); + ethtool_sprintf(string, "rxf%d_ucast", f_num); + ethtool_sprintf(string, "rxf%d_ucast_vlan", f_num); + ethtool_sprintf(string, "rxf%d_mcast_octets", f_num); + ethtool_sprintf(string, "rxf%d_mcast", f_num); + ethtool_sprintf(string, "rxf%d_mcast_vlan", f_num); + ethtool_sprintf(string, "rxf%d_bcast_octets", f_num); + ethtool_sprintf(string, "rxf%d_bcast", f_num); + ethtool_sprintf(string, "rxf%d_bcast_vlan", f_num); + ethtool_sprintf(string, "rxf%d_frame_drops", f_num); +} + +static void bnad_get_cq_strings(u8 **string, int q_num) +{ + ethtool_sprintf(string, "cq%d_producer_index", q_num); + ethtool_sprintf(string, "cq%d_consumer_index", q_num); + ethtool_sprintf(string, "cq%d_hw_producer_index", q_num); + ethtool_sprintf(string, "cq%d_intr", q_num); + ethtool_sprintf(string, "cq%d_poll", q_num); + ethtool_sprintf(string, "cq%d_schedule", q_num); + ethtool_sprintf(string, "cq%d_keep_poll", q_num); + ethtool_sprintf(string, "cq%d_complete", q_num); +} + +static void bnad_get_rxq_strings(u8 **string, int q_num) +{ + ethtool_sprintf(string, "rxq%d_packets", q_num); + ethtool_sprintf(string, "rxq%d_bytes", q_num); + ethtool_sprintf(string, "rxq%d_packets_with_error", q_num); + ethtool_sprintf(string, "rxq%d_allocbuf_failed", q_num); + ethtool_sprintf(string, "rxq%d_mapbuf_failed", q_num); + ethtool_sprintf(string, "rxq%d_producer_index", q_num); + ethtool_sprintf(string, "rxq%d_consumer_index", q_num); +} + +static void bnad_get_txq_strings(u8 **string, int q_num) +{ + ethtool_sprintf(string, "txq%d_packets", q_num); + ethtool_sprintf(string, "txq%d_bytes", q_num); + ethtool_sprintf(string, "txq%d_producer_index", q_num); + ethtool_sprintf(string, "txq%d_consumer_index", q_num); + ethtool_sprintf(string, "txq%d_hw_consumer_index", q_num); +} + static void bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) { @@ -531,175 +593,57 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) int i, j, q_num; u32 bmap; + if (stringset != ETH_SS_STATS) + return; + mutex_lock(&bnad->conf_mutex); - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) { - BUG_ON(!(strlen(bnad_net_stats_strings[i]) < - ETH_GSTRING_LEN)); - strncpy(string, bnad_net_stats_strings[i], - ETH_GSTRING_LEN); - string += ETH_GSTRING_LEN; - } - bmap = bna_tx_rid_mask(&bnad->bna); - for (i = 0; bmap; i++) { - if (bmap & 1) { - sprintf(string, "txf%d_ucast_octets", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_ucast", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_ucast_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_mcast_octets", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_mcast", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_mcast_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_bcast_octets", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_bcast", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_bcast_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_errors", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_filter_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "txf%d_filter_mac_sa", i); - string += ETH_GSTRING_LEN; - } - bmap >>= 1; - } + for (i = 0; i < 
BNAD_ETHTOOL_STATS_NUM; i++) { + BUG_ON(!(strlen(bnad_net_stats_strings[i]) < ETH_GSTRING_LEN)); + ethtool_sprintf(&string, bnad_net_stats_strings[i]); + } - bmap = bna_rx_rid_mask(&bnad->bna); - for (i = 0; bmap; i++) { - if (bmap & 1) { - sprintf(string, "rxf%d_ucast_octets", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_ucast", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_ucast_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_mcast_octets", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_mcast", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_mcast_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_bcast_octets", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_bcast", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_bcast_vlan", i); - string += ETH_GSTRING_LEN; - sprintf(string, "rxf%d_frame_drops", i); - string += ETH_GSTRING_LEN; - } - bmap >>= 1; - } + bmap = bna_tx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) + bnad_get_txf_strings(&string, i); + bmap >>= 1; + } - q_num = 0; - for (i = 0; i < bnad->num_rx; i++) { - if (!bnad->rx_info[i].rx) - continue; - for (j = 0; j < bnad->num_rxp_per_rx; j++) { - sprintf(string, "cq%d_producer_index", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_consumer_index", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_hw_producer_index", - q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_intr", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_poll", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_schedule", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_keep_poll", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "cq%d_complete", q_num); - string += ETH_GSTRING_LEN; - q_num++; - } - } + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++, bmap >>= 1) { + if (bmap & 1) + bnad_get_rxf_strings(&string, i); + bmap >>= 1; + } - q_num = 0; - for (i = 0; i < bnad->num_rx; i++) { - if (!bnad->rx_info[i].rx) - continue; - for (j = 0; j < bnad->num_rxp_per_rx; j++) { - sprintf(string, "rxq%d_packets", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_bytes", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_packets_with_error", - q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_allocbuf_failed", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_mapbuf_failed", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_producer_index", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_consumer_index", q_num); - string += ETH_GSTRING_LEN; - q_num++; - if (bnad->rx_info[i].rx_ctrl[j].ccb && - bnad->rx_info[i].rx_ctrl[j].ccb-> - rcb[1] && - bnad->rx_info[i].rx_ctrl[j].ccb-> - rcb[1]->rxq) { - sprintf(string, "rxq%d_packets", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_bytes", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, - "rxq%d_packets_with_error", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_allocbuf_failed", - q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_mapbuf_failed", - q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_producer_index", - q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "rxq%d_consumer_index", - q_num); - string += ETH_GSTRING_LEN; - q_num++; - } - } - } + q_num = 0; + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) + 
bnad_get_cq_strings(&string, q_num++); + } - q_num = 0; - for (i = 0; i < bnad->num_tx; i++) { - if (!bnad->tx_info[i].tx) - continue; - for (j = 0; j < bnad->num_txq_per_tx; j++) { - sprintf(string, "txq%d_packets", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "txq%d_bytes", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "txq%d_producer_index", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "txq%d_consumer_index", q_num); - string += ETH_GSTRING_LEN; - sprintf(string, "txq%d_hw_consumer_index", - q_num); - string += ETH_GSTRING_LEN; - q_num++; - } + q_num = 0; + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + bnad_get_rxq_strings(&string, q_num++); + if (bnad->rx_info[i].rx_ctrl[j].ccb && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq) + bnad_get_rxq_strings(&string, q_num++); } + } - break; - - default: - break; + q_num = 0; + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + for (j = 0; j < bnad->num_txq_per_tx; j++) + bnad_get_txq_strings(&string, q_num++); } mutex_unlock(&bnad->conf_mutex); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index d8c68906525a..d8d87213697c 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -159,6 +159,16 @@ #define GEM_PEFTN 0x01f4 /* PTP Peer Event Frame Tx Ns */ #define GEM_PEFRSL 0x01f8 /* PTP Peer Event Frame Rx Sec Low */ #define GEM_PEFRN 0x01fc /* PTP Peer Event Frame Rx Ns */ +#define GEM_PCSCNTRL 0x0200 /* PCS Control */ +#define GEM_PCSSTS 0x0204 /* PCS Status */ +#define GEM_PCSPHYTOPID 0x0208 /* PCS PHY Top ID */ +#define GEM_PCSPHYBOTID 0x020c /* PCS PHY Bottom ID */ +#define GEM_PCSANADV 0x0210 /* PCS AN Advertisement */ +#define GEM_PCSANLPBASE 0x0214 /* PCS AN Link Partner Base */ +#define GEM_PCSANEXP 0x0218 /* PCS AN Expansion */ +#define GEM_PCSANNPTX 0x021c /* PCS AN Next Page TX */ +#define GEM_PCSANNPLP 0x0220 /* PCS AN Next Page LP */ +#define GEM_PCSANEXTSTS 0x023c /* PCS AN Extended Status */ #define GEM_DCFG1 0x0280 /* Design Config 1 */ #define GEM_DCFG2 0x0284 /* Design Config 2 */ #define GEM_DCFG3 0x0288 /* Design Config 3 */ @@ -478,6 +488,10 @@ #define GEM_HS_MAC_SPEED_OFFSET 0 #define GEM_HS_MAC_SPEED_SIZE 3 +/* Bitfields in PCSCNTRL */ +#define GEM_PCSAUTONEG_OFFSET 12 +#define GEM_PCSAUTONEG_SIZE 1 + /* Bitfields in DCFG1. */ #define GEM_IRQCOR_OFFSET 23 #define GEM_IRQCOR_SIZE 1 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 15362d016a87..f56f3dbbc015 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -694,6 +694,22 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode, if (old_ncr ^ ncr) macb_or_gem_writel(bp, NCR, ncr); + /* Disable AN for SGMII fixed link configuration, enable otherwise. + * Must be written after PCSSEL is set in NCFGR, + * otherwise writes will not take effect. 
+ */ + if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) { + u32 pcsctrl, old_pcsctrl; + + old_pcsctrl = gem_readl(bp, PCSCNTRL); + if (mode == MLO_AN_FIXED) + pcsctrl = old_pcsctrl & ~GEM_BIT(PCSAUTONEG); + else + pcsctrl = old_pcsctrl | GEM_BIT(PCSAUTONEG); + if (old_pcsctrl != pcsctrl) + gem_writel(bp, PCSCNTRL, pcsctrl); + } + spin_unlock_irqrestore(&bp->lock, flags); } @@ -847,6 +863,15 @@ static int macb_phylink_connect(struct macb *bp) return 0; } +static void macb_get_pcs_fixed_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct macb *bp = netdev_priv(ndev); + + state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0; +} + /* based on au1000_eth. c*/ static int macb_mii_probe(struct net_device *dev) { @@ -855,6 +880,11 @@ static int macb_mii_probe(struct net_device *dev) bp->phylink_config.dev = &dev->dev; bp->phylink_config.type = PHYLINK_NETDEV; + if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { + bp->phylink_config.poll_fixed_state = true; + bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state; + } + bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, bp->phy_interface, &macb_phylink_ops); if (IS_ERR(bp->phylink)) { @@ -3728,17 +3758,15 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, *hclk = devm_clk_get(&pdev->dev, "hclk"); } - if (IS_ERR_OR_NULL(*pclk)) { - err = IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV; - dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err); - return err; - } + if (IS_ERR_OR_NULL(*pclk)) + return dev_err_probe(&pdev->dev, + IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV, + "failed to get pclk\n"); - if (IS_ERR_OR_NULL(*hclk)) { - err = IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV; - dev_err(&pdev->dev, "failed to get hclk (%d)\n", err); - return err; - } + if (IS_ERR_OR_NULL(*hclk)) + return dev_err_probe(&pdev->dev, + IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV, + "failed to get hclk\n"); *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk"); if (IS_ERR(*tx_clk)) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 6c85a10f465c..d2ba40c19696 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -3536,8 +3536,7 @@ out: } out_free: - if (data) - kvfree(data); + kvfree(data); #undef QDESC_GET_FLQ #undef QDESC_GET_RXQ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 83b46440408b..b1cae5a19839 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -979,7 +979,7 @@ void clear_filter(struct adapter *adap, struct filter_entry *f) { struct port_info *pi = netdev_priv(f->dev); - /* If the new or old filter have loopback rewriteing rules then we'll + /* If the new or old filter have loopback rewriting rules then we'll * need to free any existing L2T, SMT, CLIP entries of filter * rule. 
*/ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index 2e309f6673f7..28fd2de9e4cf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -48,6 +48,11 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev, flow_action_for_each(i, entry, actions) { switch (entry->id) { case FLOW_ACTION_POLICE: + if (entry->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } /* Convert bytes per second to bits per second */ if (entry->police.rate_bytes_ps * 8 > max_link_rate) { NL_SET_ERR_MSG_MOD(extack, @@ -145,7 +150,11 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev, flow_action_for_each(i, entry, &cls->rule->action) if (entry->id == FLOW_ACTION_POLICE) break; - + if (entry->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } /* Convert from bytes per second to Kbps */ p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000); p.u.params.channel = pi->tx_chan; diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index 169e10c91378..1115b8f9ea4e 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -722,7 +722,7 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input) kvfree(tx_info); return 0; } - tx_info->open_state = false; + tx_info->open_state = CH_KTLS_OPEN_SUCCESS; spin_unlock(&tx_info->lock); complete(&tx_info->completion); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 88bfe2107938..04421aec2dfd 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1337,6 +1337,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget) */ if (unlikely(priv->need_mac_restart)) { ftgmac100_start_hw(priv); + priv->need_mac_restart = false; /* Re-enable "bad" interrupts */ iowrite32(FTGMAC100_INT_BAD, diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 473b337b2e3b..5a1a8f2ea63c 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -1177,18 +1177,7 @@ static struct platform_driver ftmac100_driver = { /****************************************************************************** * initialization / finalization *****************************************************************************/ -static int __init ftmac100_init(void) -{ - return platform_driver_register(&ftmac100_driver); -} - -static void __exit ftmac100_exit(void) -{ - platform_driver_unregister(&ftmac100_driver); -} - -module_init(ftmac100_init); -module_exit(ftmac100_exit); +module_platform_driver(ftmac100_driver); MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); MODULE_DESCRIPTION("FTMAC100 driver"); diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig index ee7a906e30b3..d029b69c3f18 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Kconfig +++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig @@ -29,3 +29,11 @@ config FSL_DPAA2_PTP_CLOCK help This driver adds support for using the DPAA2 1588 timer module as a PTP clock. 
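The macb_clk_init() hunk earlier in this section is a mechanical conversion to dev_err_probe(), which logs the failure message together with the error code and returns that code in one expression; for -EPROBE_DEFER it records the message as the deferral reason instead of spamming the log. A minimal sketch of the idiom, where the "foo" clock name and demo_get_clk() are hypothetical:

/* Sketch only: the "foo" clock consumer name is illustrative. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static int demo_get_clk(struct platform_device *pdev, struct clk **clk)
{
	*clk = devm_clk_get(&pdev->dev, "foo");
	if (IS_ERR(*clk))
		/* Logs unless the error is -EPROBE_DEFER, then returns it. */
		return dev_err_probe(&pdev->dev, PTR_ERR(*clk),
				     "failed to get foo clk\n");

	return 0;
}
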
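In the same spirit, the ftmac100 hunk above drops the hand-written module init/exit pair in favour of module_platform_driver(), which generates essentially the boilerplate being deleted. Roughly, the macro expands to:

/* Approximate expansion of module_platform_driver(ftmac100_driver). */
static int __init ftmac100_driver_init(void)
{
	return platform_driver_register(&ftmac100_driver);
}
module_init(ftmac100_driver_init);

static void __exit ftmac100_driver_exit(void)
{
	platform_driver_unregister(&ftmac100_driver);
}
module_exit(ftmac100_driver_exit);

Apart from the generated function names, behaviour is identical to the removed code.
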
+ +config FSL_DPAA2_SWITCH + tristate "Freescale DPAA2 Ethernet Switch" + depends on BRIDGE || BRIDGE=n + depends on NET_SWITCHDEV + help + Driver for Freescale DPAA2 Ethernet Switch. This driver manages + switch objects discovered on the Freeescale MC bus. diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile index 146cb3540e61..644ef9ae02a3 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Makefile +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -5,11 +5,13 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o +obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o +fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o # Needed by the tracing framework CFLAGS_dpaa2-eth.o := -I$(src) diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c index 0af2e9914ec4..70e04321c420 100644 --- a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c @@ -9,7 +9,7 @@ #include <linux/ethtool.h> -#include "ethsw.h" +#include "dpaa2-switch.h" static struct { enum dpsw_counter id; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c new file mode 100644 index 000000000000..a9b30a72ddad --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -0,0 +1,3031 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DPAA2 Ethernet Switch driver + * + * Copyright 2014-2016 Freescale Semiconductor Inc. + * Copyright 2017-2021 NXP + * + */ + +#include <linux/module.h> + +#include <linux/interrupt.h> +#include <linux/msi.h> +#include <linux/kthread.h> +#include <linux/workqueue.h> +#include <linux/iommu.h> + +#include <linux/fsl/mc.h> + +#include "dpaa2-switch.h" + +/* Minimal supported DPSW version */ +#define DPSW_MIN_VER_MAJOR 8 +#define DPSW_MIN_VER_MINOR 9 + +#define DEFAULT_VLAN_ID 1 + +static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv) +{ + return port_priv->fdb->fdb_id; +} + +static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw) +{ + int i; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) + if (!ethsw->fdbs[i].in_use) + return ðsw->fdbs[i]; + return NULL; +} + +static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv, + struct net_device *bridge_dev) +{ + struct ethsw_port_priv *other_port_priv = NULL; + struct dpaa2_switch_fdb *fdb; + struct net_device *other_dev; + struct list_head *iter; + + /* If we leave a bridge (bridge_dev is NULL), find an unused + * FDB and use that. + */ + if (!bridge_dev) { + fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data); + + /* If there is no unused FDB, we must be the last port that + * leaves the last bridge, all the others are standalone. We + * can just keep the FDB that we already have. + */ + + if (!fdb) { + port_priv->fdb->bridge_dev = NULL; + return 0; + } + + port_priv->fdb = fdb; + port_priv->fdb->in_use = true; + port_priv->fdb->bridge_dev = NULL; + return 0; + } + + /* The below call to netdev_for_each_lower_dev() demands the RTNL lock + * being held. 
Assert on it so that it's easier to catch new code + * paths that reach this point without the RTNL lock. + */ + ASSERT_RTNL(); + + /* If part of a bridge, use the FDB of the first dpaa2 switch interface + * to be present in that bridge + */ + netdev_for_each_lower_dev(bridge_dev, other_dev, iter) { + if (!dpaa2_switch_port_dev_check(other_dev)) + continue; + + if (other_dev == port_priv->netdev) + continue; + + other_port_priv = netdev_priv(other_dev); + break; + } + + /* The current port is about to change its FDB to the one used by the + * first port that joined the bridge. + */ + if (other_port_priv) { + /* The previous FDB is about to become unused, since the + * interface is no longer standalone. + */ + port_priv->fdb->in_use = false; + port_priv->fdb->bridge_dev = NULL; + + /* Get a reference to the new FDB */ + port_priv->fdb = other_port_priv->fdb; + } + + /* Keep track of the new upper bridge device */ + port_priv->fdb->bridge_dev = bridge_dev; + + return 0; +} + +static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id, + enum dpsw_flood_type type, + struct dpsw_egress_flood_cfg *cfg) +{ + int i = 0, j; + + memset(cfg, 0, sizeof(*cfg)); + + /* Add all the DPAA2 switch ports found in the same bridging domain to + * the egress flooding domain + */ + for (j = 0; j < ethsw->sw_attr.num_ifs; j++) { + if (!ethsw->ports[j]) + continue; + if (ethsw->ports[j]->fdb->fdb_id != fdb_id) + continue; + + if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood) + cfg->if_id[i++] = ethsw->ports[j]->idx; + else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood) + cfg->if_id[i++] = ethsw->ports[j]->idx; + } + + /* Add the CTRL interface to the egress flooding domain */ + cfg->if_id[i++] = ethsw->sw_attr.num_ifs; + + cfg->fdb_id = fdb_id; + cfg->flood_type = type; + cfg->num_ifs = i; +} + +static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id) +{ + struct dpsw_egress_flood_cfg flood_cfg; + int err; + + /* Setup broadcast flooding domain */ + dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg); + err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, + &flood_cfg); + if (err) { + dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); + return err; + } + + /* Setup unknown flooding domain */ + dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg); + err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, + &flood_cfg); + if (err) { + dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); + return err; + } + + return 0; +} + +static void *dpaa2_iova_to_virt(struct iommu_domain *domain, + dma_addr_t iova_addr) +{ + phys_addr_t phys_addr; + + phys_addr = domain ? 
iommu_iova_to_phys(domain, iova_addr) : iova_addr; + + return phys_to_virt(phys_addr); +} + +static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpsw_vlan_cfg vcfg = {0}; + int err; + + vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); + err = dpsw_vlan_add(ethsw->mc_io, 0, + ethsw->dpsw_handle, vid, &vcfg); + if (err) { + dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err); + return err; + } + ethsw->vlans[vid] = ETHSW_VLAN_MEMBER; + + return 0; +} + +static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv) +{ + struct net_device *netdev = port_priv->netdev; + struct dpsw_link_state state; + int err; + + err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, &state); + if (err) { + netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); + return true; + } + + WARN_ONCE(state.up > 1, "Garbage read into link_state"); + + return state.up ? true : false; +} + +static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_tci_cfg tci_cfg = { 0 }; + bool up; + int err, ret; + + err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, + port_priv->idx, &tci_cfg); + if (err) { + netdev_err(netdev, "dpsw_if_get_tci err %d\n", err); + return err; + } + + tci_cfg.vlan_id = pvid; + + /* Interface needs to be down to change PVID */ + up = dpaa2_switch_port_is_up(port_priv); + if (up) { + err = dpsw_if_disable(ethsw->mc_io, 0, + ethsw->dpsw_handle, + port_priv->idx); + if (err) { + netdev_err(netdev, "dpsw_if_disable err %d\n", err); + return err; + } + } + + err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, + port_priv->idx, &tci_cfg); + if (err) { + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err); + goto set_tci_error; + } + + /* Delete previous PVID info and mark the new one */ + port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID; + port_priv->vlans[pvid] |= ETHSW_VLAN_PVID; + port_priv->pvid = pvid; + +set_tci_error: + if (up) { + ret = dpsw_if_enable(ethsw->mc_io, 0, + ethsw->dpsw_handle, + port_priv->idx); + if (ret) { + netdev_err(netdev, "dpsw_if_enable err %d\n", ret); + return ret; + } + } + + return err; +} + +static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv, + u16 vid, u16 flags) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_vlan_if_cfg vcfg = {0}; + int err; + + if (port_priv->vlans[vid]) { + netdev_warn(netdev, "VLAN %d already configured\n", vid); + return -EEXIST; + } + + /* If hit, this VLAN rule will lead the packet into the FDB table + * specified in the vlan configuration below + */ + vcfg.num_ifs = 1; + vcfg.if_id[0] = port_priv->idx; + vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); + vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID; + err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg); + if (err) { + netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err); + return err; + } + + port_priv->vlans[vid] = ETHSW_VLAN_MEMBER; + + if (flags & BRIDGE_VLAN_INFO_UNTAGGED) { + err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0, + ethsw->dpsw_handle, + vid, &vcfg); + if (err) { + netdev_err(netdev, + "dpsw_vlan_add_if_untagged err %d\n", err); + return err; + } + port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED; + } + + if (flags & BRIDGE_VLAN_INFO_PVID) { + 
err = dpaa2_switch_port_set_pvid(port_priv, vid); + if (err) + return err; + } + + return 0; +} + +static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state) +{ + struct dpsw_stp_cfg stp_cfg = { + .state = state, + }; + int err; + u16 vid; + + if (!netif_running(port_priv->netdev) || state == port_priv->stp_state) + return 0; /* Nothing to do */ + + for (vid = 0; vid <= VLAN_VID_MASK; vid++) { + if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { + stp_cfg.vlan_id = vid; + err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, &stp_cfg); + if (err) { + netdev_err(port_priv->netdev, + "dpsw_if_set_stp err %d\n", err); + return err; + } + } + } + + port_priv->stp_state = state; + + return 0; +} + +static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid) +{ + struct ethsw_port_priv *ppriv_local = NULL; + int i, err; + + if (!ethsw->vlans[vid]) + return -ENOENT; + + err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid); + if (err) { + dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err); + return err; + } + ethsw->vlans[vid] = 0; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { + ppriv_local = ethsw->ports[i]; + ppriv_local->vlans[vid] = 0; + } + + return 0; +} + +static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv, + const unsigned char *addr) +{ + struct dpsw_fdb_unicast_cfg entry = {0}; + u16 fdb_id; + int err; + + entry.if_egress = port_priv->idx; + entry.type = DPSW_FDB_ENTRY_STATIC; + ether_addr_copy(entry.mac_addr, addr); + + fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); + err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + fdb_id, &entry); + if (err) + netdev_err(port_priv->netdev, + "dpsw_fdb_add_unicast err %d\n", err); + return err; +} + +static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv, + const unsigned char *addr) +{ + struct dpsw_fdb_unicast_cfg entry = {0}; + u16 fdb_id; + int err; + + entry.if_egress = port_priv->idx; + entry.type = DPSW_FDB_ENTRY_STATIC; + ether_addr_copy(entry.mac_addr, addr); + + fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); + err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + fdb_id, &entry); + /* Silently discard error for calling multiple times the del command */ + if (err && err != -ENXIO) + netdev_err(port_priv->netdev, + "dpsw_fdb_remove_unicast err %d\n", err); + return err; +} + +static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv, + const unsigned char *addr) +{ + struct dpsw_fdb_multicast_cfg entry = {0}; + u16 fdb_id; + int err; + + ether_addr_copy(entry.mac_addr, addr); + entry.type = DPSW_FDB_ENTRY_STATIC; + entry.num_ifs = 1; + entry.if_id[0] = port_priv->idx; + + fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); + err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + fdb_id, &entry); + /* Silently discard error for calling multiple times the add command */ + if (err && err != -ENXIO) + netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n", + err); + return err; +} + +static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv, + const unsigned char *addr) +{ + struct dpsw_fdb_multicast_cfg entry = {0}; + u16 fdb_id; + int err; + + ether_addr_copy(entry.mac_addr, addr); + entry.type = DPSW_FDB_ENTRY_STATIC; + entry.num_ifs = 1; + entry.if_id[0] = port_priv->idx; + + fdb_id = 
dpaa2_switch_port_get_fdb_id(port_priv); + err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + fdb_id, &entry); + /* Silently discard error for calling multiple times the del command */ + if (err && err != -ENAVAIL) + netdev_err(port_priv->netdev, + "dpsw_fdb_remove_multicast err %d\n", err); + return err; +} + +static void dpaa2_switch_port_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + u64 tmp; + int err; + + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_ING_FRAME, &stats->rx_packets); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_EGR_FRAME, &stats->tx_packets); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_ING_BYTE, &stats->rx_bytes); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_EGR_BYTE, &stats->tx_bytes); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_ING_FRAME_DISCARD, + &stats->rx_dropped); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_ING_FLTR_FRAME, + &tmp); + if (err) + goto error; + stats->rx_dropped += tmp; + + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + DPSW_CNT_EGR_FRAME_DISCARD, + &stats->tx_dropped); + if (err) + goto error; + + return; + +error: + netdev_err(netdev, "dpsw_if_get_counter err %d\n", err); +} + +static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev, + int attr_id) +{ + return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT); +} + +static int dpaa2_switch_port_get_offload_stats(int attr_id, + const struct net_device *netdev, + void *sp) +{ + switch (attr_id) { + case IFLA_OFFLOAD_XSTATS_CPU_HIT: + dpaa2_switch_port_get_stats((struct net_device *)netdev, sp); + return 0; + } + + return -EINVAL; +} + +static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io, + 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, + (u16)ETHSW_L2_MAX_FRM(mtu)); + if (err) { + netdev_err(netdev, + "dpsw_if_set_max_frame_length() err %d\n", err); + return err; + } + + netdev->mtu = mtu; + return 0; +} + +static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct dpsw_link_state state; + int err; + + /* Interrupts are received even though no one issued an 'ifconfig up' + * on the switch interface. 
Ignore these link state update interrupts + */ + if (!netif_running(netdev)) + return 0; + + err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx, &state); + if (err) { + netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); + return err; + } + + WARN_ONCE(state.up > 1, "Garbage read into link_state"); + + if (state.up != port_priv->link_state) { + if (state.up) { + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + } else { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + port_priv->link_state = state.up; + } + + return 0; +} + +/* Manage all NAPI instances for the control interface. + * + * We only have one RX queue and one Tx Conf queue for all + * switch ports. Therefore, we only need to enable the NAPI instance once, the + * first time one of the switch ports runs .dev_open(). + */ + +static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw) +{ + int i; + + /* Access to the ethsw->napi_users relies on the RTNL lock */ + ASSERT_RTNL(); + + /* a new interface is using the NAPI instance */ + ethsw->napi_users++; + + /* if there is already a user of the instance, return */ + if (ethsw->napi_users > 1) + return; + + for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) + napi_enable(ðsw->fq[i].napi); +} + +static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw) +{ + int i; + + /* Access to the ethsw->napi_users relies on the RTNL lock */ + ASSERT_RTNL(); + + /* If we are not the last interface using the NAPI, return */ + ethsw->napi_users--; + if (ethsw->napi_users) + return; + + for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) + napi_disable(ðsw->fq[i].napi); +} + +static int dpaa2_switch_port_open(struct net_device *netdev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + int err; + + /* Explicitly set carrier off, otherwise + * netif_carrier_ok() will return true and cause 'ip link show' + * to report the LOWER_UP flag, even though the link + * notification wasn't even received. 
+ */ + netif_carrier_off(netdev); + + err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx); + if (err) { + netdev_err(netdev, "dpsw_if_enable err %d\n", err); + return err; + } + + /* sync carrier state */ + err = dpaa2_switch_port_carrier_state_sync(netdev); + if (err) { + netdev_err(netdev, + "dpaa2_switch_port_carrier_state_sync err %d\n", err); + goto err_carrier_sync; + } + + dpaa2_switch_enable_ctrl_if_napi(ethsw); + + return 0; + +err_carrier_sync: + dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx); + return err; +} + +static int dpaa2_switch_port_stop(struct net_device *netdev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + int err; + + err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, + port_priv->ethsw_data->dpsw_handle, + port_priv->idx); + if (err) { + netdev_err(netdev, "dpsw_if_disable err %d\n", err); + return err; + } + + dpaa2_switch_disable_ctrl_if_napi(ethsw); + + return 0; +} + +static int dpaa2_switch_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct ethsw_port_priv *port_priv = netdev_priv(dev); + + ppid->id_len = 1; + ppid->id[0] = port_priv->ethsw_data->dev_id; + + return 0; +} + +static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name, + size_t len) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + err = snprintf(name, len, "p%d", port_priv->idx); + if (err >= len) + return -EINVAL; + + return 0; +} + +struct ethsw_dump_ctx { + struct net_device *dev; + struct sk_buff *skb; + struct netlink_callback *cb; + int idx; +}; + +static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry, + struct ethsw_dump_ctx *dump) +{ + int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC; + u32 portid = NETLINK_CB(dump->cb->skb).portid; + u32 seq = dump->cb->nlh->nlmsg_seq; + struct nlmsghdr *nlh; + struct ndmsg *ndm; + + if (dump->idx < dump->cb->args[2]) + goto skip; + + nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, + sizeof(*ndm), NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + + ndm = nlmsg_data(nlh); + ndm->ndm_family = AF_BRIDGE; + ndm->ndm_pad1 = 0; + ndm->ndm_pad2 = 0; + ndm->ndm_flags = NTF_SELF; + ndm->ndm_type = 0; + ndm->ndm_ifindex = dump->dev->ifindex; + ndm->ndm_state = is_dynamic ? 
NUD_REACHABLE : NUD_NOARP; + + if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr)) + goto nla_put_failure; + + nlmsg_end(dump->skb, nlh); + +skip: + dump->idx++; + return 0; + +nla_put_failure: + nlmsg_cancel(dump->skb, nlh); + return -EMSGSIZE; +} + +static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry, + struct ethsw_port_priv *port_priv) +{ + int idx = port_priv->idx; + int valid; + + if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST) + valid = entry->if_info == port_priv->idx; + else + valid = entry->if_mask[idx / 8] & BIT(idx % 8); + + return valid; +} + +static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv, + dpaa2_switch_fdb_cb_t cb, void *data) +{ + struct net_device *net_dev = port_priv->netdev; + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct device *dev = net_dev->dev.parent; + struct fdb_dump_entry *fdb_entries; + struct fdb_dump_entry fdb_entry; + dma_addr_t fdb_dump_iova; + u16 num_fdb_entries; + u32 fdb_dump_size; + int err = 0, i; + u8 *dma_mem; + u16 fdb_id; + + fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry); + dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL); + if (!dma_mem) + return -ENOMEM; + + fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, fdb_dump_iova)) { + netdev_err(net_dev, "dma_map_single() failed\n"); + err = -ENOMEM; + goto err_map; + } + + fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); + err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id, + fdb_dump_iova, fdb_dump_size, &num_fdb_entries); + if (err) { + netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err); + goto err_dump; + } + + dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE); + + fdb_entries = (struct fdb_dump_entry *)dma_mem; + for (i = 0; i < num_fdb_entries; i++) { + fdb_entry = fdb_entries[i]; + + err = cb(port_priv, &fdb_entry, data); + if (err) + goto end; + } + +end: + kfree(dma_mem); + + return 0; + +err_dump: + dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_TO_DEVICE); +err_map: + kfree(dma_mem); + return err; +} + +static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv, + struct fdb_dump_entry *fdb_entry, + void *data) +{ + if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv)) + return 0; + + return dpaa2_switch_fdb_dump_nl(fdb_entry, data); +} + +static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, + struct net_device *net_dev, + struct net_device *filter_dev, int *idx) +{ + struct ethsw_port_priv *port_priv = netdev_priv(net_dev); + struct ethsw_dump_ctx dump = { + .dev = net_dev, + .skb = skb, + .cb = cb, + .idx = *idx, + }; + int err; + + err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump); + *idx = dump.idx; + + return err; +} + +static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv, + struct fdb_dump_entry *fdb_entry, + void *data __always_unused) +{ + if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv)) + return 0; + + if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC)) + return 0; + + if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST) + dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr); + else + dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr); + + return 0; +} + +static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv) +{ + dpaa2_switch_fdb_iterate(port_priv, + dpaa2_switch_fdb_entry_fast_age, NULL); +} + +static int dpaa2_switch_port_vlan_add(struct 
net_device *netdev, __be16 proto, + u16 vid) +{ + struct switchdev_obj_port_vlan vlan = { + .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, + .vid = vid, + .obj.orig_dev = netdev, + /* This API only allows programming tagged, non-PVID VIDs */ + .flags = 0, + }; + + return dpaa2_switch_port_vlans_add(netdev, &vlan); +} + +static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct switchdev_obj_port_vlan vlan = { + .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, + .vid = vid, + .obj.orig_dev = netdev, + /* This API only allows programming tagged, non-PVID VIDs */ + .flags = 0, + }; + + return dpaa2_switch_port_vlans_del(netdev, &vlan); +} + +static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *net_dev = port_priv->netdev; + struct device *dev = net_dev->dev.parent; + u8 mac_addr[ETH_ALEN]; + int err; + + if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR)) + return 0; + + /* Get firmware address, if any */ + err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle, + port_priv->idx, mac_addr); + if (err) { + dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n"); + return err; + } + + /* First check if firmware has any address configured by bootloader */ + if (!is_zero_ether_addr(mac_addr)) { + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + } else { + /* No MAC address configured, fill in net_dev->dev_addr + * with a random one + */ + eth_hw_addr_random(net_dev); + dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); + + /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all + * practical purposes, this will be our "permanent" mac address, + * at least until the next reboot. This move will also permit + * register_netdevice() to properly fill up net_dev->perm_addr. + */ + net_dev->addr_assign_type = NET_ADDR_PERM; + } + + return 0; +} + +static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw, + const struct dpaa2_fd *fd) +{ + struct device *dev = ethsw->dev; + unsigned char *buffer_start; + struct sk_buff **skbh, *skb; + dma_addr_t fd_addr; + + fd_addr = dpaa2_fd_get_addr(fd); + skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr); + + skb = *skbh; + buffer_start = (unsigned char *)skbh; + + dma_unmap_single(dev, fd_addr, + skb_tail_pointer(skb) - buffer_start, + DMA_TO_DEVICE); + + /* Move on with skb release */ + dev_kfree_skb(skb); +} + +static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw, + struct sk_buff *skb, + struct dpaa2_fd *fd) +{ + struct device *dev = ethsw->dev; + struct sk_buff **skbh; + dma_addr_t addr; + u8 *buff_start; + void *hwa; + + buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET - + DPAA2_SWITCH_TX_BUF_ALIGN, + DPAA2_SWITCH_TX_BUF_ALIGN); + + /* Clear FAS to have consistent values for TX confirmation. 
It is + * located in the first 8 bytes of the buffer's hardware annotation + * area + */ + hwa = buff_start + DPAA2_SWITCH_SWA_SIZE; + memset(hwa, 0, 8); + + /* Store a backpointer to the skb at the beginning of the buffer + * (in the private data area) such that we can release it + * on Tx confirm + */ + skbh = (struct sk_buff **)buff_start; + *skbh = skb; + + addr = dma_map_single(dev, buff_start, + skb_tail_pointer(skb) - buff_start, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, addr))) + return -ENOMEM; + + /* Setup the FD fields */ + memset(fd, 0, sizeof(*fd)); + + dpaa2_fd_set_addr(fd, addr); + dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start)); + dpaa2_fd_set_len(fd, skb->len); + dpaa2_fd_set_format(fd, dpaa2_fd_single); + + return 0; +} + +static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(net_dev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES; + struct dpaa2_fd fd; + int err; + + if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) { + struct sk_buff *ns; + + ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM); + if (unlikely(!ns)) { + net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name); + goto err_free_skb; + } + dev_consume_skb_any(skb); + skb = ns; + } + + /* We'll be holding a back-reference to the skb until Tx confirmation */ + skb = skb_unshare(skb, GFP_ATOMIC); + if (unlikely(!skb)) { + /* skb_unshare() has already freed the skb */ + net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name); + goto err_exit; + } + + /* At this stage, we do not support non-linear skbs so just try to + * linearize the skb and if that's not working, just drop the packet. 
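+ * (A non-linear skb would need a scatter/gather frame descriptor,
+ * which this datapath does not build.)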
+ */ + err = skb_linearize(skb); + if (err) { + net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err); + goto err_free_skb; + } + + err = dpaa2_switch_build_single_fd(ethsw, skb, &fd); + if (unlikely(err)) { + net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err); + goto err_free_skb; + } + + do { + err = dpaa2_io_service_enqueue_qd(NULL, + port_priv->tx_qdid, + 8, 0, &fd); + retries--; + } while (err == -EBUSY && retries); + + if (unlikely(err < 0)) { + dpaa2_switch_free_fd(ethsw, &fd); + goto err_exit; + } + + return NETDEV_TX_OK; + +err_free_skb: + dev_kfree_skb(skb); +err_exit: + return NETDEV_TX_OK; +} + +static const struct net_device_ops dpaa2_switch_port_ops = { + .ndo_open = dpaa2_switch_port_open, + .ndo_stop = dpaa2_switch_port_stop, + + .ndo_set_mac_address = eth_mac_addr, + .ndo_get_stats64 = dpaa2_switch_port_get_stats, + .ndo_change_mtu = dpaa2_switch_port_change_mtu, + .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats, + .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats, + .ndo_fdb_dump = dpaa2_switch_port_fdb_dump, + .ndo_vlan_rx_add_vid = dpaa2_switch_port_vlan_add, + .ndo_vlan_rx_kill_vid = dpaa2_switch_port_vlan_kill, + + .ndo_start_xmit = dpaa2_switch_port_tx, + .ndo_get_port_parent_id = dpaa2_switch_port_parent_id, + .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name, +}; + +bool dpaa2_switch_port_dev_check(const struct net_device *netdev) +{ + return netdev->netdev_ops == &dpaa2_switch_port_ops; +} + +static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw) +{ + int i; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { + dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev); + dpaa2_switch_port_set_mac_addr(ethsw->ports[i]); + } +} + +static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) +{ + struct device *dev = (struct device *)arg; + struct ethsw_core *ethsw = dev_get_drvdata(dev); + + /* Mask the events and the if_id reserved bits to be cleared on read */ + u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000; + int err; + + err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, &status); + if (err) { + dev_err(dev, "Can't get irq status (err %d)\n", err); + + err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, 0xFFFFFFFF); + if (err) + dev_err(dev, "Can't clear irq status (err %d)\n", err); + goto out; + } + + if (status & DPSW_IRQ_EVENT_LINK_CHANGED) + dpaa2_switch_links_state_update(ethsw); + +out: + return IRQ_HANDLED; +} + +static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev) +{ + struct device *dev = &sw_dev->dev; + struct ethsw_core *ethsw = dev_get_drvdata(dev); + u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED; + struct fsl_mc_device_irq *irq; + int err; + + err = fsl_mc_allocate_irqs(sw_dev); + if (err) { + dev_err(dev, "MC irqs allocation failed\n"); + return err; + } + + if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) { + err = -EINVAL; + goto free_irq; + } + + err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, 0); + if (err) { + dev_err(dev, "dpsw_set_irq_enable err %d\n", err); + goto free_irq; + } + + irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF]; + + err = devm_request_threaded_irq(dev, irq->msi_desc->irq, + NULL, + dpaa2_switch_irq0_handler_thread, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(dev), dev); + if (err) { + dev_err(dev, "devm_request_threaded_irq(): %d\n", err); + goto free_irq; + } + + err = dpsw_set_irq_mask(ethsw->mc_io, 0, 
ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, mask); + if (err) { + dev_err(dev, "dpsw_set_irq_mask(): %d\n", err); + goto free_devm_irq; + } + + err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, 1); + if (err) { + dev_err(dev, "dpsw_set_irq_enable(): %d\n", err); + goto free_devm_irq; + } + + return 0; + +free_devm_irq: + devm_free_irq(dev, irq->msi_desc->irq, dev); +free_irq: + fsl_mc_free_irqs(sw_dev); + return err; +} + +static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev) +{ + struct device *dev = &sw_dev->dev; + struct ethsw_core *ethsw = dev_get_drvdata(dev); + int err; + + err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, 0); + if (err) + dev_err(dev, "dpsw_set_irq_enable err %d\n", err); + + fsl_mc_free_irqs(sw_dev); +} + +static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev, + u8 state) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + + return dpaa2_switch_port_set_stp_state(port_priv, state); +} + +static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + enum dpsw_learning_mode learn_mode; + int err; + + if (enable) + learn_mode = DPSW_LEARNING_MODE_HW; + else + learn_mode = DPSW_LEARNING_MODE_DIS; + + err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, + port_priv->idx, learn_mode); + if (err) + netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err); + + if (!enable) + dpaa2_switch_port_fast_age(port_priv); + + return err; +} + +static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv, + struct switchdev_brport_flags flags) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + + if (flags.mask & BR_BCAST_FLOOD) + port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD); + + if (flags.mask & BR_FLOOD) + port_priv->ucast_flood = !!(flags.val & BR_FLOOD); + + return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); +} + +static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD | + BR_MCAST_FLOOD)) + return -EINVAL; + + if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) { + bool multicast = !!(flags.val & BR_MCAST_FLOOD); + bool unicast = !!(flags.val & BR_FLOOD); + + if (unicast != multicast) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot configure multicast flooding independently of unicast"); + return -EINVAL; + } + } + + return 0; +} + +static int dpaa2_switch_port_bridge_flags(struct net_device *netdev, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + if (flags.mask & BR_LEARNING) { + bool learn_ena = !!(flags.val & BR_LEARNING); + + err = dpaa2_switch_port_set_learning(port_priv, learn_ena); + if (err) + return err; + } + + if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) { + err = dpaa2_switch_port_flood(port_priv, flags); + if (err) + return err; + } + + return 0; +} + +static int dpaa2_switch_port_attr_set(struct net_device *netdev, + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack) +{ + int err = 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + err = dpaa2_switch_port_attr_stp_state_set(netdev, + attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + if (!attr->u.vlan_filtering) { + 
NL_SET_ERR_MSG_MOD(extack, + "The DPAA2 switch does not support VLAN-unaware operation"); + return -EOPNOTSUPP; + } + break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +int dpaa2_switch_port_vlans_add(struct net_device *netdev, + const struct switchdev_obj_port_vlan *vlan) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpsw_attr *attr = &ethsw->sw_attr; + int err = 0; + + /* Make sure that the VLAN is not already configured + * on the switch port + */ + if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER) + return -EEXIST; + + /* Check if there is space for a new VLAN */ + err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, + &ethsw->sw_attr); + if (err) { + netdev_err(netdev, "dpsw_get_attributes err %d\n", err); + return err; + } + if (attr->max_vlans - attr->num_vlans < 1) + return -ENOSPC; + + if (!port_priv->ethsw_data->vlans[vlan->vid]) { + /* this is a new VLAN */ + err = dpaa2_switch_add_vlan(port_priv, vlan->vid); + if (err) + return err; + + port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL; + } + + return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags); +} + +static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc, + const unsigned char *addr) +{ + struct netdev_hw_addr_list *list = (is_uc) ? 
&netdev->uc : &netdev->mc; + struct netdev_hw_addr *ha; + + netif_addr_lock_bh(netdev); + list_for_each_entry(ha, &list->list, list) { + if (ether_addr_equal(ha->addr, addr)) { + netif_addr_unlock_bh(netdev); + return 1; + } + } + netif_addr_unlock_bh(netdev); + return 0; +} + +static int dpaa2_switch_port_mdb_add(struct net_device *netdev, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + /* Check if address is already set on this port */ + if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) + return -EEXIST; + + err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr); + if (err) + return err; + + err = dev_mc_add(netdev, mdb->addr); + if (err) { + netdev_err(netdev, "dev_mc_add err %d\n", err); + dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); + } + + return err; +} + +static int dpaa2_switch_port_obj_add(struct net_device *netdev, + const struct switchdev_obj *obj) +{ + int err; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = dpaa2_switch_port_vlans_add(netdev, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + err = dpaa2_switch_port_mdb_add(netdev, + SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_vlan_if_cfg vcfg; + int i, err; + + if (!port_priv->vlans[vid]) + return -ENOENT; + + if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) { + /* If we are deleting the PVID of a port, use VLAN 4095 instead + * as we are sure that neither the bridge nor the 8021q module + * will use it + */ + err = dpaa2_switch_port_set_pvid(port_priv, 4095); + if (err) + return err; + } + + vcfg.num_ifs = 1; + vcfg.if_id[0] = port_priv->idx; + if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) { + err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, + ethsw->dpsw_handle, + vid, &vcfg); + if (err) { + netdev_err(netdev, + "dpsw_vlan_remove_if_untagged err %d\n", + err); + } + port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED; + } + + if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { + err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, + vid, &vcfg); + if (err) { + netdev_err(netdev, + "dpsw_vlan_remove_if err %d\n", err); + return err; + } + port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER; + + /* Delete VLAN from switch if it is no longer configured on + * any port + */ + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) + if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER) + return 0; /* Found a port member in VID */ + + ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL; + + err = dpaa2_switch_dellink(ethsw, vid); + if (err) + return err; + } + + return 0; +} + +int dpaa2_switch_port_vlans_del(struct net_device *netdev, + const struct switchdev_obj_port_vlan *vlan) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + + if (netif_is_bridge_master(vlan->obj.orig_dev)) + return -EOPNOTSUPP; + + return dpaa2_switch_port_del_vlan(port_priv, vlan->vid); +} + +static int dpaa2_switch_port_mdb_del(struct net_device *netdev, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) + return -ENOENT; + + err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); + if (err) + return err; + + err = dev_mc_del(netdev, mdb->addr); 
+ if (err) { + netdev_err(netdev, "dev_mc_del err %d\n", err); + return err; + } + + return err; +} + +static int dpaa2_switch_port_obj_del(struct net_device *netdev, + const struct switchdev_obj *obj) +{ + int err; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + return err; +} + +static int dpaa2_switch_port_attr_set_event(struct net_device *netdev, + struct switchdev_notifier_port_attr_info *ptr) +{ + int err; + + err = switchdev_handle_port_attr_set(netdev, ptr, + dpaa2_switch_port_dev_check, + dpaa2_switch_port_attr_set); + return notifier_from_errno(err); +} + +static int dpaa2_switch_port_bridge_join(struct net_device *netdev, + struct net_device *upper_dev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct ethsw_port_priv *other_port_priv; + struct net_device *other_dev; + struct list_head *iter; + bool learn_ena; + int err; + + netdev_for_each_lower_dev(upper_dev, other_dev, iter) { + if (!dpaa2_switch_port_dev_check(other_dev)) + continue; + + other_port_priv = netdev_priv(other_dev); + if (other_port_priv->ethsw_data != port_priv->ethsw_data) { + netdev_err(netdev, + "Interface from a different DPSW is in the bridge already!\n"); + return -EINVAL; + } + } + + /* Delete the previously manually installed VLAN 1 */ + err = dpaa2_switch_port_del_vlan(port_priv, 1); + if (err) + return err; + + dpaa2_switch_port_set_fdb(port_priv, upper_dev); + + /* Inherit the initial bridge port learning state */ + learn_ena = br_port_flag_is_set(netdev, BR_LEARNING); + err = dpaa2_switch_port_set_learning(port_priv, learn_ena); + + /* Setup the egress flood policy (broadcast, unknown unicast) */ + err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); + if (err) + goto err_egress_flood; + + return 0; + +err_egress_flood: + dpaa2_switch_port_set_fdb(port_priv, NULL); + return err; +} + +static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg) +{ + __be16 vlan_proto = htons(ETH_P_8021Q); + + if (vdev) + vlan_proto = vlan_dev_vlan_proto(vdev); + + return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid); +} + +static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg) +{ + __be16 vlan_proto = htons(ETH_P_8021Q); + + if (vdev) + vlan_proto = vlan_dev_vlan_proto(vdev); + + return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid); +} + +static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct dpaa2_switch_fdb *old_fdb = port_priv->fdb; + struct ethsw_core *ethsw = port_priv->ethsw_data; + int err; + + /* First of all, fast age any learnt FDB addresses on this switch port */ + dpaa2_switch_port_fast_age(port_priv); + + /* Clear all RX VLANs installed through vlan_vid_add() either as VLAN + * upper devices or otherwise from the FDB table that we are about to + * leave + */ + err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev); + if (err) + netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err); + + dpaa2_switch_port_set_fdb(port_priv, NULL); + + /* Restore all RX VLANs into the new FDB table that we just joined */ + err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev); 
+ if (err) + netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err); + + /* Reset the flooding state to denote that this port can send any + * packet in standalone mode. With this, we are also ensuring that any + * later bridge join will have the flooding flag on. + */ + port_priv->bcast_flood = true; + port_priv->ucast_flood = true; + + /* Setup the egress flood policy (broadcast, unknown unicast). + * When the port is not under a bridge, only the CTRL interface is part + * of the flooding domain besides the actual port + */ + err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); + if (err) + return err; + + /* Recreate the egress flood domain of the FDB that we just left */ + err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id); + if (err) + return err; + + /* No HW learning when not under a bridge */ + err = dpaa2_switch_port_set_learning(port_priv, false); + if (err) + return err; + + /* Add the VLAN 1 as PVID when not under a bridge. We need this since + * the dpaa2 switch interfaces are not capable to be VLAN unaware + */ + return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID, + BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID); +} + +static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev) +{ + struct net_device *upper_dev; + struct list_head *iter; + + /* RCU read lock not necessary because we have write-side protection + * (rtnl_mutex), however a non-rcu iterator does not exist. + */ + netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter) + if (is_vlan_dev(upper_dev)) + return -EOPNOTSUPP; + + return 0; +} + +static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info = ptr; + struct netlink_ext_ack *extack; + struct net_device *upper_dev; + int err = 0; + + if (!dpaa2_switch_port_dev_check(netdev)) + return NOTIFY_DONE; + + extack = netdev_notifier_info_to_extack(&info->info); + + switch (event) { + case NETDEV_PRECHANGEUPPER: + upper_dev = info->upper_dev; + if (!netif_is_bridge_master(upper_dev)) + break; + + if (!br_vlan_enabled(upper_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge"); + err = -EOPNOTSUPP; + goto out; + } + + err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot join a bridge while VLAN uppers are present"); + goto out; + } + + break; + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (netif_is_bridge_master(upper_dev)) { + if (info->linking) + err = dpaa2_switch_port_bridge_join(netdev, upper_dev); + else + err = dpaa2_switch_port_bridge_leave(netdev); + } + break; + } + +out: + return notifier_from_errno(err); +} + +struct ethsw_switchdev_event_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct net_device *dev; + unsigned long event; +}; + +static void dpaa2_switch_event_work(struct work_struct *work) +{ + struct ethsw_switchdev_event_work *switchdev_work = + container_of(work, struct ethsw_switchdev_event_work, work); + struct net_device *dev = switchdev_work->dev; + struct switchdev_notifier_fdb_info *fdb_info; + int err; + + rtnl_lock(); + fdb_info = &switchdev_work->fdb_info; + + switch (switchdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + if (!fdb_info->added_by_user) + break; + if (is_unicast_ether_addr(fdb_info->addr)) + err = 
dpaa2_switch_port_fdb_add_uc(netdev_priv(dev), + fdb_info->addr); + else + err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev), + fdb_info->addr); + if (err) + break; + fdb_info->offloaded = true; + call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev, + &fdb_info->info, NULL); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + if (!fdb_info->added_by_user) + break; + if (is_unicast_ether_addr(fdb_info->addr)) + dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr); + else + dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr); + break; + } + + rtnl_unlock(); + kfree(switchdev_work->fdb_info.addr); + kfree(switchdev_work); + dev_put(dev); +} + +/* Called under rcu_read_lock() */ +static int dpaa2_switch_port_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + struct ethsw_port_priv *port_priv = netdev_priv(dev); + struct ethsw_switchdev_event_work *switchdev_work; + struct switchdev_notifier_fdb_info *fdb_info = ptr; + struct ethsw_core *ethsw = port_priv->ethsw_data; + + if (event == SWITCHDEV_PORT_ATTR_SET) + return dpaa2_switch_port_attr_set_event(dev, ptr); + + if (!dpaa2_switch_port_dev_check(dev)) + return NOTIFY_DONE; + + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); + if (!switchdev_work) + return NOTIFY_BAD; + + INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work); + switchdev_work->dev = dev; + switchdev_work->event = event; + + switch (event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + memcpy(&switchdev_work->fdb_info, ptr, + sizeof(switchdev_work->fdb_info)); + switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!switchdev_work->fdb_info.addr) + goto err_addr_alloc; + + ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, + fdb_info->addr); + + /* Take a reference on the device to avoid being freed. 
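+ * It is dropped by dpaa2_switch_event_work() after the deferred FDB
+ * update has been processed.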
*/ + dev_hold(dev); + break; + default: + kfree(switchdev_work); + return NOTIFY_DONE; + } + + queue_work(ethsw->workqueue, &switchdev_work->work); + + return NOTIFY_DONE; + +err_addr_alloc: + kfree(switchdev_work); + return NOTIFY_BAD; +} + +static int dpaa2_switch_port_obj_event(unsigned long event, + struct net_device *netdev, + struct switchdev_notifier_port_obj_info *port_obj_info) +{ + int err = -EOPNOTSUPP; + + if (!dpaa2_switch_port_dev_check(netdev)) + return NOTIFY_DONE; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj); + break; + case SWITCHDEV_PORT_OBJ_DEL: + err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj); + break; + } + + port_obj_info->handled = true; + return notifier_from_errno(err); +} + +static int dpaa2_switch_port_blocking_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + case SWITCHDEV_PORT_OBJ_DEL: + return dpaa2_switch_port_obj_event(event, dev, ptr); + case SWITCHDEV_PORT_ATTR_SET: + return dpaa2_switch_port_attr_set_event(dev, ptr); + } + + return NOTIFY_DONE; +} + +/* Build a linear skb based on a single-buffer frame descriptor */ +static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw, + const struct dpaa2_fd *fd) +{ + u16 fd_offset = dpaa2_fd_get_offset(fd); + dma_addr_t addr = dpaa2_fd_get_addr(fd); + u32 fd_length = dpaa2_fd_get_len(fd); + struct device *dev = ethsw->dev; + struct sk_buff *skb = NULL; + void *fd_vaddr; + + fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr); + dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + + skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); + if (unlikely(!skb)) { + dev_err(dev, "build_skb() failed\n"); + return NULL; + } + + skb_reserve(skb, fd_offset); + skb_put(skb, fd_length); + + ethsw->buf_count--; + + return skb; +} + +static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq, + const struct dpaa2_fd *fd) +{ + dpaa2_switch_free_fd(fq->ethsw, fd); +} + +static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq, + const struct dpaa2_fd *fd) +{ + struct ethsw_core *ethsw = fq->ethsw; + struct ethsw_port_priv *port_priv; + struct net_device *netdev; + struct vlan_ethhdr *hdr; + struct sk_buff *skb; + u16 vlan_tci, vid; + int if_id, err; + + /* get switch ingress interface ID */ + if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF; + + if (if_id >= ethsw->sw_attr.num_ifs) { + dev_err(ethsw->dev, "Frame received from unknown interface!\n"); + goto err_free_fd; + } + port_priv = ethsw->ports[if_id]; + netdev = port_priv->netdev; + + /* build the SKB based on the FD received */ + if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) { + if (net_ratelimit()) { + netdev_err(netdev, "Received invalid frame format\n"); + goto err_free_fd; + } + } + + skb = dpaa2_switch_build_linear_skb(ethsw, fd); + if (unlikely(!skb)) + goto err_free_fd; + + skb_reset_mac_header(skb); + + /* Remove the VLAN header if the packet that we just received has a vid + * equal to the port PVIDs. Since the dpaa2-switch can operate only in + * VLAN-aware mode and no alterations are made on the packet when it's + * redirected/mirrored to the control interface, we are sure that there + * will always be a VLAN header present. 
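+ * (Frames that ingressed untagged were tagged by the hardware with
+ * the port PVID, so popping that tag restores the original frame.)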
+ */ + hdr = vlan_eth_hdr(skb); + vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK; + if (vid == port_priv->pvid) { + err = __skb_vlan_pop(skb, &vlan_tci); + if (err) { + dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err); + goto err_free_fd; + } + } + + skb->dev = netdev; + skb->protocol = eth_type_trans(skb, skb->dev); + + /* Setup the offload_fwd_mark only if the port is under a bridge */ + skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev); + + netif_receive_skb(skb); + + return; + +err_free_fd: + dpaa2_switch_free_fd(ethsw, fd); +} + +static void dpaa2_switch_detect_features(struct ethsw_core *ethsw) +{ + ethsw->features = 0; + + if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6)) + ethsw->features |= ETHSW_FEATURE_MAC_ADDR; +} + +static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw) +{ + struct dpsw_ctrl_if_attr ctrl_if_attr; + struct device *dev = ethsw->dev; + int i = 0; + int err; + + err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, + &ctrl_if_attr); + if (err) { + dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err); + return err; + } + + ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid; + ethsw->fq[i].ethsw = ethsw; + ethsw->fq[i++].type = DPSW_QUEUE_RX; + + ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid; + ethsw->fq[i].ethsw = ethsw; + ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF; + + return 0; +} + +/* Free buffers acquired from the buffer pool or which were meant to + * be released in the pool + */ +static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count) +{ + struct device *dev = ethsw->dev; + void *vaddr; + int i; + + for (i = 0; i < count; i++) { + vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]); + dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + free_pages((unsigned long)vaddr, 0); + } +} + +/* Perform a single release command to add buffers + * to the specified buffer pool + */ +static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid) +{ + struct device *dev = ethsw->dev; + u64 buf_array[BUFS_PER_CMD]; + struct page *page; + int retries = 0; + dma_addr_t addr; + int err; + int i; + + for (i = 0; i < BUFS_PER_CMD; i++) { + /* Allocate one page for each Rx buffer. WRIOP sees + * the entire page except for a tailroom reserved for + * skb shared info + */ + page = dev_alloc_pages(0); + if (!page) { + dev_err(dev, "buffer allocation failed\n"); + goto err_alloc; + } + + addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, addr)) { + dev_err(dev, "dma_map_single() failed\n"); + goto err_map; + } + buf_array[i] = addr; + } + +release_bufs: + /* In case the portal is busy, retry until successful or + * max retries hit. + */ + while ((err = dpaa2_io_service_release(NULL, bpid, + buf_array, i)) == -EBUSY) { + if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) + break; + + cpu_relax(); + } + + /* If release command failed, clean up and bail out. 
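+ * Returning 0 buffers added lets the callers treat this as an
+ * out-of-memory condition.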
*/ + if (err) { + dpaa2_switch_free_bufs(ethsw, buf_array, i); + return 0; + } + + return i; + +err_map: + __free_pages(page, 0); +err_alloc: + /* If we managed to allocate at least some buffers, + * release them to hardware + */ + if (i) + goto release_bufs; + + return 0; +} + +static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw) +{ + int *count = &ethsw->buf_count; + int new_count; + int err = 0; + + if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) { + do { + new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid); + if (unlikely(!new_count)) { + /* Out of memory; abort for now, we'll + * try later on + */ + break; + } + *count += new_count; + } while (*count < DPAA2_ETHSW_NUM_BUFS); + + if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS)) + err = -ENOMEM; + } + + return err; +} + +static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw) +{ + int *count, i; + + for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) { + count = &ethsw->buf_count; + *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid); + + if (unlikely(*count < BUFS_PER_CMD)) + return -ENOMEM; + } + + return 0; +} + +static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw) +{ + u64 buf_array[BUFS_PER_CMD]; + int ret; + + do { + ret = dpaa2_io_service_acquire(NULL, ethsw->bpid, + buf_array, BUFS_PER_CMD); + if (ret < 0) { + dev_err(ethsw->dev, + "dpaa2_io_service_acquire() = %d\n", ret); + return; + } + dpaa2_switch_free_bufs(ethsw, buf_array, ret); + + } while (ret); +} + +static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw) +{ + struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 }; + struct device *dev = ethsw->dev; + struct fsl_mc_device *dpbp_dev; + struct dpbp_attr dpbp_attrs; + int err; + + err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, + &dpbp_dev); + if (err) { + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_err(dev, "DPBP device allocation failed\n"); + return err; + } + ethsw->dpbp_dev = dpbp_dev; + + err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id, + &dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_open() failed\n"); + goto err_open; + } + + err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_reset() failed\n"); + goto err_reset; + } + + err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_enable() failed\n"); + goto err_enable; + } + + err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle, + &dpbp_attrs); + if (err) { + dev_err(dev, "dpbp_get_attributes() failed\n"); + goto err_get_attr; + } + + dpsw_ctrl_if_pools_cfg.num_dpbp = 1; + dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id; + dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE; + dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0; + + err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle, + &dpsw_ctrl_if_pools_cfg); + if (err) { + dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n"); + goto err_get_attr; + } + ethsw->bpid = dpbp_attrs.id; + + return 0; + +err_get_attr: + dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle); +err_enable: +err_reset: + dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle); +err_open: + fsl_mc_object_free(dpbp_dev); + return err; +} + +static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw) +{ + dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); + dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); + fsl_mc_object_free(ethsw->dpbp_dev); +} + +static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw) +{ + int i; + + for (i = 0; i < 
DPAA2_SWITCH_RX_NUM_FQS; i++) { + ethsw->fq[i].store = + dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE, + ethsw->dev); + if (!ethsw->fq[i].store) { + dev_err(ethsw->dev, "dpaa2_io_store_create failed\n"); + while (--i >= 0) + dpaa2_io_store_destroy(ethsw->fq[i].store); + return -ENOMEM; + } + } + + return 0; +} + +static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw) +{ + int i; + + for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) + dpaa2_io_store_destroy(ethsw->fq[i].store); +} + +static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq) +{ + int err, retries = 0; + + /* Try to pull from the FQ while the portal is busy and we didn't hit + * the maximum number of retries + */ + do { + err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store); + cpu_relax(); + } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); + + if (unlikely(err)) + dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err); + + return err; +} + +/* Consume all frames pull-dequeued into the store */ +static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq) +{ + struct ethsw_core *ethsw = fq->ethsw; + int cleaned = 0, is_last; + struct dpaa2_dq *dq; + int retries = 0; + + do { + /* Get the next available FD from the store */ + dq = dpaa2_io_store_next(fq->store, &is_last); + if (unlikely(!dq)) { + if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) { + dev_err_once(ethsw->dev, + "No valid dequeue response\n"); + return -ETIMEDOUT; + } + continue; + } + + if (fq->type == DPSW_QUEUE_RX) + dpaa2_switch_rx(fq, dpaa2_dq_fd(dq)); + else + dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq)); + cleaned++; + + } while (!is_last); + + return cleaned; +} + +/* NAPI poll routine */ +static int dpaa2_switch_poll(struct napi_struct *napi, int budget) +{ + int err, cleaned = 0, store_cleaned, work_done; + struct dpaa2_switch_fq *fq; + int retries = 0; + + fq = container_of(napi, struct dpaa2_switch_fq, napi); + + do { + err = dpaa2_switch_pull_fq(fq); + if (unlikely(err)) + break; + + /* Refill pool if appropriate */ + dpaa2_switch_refill_bp(fq->ethsw); + + store_cleaned = dpaa2_switch_store_consume(fq); + cleaned += store_cleaned; + + if (cleaned >= budget) { + work_done = budget; + goto out; + } + + } while (store_cleaned); + + /* We didn't consume the entire budget, so finish napi and re-enable + * data availability notifications + */ + napi_complete_done(napi, cleaned); + do { + err = dpaa2_io_service_rearm(NULL, &fq->nctx); + cpu_relax(); + } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); + + work_done = max(cleaned, 1); +out: + + return work_done; +} + +static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx) +{ + struct dpaa2_switch_fq *fq; + + fq = container_of(nctx, struct dpaa2_switch_fq, nctx); + + napi_schedule(&fq->napi); +} + +static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw) +{ + struct dpsw_ctrl_if_queue_cfg queue_cfg; + struct dpaa2_io_notification_ctx *nctx; + int err, i, j; + + for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) { + nctx = &ethsw->fq[i].nctx; + + /* Register a new software context for the FQID. 
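+ * The context binds the frame queue to a DPIO service and carries
+ * the FQDAN callback that schedules NAPI on data availability.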
+ * By using NULL as the first parameter, we specify that we do + * not care on which cpu the interrupts for this queue are received + */ + nctx->is_cdan = 0; + nctx->id = ethsw->fq[i].fqid; + nctx->desired_cpu = DPAA2_IO_ANY_CPU; + nctx->cb = dpaa2_switch_fqdan_cb; + err = dpaa2_io_service_register(NULL, nctx, ethsw->dev); + if (err) { + err = -EPROBE_DEFER; + goto err_register; + } + + queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST | + DPSW_CTRL_IF_QUEUE_OPT_USER_CTX; + queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO; + queue_cfg.dest_cfg.dest_id = nctx->dpio_id; + queue_cfg.dest_cfg.priority = 0; + queue_cfg.user_ctx = nctx->qman64; + + err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0, + ethsw->dpsw_handle, + ethsw->fq[i].type, + &queue_cfg); + if (err) + goto err_set_queue; + } + + return 0; + +err_set_queue: + dpaa2_io_service_deregister(NULL, nctx, ethsw->dev); +err_register: + for (j = 0; j < i; j++) + dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx, + ethsw->dev); + + return err; +} + +static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw) +{ + int i; + + for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) + dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx, + ethsw->dev); +} + +static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw) +{ + int err; + + /* setup FQs for Rx and Tx Conf */ + err = dpaa2_switch_setup_fqs(ethsw); + if (err) + return err; + + /* setup the buffer pool needed on the Rx path */ + err = dpaa2_switch_setup_dpbp(ethsw); + if (err) + return err; + + err = dpaa2_switch_seed_bp(ethsw); + if (err) + goto err_free_dpbp; + + err = dpaa2_switch_alloc_rings(ethsw); + if (err) + goto err_drain_dpbp; + + err = dpaa2_switch_setup_dpio(ethsw); + if (err) + goto err_destroy_rings; + + err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); + if (err) { + dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err); + goto err_deregister_dpio; + } + + return 0; + +err_deregister_dpio: + dpaa2_switch_free_dpio(ethsw); +err_destroy_rings: + dpaa2_switch_destroy_rings(ethsw); +err_drain_dpbp: + dpaa2_switch_drain_bp(ethsw); +err_free_dpbp: + dpaa2_switch_free_dpbp(ethsw); + + return err; +} + +static int dpaa2_switch_init(struct fsl_mc_device *sw_dev) +{ + struct device *dev = &sw_dev->dev; + struct ethsw_core *ethsw = dev_get_drvdata(dev); + struct dpsw_vlan_if_cfg vcfg = {0}; + struct dpsw_tci_cfg tci_cfg = {0}; + struct dpsw_stp_cfg stp_cfg; + int err; + u16 i; + + ethsw->dev_id = sw_dev->obj_desc.id; + + err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle); + if (err) { + dev_err(dev, "dpsw_open err %d\n", err); + return err; + } + + err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, + &ethsw->sw_attr); + if (err) { + dev_err(dev, "dpsw_get_attributes err %d\n", err); + goto err_close; + } + + err = dpsw_get_api_version(ethsw->mc_io, 0, + &ethsw->major, + &ethsw->minor); + if (err) { + dev_err(dev, "dpsw_get_api_version err %d\n", err); + goto err_close; + } + + /* Minimum supported DPSW version check */ + if (ethsw->major < DPSW_MIN_VER_MAJOR || + (ethsw->major == DPSW_MIN_VER_MAJOR && + ethsw->minor < DPSW_MIN_VER_MINOR)) { + dev_err(dev, "DPSW version %d:%d not supported. 
Use firmware 10.28.0 or greater.\n", + ethsw->major, ethsw->minor); + err = -EOPNOTSUPP; + goto err_close; + } + + if (!dpaa2_switch_supports_cpu_traffic(ethsw)) { + err = -EOPNOTSUPP; + goto err_close; + } + + dpaa2_switch_detect_features(ethsw); + + err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle); + if (err) { + dev_err(dev, "dpsw_reset err %d\n", err); + goto err_close; + } + + stp_cfg.vlan_id = DEFAULT_VLAN_ID; + stp_cfg.state = DPSW_STP_STATE_FORWARDING; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { + err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i); + if (err) { + dev_err(dev, "dpsw_if_disable err %d\n", err); + goto err_close; + } + + err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i, + &stp_cfg); + if (err) { + dev_err(dev, "dpsw_if_set_stp err %d for port %d\n", + err, i); + goto err_close; + } + + /* Switch starts with all ports configured to VLAN 1. Need to + * remove this setting to allow configuration at bridge join + */ + vcfg.num_ifs = 1; + vcfg.if_id[0] = i; + err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle, + DEFAULT_VLAN_ID, &vcfg); + if (err) { + dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n", + err); + goto err_close; + } + + tci_cfg.vlan_id = 4095; + err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg); + if (err) { + dev_err(dev, "dpsw_if_set_tci err %d\n", err); + goto err_close; + } + + err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, + DEFAULT_VLAN_ID, &vcfg); + if (err) { + dev_err(dev, "dpsw_vlan_remove_if err %d\n", err); + goto err_close; + } + } + + err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID); + if (err) { + dev_err(dev, "dpsw_vlan_remove err %d\n", err); + goto err_close; + } + + ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered", + WQ_MEM_RECLAIM, "ethsw", + ethsw->sw_attr.id); + if (!ethsw->workqueue) { + err = -ENOMEM; + goto err_close; + } + + err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0); + if (err) + goto err_destroy_ordered_workqueue; + + err = dpaa2_switch_ctrl_if_setup(ethsw); + if (err) + goto err_destroy_ordered_workqueue; + + return 0; + +err_destroy_ordered_workqueue: + destroy_workqueue(ethsw->workqueue); + +err_close: + dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); + return err; +} + +static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) +{ + struct switchdev_obj_port_vlan vlan = { + .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, + .vid = DEFAULT_VLAN_ID, + .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID, + }; + struct net_device *netdev = port_priv->netdev; + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpsw_fdb_cfg fdb_cfg = {0}; + struct dpaa2_switch_fdb *fdb; + struct dpsw_if_attr dpsw_if_attr; + u16 fdb_id; + int err; + + /* Get the Tx queue for this specific port */ + err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, + port_priv->idx, &dpsw_if_attr); + if (err) { + netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err); + return err; + } + port_priv->tx_qdid = dpsw_if_attr.qdid; + + /* Create a FDB table for this particular switch port */ + fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs; + err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle, + &fdb_id, &fdb_cfg); + if (err) { + netdev_err(netdev, "dpsw_fdb_add err %d\n", err); + return err; + } + + /* Find an unused dpaa2_switch_fdb structure and use it */ + fdb = dpaa2_switch_fdb_get_unused(ethsw); + fdb->fdb_id = fdb_id; + fdb->in_use 
= true; + fdb->bridge_dev = NULL; + port_priv->fdb = fdb; + + /* We need to add VLAN 1 as the PVID on this port until it is under a + * bridge since the DPAA2 switch is not able to handle the traffic in a + * VLAN unaware fashion + */ + err = dpaa2_switch_port_vlans_add(netdev, &vlan); + if (err) + return err; + + /* Setup the egress flooding domains (broadcast, unknown unicast) */ + err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); + if (err) + return err; + + return err; +} + +static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev) +{ + struct device *dev = &sw_dev->dev; + struct ethsw_core *ethsw = dev_get_drvdata(dev); + int err; + + err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); + if (err) + dev_warn(dev, "dpsw_close err %d\n", err); +} + +static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw) +{ + dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); + dpaa2_switch_free_dpio(ethsw); + dpaa2_switch_destroy_rings(ethsw); + dpaa2_switch_drain_bp(ethsw); + dpaa2_switch_free_dpbp(ethsw); +} + +static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) +{ + struct ethsw_port_priv *port_priv; + struct ethsw_core *ethsw; + struct device *dev; + int i; + + dev = &sw_dev->dev; + ethsw = dev_get_drvdata(dev); + + dpaa2_switch_ctrl_if_teardown(ethsw); + + dpaa2_switch_teardown_irqs(sw_dev); + + dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { + port_priv = ethsw->ports[i]; + unregister_netdev(port_priv->netdev); + free_netdev(port_priv->netdev); + } + + kfree(ethsw->fdbs); + kfree(ethsw->ports); + + dpaa2_switch_takedown(sw_dev); + + destroy_workqueue(ethsw->workqueue); + + fsl_mc_portal_free(ethsw->mc_io); + + kfree(ethsw); + + dev_set_drvdata(dev, NULL); + + return 0; +} + +static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, + u16 port_idx) +{ + struct ethsw_port_priv *port_priv; + struct device *dev = ethsw->dev; + struct net_device *port_netdev; + int err; + + port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv)); + if (!port_netdev) { + dev_err(dev, "alloc_etherdev error\n"); + return -ENOMEM; + } + + port_priv = netdev_priv(port_netdev); + port_priv->netdev = port_netdev; + port_priv->ethsw_data = ethsw; + + port_priv->idx = port_idx; + port_priv->stp_state = BR_STATE_FORWARDING; + + SET_NETDEV_DEV(port_netdev, dev); + port_netdev->netdev_ops = &dpaa2_switch_port_ops; + port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops; + + port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM; + + port_priv->bcast_flood = true; + port_priv->ucast_flood = true; + + /* Set MTU limits */ + port_netdev->min_mtu = ETH_MIN_MTU; + port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH; + + /* Populate the private port structure so that later calls to + * dpaa2_switch_port_init() can use it. + */ + ethsw->ports[port_idx] = port_priv; + + /* The DPAA2 switch's ingress path depends on the VLAN table, + * thus we are not able to disable VLAN filtering. 
+ */ + port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER; + + err = dpaa2_switch_port_init(port_priv, port_idx); + if (err) + goto err_port_probe; + + err = dpaa2_switch_port_set_mac_addr(port_priv); + if (err) + goto err_port_probe; + + err = dpaa2_switch_port_set_learning(port_priv, false); + if (err) + goto err_port_probe; + + return 0; + +err_port_probe: + free_netdev(port_netdev); + ethsw->ports[port_idx] = NULL; + + return err; +} + +static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) +{ + struct device *dev = &sw_dev->dev; + struct ethsw_core *ethsw; + int i, err; + + /* Allocate switch core */ + ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL); + + if (!ethsw) + return -ENOMEM; + + ethsw->dev = dev; + ethsw->iommu_domain = iommu_get_domain_for_dev(dev); + dev_set_drvdata(dev, ethsw); + + err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, + &ethsw->mc_io); + if (err) { + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); + goto err_free_drvdata; + } + + err = dpaa2_switch_init(sw_dev); + if (err) + goto err_free_cmdport; + + ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports), + GFP_KERNEL); + if (!(ethsw->ports)) { + err = -ENOMEM; + goto err_takedown; + } + + ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs), + GFP_KERNEL); + if (!ethsw->fdbs) { + err = -ENOMEM; + goto err_free_ports; + } + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { + err = dpaa2_switch_probe_port(ethsw, i); + if (err) + goto err_free_netdev; + } + + /* Add a NAPI instance for each of the Rx queues. The first port's + * net_device will be associated with the instances since we do not have + * different queues for each switch port. 
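+ * The instances are enabled and disabled on demand through the
+ * refcounted dpaa2_switch_enable_ctrl_if_napi() helpers.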
+ */ + for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) + netif_napi_add(ethsw->ports[0]->netdev, + &ethsw->fq[i].napi, dpaa2_switch_poll, + NAPI_POLL_WEIGHT); + + err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); + if (err) { + dev_err(ethsw->dev, "dpsw_enable err %d\n", err); + goto err_free_netdev; + } + + /* Setup IRQs */ + err = dpaa2_switch_setup_irqs(sw_dev); + if (err) + goto err_stop; + + /* Register the netdev only when the entire setup is done and the + * switch port interfaces are ready to receive traffic + */ + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { + err = register_netdev(ethsw->ports[i]->netdev); + if (err < 0) { + dev_err(dev, "register_netdev error %d\n", err); + goto err_unregister_ports; + } + } + + return 0; + +err_unregister_ports: + for (i--; i >= 0; i--) + unregister_netdev(ethsw->ports[i]->netdev); + dpaa2_switch_teardown_irqs(sw_dev); +err_stop: + dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); +err_free_netdev: + for (i--; i >= 0; i--) + free_netdev(ethsw->ports[i]->netdev); + kfree(ethsw->fdbs); +err_free_ports: + kfree(ethsw->ports); + +err_takedown: + dpaa2_switch_takedown(sw_dev); + +err_free_cmdport: + fsl_mc_portal_free(ethsw->mc_io); + +err_free_drvdata: + kfree(ethsw); + dev_set_drvdata(dev, NULL); + + return err; +} + +static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = { + { + .vendor = FSL_MC_VENDOR_FREESCALE, + .obj_type = "dpsw", + }, + { .vendor = 0x0 } +}; +MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table); + +static struct fsl_mc_driver dpaa2_switch_drv = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, + .probe = dpaa2_switch_probe, + .remove = dpaa2_switch_remove, + .match_id_table = dpaa2_switch_match_id_table +}; + +static struct notifier_block dpaa2_switch_port_nb __read_mostly = { + .notifier_call = dpaa2_switch_port_netdevice_event, +}; + +static struct notifier_block dpaa2_switch_port_switchdev_nb = { + .notifier_call = dpaa2_switch_port_event, +}; + +static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = { + .notifier_call = dpaa2_switch_port_blocking_event, +}; + +static int dpaa2_switch_register_notifiers(void) +{ + int err; + + err = register_netdevice_notifier(&dpaa2_switch_port_nb); + if (err) { + pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err); + return err; + } + + err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); + if (err) { + pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err); + goto err_switchdev_nb; + } + + err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); + if (err) { + pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err); + goto err_switchdev_blocking_nb; + } + + return 0; + +err_switchdev_blocking_nb: + unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); +err_switchdev_nb: + unregister_netdevice_notifier(&dpaa2_switch_port_nb); + + return err; +} + +static void dpaa2_switch_unregister_notifiers(void) +{ + int err; + + err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); + if (err) + pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n", + err); + + err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); + if (err) + pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err); + + err = unregister_netdevice_notifier(&dpaa2_switch_port_nb); + if (err) + pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", 
err); +} + +static int __init dpaa2_switch_driver_init(void) +{ + int err; + + err = fsl_mc_driver_register(&dpaa2_switch_drv); + if (err) + return err; + + err = dpaa2_switch_register_notifiers(); + if (err) { + fsl_mc_driver_unregister(&dpaa2_switch_drv); + return err; + } + + return 0; +} + +static void __exit dpaa2_switch_driver_exit(void) +{ + dpaa2_switch_unregister_notifiers(); + fsl_mc_driver_unregister(&dpaa2_switch_drv); +} + +module_init(dpaa2_switch_driver_init); +module_exit(dpaa2_switch_driver_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver"); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h new file mode 100644 index 000000000000..549218994243 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DPAA2 Ethernet Switch declarations + * + * Copyright 2014-2016 Freescale Semiconductor Inc. + * Copyright 2017-2021 NXP + * + */ + +#ifndef __ETHSW_H +#define __ETHSW_H + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/if_vlan.h> +#include <uapi/linux/if_bridge.h> +#include <net/switchdev.h> +#include <linux/if_bridge.h> +#include <linux/fsl/mc.h> +#include <soc/fsl/dpaa2-io.h> + +#include "dpsw.h" + +/* Number of IRQs supported */ +#define DPSW_IRQ_NUM 2 + +/* Port is member of VLAN */ +#define ETHSW_VLAN_MEMBER 1 +/* VLAN to be treated as untagged on egress */ +#define ETHSW_VLAN_UNTAGGED 2 +/* Untagged frames will be assigned to this VLAN */ +#define ETHSW_VLAN_PVID 4 +/* VLAN configured on the switch */ +#define ETHSW_VLAN_GLOBAL 8 + +/* Maximum Frame Length supported by HW (currently 10k) */ +#define DPAA2_MFL (10 * 1024) +#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN) + +#define ETHSW_FEATURE_MAC_ADDR BIT(0) + +/* Number of receive queues (one RX and one TX_CONF) */ +#define DPAA2_SWITCH_RX_NUM_FQS 2 + +/* Hardware requires alignment for ingress/egress buffer addresses */ +#define DPAA2_SWITCH_RX_BUF_RAW_SIZE PAGE_SIZE +#define DPAA2_SWITCH_RX_BUF_TAILROOM \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#define DPAA2_SWITCH_RX_BUF_SIZE \ + (DPAA2_SWITCH_RX_BUF_RAW_SIZE - DPAA2_SWITCH_RX_BUF_TAILROOM) + +#define DPAA2_SWITCH_STORE_SIZE 16 + +/* Buffer management */ +#define BUFS_PER_CMD 7 +#define DPAA2_ETHSW_NUM_BUFS (1024 * BUFS_PER_CMD) +#define DPAA2_ETHSW_REFILL_THRESH (DPAA2_ETHSW_NUM_BUFS * 5 / 6) + +/* Number of times to retry DPIO portal operations while waiting + * for portal to finish executing current command and become + * available. 
We want to avoid being stuck in a while loop in case + * hardware becomes unresponsive, but not give up too easily if + * the portal really is busy for valid reasons + */ +#define DPAA2_SWITCH_SWP_BUSY_RETRIES 1000 + +/* Hardware annotation buffer size */ +#define DPAA2_SWITCH_HWA_SIZE 64 +/* Software annotation buffer size */ +#define DPAA2_SWITCH_SWA_SIZE 64 + +#define DPAA2_SWITCH_TX_BUF_ALIGN 64 + +#define DPAA2_SWITCH_TX_DATA_OFFSET \ + (DPAA2_SWITCH_HWA_SIZE + DPAA2_SWITCH_SWA_SIZE) + +#define DPAA2_SWITCH_NEEDED_HEADROOM \ + (DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN) + +extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops; + +struct ethsw_core; + +struct dpaa2_switch_fq { + struct ethsw_core *ethsw; + enum dpsw_queue_type type; + struct dpaa2_io_store *store; + struct dpaa2_io_notification_ctx nctx; + struct napi_struct napi; + u32 fqid; +}; + +struct dpaa2_switch_fdb { + struct net_device *bridge_dev; + u16 fdb_id; + bool in_use; +}; + +/* Per port private data */ +struct ethsw_port_priv { + struct net_device *netdev; + u16 idx; + struct ethsw_core *ethsw_data; + u8 link_state; + u8 stp_state; + + u8 vlans[VLAN_VID_MASK + 1]; + u16 pvid; + u16 tx_qdid; + + struct dpaa2_switch_fdb *fdb; + bool bcast_flood; + bool ucast_flood; +}; + +/* Switch data */ +struct ethsw_core { + struct device *dev; + struct fsl_mc_io *mc_io; + u16 dpsw_handle; + struct dpsw_attr sw_attr; + u16 major, minor; + unsigned long features; + int dev_id; + struct ethsw_port_priv **ports; + struct iommu_domain *iommu_domain; + + u8 vlans[VLAN_VID_MASK + 1]; + + struct workqueue_struct *workqueue; + + struct dpaa2_switch_fq fq[DPAA2_SWITCH_RX_NUM_FQS]; + struct fsl_mc_device *dpbp_dev; + int buf_count; + u16 bpid; + int napi_users; + + struct dpaa2_switch_fdb *fdbs; +}; + +static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw) +{ + if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) { + dev_err(ethsw->dev, "Control Interface is disabled, cannot probe\n"); + return false; + } + + if (ethsw->sw_attr.flooding_cfg != DPSW_FLOODING_PER_FDB) { + dev_err(ethsw->dev, "Flooding domain is not per FDB, cannot probe\n"); + return false; + } + + if (ethsw->sw_attr.broadcast_cfg != DPSW_BROADCAST_PER_FDB) { + dev_err(ethsw->dev, "Broadcast domain is not per FDB, cannot probe\n"); + return false; + } + + if (ethsw->sw_attr.max_fdbs < ethsw->sw_attr.num_ifs) { + dev_err(ethsw->dev, "The number of FDBs is lower than the number of ports, cannot probe\n"); + return false; + } + + return true; +} + +bool dpaa2_switch_port_dev_check(const struct net_device *netdev); + +int dpaa2_switch_port_vlans_add(struct net_device *netdev, + const struct switchdev_obj_port_vlan *vlan); + +int dpaa2_switch_port_vlans_del(struct net_device *netdev, + const struct switchdev_obj_port_vlan *vlan); + +typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv, + struct fdb_dump_entry *fdb_entry, + void *data); +#endif /* __ETHSW_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpkg.h b/drivers/net/ethernet/freescale/dpaa2/dpkg.h index 6de613b13e4d..6f596a5fbeeb 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpkg.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpkg.h @@ -13,11 +13,12 @@ /** Key Generator properties */ /** - * Number of masks per key extraction + * DPKG_NUM_OF_MASKS - Number of masks per key extraction */ #define DPKG_NUM_OF_MASKS 4 + /** - * Number of extractions per key profile + * DPKG_MAX_NUM_OF_EXTRACTS - Number of extractions per key profile */ #define 
DPKG_MAX_NUM_OF_EXTRACTS 10 diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h index 135f143097a5..8f7ceb731282 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpmac.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h @@ -83,39 +83,21 @@ int dpmac_get_attributes(struct fsl_mc_io *mc_io, u16 token, struct dpmac_attr *attr); -/** - * DPMAC link configuration/state options - */ +/* DPMAC link configuration/state options */ -/** - * Enable auto-negotiation - */ #define DPMAC_LINK_OPT_AUTONEG BIT_ULL(0) -/** - * Enable half-duplex mode - */ #define DPMAC_LINK_OPT_HALF_DUPLEX BIT_ULL(1) -/** - * Enable pause frames - */ #define DPMAC_LINK_OPT_PAUSE BIT_ULL(2) -/** - * Enable a-symmetric pause frames - */ #define DPMAC_LINK_OPT_ASYM_PAUSE BIT_ULL(3) -/** - * Advertised link speeds - */ +/* Advertised link speeds */ #define DPMAC_ADVERTISED_10BASET_FULL BIT_ULL(0) #define DPMAC_ADVERTISED_100BASET_FULL BIT_ULL(1) #define DPMAC_ADVERTISED_1000BASET_FULL BIT_ULL(2) #define DPMAC_ADVERTISED_10000BASET_FULL BIT_ULL(4) #define DPMAC_ADVERTISED_2500BASEX_FULL BIT_ULL(5) -/** - * Advertise auto-negotiation enable - */ +/* Advertise auto-negotiation enable */ #define DPMAC_ADVERTISED_AUTONEG BIT_ULL(3) /** diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c index aa429c17c343..d6afada99fb6 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c @@ -17,6 +17,8 @@ * This function has to be called before the following functions: * - dpni_set_rx_tc_dist() * - dpni_set_qos_table() + * + * Return: '0' on Success; Error code otherwise. */ int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf) { @@ -1793,6 +1795,8 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io, * If cfg.enable is set to 0 the command will clear flow steering table. * The packets will be classified according to settings made in * dpni_set_rx_hash_dist() + * + * Return: '0' on Success; Error code otherwise. */ int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, u32 cmd_flags, @@ -1826,6 +1830,8 @@ int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, * If cfg.enable is set to 1 the packets will be classified using a hash * function based on the key received in cfg.key_cfg_iova parameter. * If cfg.enable is set to 0 the packets will be sent to the default queue + * + * Return: '0' on Success; Error code otherwise. 
*/ int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, u32 cmd_flags, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h index 4e96d9362dd2..7de0562bbf59 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h @@ -10,73 +10,76 @@ struct fsl_mc_io; -/** - * Data Path Network Interface API +/* Data Path Network Interface API * Contains initialization APIs and runtime control APIs for DPNI */ /** General DPNI macros */ /** - * Maximum number of traffic classes + * DPNI_MAX_TC - Maximum number of traffic classes */ #define DPNI_MAX_TC 8 /** - * Maximum number of buffer pools per DPNI + * DPNI_MAX_DPBP - Maximum number of buffer pools per DPNI */ #define DPNI_MAX_DPBP 8 /** - * All traffic classes considered; see dpni_set_queue() + * DPNI_ALL_TCS - All traffic classes considered; see dpni_set_queue() */ #define DPNI_ALL_TCS (u8)(-1) /** - * All flows within traffic class considered; see dpni_set_queue() + * DPNI_ALL_TC_FLOWS - All flows within traffic class considered; see + * dpni_set_queue() */ #define DPNI_ALL_TC_FLOWS (u16)(-1) /** - * Generate new flow ID; see dpni_set_queue() + * DPNI_NEW_FLOW_ID - Generate new flow ID; see dpni_set_queue() */ #define DPNI_NEW_FLOW_ID (u16)(-1) /** - * Tx traffic is always released to a buffer pool on transmit, there are no - * resources allocated to have the frames confirmed back to the source after - * transmission. + * DPNI_OPT_TX_FRM_RELEASE - Tx traffic is always released to a buffer pool on + * transmit, there are no resources allocated to have the frames confirmed back + * to the source after transmission. */ #define DPNI_OPT_TX_FRM_RELEASE 0x000001 /** - * Disables support for MAC address filtering for addresses other than primary - * MAC address. This affects both unicast and multicast. Promiscuous mode can - * still be enabled/disabled for both unicast and multicast. If promiscuous mode - * is disabled, only traffic matching the primary MAC address will be accepted. + * DPNI_OPT_NO_MAC_FILTER - Disables support for MAC address filtering for + * addresses other than primary MAC address. This affects both unicast and + * multicast. Promiscuous mode can still be enabled/disabled for both unicast + * and multicast. If promiscuous mode is disabled, only traffic matching the + * primary MAC address will be accepted. */ #define DPNI_OPT_NO_MAC_FILTER 0x000002 /** - * Allocate policers for this DPNI. They can be used to rate-limit traffic per - * traffic class (TC) basis. + * DPNI_OPT_HAS_POLICING - Allocate policers for this DPNI. They can be used to + * rate-limit traffic on a per traffic class (TC) basis. */ #define DPNI_OPT_HAS_POLICING 0x000004 /** - * Congestion can be managed in several ways, allowing the buffer pool to - * deplete on ingress, taildrop on each queue or use congestion groups for sets - * of queues. If set, it configures a single congestion groups across all TCs. - * If reset, a congestion group is allocated for each TC. Only relevant if the - * DPNI has multiple traffic classes. + * DPNI_OPT_SHARED_CONGESTION - Congestion can be managed in several ways, + * allowing the buffer pool to deplete on ingress, taildrop on each queue or + * use congestion groups for sets of queues. If set, it configures a single + * congestion group across all TCs. If reset, a congestion group is allocated + * for each TC. Only relevant if the DPNI has multiple traffic classes.
*/ #define DPNI_OPT_SHARED_CONGESTION 0x000008 /** - * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all - * look-ups are exact match. Note that TCAM is not available on LS1088 and its - * variants. Setting this bit on these SoCs will trigger an error. + * DPNI_OPT_HAS_KEY_MASKING - Enables TCAM for Flow Steering and QoS look-ups. + * If not specified, all look-ups are exact match. Note that TCAM is not + * available on LS1088 and its variants. Setting this bit on these SoCs will + * trigger an error. */ #define DPNI_OPT_HAS_KEY_MASKING 0x000010 /** - * Disables the flow steering table. + * DPNI_OPT_NO_FS - Disables the flow steering table. */ #define DPNI_OPT_NO_FS 0x000020 /** - * Flow steering table is shared between all traffic classes + * DPNI_OPT_SHARED_FS - Flow steering table is shared between all traffic + * classes */ #define DPNI_OPT_SHARED_FS 0x001000 @@ -129,20 +132,14 @@ int dpni_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); -/** - * DPNI IRQ Index and Events - */ +/* DPNI IRQ Index and Events */ -/** - * IRQ index - */ #define DPNI_IRQ_INDEX 0 -/** - * IRQ events: - * indicates a change in link state - * indicates a change in endpoint - */ + +/* DPNI_IRQ_EVENT_LINK_CHANGED - indicates a change in link state */ #define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 + +/* DPNI_IRQ_EVENT_ENDPOINT_CHANGED - indicates a change in endpoint */ #define DPNI_IRQ_EVENT_ENDPOINT_CHANGED 0x00000002 int dpni_set_irq_enable(struct fsl_mc_io *mc_io, @@ -222,32 +219,30 @@ int dpni_get_attributes(struct fsl_mc_io *mc_io, u16 token, struct dpni_attr *attr); -/** - * DPNI errors - */ +/* DPNI errors */ /** - * Extract out of frame header error + * DPNI_ERROR_EOFHE - Extract out of frame header error */ #define DPNI_ERROR_EOFHE 0x00020000 /** - * Frame length error + * DPNI_ERROR_FLE - Frame length error */ #define DPNI_ERROR_FLE 0x00002000 /** - * Frame physical error + * DPNI_ERROR_FPE - Frame physical error */ #define DPNI_ERROR_FPE 0x00001000 /** - * Parsing header error + * DPNI_ERROR_PHE - Parsing header error */ #define DPNI_ERROR_PHE 0x00000020 /** - * Parser L3 checksum error + * DPNI_ERROR_L3CE - Parser L3 checksum error */ #define DPNI_ERROR_L3CE 0x00000004 /** - * Parser L3 checksum error + * DPNI_ERROR_L4CE - Parser L4 checksum error */ #define DPNI_ERROR_L4CE 0x00000001 @@ -281,36 +276,35 @@ int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, u16 token, struct dpni_error_cfg *cfg); -/** - * DPNI buffer layout modification options - */ +/* DPNI buffer layout modification options */ /** - * Select to modify the time-stamp setting + * DPNI_BUF_LAYOUT_OPT_TIMESTAMP - Select to modify the time-stamp setting */ #define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001 /** - * Select to modify the parser-result setting; not applicable for Tx + * DPNI_BUF_LAYOUT_OPT_PARSER_RESULT - Select to modify the parser-result + * setting; not applicable for Tx */ #define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002 /** - * Select to modify the frame-status setting + * DPNI_BUF_LAYOUT_OPT_FRAME_STATUS - Select to modify the frame-status setting */ #define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004 /** - * Select to modify the private-data-size setting + * DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE - Select to modify the private-data-size setting */ #define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008 /** - * Select to modify the data-alignment setting + * DPNI_BUF_LAYOUT_OPT_DATA_ALIGN - Select to modify the data-alignment setting */ #define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN
0x00000010 /** - * Select to modify the data-head-room setting + * DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM - Select to modify the data-head-room setting */ #define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 /** - * Select to modify the data-tail-room setting + * DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM - Select to modify the data-tail-room setting */ #define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 @@ -343,7 +337,8 @@ struct dpni_buffer_layout { * @DPNI_QUEUE_TX: Tx queue * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue * @DPNI_QUEUE_RX_ERR: Rx error queue - */enum dpni_queue_type { + */ +enum dpni_queue_type { DPNI_QUEUE_RX, DPNI_QUEUE_TX, DPNI_QUEUE_TX_CONFIRM, @@ -424,7 +419,7 @@ int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, * lack of buffers * @page_2.egress_discarded_frames: Egress discarded frame count * @page_2.egress_confirmed_frames: Egress confirmed frame count - * @page3: Page_3 statistics structure + * @page_3: Page_3 statistics structure * @page_3.egress_dequeue_bytes: Cumulative count of the number of bytes * dequeued from egress FQs * @page_3.egress_dequeue_frames: Cumulative count of the number of frames @@ -501,30 +496,14 @@ int dpni_get_statistics(struct fsl_mc_io *mc_io, u8 page, union dpni_statistics *stat); -/** - * Enable auto-negotiation - */ #define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL -/** - * Enable half-duplex mode - */ #define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -/** - * Enable pause frames - */ #define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL -/** - * Enable a-symmetric pause frames - */ #define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL - -/** - * Enable priority flow control pause frames - */ #define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL /** - * struct - Structure representing DPNI link configuration + * struct dpni_link_cfg - Structure representing DPNI link configuration * @rate: Rate * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values */ @@ -687,8 +666,8 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, const struct dpni_rx_tc_dist_cfg *cfg); /** - * When used for fs_miss_flow_id in function dpni_set_rx_dist, - * will signal to dpni to drop all unclassified frames + * DPNI_FS_MISS_DROP - When used for fs_miss_flow_id in function + * dpni_set_rx_dist, will signal to dpni to drop all unclassified frames */ #define DPNI_FS_MISS_DROP ((uint16_t)-1) @@ -766,7 +745,7 @@ enum dpni_dest { /** * struct dpni_queue - Queue structure - * @destination - Destination structure + * @destination: - Destination structure * @destination.id: ID of the destination, only relevant if DEST_TYPE is > 0. * Identifies either a DPIO or a DPCON object. * Not relevant for Tx queues. @@ -837,9 +816,7 @@ struct dpni_queue_id { u16 qdbin; }; -/** - * Set User Context - */ +/* Set User Context */ #define DPNI_QUEUE_OPT_USER_CTX 0x00000001 #define DPNI_QUEUE_OPT_DEST 0x00000002 #define DPNI_QUEUE_OPT_FLC 0x00000004 @@ -904,9 +881,9 @@ struct dpni_dest_cfg { /* DPNI congestion options */ /** - * This congestion will trigger flow control or priority flow control. - * This will have effect only if flow control is enabled with - * dpni_set_link_cfg(). + * DPNI_CONG_OPT_FLOW_CONTROL - This congestion will trigger flow control or + * priority flow control. This will have effect only if flow control is + * enabled with dpni_set_link_cfg(). */ #define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040 @@ -990,23 +967,24 @@ struct dpni_rule_cfg { }; /** - * Discard matching traffic. 
If set, this takes precedence over any other - * configuration and matching traffic is always discarded. + * DPNI_FS_OPT_DISCARD - Discard matching traffic. If set, this takes + * precedence over any other configuration and matching traffic is always + * discarded. */ #define DPNI_FS_OPT_DISCARD 0x1 /** - * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to - * override the FLC value set per queue. + * DPNI_FS_OPT_SET_FLC - Set FLC value. If set, flc member of struct + * dpni_fs_action_cfg is used to override the FLC value set per queue. * For more details check the Frame Descriptor section in the hardware * documentation. */ #define DPNI_FS_OPT_SET_FLC 0x2 /** - * Indicates whether the 6 lowest significant bits of FLC are used for stash - * control. If set, the 6 least significant bits in value are interpreted as - * follows: + * DPNI_FS_OPT_SET_STASH_CONTROL - Indicates whether the 6 least significant + * bits of FLC are used for stash control. If set, the 6 least significant bits + * in value are interpreted as follows: * - bits 0-1: indicates the number of 64 byte units of context that are * stashed. FLC value is interpreted as a memory address in this case, * excluding the 6 LS bits. @@ -1068,7 +1046,7 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io, u16 *major_ver, u16 *minor_ver); /** - * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration + * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping configuration * @rate_limit: Rate in Mbps * @max_burst_size: Burst size in bytes (up to 64KB) */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h index 05c413719e55..01d77c685a5b 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dprtc.h +++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h @@ -13,9 +13,6 @@ struct fsl_mc_io; -/** - * Number of irq's - */ #define DPRTC_MAX_IRQ_NUM 1 #define DPRTC_IRQ_INDEX 0 diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h index 450841cc6ca8..24b17d6e09af 100644 --- a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h @@ -1,16 +1,18 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2017-2020 NXP + * Copyright 2017-2021 NXP * */ #ifndef __FSL_DPSW_CMD_H #define __FSL_DPSW_CMD_H +#include "dpsw.h" + /* DPSW Version */ #define DPSW_VER_MAJOR 8 -#define DPSW_VER_MINOR 5 +#define DPSW_VER_MINOR 9 #define DPSW_CMD_BASE_VERSION 1 #define DPSW_CMD_VERSION_2 2 @@ -27,7 +29,7 @@ #define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002) #define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003) -#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004) +#define DPSW_CMDID_GET_ATTR DPSW_CMD_V2(0x004) #define DPSW_CMDID_RESET DPSW_CMD_ID(0x005) #define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012) @@ -45,18 +47,18 @@ #define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D) #define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E) +#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042) + #define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044) #define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046) -#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047) -#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048) #define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A) #define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C) #define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060) -#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061) +#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_V2(0x061) #define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062) #define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064) @@ -64,16 +66,24 @@ #define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066) #define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067) +#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082) +#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083) #define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084) #define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085) #define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086) #define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087) -#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088) #define DPSW_CMDID_FDB_DUMP DPSW_CMD_ID(0x08A) #define DPSW_CMDID_IF_GET_PORT_MAC_ADDR DPSW_CMD_ID(0x0A7) -#define DPSW_CMDID_IF_GET_PRIMARY_MAC_ADDR DPSW_CMD_ID(0x0A8) -#define DPSW_CMDID_IF_SET_PRIMARY_MAC_ADDR DPSW_CMD_ID(0x0A9) + +#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0) +#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1) +#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2) +#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3) +#define DPSW_CMDID_CTRL_IF_SET_QUEUE DPSW_CMD_ID(0x0A6) + +#define DPSW_CMDID_SET_EGRESS_FLOOD DPSW_CMD_ID(0x0AC) +#define DPSW_CMDID_IF_SET_LEARNING_MODE DPSW_CMD_ID(0x0AD) /* Macros for accessing command fields smaller than 1byte */ #define DPSW_MASK(field) \ @@ -169,6 +179,12 @@ struct dpsw_cmd_clear_irq_status { #define DPSW_COMPONENT_TYPE_SHIFT 0 #define DPSW_COMPONENT_TYPE_SIZE 4 +#define DPSW_FLOODING_CFG_SHIFT 0 +#define DPSW_FLOODING_CFG_SIZE 4 + +#define DPSW_BROADCAST_CFG_SHIFT 4 +#define DPSW_BROADCAST_CFG_SIZE 4 + struct dpsw_rsp_get_attr { /* cmd word 0 */ __le16 num_ifs; @@ -186,23 +202,15 @@ struct dpsw_rsp_get_attr { u8 max_meters_per_if; /* from LSB only the first 4 bits */ u8 component_type; - __le16 pad; + /* [0:3] - flooding configuration + * [4:7] - broadcast configuration + */ + u8 repl_cfg; + u8 pad; /* cmd word 3 */ __le64 options; }; -struct dpsw_cmd_if_set_flooding { - __le16 if_id; - /* from LSB: enable:1 */ - u8 enable; -}; - -struct dpsw_cmd_if_set_broadcast { - __le16 if_id; - /* from LSB: enable:1 */ - u8 enable; -}; - #define DPSW_VLAN_ID_SHIFT 0 #define DPSW_VLAN_ID_SIZE 12 #define DPSW_DEI_SHIFT 12 @@ -255,6 +263,28 @@ struct dpsw_cmd_if { __le16 if_id; }; +#define 
DPSW_ADMIT_UNTAGGED_SHIFT 0 +#define DPSW_ADMIT_UNTAGGED_SIZE 4 +#define DPSW_ENABLED_SHIFT 5 +#define DPSW_ENABLED_SIZE 1 +#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6 +#define DPSW_ACCEPT_ALL_VLAN_SIZE 1 + +struct dpsw_rsp_if_get_attr { + /* cmd word 0 */ + /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */ + u8 conf; + u8 pad1; + u8 num_tcs; + u8 pad2; + __le16 qdid; + /* cmd word 1 */ + __le32 options; + __le32 pad3; + /* cmd word 2 */ + __le32 rate; +}; + struct dpsw_cmd_if_set_max_frame_length { __le16 if_id; __le16 frame_length; @@ -295,13 +325,23 @@ struct dpsw_vlan_add { __le16 vlan_id; }; +struct dpsw_cmd_vlan_add_if { + /* cmd word 0 */ + __le16 options; + __le16 vlan_id; + __le16 fdb_id; + __le16 pad0; + /* cmd word 1-4 */ + __le64 if_id; +}; + struct dpsw_cmd_vlan_manage_if { /* cmd word 0 */ __le16 pad0; __le16 vlan_id; __le32 pad1; /* cmd word 1-4 */ - __le64 if_id[4]; + __le64 if_id; }; struct dpsw_cmd_vlan_remove { @@ -311,7 +351,7 @@ struct dpsw_cmd_vlan_remove { struct dpsw_cmd_fdb_add { __le32 pad; - __le16 fdb_aging_time; + __le16 fdb_ageing_time; __le16 num_fdb_entries; }; @@ -347,16 +387,7 @@ struct dpsw_cmd_fdb_multicast_op { u8 mac_addr[6]; __le16 pad2; /* cmd word 2-5 */ - __le64 if_id[4]; -}; - -#define DPSW_LEARNING_MODE_SHIFT 0 -#define DPSW_LEARNING_MODE_SIZE 4 - -struct dpsw_cmd_fdb_set_learning_mode { - __le16 fdb_id; - /* only the first 4 bits from LSB */ - u8 mode; + __le64 if_id; }; struct dpsw_cmd_fdb_dump { @@ -371,6 +402,36 @@ struct dpsw_rsp_fdb_dump { __le16 num_entries; }; +struct dpsw_rsp_ctrl_if_get_attr { + __le64 pad; + __le32 rx_fqid; + __le32 rx_err_fqid; + __le32 tx_err_conf_fqid; +}; + +#define DPSW_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) +struct dpsw_cmd_ctrl_if_set_pools { + u8 num_dpbp; + u8 backup_pool_mask; + __le16 pad; + __le32 dpbp_id[DPSW_MAX_DPBP]; + __le16 buffer_size[DPSW_MAX_DPBP]; +}; + +#define DPSW_DEST_TYPE_SHIFT 0 +#define DPSW_DEST_TYPE_SIZE 4 + +struct dpsw_cmd_ctrl_if_set_queue { + __le32 dest_id; + u8 dest_priority; + u8 pad; + /* from LSB: dest_type:4 */ + u8 dest_type; + u8 qtype; + __le64 user_ctx; + __le32 options; +}; + struct dpsw_rsp_get_api_version { __le16 version_major; __le16 version_minor; @@ -381,10 +442,20 @@ struct dpsw_rsp_if_get_mac_addr { u8 mac_addr[6]; }; -struct dpsw_cmd_if_set_mac_addr { - __le16 if_id; - u8 mac_addr[6]; +struct dpsw_cmd_set_egress_flood { + __le16 fdb_id; + u8 flood_type; + u8 pad[5]; + __le64 if_id; }; +#define DPSW_LEARNING_MODE_SHIFT 0 +#define DPSW_LEARNING_MODE_SIZE 4 + +struct dpsw_cmd_if_set_learning_mode { + __le16 if_id; + /* only the first 4 bits from LSB */ + u8 mode; +}; #pragma pack(pop) #endif /* __FSL_DPSW_CMD_H */ diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c index f8bfe779bd30..6c787d4b85f9 100644 --- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright 2017-2018 NXP + * Copyright 2017-2021 NXP * */ @@ -9,9 +9,7 @@ #include "dpsw.h" #include "dpsw-cmd.h" -static void build_if_id_bitmap(__le64 *bmap, - const u16 *id, - const u16 num_ifs) +static void build_if_id_bitmap(__le64 *bmap, const u16 *id, const u16 num_ifs) { int i; @@ -38,10 +36,7 @@ static void build_if_id_bitmap(__le64 *bmap, * * Return: '0' on Success; Error code otherwise. 
*/ -int dpsw_open(struct fsl_mc_io *mc_io, - u32 cmd_flags, - int dpsw_id, - u16 *token) +int dpsw_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpsw_id, u16 *token) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_open *cmd_params; @@ -76,9 +71,7 @@ int dpsw_open(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_close(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token) +int dpsw_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) { struct fsl_mc_command cmd = { 0 }; @@ -99,9 +92,7 @@ int dpsw_close(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token) +int dpsw_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) { struct fsl_mc_command cmd = { 0 }; @@ -122,9 +113,7 @@ int dpsw_enable(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_disable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token) +int dpsw_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) { struct fsl_mc_command cmd = { 0 }; @@ -145,9 +134,7 @@ int dpsw_disable(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_reset(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token) +int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) { struct fsl_mc_command cmd = { 0 }; @@ -175,11 +162,8 @@ int dpsw_reset(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u8 en) +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u8 en) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_set_irq_enable *cmd_params; @@ -212,11 +196,8 @@ int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 mask) +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u32 mask) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_set_irq_mask *cmd_params; @@ -245,11 +226,8 @@ int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_get_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 *status) +int dpsw_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u32 *status) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_get_irq_status *cmd_params; @@ -288,11 +266,8 @@ int dpsw_get_irq_status(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 status) +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u32 status) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_clear_irq_status *cmd_params; @@ -318,9 +293,7 @@ int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. 
*/ -int dpsw_get_attributes(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, +int dpsw_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, struct dpsw_attr *attr) { struct fsl_mc_command cmd = { 0 }; @@ -351,9 +324,9 @@ int dpsw_get_attributes(struct fsl_mc_io *mc_io, attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups); attr->max_meters_per_if = rsp_params->max_meters_per_if; attr->options = le64_to_cpu(rsp_params->options); - attr->component_type = dpsw_get_field(rsp_params->component_type, - COMPONENT_TYPE); - + attr->component_type = dpsw_get_field(rsp_params->component_type, COMPONENT_TYPE); + attr->flooding_cfg = dpsw_get_field(rsp_params->repl_cfg, FLOODING_CFG); + attr->broadcast_cfg = dpsw_get_field(rsp_params->repl_cfg, BROADCAST_CFG); return 0; } @@ -367,10 +340,7 @@ int dpsw_get_attributes(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, struct dpsw_link_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; @@ -397,13 +367,10 @@ int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, * @if_id: Interface id * @state: Link state 1 - linkup, 0 - link down or disconnected * - * @Return '0' on Success; Error code otherwise. + * Return: '0' on Success; Error code otherwise. */ -int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - struct dpsw_link_state *state) +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, struct dpsw_link_state *state) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_if_get_link_state *cmd_params; @@ -432,68 +399,6 @@ int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, } /** - * dpsw_if_set_flooding() - Enable Disable flooding for particular interface - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPSW object - * @if_id: Interface Identifier - * @en: 1 - enable, 0 - disable - * - * Return: Completion status. '0' on Success; Error code otherwise. - */ -int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - u8 en) -{ - struct fsl_mc_command cmd = { 0 }; - struct dpsw_cmd_if_set_flooding *cmd_params; - - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING, - cmd_flags, - token); - cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params; - cmd_params->if_id = cpu_to_le16(if_id); - dpsw_set_field(cmd_params->enable, ENABLE, en); - - /* send command to mc*/ - return mc_send_command(mc_io, &cmd); -} - -/** - * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPSW object - * @if_id: Interface Identifier - * @en: 1 - enable, 0 - disable - * - * Return: Completion status. '0' on Success; Error code otherwise. 
- */ -int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - u8 en) -{ - struct fsl_mc_command cmd = { 0 }; - struct dpsw_cmd_if_set_broadcast *cmd_params; - - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST, - cmd_flags, - token); - cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params; - cmd_params->if_id = cpu_to_le16(if_id); - dpsw_set_field(cmd_params->enable, ENABLE, en); - - /* send command to mc*/ - return mc_send_command(mc_io, &cmd); -} - -/** * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI) * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' @@ -503,10 +408,7 @@ int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_set_tci(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, +int dpsw_if_set_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, const struct dpsw_tci_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; @@ -538,10 +440,7 @@ int dpsw_if_set_tci(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_get_tci(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, +int dpsw_if_get_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, struct dpsw_tci_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; @@ -583,10 +482,7 @@ int dpsw_if_get_tci(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_set_stp(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, +int dpsw_if_set_stp(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, const struct dpsw_stp_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; @@ -616,12 +512,8 @@ int dpsw_if_set_stp(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_get_counter(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - enum dpsw_counter type, - u64 *counter) +int dpsw_if_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, enum dpsw_counter type, u64 *counter) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_if_get_counter *cmd_params; @@ -657,10 +549,7 @@ int dpsw_if_get_counter(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id) +int dpsw_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_if *cmd_params; @@ -685,10 +574,7 @@ int dpsw_if_enable(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_disable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id) +int dpsw_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_if *cmd_params; @@ -705,6 +591,47 @@ int dpsw_if_disable(struct fsl_mc_io *mc_io, } /** + * dpsw_if_get_attributes() - Function obtains attributes of interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @attr: Returned interface attributes + * + * Return: Completion status. '0' on Success; Error code otherwise. 
+ */ +int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, struct dpsw_if_attr *attr) +{ + struct dpsw_rsp_if_get_attr *rsp_params; + struct fsl_mc_command cmd = { 0 }; + struct dpsw_cmd_if *cmd_params; + int err; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR, cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params; + attr->num_tcs = rsp_params->num_tcs; + attr->rate = le32_to_cpu(rsp_params->rate); + attr->options = le32_to_cpu(rsp_params->options); + attr->qdid = le16_to_cpu(rsp_params->qdid); + attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED); + attr->accept_all_vlan = dpsw_get_field(rsp_params->conf, + ACCEPT_ALL_VLAN); + attr->admit_untagged = dpsw_get_field(rsp_params->conf, + ADMIT_UNTAGGED); + + return 0; +} + +/** * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length. * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' @@ -714,11 +641,8 @@ int dpsw_if_disable(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - u16 frame_length) +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, u16 frame_length) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_if_set_max_frame_length *cmd_params; @@ -752,11 +676,8 @@ int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_vlan_add(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_cfg *cfg) +int dpsw_vlan_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_vlan_add *cmd_params; @@ -788,22 +709,21 @@ int dpsw_vlan_add(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg) +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg) { + struct dpsw_cmd_vlan_add_if *cmd_params; struct fsl_mc_command cmd = { 0 }; - struct dpsw_cmd_vlan_manage_if *cmd_params; /* prepare command */ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF, cmd_flags, token); - cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params; + cmd_params = (struct dpsw_cmd_vlan_add_if *)cmd.params; cmd_params->vlan_id = cpu_to_le16(vlan_id); - build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); + cmd_params->options = cpu_to_le16(cfg->options); + cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -826,11 +746,8 @@ int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. 
*/ -int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg) +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_vlan_manage_if *cmd_params; @@ -841,7 +758,7 @@ int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, token); cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params; cmd_params->vlan_id = cpu_to_le16(vlan_id); - build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -860,11 +777,8 @@ int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg) +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_vlan_manage_if *cmd_params; @@ -875,7 +789,7 @@ int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, token); cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params; cmd_params->vlan_id = cpu_to_le16(vlan_id); - build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -896,11 +810,8 @@ int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg) +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_vlan_manage_if *cmd_params; @@ -911,7 +822,7 @@ int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, token); cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params; cmd_params->vlan_id = cpu_to_le16(vlan_id); - build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -926,9 +837,7 @@ int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_vlan_remove(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, +int dpsw_vlan_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 vlan_id) { struct fsl_mc_command cmd = { 0 }; @@ -946,6 +855,66 @@ int dpsw_vlan_remove(struct fsl_mc_io *mc_io, } /** + * dpsw_fdb_add() - Add an FDB to the switch and return a handle that can + * be used to reference the FDB table + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @fdb_id: Returned Forwarding Database Identifier + * @cfg: FDB Configuration + * + * Return: Completion status. '0' on Success; Error code otherwise.
+ */ +int dpsw_fdb_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *fdb_id, + const struct dpsw_fdb_cfg *cfg) +{ + struct dpsw_cmd_fdb_add *cmd_params; + struct dpsw_rsp_fdb_add *rsp_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params; + cmd_params->fdb_ageing_time = cpu_to_le16(cfg->fdb_ageing_time); + cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params; + *fdb_id = le16_to_cpu(rsp_params->fdb_id); + + return 0; +} + +/** + * dpsw_fdb_remove() - Remove FDB from switch + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @fdb_id: Forwarding Database Identifier + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_fdb_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id) +{ + struct dpsw_cmd_fdb_remove *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params; + cmd_params->fdb_id = cpu_to_le16(fdb_id); + + return mc_send_command(mc_io, &cmd); +} + +/** * dpsw_fdb_add_unicast() - Function adds an unicast entry into MAC lookup table * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' @@ -955,11 +924,8 @@ int dpsw_vlan_remove(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_unicast_cfg *cfg) +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_fdb_unicast_op *cmd_params; @@ -998,13 +964,8 @@ int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, * The struct fdb_dump_entry array must be parsed until the end of memory * area or until an entry with mac_addr set to zero is found. */ -int dpsw_fdb_dump(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - u64 iova_addr, - u32 iova_size, - u16 *num_entries) +int dpsw_fdb_dump(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id, + u64 iova_addr, u32 iova_size, u16 *num_entries) { struct dpsw_cmd_fdb_dump *cmd_params; struct dpsw_rsp_fdb_dump *rsp_params; @@ -1041,11 +1002,8 @@ int dpsw_fdb_dump(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_unicast_cfg *cfg) +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_fdb_unicast_op *cmd_params; @@ -1083,11 +1041,8 @@ int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. 
*/ -int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_multicast_cfg *cfg) +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_fdb_multicast_op *cmd_params; @@ -1101,7 +1056,7 @@ int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, cmd_params->fdb_id = cpu_to_le16(fdb_id); cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs); dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type); - build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); for (i = 0; i < 6; i++) cmd_params->mac_addr[i] = cfg->mac_addr[5 - i]; @@ -1125,11 +1080,8 @@ int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, * * Return: Completion status. '0' on Success; Error code otherwise. */ -int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_multicast_cfg *cfg) +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpsw_cmd_fdb_multicast_op *cmd_params; @@ -1143,7 +1095,7 @@ int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, cmd_params->fdb_id = cpu_to_le16(fdb_id); cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs); dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type); - build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs); + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); for (i = 0; i < 6; i++) cmd_params->mac_addr[i] = cfg->mac_addr[5 - i]; @@ -1152,33 +1104,97 @@ int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, } /** - * dpsw_fdb_set_learning_mode() - Define FDB learning mode + * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSW object - * @fdb_id: Forwarding Database Identifier - * @mode: Learning mode + * @attr: Returned control interface attributes * - * Return: Completion status. '0' on Success; Error code otherwise. + * Return: '0' on Success; Error code otherwise. */ -int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - enum dpsw_fdb_learning_mode mode) +int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 token, struct dpsw_ctrl_if_attr *attr) { + struct dpsw_rsp_ctrl_if_get_attr *rsp_params; struct fsl_mc_command cmd = { 0 }; - struct dpsw_cmd_fdb_set_learning_mode *cmd_params; + int err; - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE, + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR, + cmd_flags, token); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params; + attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid); + attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid); + attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid); + + return 0; +} + +/** + * dpsw_ctrl_if_set_pools() - Set control interface buffer pools + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @cfg: Buffer pools configuration + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + const struct dpsw_ctrl_if_pools_cfg *cfg) +{ + struct dpsw_cmd_ctrl_if_set_pools *cmd_params; + struct fsl_mc_command cmd = { 0 }; + int i; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS, + cmd_flags, token); + cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params; + cmd_params->num_dpbp = cfg->num_dpbp; + for (i = 0; i < DPSW_MAX_DPBP; i++) { + cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id); + cmd_params->buffer_size[i] = + cpu_to_le16(cfg->pools[i].buffer_size); + cmd_params->backup_pool_mask |= + DPSW_BACKUP_POOL(cfg->pools[i].backup_pool, i); + } + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_ctrl_if_set_queue() - Set Rx queue configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of dpsw object + * @qtype: dpsw_queue_type of the targeted queue + * @cfg: Rx queue configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_ctrl_if_set_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + enum dpsw_queue_type qtype, + const struct dpsw_ctrl_if_queue_cfg *cfg) +{ + struct dpsw_cmd_ctrl_if_set_queue *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_QUEUE, cmd_flags, token); + cmd_params = (struct dpsw_cmd_ctrl_if_set_queue *)cmd.params; + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); + cmd_params->dest_priority = cfg->dest_cfg.priority; + cmd_params->qtype = qtype; + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); + cmd_params->options = cpu_to_le32(cfg->options); + dpsw_set_field(cmd_params->dest_type, + DEST_TYPE, + cfg->dest_cfg.dest_type); - /* send command to mc*/ return mc_send_command(mc_io, &cmd); } @@ -1191,10 +1207,8 @@ int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, * * Return: '0' on Success; Error code otherwise. */ -int dpsw_get_api_version(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 *major_ver, - u16 *minor_ver) +int dpsw_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 *major_ver, u16 *minor_ver) { struct fsl_mc_command cmd = { 0 }; struct dpsw_rsp_get_api_version *rsp_params; @@ -1216,7 +1230,7 @@ int dpsw_get_api_version(struct fsl_mc_io *mc_io, } /** - * dpsw_if_get_port_mac_addr() + * dpsw_if_get_port_mac_addr() - Retrieve the MAC address associated with the physical port * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSW object @@ -1254,69 +1268,89 @@ int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, } /** - * dpsw_if_get_primary_mac_addr() + * dpsw_ctrl_if_enable() - Enable control interface * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSW object - * @if_id: Interface Identifier - * @mac_addr: MAC address of the physical port, if any, otherwise 0 * - * Return: Completion status. '0' on Success; Error code otherwise. + * Return: '0' on Success; Error code otherwise.
*/ -int dpsw_if_get_primary_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, - u16 token, u16 if_id, u8 mac_addr[6]) +int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) { - struct dpsw_rsp_if_get_mac_addr *rsp_params; struct fsl_mc_command cmd = { 0 }; - struct dpsw_cmd_if *cmd_params; - int err, i; - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_PRIMARY_MAC_ADDR, + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE, cmd_flags, + token); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_ctrl_if_disable() - Function disables control interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE, cmd_flags, token); - cmd_params = (struct dpsw_cmd_if *)cmd.params; - cmd_params->if_id = cpu_to_le16(if_id); - /* send command to mc*/ - err = mc_send_command(mc_io, &cmd); - if (err) - return err; + return mc_send_command(mc_io, &cmd); +} - /* retrieve response parameters */ - rsp_params = (struct dpsw_rsp_if_get_mac_addr *)cmd.params; - for (i = 0; i < 6; i++) - mac_addr[5 - i] = rsp_params->mac_addr[i]; +/** + * dpsw_set_egress_flood() - Set egress parameters associated with an FDB ID + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @cfg: Egress flooding configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_set_egress_flood(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + const struct dpsw_egress_flood_cfg *cfg) +{ + struct dpsw_cmd_set_egress_flood *cmd_params; + struct fsl_mc_command cmd = { 0 }; - return 0; + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_EGRESS_FLOOD, cmd_flags, token); + cmd_params = (struct dpsw_cmd_set_egress_flood *)cmd.params; + cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id); + cmd_params->flood_type = cfg->flood_type; + build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs); + + return mc_send_command(mc_io, &cmd); } /** - * dpsw_if_set_primary_mac_addr() + * dpsw_if_set_learning_mode() - Configure the learning mode on an interface. + * If this API is used, it will take precedence over the FDB configuration. * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSW object - * @if_id: Interface Identifier - * @mac_addr: MAC address of the physical port, if any, otherwise 0 + * @if_id: Interface ID + * @mode: Learning mode * * Return: Completion status. '0' on Success; Error code otherwise.
*/ -int dpsw_if_set_primary_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, - u16 token, u16 if_id, u8 mac_addr[6]) +int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, enum dpsw_learning_mode mode) { - struct dpsw_cmd_if_set_mac_addr *cmd_params; + struct dpsw_cmd_if_set_learning_mode *cmd_params; struct fsl_mc_command cmd = { 0 }; - int i; - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_PRIMARY_MAC_ADDR, + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LEARNING_MODE, cmd_flags, token); - cmd_params = (struct dpsw_cmd_if_set_mac_addr *)cmd.params; + cmd_params = (struct dpsw_cmd_if_set_learning_mode *)cmd.params; cmd_params->if_id = cpu_to_le16(if_id); - for (i = 0; i < 6; i++) - cmd_params->mac_addr[i] = mac_addr[5 - i]; + dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode); - /* send command to mc*/ return mc_send_command(mc_io, &cmd); } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h new file mode 100644 index 000000000000..96837b10cc94 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h @@ -0,0 +1,631 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2014-2016 Freescale Semiconductor Inc. + * Copyright 2017-2021 NXP + * + */ + +#ifndef __FSL_DPSW_H +#define __FSL_DPSW_H + +/* Data Path L2-Switch API + * Contains API for handling DPSW topology and functionality + */ + +struct fsl_mc_io; + +/* DPSW general definitions */ + +#define DPSW_MAX_PRIORITIES 8 + +#define DPSW_MAX_IF 64 + +int dpsw_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpsw_id, u16 *token); + +int dpsw_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +/* DPSW options */ + +/** + * DPSW_OPT_FLOODING_DIS - Flooding was disabled at device create + */ +#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL +/** + * DPSW_OPT_MULTICAST_DIS - Multicast was disabled at device create + */ +#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL +/** + * DPSW_OPT_CTRL_IF_DIS - Control interface support is disabled + */ +#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL + +/** + * enum dpsw_component_type - component type of a bridge + * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an + * enterprise VLAN bridge or of a Provider Bridge used + * to process C-tagged frames + * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a + * Provider Bridge + * + */ +enum dpsw_component_type { + DPSW_COMPONENT_TYPE_C_VLAN = 0, + DPSW_COMPONENT_TYPE_S_VLAN +}; + +/** + * enum dpsw_flooding_cfg - flooding configuration requested + * @DPSW_FLOODING_PER_VLAN: Flooding replicators are allocated per VLAN and + * interfaces present in each of them can be configured using + * dpsw_vlan_add_if_flooding()/dpsw_vlan_remove_if_flooding(). + * This is the default configuration. + * + * @DPSW_FLOODING_PER_FDB: Flooding replicators are allocated per FDB and + * interfaces present in each of them can be configured using + * dpsw_set_egress_flood(). + */ +enum dpsw_flooding_cfg { + DPSW_FLOODING_PER_VLAN = 0, + DPSW_FLOODING_PER_FDB, +}; + +/** + * enum dpsw_broadcast_cfg - broadcast configuration requested + * @DPSW_BROADCAST_PER_OBJECT: There is only one broadcast replicator per DPSW + * object. This is the default configuration. + * @DPSW_BROADCAST_PER_FDB: Broadcast replicators are allocated per FDB and + * interfaces present in each of them can be configured using + * dpsw_set_egress_flood(). 
+ */ +enum dpsw_broadcast_cfg { + DPSW_BROADCAST_PER_OBJECT = 0, + DPSW_BROADCAST_PER_FDB, +}; + +int dpsw_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +int dpsw_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +/* DPSW IRQ Index and Events */ + +#define DPSW_IRQ_INDEX_IF 0x0000 +#define DPSW_IRQ_INDEX_L2SW 0x0001 + +/** + * DPSW_IRQ_EVENT_LINK_CHANGED - Indicates that the link state changed + */ +#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001 + +/** + * struct dpsw_irq_cfg - IRQ configuration + * @addr: Address that must be written to signal a message-based interrupt + * @val: Value to write into irq_addr address + * @irq_num: A user defined number associated with this IRQ + */ +struct dpsw_irq_cfg { + u64 addr; + u32 val; + int irq_num; +}; + +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u8 en); + +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u32 mask); + +int dpsw_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u32 *status); + +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 irq_index, u32 status); + +/** + * struct dpsw_attr - Structure representing DPSW attributes + * @id: DPSW object ID + * @options: Enable/Disable DPSW features + * @max_vlans: Maximum Number of VLANs + * @max_meters_per_if: Number of meters per interface + * @max_fdbs: Maximum Number of FDBs + * @max_fdb_entries: Number of FDB entries for default FDB table; + * 0 - indicates default 1024 entries. + * @fdb_aging_time: Default FDB aging time for default FDB table; + * 0 - indicates default 300 seconds + * @max_fdb_mc_groups: Number of multicast groups in each FDB table; + * 0 - indicates default 32 + * @mem_size: DPSW frame storage memory size + * @num_ifs: Number of interfaces + * @num_vlans: Current number of VLANs + * @num_fdbs: Current number of FDBs + * @component_type: Component type of this bridge + * @flooding_cfg: Flooding configuration (PER_VLAN - default, PER_FDB) + * @broadcast_cfg: Broadcast configuration (PER_OBJECT - default, PER_FDB) + */ +struct dpsw_attr { + int id; + u64 options; + u16 max_vlans; + u8 max_meters_per_if; + u8 max_fdbs; + u16 max_fdb_entries; + u16 fdb_aging_time; + u16 max_fdb_mc_groups; + u16 num_ifs; + u16 mem_size; + u16 num_vlans; + u8 num_fdbs; + enum dpsw_component_type component_type; + enum dpsw_flooding_cfg flooding_cfg; + enum dpsw_broadcast_cfg broadcast_cfg; +}; + +int dpsw_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + struct dpsw_attr *attr); + +/** + * struct dpsw_ctrl_if_attr - Control interface attributes + * @rx_fqid: Receive FQID + * @rx_err_fqid: Receive error FQID + * @tx_err_conf_fqid: Transmit error and confirmation FQID + */ +struct dpsw_ctrl_if_attr { + u32 rx_fqid; + u32 rx_err_fqid; + u32 tx_err_conf_fqid; +}; + +int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 token, struct dpsw_ctrl_if_attr *attr); + +enum dpsw_queue_type { + DPSW_QUEUE_RX, + DPSW_QUEUE_TX_ERR_CONF, + DPSW_QUEUE_RX_ERR, +}; + +#define DPSW_MAX_DPBP 8 + +/** + * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration + * @num_dpbp: Number of DPBPs + * @pools: Array of buffer pools parameters; The number of valid entries + * must match 'num_dpbp' value + * @pools.dpbp_id: DPBP object ID + * @pools.buffer_size: Buffer size + * @pools.backup_pool: Backup pool + */ +struct 
dpsw_ctrl_if_pools_cfg { + u8 num_dpbp; + struct { + int dpbp_id; + u16 buffer_size; + int backup_pool; + } pools[DPSW_MAX_DPBP]; +}; + +int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + const struct dpsw_ctrl_if_pools_cfg *cfg); + +#define DPSW_CTRL_IF_QUEUE_OPT_USER_CTX 0x00000001 +#define DPSW_CTRL_IF_QUEUE_OPT_DEST 0x00000002 + +enum dpsw_ctrl_if_dest { + DPSW_CTRL_IF_DEST_NONE = 0, + DPSW_CTRL_IF_DEST_DPIO = 1, +}; + +struct dpsw_ctrl_if_dest_cfg { + enum dpsw_ctrl_if_dest dest_type; + int dest_id; + u8 priority; +}; + +struct dpsw_ctrl_if_queue_cfg { + u32 options; + u64 user_ctx; + struct dpsw_ctrl_if_dest_cfg dest_cfg; +}; + +int dpsw_ctrl_if_set_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + enum dpsw_queue_type qtype, + const struct dpsw_ctrl_if_queue_cfg *cfg); + +int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +/** + * enum dpsw_action - Action selection for special/control frames + * @DPSW_ACTION_DROP: Drop frame + * @DPSW_ACTION_REDIRECT: Redirect frame to control port + */ +enum dpsw_action { + DPSW_ACTION_DROP = 0, + DPSW_ACTION_REDIRECT = 1 +}; + +#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL +#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL +#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL +#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL + +/** + * struct dpsw_link_cfg - Structure representing DPSW link configuration + * @rate: Rate + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values + */ +struct dpsw_link_cfg { + u32 rate; + u64 options; +}; + +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, + struct dpsw_link_cfg *cfg); + +/** + * struct dpsw_link_state - Structure representing DPSW link state + * @rate: Rate + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values + * @up: Link state: 0 - down or disconnected, 1 - up + */ +struct dpsw_link_state { + u32 rate; + u64 options; + u8 up; +}; + +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, struct dpsw_link_state *state); + +/** + * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration + * @pcp: Priority Code Point (PCP): a 3-bit field which refers + * to the IEEE 802.1p priority + * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used + * separately or in conjunction with PCP to indicate frames + * eligible to be dropped in the presence of congestion + * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN + * to which the frame belongs. 
The hexadecimal values + * of 0x000 and 0xFFF are reserved; + * all other values may be used as VLAN identifiers, + * allowing up to 4,094 VLANs + */ +struct dpsw_tci_cfg { + u8 pcp; + u8 dei; + u16 vlan_id; +}; + +int dpsw_if_set_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, + const struct dpsw_tci_cfg *cfg); + +int dpsw_if_get_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, + struct dpsw_tci_cfg *cfg); + +/** + * enum dpsw_stp_state - Spanning Tree Protocol (STP) states + * @DPSW_STP_STATE_DISABLED: Disabled state + * @DPSW_STP_STATE_LISTENING: Listening state + * @DPSW_STP_STATE_LEARNING: Learning state + * @DPSW_STP_STATE_FORWARDING: Forwarding state + * @DPSW_STP_STATE_BLOCKING: Blocking state + * + */ +enum dpsw_stp_state { + DPSW_STP_STATE_DISABLED = 0, + DPSW_STP_STATE_LISTENING = 1, + DPSW_STP_STATE_LEARNING = 2, + DPSW_STP_STATE_FORWARDING = 3, + DPSW_STP_STATE_BLOCKING = 0 +}; + +/** + * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration + * @vlan_id: VLAN ID + * @state: STP state + */ +struct dpsw_stp_cfg { + u16 vlan_id; + enum dpsw_stp_state state; +}; + +int dpsw_if_set_stp(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id, + const struct dpsw_stp_cfg *cfg); + +/** + * enum dpsw_accepted_frames - Types of frames to accept + * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and + * priority tagged frames + * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or + * priority-tagged frames received on this interface. + * + */ +enum dpsw_accepted_frames { + DPSW_ADMIT_ALL = 1, + DPSW_ADMIT_ONLY_VLAN_TAGGED = 3 +}; + +/** + * enum dpsw_counter - Counter types + * @DPSW_CNT_ING_FRAME: Counts ingress frames + * @DPSW_CNT_ING_BYTE: Counts ingress bytes + * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames + * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames + * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames + * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes + * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames + * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes + * @DPSW_CNT_EGR_FRAME: Counts egress frames + * @DPSW_CNT_EGR_BYTE: Counts egress bytes + * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames + * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames + * @DPSW_CNT_ING_NO_BUFF_DISCARD: Counts ingress no buffer discarded frames + */ +enum dpsw_counter { + DPSW_CNT_ING_FRAME = 0x0, + DPSW_CNT_ING_BYTE = 0x1, + DPSW_CNT_ING_FLTR_FRAME = 0x2, + DPSW_CNT_ING_FRAME_DISCARD = 0x3, + DPSW_CNT_ING_MCAST_FRAME = 0x4, + DPSW_CNT_ING_MCAST_BYTE = 0x5, + DPSW_CNT_ING_BCAST_FRAME = 0x6, + DPSW_CNT_ING_BCAST_BYTES = 0x7, + DPSW_CNT_EGR_FRAME = 0x8, + DPSW_CNT_EGR_BYTE = 0x9, + DPSW_CNT_EGR_FRAME_DISCARD = 0xa, + DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb, + DPSW_CNT_ING_NO_BUFF_DISCARD = 0xc, +}; + +int dpsw_if_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, enum dpsw_counter type, u64 *counter); + +int dpsw_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id); + +int dpsw_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id); + +/** + * struct dpsw_if_attr - Structure representing DPSW interface attributes + * @num_tcs: Number of traffic classes + * @rate: Transmit rate in bits per second + * @options: Interface configuration options (bitmap) + * @enabled: Indicates if interface is enabled + * @accept_all_vlan: The device discards/accepts incoming 
frames + * for VLANs that do not include this interface + * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device + * discards untagged frames or priority-tagged frames received on + * this interface; + * When set to 'DPSW_ADMIT_ALL', untagged frames or priority- + * tagged frames received on this interface are accepted + * @qdid: Control frames transmit qdid + */ +struct dpsw_if_attr { + u8 num_tcs; + u32 rate; + u32 options; + int enabled; + int accept_all_vlan; + enum dpsw_accepted_frames admit_untagged; + u16 qdid; +}; + +int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, struct dpsw_if_attr *attr); + +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, u16 frame_length); + +/** + * struct dpsw_vlan_cfg - VLAN Configuration + * @fdb_id: Forwarding Database (FDB) ID + */ +struct dpsw_vlan_cfg { + u16 fdb_id; +}; + +int dpsw_vlan_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_cfg *cfg); + +#define DPSW_VLAN_ADD_IF_OPT_FDB_ID 0x0001 + +/** + * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces + * @num_ifs: The number of interfaces that are assigned to the egress + * list for this VLAN + * @if_id: The set of interfaces that are + * assigned to the egress list for this VLAN + * @options: Options map for this command (DPSW_VLAN_ADD_IF_OPT_FDB_ID) + * @fdb_id: FDB ID to be used by this VLAN on these specific interfaces + * (taken into account only if the DPSW_VLAN_ADD_IF_OPT_FDB_ID is + * specified in the options field) + */ +struct dpsw_vlan_if_cfg { + u16 num_ifs; + u16 options; + u16 if_id[DPSW_MAX_IF]; + u16 fdb_id; +}; + +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 vlan_id); + +/** + * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic + * @DPSW_FDB_ENTRY_STATIC: Static entry + * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry + */ +enum dpsw_fdb_entry_type { + DPSW_FDB_ENTRY_STATIC = 0, + DPSW_FDB_ENTRY_DINAMIC = 1 +}; + +/** + * struct dpsw_fdb_unicast_cfg - Unicast entry configuration + * @type: Select static or dynamic entry + * @mac_addr: MAC address + * @if_egress: Egress interface ID + */ +struct dpsw_fdb_unicast_cfg { + enum dpsw_fdb_entry_type type; + u8 mac_addr[6]; + u16 if_egress; +}; + +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg); + +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg); + +#define DPSW_FDB_ENTRY_TYPE_DYNAMIC BIT(0) +#define DPSW_FDB_ENTRY_TYPE_UNICAST BIT(1) + +/** + * struct fdb_dump_entry - fdb snapshot entry + * @mac_addr: MAC address + * @type: bit0 - DYNAMIC(1)/STATIC(0), bit1 - UNICAST(1)/MULTICAST(0) + * @if_info: unicast - egress interface, multicast - number of egress interfaces + * @if_mask: multicast - egress interface mask + */ +struct fdb_dump_entry { + u8 mac_addr[6]; + u8 type; + u8 
if_info; + u8 if_mask[8]; +}; + +int dpsw_fdb_dump(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id, + u64 iova_addr, u32 iova_size, u16 *num_entries); + +/** + * struct dpsw_fdb_multicast_cfg - Multicast entry configuration + * @type: Select static or dynamic entry + * @mac_addr: MAC address + * @num_ifs: Number of external and internal interfaces + * @if_id: Egress interface IDs + */ +struct dpsw_fdb_multicast_cfg { + enum dpsw_fdb_entry_type type; + u8 mac_addr[6]; + u16 num_ifs; + u16 if_id[DPSW_MAX_IF]; +}; + +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg); + +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg); + +/** + * enum dpsw_learning_mode - Auto-learning modes + * @DPSW_LEARNING_MODE_DIS: Disable Auto-learning + * @DPSW_LEARNING_MODE_HW: Enable HW auto-learning + * @DPSW_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU + * @DPSW_LEARNING_MODE_SECURE: Enable secure learning by CPU + * + * NON-SECURE LEARNING + * SMAC found DMAC found CTLU Action + * v v Forward frame to + * 1. DMAC destination + * - v Forward frame to + * 1. DMAC destination + * 2. Control interface + * v - Forward frame to + * 1. Flooding list of interfaces + * - - Forward frame to + * 1. Flooding list of interfaces + * 2. Control interface + * SECURE LEARNING + * SMAC found DMAC found CTLU Action + * v v Forward frame to + * 1. DMAC destination + * - v Forward frame to + * 1. Control interface + * v - Forward frame to + * 1. Flooding list of interfaces + * - - Forward frame to + * 1. Control interface + */ +enum dpsw_learning_mode { + DPSW_LEARNING_MODE_DIS = 0, + DPSW_LEARNING_MODE_HW = 1, + DPSW_LEARNING_MODE_NON_SECURE = 2, + DPSW_LEARNING_MODE_SECURE = 3 +}; + +/** + * struct dpsw_fdb_attr - FDB Attributes + * @max_fdb_entries: Number of FDB entries + * @fdb_ageing_time: Ageing time in seconds + * @learning_mode: Learning mode + * @num_fdb_mc_groups: Current number of multicast groups + * @max_fdb_mc_groups: Maximum number of multicast groups + */ +struct dpsw_fdb_attr { + u16 max_fdb_entries; + u16 fdb_ageing_time; + enum dpsw_learning_mode learning_mode; + u16 num_fdb_mc_groups; + u16 max_fdb_mc_groups; +}; + +int dpsw_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 *major_ver, u16 *minor_ver); + +int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, u8 mac_addr[6]); + +/** + * struct dpsw_fdb_cfg - FDB Configuration + * @num_fdb_entries: Number of FDB entries + * @fdb_ageing_time: Ageing time in seconds + */ +struct dpsw_fdb_cfg { + u16 num_fdb_entries; + u16 fdb_ageing_time; +}; + +int dpsw_fdb_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *fdb_id, + const struct dpsw_fdb_cfg *cfg); + +int dpsw_fdb_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id); + +/** + * enum dpsw_flood_type - Define the flood type of a DPSW object + * @DPSW_BROADCAST: Broadcast flooding + * @DPSW_FLOODING: Unknown-destination flooding + */ +enum dpsw_flood_type { + DPSW_BROADCAST = 0, + DPSW_FLOODING, +}; + +struct dpsw_egress_flood_cfg { + u16 fdb_id; + enum dpsw_flood_type flood_type; + u16 num_ifs; + u16 if_id[DPSW_MAX_IF]; +}; + +int dpsw_set_egress_flood(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + const struct dpsw_egress_flood_cfg *cfg); + +int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, enum 
dpsw_learning_mode mode); + +#endif /* __FSL_DPSW_H */ diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 09471329f3a3..5a54976e6a28 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -5,6 +5,7 @@ #include <linux/tcp.h> #include <linux/udp.h> #include <linux/vmalloc.h> +#include <net/pkt_sched.h> /* ENETC overhead: optional extension BD + 1 BD gap */ #define ENETC_TXBDS_NEEDED(val) ((val) + 2) @@ -12,44 +13,6 @@ #define ENETC_MAX_SKB_FRAGS 13 #define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1) -static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, - int active_offloads); - -netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct enetc_bdr *tx_ring; - int count; - - tx_ring = priv->tx_ring[skb->queue_mapping]; - - if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) - if (unlikely(skb_linearize(skb))) - goto drop_packet_err; - - count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ - if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { - netif_stop_subqueue(ndev, tx_ring->index); - return NETDEV_TX_BUSY; - } - - enetc_lock_mdio(); - count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads); - enetc_unlock_mdio(); - - if (unlikely(!count)) - goto drop_packet_err; - - if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) - netif_stop_subqueue(ndev, tx_ring->index); - - return NETDEV_TX_OK; - -drop_packet_err: - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; -} - static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring, struct enetc_tx_swbd *tx_swbd) { @@ -220,6 +183,41 @@ dma_err: return 0; } +netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_bdr *tx_ring; + int count; + + tx_ring = priv->tx_ring[skb->queue_mapping]; + + if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) + if (unlikely(skb_linearize(skb))) + goto drop_packet_err; + + count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } + + enetc_lock_mdio(); + count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads); + enetc_unlock_mdio(); + + if (unlikely(!count)) + goto drop_packet_err; + + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) + netif_stop_subqueue(ndev, tx_ring->index); + + return NETDEV_TX_OK; + +drop_packet_err: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + static irqreturn_t enetc_msix(int irq, void *data) { struct enetc_int_vector *v = data; @@ -241,10 +239,6 @@ static irqreturn_t enetc_msix(int irq, void *data) return IRQ_HANDLED; } -static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget); -static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, - struct napi_struct *napi, int work_limit); - static void enetc_rx_dim_work(struct work_struct *w) { struct dim *dim = container_of(w, struct dim, work); @@ -273,50 +267,6 @@ static void enetc_rx_net_dim(struct enetc_int_vector *v) net_dim(&v->rx_dim, dim_sample); } -static int enetc_poll(struct napi_struct *napi, int budget) -{ - struct enetc_int_vector - *v = container_of(napi, struct enetc_int_vector, napi); - bool complete = true; - int work_done; - int i; - - enetc_lock_mdio(); - - for (i = 0; i < v->count_tx_rings; 
i++) - if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) - complete = false; - - work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget); - if (work_done == budget) - complete = false; - if (work_done) - v->rx_napi_work = true; - - if (!complete) { - enetc_unlock_mdio(); - return budget; - } - - napi_complete_done(napi, work_done); - - if (likely(v->rx_dim_en)) - enetc_rx_net_dim(v); - - v->rx_napi_work = false; - - /* enable interrupts */ - enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); - - for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) - enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), - ENETC_TBIER_TXTIE); - - enetc_unlock_mdio(); - - return work_done; -} - static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) { int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; @@ -344,12 +294,7 @@ static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp) if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(tstamp); - /* Ensure skb_mstamp_ns, which might have been populated with - * the txtime, is not mistaken for a software timestamp, - * because this will prevent the dispatch of our hardware - * timestamp to the socket. - */ - skb->tstamp = ktime_set(0, 0); + skb_txtime_consumed(skb); skb_tstamp_tx(skb, &shhwtstamps); } } @@ -483,18 +428,16 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt) /* clear 'R" as well */ rxbd->r.lstatus = 0; - rxbd = enetc_rxbd_next(rx_ring, rxbd, i); - rx_swbd++; - i++; - if (unlikely(i == rx_ring->bd_count)) { - i = 0; - rx_swbd = rx_ring->rx_swbd; - } + enetc_rxbd_next(rx_ring, &rxbd, &i); + rx_swbd = &rx_ring->rx_swbd[i]; } if (likely(j)) { rx_ring->next_to_alloc = i; /* keep track from page reuse */ rx_ring->next_to_use = i; + + /* update ENETC's consumer index */ + enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use); } return j; @@ -680,13 +623,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, u32 bd_status; u16 size; - if (cleaned_cnt >= ENETC_RXBD_BUNDLE) { - int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt); - - /* update ENETC's consumer index */ - enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use); - cleaned_cnt -= count; - } + if (cleaned_cnt >= ENETC_RXBD_BUNDLE) + cleaned_cnt -= enetc_refill_rx_ring(rx_ring, + cleaned_cnt); rxbd = enetc_rxbd(rx_ring, i); bd_status = le32_to_cpu(rxbd->r.lstatus); @@ -704,9 +643,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, cleaned_cnt++; - rxbd = enetc_rxbd_next(rx_ring, rxbd, i); - if (unlikely(++i == rx_ring->bd_count)) - i = 0; + enetc_rxbd_next(rx_ring, &rxbd, &i); if (unlikely(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) { @@ -715,9 +652,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, dma_rmb(); bd_status = le32_to_cpu(rxbd->r.lstatus); - rxbd = enetc_rxbd_next(rx_ring, rxbd, i); - if (unlikely(++i == rx_ring->bd_count)) - i = 0; + enetc_rxbd_next(rx_ring, &rxbd, &i); } rx_ring->ndev->stats.rx_dropped++; @@ -740,9 +675,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, cleaned_cnt++; - rxbd = enetc_rxbd_next(rx_ring, rxbd, i); - if (unlikely(++i == rx_ring->bd_count)) - i = 0; + enetc_rxbd_next(rx_ring, &rxbd, &i); } rx_byte_cnt += skb->len; @@ -762,6 +695,50 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, return rx_frm_cnt; } +static int enetc_poll(struct napi_struct *napi, int budget) +{ + struct enetc_int_vector + *v = container_of(napi, struct enetc_int_vector, napi); + bool 
complete = true; + int work_done; + int i; + + enetc_lock_mdio(); + + for (i = 0; i < v->count_tx_rings; i++) + if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) + complete = false; + + work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget); + if (work_done == budget) + complete = false; + if (work_done) + v->rx_napi_work = true; + + if (!complete) { + enetc_unlock_mdio(); + return budget; + } + + napi_complete_done(napi, work_done); + + if (likely(v->rx_dim_en)) + enetc_rx_net_dim(v); + + v->rx_napi_work = false; + + /* enable interrupts */ + enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); + + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) + enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), + ENETC_TBIER_TXTIE); + + enetc_unlock_mdio(); + + return work_done; +} + /* Probing and Init */ #define ENETC_MAX_RFS_SIZE 64 void enetc_get_si_caps(struct enetc_si *si) @@ -995,60 +972,6 @@ static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) enetc_free_tx_ring(priv->tx_ring[i]); } -int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) -{ - int size = cbdr->bd_count * sizeof(struct enetc_cbd); - - cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base, - GFP_KERNEL); - if (!cbdr->bd_base) - return -ENOMEM; - - /* h/w requires 128B alignment */ - if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) { - dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base); - return -EINVAL; - } - - cbdr->next_to_clean = 0; - cbdr->next_to_use = 0; - - return 0; -} - -void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) -{ - int size = cbdr->bd_count * sizeof(struct enetc_cbd); - - dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base); - cbdr->bd_base = NULL; -} - -void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) -{ - /* set CBDR cache attributes */ - enetc_wr(hw, ENETC_SICAR2, - ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); - - enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base)); - enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base)); - enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count)); - - enetc_wr(hw, ENETC_SICBDRPIR, 0); - enetc_wr(hw, ENETC_SICBDRCIR, 0); - - /* enable ring */ - enetc_wr(hw, ENETC_SICBDRMR, BIT(31)); - - cbdr->pir = hw->reg + ENETC_SICBDRPIR; - cbdr->cir = hw->reg + ENETC_SICBDRCIR; -} - -void enetc_clear_cbdr(struct enetc_hw *hw) -{ - enetc_wr(hw, ENETC_SICBDRMR, 0); -} - static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) { int *rss_table; @@ -1108,45 +1031,22 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv) priv->bdr_int_num = cpus; priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL; priv->tx_ictt = ENETC_TXIC_TIMETHR; - - /* SI specific */ - si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE; } int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) { struct enetc_si *si = priv->si; - int err; - - err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring); - if (err) - return err; - - enetc_setup_cbdr(&si->hw, &si->cbd_ring); priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), GFP_KERNEL); - if (!priv->cls_rules) { - err = -ENOMEM; - goto err_alloc_cls; - } + if (!priv->cls_rules) + return -ENOMEM; return 0; - -err_alloc_cls: - enetc_clear_cbdr(&si->hw); - enetc_free_cbdr(priv->dev, &si->cbd_ring); - - return err; } void enetc_free_si_resources(struct enetc_ndev_priv *priv) { - struct enetc_si *si = priv->si; - - enetc_clear_cbdr(&si->hw); - enetc_free_cbdr(priv->dev, &si->cbd_ring); - 
kfree(priv->cls_rules); } @@ -1217,9 +1117,9 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); rx_ring->idr = hw->reg + ENETC_SIRXIDR; + enetc_lock_mdio(); enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); - /* update ENETC's consumer index */ - enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, rx_ring->next_to_use); + enetc_unlock_mdio(); /* enable ring */ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 8b380fc13314..773e412b9f4e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -98,12 +98,14 @@ struct enetc_cbdr { void *bd_base; /* points to Rx or Tx BD ring */ void __iomem *pir; void __iomem *cir; + void __iomem *mr; /* mode register */ int bd_count; /* # of BDs */ int next_to_use; int next_to_clean; dma_addr_t bd_dma_base; + struct device *dma_dev; }; #define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i])) @@ -119,19 +121,26 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i) return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]); } -static inline union enetc_rx_bd *enetc_rxbd_next(struct enetc_bdr *rx_ring, - union enetc_rx_bd *rxbd, - int i) +static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring, + union enetc_rx_bd **old_rxbd, int *old_index) { - rxbd++; + union enetc_rx_bd *new_rxbd = *old_rxbd; + int new_index = *old_index; + + new_rxbd++; + #ifdef CONFIG_FSL_ENETC_PTP_CLOCK if (rx_ring->ext_en) - rxbd++; + new_rxbd++; #endif - if (unlikely(++i == rx_ring->bd_count)) - rxbd = rx_ring->bd_base; - return rxbd; + if (unlikely(++new_index == rx_ring->bd_count)) { + new_rxbd = rx_ring->bd_base; + new_index = 0; + } + + *old_rxbd = new_rxbd; + *old_index = new_index; } static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd) @@ -252,7 +261,7 @@ struct enetc_ndev_priv { u16 rx_bd_count, tx_bd_count; u16 msg_enable; - int active_offloads; + enum enetc_active_offloads active_offloads; u32 speed; /* store speed for compare update pspeed */ @@ -310,10 +319,9 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, void enetc_set_ethtool_ops(struct net_device *ndev); /* control buffer descriptor ring (CBDR) */ -int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr); -void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr); -void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr); -void enetc_clear_cbdr(struct enetc_hw *hw); +int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count, + struct enetc_cbdr *cbdr); +void enetc_teardown_cbdr(struct enetc_cbdr *cbdr); int enetc_set_mac_flt_entry(struct enetc_si *si, int index, char *mac_addr, int si_map); int enetc_clear_mac_flt_entry(struct enetc_si *si, int index); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c index 201cbc362e33..073e56dcca4e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c @@ -3,9 +3,63 @@ #include "enetc.h" -static void enetc_clean_cbdr(struct enetc_si *si) +int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count, + struct enetc_cbdr *cbdr) +{ + int size = bd_count * sizeof(struct enetc_cbd); + + cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base, + GFP_KERNEL); + if 
(!cbdr->bd_base) + return -ENOMEM; + + /* h/w requires 128B alignment */ + if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) { + dma_free_coherent(dev, size, cbdr->bd_base, + cbdr->bd_dma_base); + return -EINVAL; + } + + cbdr->next_to_clean = 0; + cbdr->next_to_use = 0; + cbdr->dma_dev = dev; + cbdr->bd_count = bd_count; + + cbdr->pir = hw->reg + ENETC_SICBDRPIR; + cbdr->cir = hw->reg + ENETC_SICBDRCIR; + cbdr->mr = hw->reg + ENETC_SICBDRMR; + + /* set CBDR cache attributes */ + enetc_wr(hw, ENETC_SICAR2, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); + + enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base)); + enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base)); + enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count)); + + enetc_wr_reg(cbdr->pir, cbdr->next_to_clean); + enetc_wr_reg(cbdr->cir, cbdr->next_to_use); + /* enable ring */ + enetc_wr_reg(cbdr->mr, BIT(31)); + + return 0; +} + +void enetc_teardown_cbdr(struct enetc_cbdr *cbdr) +{ + int size = cbdr->bd_count * sizeof(struct enetc_cbd); + + /* disable ring */ + enetc_wr_reg(cbdr->mr, 0); + + dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base, + cbdr->bd_dma_base); + cbdr->bd_base = NULL; + cbdr->dma_dev = NULL; +} + +static void enetc_clean_cbdr(struct enetc_cbdr *ring) { - struct enetc_cbdr *ring = &si->cbd_ring; struct enetc_cbd *dest_cbd; int i, status; @@ -15,7 +69,7 @@ static void enetc_clean_cbdr(struct enetc_si *si) dest_cbd = ENETC_CBD(*ring, i); status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK; if (status) - dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n", + dev_warn(ring->dma_dev, "CMD err %04x for cmd %04x\n", status, dest_cbd->cmd); memset(dest_cbd, 0, sizeof(*dest_cbd)); @@ -43,7 +97,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd) return -EIO; if (unlikely(!enetc_cbd_unused(ring))) - enetc_clean_cbdr(si); + enetc_clean_cbdr(ring); i = ring->next_to_use; dest_cbd = ENETC_CBD(*ring, i); @@ -69,7 +123,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd) /* CBD may writeback data, feedback up level */ *cbd = *dest_cbd; - enetc_clean_cbdr(si); + enetc_clean_cbdr(ring); return 0; } @@ -117,6 +171,7 @@ int enetc_set_mac_flt_entry(struct enetc_si *si, int index, int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, int index) { + struct enetc_cbdr *ring = &si->cbd_ring; struct enetc_cbd cbd = {.cmd = 0}; dma_addr_t dma, dma_align; void *tmp, *tmp_align; @@ -129,10 +184,10 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, cbd.length = cpu_to_le16(sizeof(*rfse)); cbd.opt[3] = cpu_to_le32(0); /* SI */ - tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN, + tmp = dma_alloc_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN, &dma, GFP_KERNEL); if (!tmp) { - dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n"); + dev_err(ring->dma_dev, "DMA mapping of RFS entry failed!\n"); return -ENOMEM; } @@ -145,9 +200,9 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, err = enetc_send_cmd(si, &cbd); if (err) - dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err); + dev_err(ring->dma_dev, "FS entry add failed (%d)!", err); - dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN, + dma_free_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN, tmp, dma); return err; @@ -157,6 +212,7 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count, bool read) { + struct 
enetc_cbdr *ring = &si->cbd_ring; struct enetc_cbd cbd = {.cmd = 0}; dma_addr_t dma, dma_align; u8 *tmp, *tmp_align; @@ -166,10 +222,10 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count, /* HW only takes in a full 64 entry table */ return -EINVAL; - tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN, + tmp = dma_alloc_coherent(ring->dma_dev, count + RSSE_ALIGN, &dma, GFP_KERNEL); if (!tmp) { - dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n"); + dev_err(ring->dma_dev, "DMA mapping of RSS table failed!\n"); return -ENOMEM; } dma_align = ALIGN(dma, RSSE_ALIGN); @@ -189,13 +245,13 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count, err = enetc_send_cmd(si, &cbd); if (err) - dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err); + dev_err(ring->dma_dev, "RSS cmd failed (%d)!", err); if (read) for (i = 0; i < count; i++) table[i] = tmp_align[i]; - dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma); + dma_free_coherent(ring->dma_dev, count + RSSE_ALIGN, tmp, dma); return err; } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 224fc37a6757..5e95afd61c87 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -129,16 +129,20 @@ static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type) } static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type, - u32 *hash) + unsigned long hash) { bool err = si->errata & ENETC_ERR_UCMCSWP; if (type == UC) { - enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), *hash); - enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), *(hash + 1)); + enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), + lower_32_bits(hash)); + enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), + upper_32_bits(hash)); } else { /* MC */ - enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), *hash); - enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), *(hash + 1)); + enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), + lower_32_bits(hash)); + enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), + upper_32_bits(hash)); } } @@ -182,7 +186,7 @@ static void enetc_sync_mac_filters(struct enetc_pf *pf) if (i == UC) enetc_clear_mac_flt_entry(si, pos); - enetc_set_mac_ht_flt(si, 0, i, (u32 *)f->mac_hash_table); + enetc_set_mac_ht_flt(si, 0, i, *f->mac_hash_table); } } @@ -248,10 +252,10 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev) } static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx, - u32 *hash) + unsigned long hash) { - enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), *hash); - enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), *(hash + 1)); + enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash)); + enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash)); } static int enetc_vid_hash_idx(unsigned int vid) @@ -279,7 +283,7 @@ static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash) } } - enetc_set_vlan_ht_filter(&pf->si->hw, 0, (u32 *)pf->vlan_ht_filter); + enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter); } static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid) @@ -1081,26 +1085,6 @@ static int enetc_init_port_rss_memory(struct enetc_si *si) return err; } -static void enetc_init_unused_port(struct enetc_si *si) -{ - struct device *dev = &si->pdev->dev; - struct enetc_hw *hw = &si->hw; - int err; - - si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE; - err = enetc_alloc_cbdr(dev, 
&si->cbd_ring); - if (err) - return; - - enetc_setup_cbdr(hw, &si->cbd_ring); - - enetc_init_port_rfs_memory(si); - enetc_init_port_rss_memory(si); - - enetc_clear_cbdr(hw); - enetc_free_cbdr(dev, &si->cbd_ring); -} - static int enetc_pf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1124,8 +1108,24 @@ static int enetc_pf_probe(struct pci_dev *pdev, goto err_map_pf_space; } + err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE, + &si->cbd_ring); + if (err) + goto err_setup_cbdr; + + err = enetc_init_port_rfs_memory(si); + if (err) { + dev_err(&pdev->dev, "Failed to initialize RFS memory\n"); + goto err_init_port_rfs; + } + + err = enetc_init_port_rss_memory(si); + if (err) { + dev_err(&pdev->dev, "Failed to initialize RSS memory\n"); + goto err_init_port_rss; + } + if (node && !of_device_is_available(node)) { - enetc_init_unused_port(si); dev_info(&pdev->dev, "device is disabled, skipping\n"); err = -ENODEV; goto err_device_disabled; @@ -1158,18 +1158,6 @@ static int enetc_pf_probe(struct pci_dev *pdev, goto err_alloc_si_res; } - err = enetc_init_port_rfs_memory(si); - if (err) { - dev_err(&pdev->dev, "Failed to initialize RFS memory\n"); - goto err_init_port_rfs; - } - - err = enetc_init_port_rss_memory(si); - if (err) { - dev_err(&pdev->dev, "Failed to initialize RSS memory\n"); - goto err_init_port_rss; - } - err = enetc_configure_si(priv); if (err) { dev_err(&pdev->dev, "Failed to configure SI\n"); @@ -1205,15 +1193,17 @@ err_phylink_create: err_mdiobus_create: enetc_free_msix(priv); err_config_si: -err_init_port_rss: -err_init_port_rfs: err_alloc_msix: enetc_free_si_resources(priv); err_alloc_si_res: si->ndev = NULL; free_netdev(ndev); err_alloc_netdev: +err_init_port_rss: +err_init_port_rfs: err_device_disabled: + enetc_teardown_cbdr(&si->cbd_ring); +err_setup_cbdr: err_map_pf_space: enetc_pci_remove(pdev); @@ -1239,6 +1229,7 @@ static void enetc_pf_remove(struct pci_dev *pdev) enetc_free_msix(priv); enetc_free_si_resources(priv); + enetc_teardown_cbdr(&si->cbd_ring); free_netdev(si->ndev); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index a9aee219fb58..cb7fa4bceaf2 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -1221,6 +1221,11 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, /* Flow meter and max frame size */ if (entryp) { + if (entryp->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); + err = -EOPNOTSUPP; + goto free_sfi; + } if (entryp->police.burst) { fmi = kzalloc(sizeof(*fmi), GFP_KERNEL); if (!fmi) { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 9b755a84c2d6..03090ba7e226 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -165,6 +165,11 @@ static int enetc_vf_probe(struct pci_dev *pdev, enetc_init_si_rings_params(priv); + err = enetc_setup_cbdr(priv->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE, + &si->cbd_ring); + if (err) + goto err_setup_cbdr; + err = enetc_alloc_si_resources(priv); if (err) { dev_err(&pdev->dev, "SI resource alloc failed\n"); @@ -197,6 +202,8 @@ err_config_si: err_alloc_msix: enetc_free_si_resources(priv); err_alloc_si_res: + enetc_teardown_cbdr(&si->cbd_ring); +err_setup_cbdr: si->ndev = NULL; free_netdev(ndev); err_alloc_netdev: @@ -216,6 +223,7 @@ static void 
enetc_vf_remove(struct pci_dev *pdev) enetc_free_msix(priv); enetc_free_si_resources(priv); + enetc_teardown_cbdr(&si->cbd_ring); free_netdev(si->ndev); diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 6ab9458302e1..2b7db1c22321 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -414,10 +414,6 @@ enum hnae_media_type { * get ring bd number limit * get_pauseparam() * get tx and rx of pause frame use - * set_autoneg() - * set auto autonegotiation of pause frame use - * get_autoneg() - * get auto autonegotiation of pause frame use * set_pauseparam() * set tx and rx of pause frame use * get_coalesce_usecs() @@ -487,8 +483,6 @@ struct hnae_ae_ops { u32 *uplimit); void (*get_pauseparam)(struct hnae_handle *handle, u32 *auto_neg, u32 *rx_en, u32 *tx_en); - int (*set_autoneg)(struct hnae_handle *handle, u8 enable); - int (*get_autoneg)(struct hnae_handle *handle); int (*set_pauseparam)(struct hnae_handle *handle, u32 auto_neg, u32 rx_en, u32 tx_en); void (*get_coalesce_usecs)(struct hnae_handle *handle, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index b98244f75ab9..c615fbf9094e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -487,13 +487,6 @@ static void hns_ae_get_pauseparam(struct hnae_handle *handle, hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en); } -static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable) -{ - assert(handle); - - return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable); -} - static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en) { struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); @@ -502,17 +495,6 @@ static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en) hns_mac_set_promisc(mac_cb, (u8)!!en); } -static int hns_ae_get_autoneg(struct hnae_handle *handle) -{ - u32 auto_neg; - - assert(handle); - - hns_mac_get_autoneg(hns_get_mac_cb(handle), &auto_neg); - - return auto_neg; -} - static int hns_ae_set_pauseparam(struct hnae_handle *handle, u32 autoneg, u32 rx_en, u32 tx_en) { @@ -648,7 +630,7 @@ static void hns_ae_update_stats(struct hnae_handle *handle, struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0; u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0; - u64 rx_missed_errors = 0; + u64 rx_missed_errors; dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); if (!dsaf_dev) @@ -965,8 +947,6 @@ static struct hnae_ae_ops hns_dsaf_ops = { .set_loopback = hns_ae_config_loopback, .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, .get_pauseparam = hns_ae_get_pauseparam, - .set_autoneg = hns_ae_set_autoneg, - .get_autoneg = hns_ae_get_autoneg, .set_pauseparam = hns_ae_set_pauseparam, .get_coalesce_usecs = hns_ae_get_coalesce_usecs, .get_max_coalesced_frames = hns_ae_get_max_coalesced_frames, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 7fb7a419607d..f387a859a201 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -130,14 +130,6 @@ static void hns_gmac_get_tx_auto_pause_frames(void *mac_drv, u16 *newval) GMAC_FC_TX_TIMER_M, GMAC_FC_TX_TIMER_S); } -static void hns_gmac_set_rx_auto_pause_frames(void *mac_drv, u32 newval) -{ - struct mac_driver *drv = (struct mac_driver 
*)mac_drv; - - dsaf_set_dev_bit(drv, GMAC_PAUSE_EN_REG, - GMAC_PAUSE_EN_RX_FDFC_B, !!newval); -} - static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval) { struct mac_driver *drv = (struct mac_driver *)mac_drv; @@ -179,14 +171,6 @@ static void hns_gmac_tx_loop_pkt_dis(void *mac_drv) dsaf_write_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG, tx_loop_pkt_pri); } -static void hns_gmac_set_duplex_type(void *mac_drv, u8 newval) -{ - struct mac_driver *drv = (struct mac_driver *)mac_drv; - - dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG, - GMAC_DUPLEX_TYPE_B, !!newval); -} - static void hns_gmac_get_duplex_type(void *mac_drv, enum hns_gmac_duplex_mdoe *duplex_mode) { @@ -687,17 +671,14 @@ static void hns_gmac_get_stats(void *mac_drv, u64 *data) static void hns_gmac_get_strings(u32 stringset, u8 *data) { - char *buff = (char *)data; + u8 *buff = data; u32 i; if (stringset != ETH_SS_STATS) return; - for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) { - snprintf(buff, ETH_GSTRING_LEN, "%s", - g_gmac_stats_string[i].desc); - buff = buff + ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) + ethtool_sprintf(&buff, g_gmac_stats_string[i].desc); } static int hns_gmac_get_sset_count(int stringset) @@ -741,8 +722,6 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) mac_drv->set_an_mode = hns_gmac_config_an_mode; mac_drv->config_loopback = hns_gmac_config_loopback; mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc; - mac_drv->config_half_duplex = hns_gmac_set_duplex_type; - mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames; mac_drv->get_info = hns_gmac_get_info; mac_drv->autoneg_stat = hns_gmac_autoneg_stat; mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 4a448138b4ec..f4cf569a2599 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -1202,7 +1202,7 @@ void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data) void hns_set_led_opt(struct hns_mac_cb *mac_cb) { - int nic_data = 0; + int nic_data; int txpkts, rxpkts; txpkts = mac_cb->txpkt_for_led - mac_cb->hw_stats.tx_good_pkts; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 3278bf471ddf..8943ffab4418 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -364,12 +364,8 @@ struct mac_driver { void (*config_max_frame_length)(void *mac_drv, u16 newval); /*config PAD and CRC enable */ void (*config_pad_and_crc)(void *mac_drv, u8 newval); - /* config duplex mode*/ - void (*config_half_duplex)(void *mac_drv, u8 newval); /*config tx pause time,if pause_time is zero,disable tx pause enable*/ void (*set_tx_auto_pause_frames)(void *mac_drv, u16 pause_time); - /*config rx pause enable*/ - void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable); /* config rx mode for promiscuous*/ void (*set_promiscuous)(void *mac_drv, u8 enable); void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 87d3db4666df..c2a60612f503 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1613,7 +1613,7 @@ int hns_dsaf_set_mac_uc_entry( struct dsaf_device 
*dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_tbl_tcam_ucast_cfg mac_data; struct dsaf_drv_priv *priv = @@ -1679,7 +1679,7 @@ int hns_dsaf_rm_mac_addr( struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_tbl_tcam_ucast_cfg mac_data; struct dsaf_drv_tbl_tcam_key mac_key; @@ -1751,7 +1751,7 @@ static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src) int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_drv_tbl_tcam_key mask_key; struct dsaf_tbl_tcam_data *pmask_key = NULL; @@ -1861,7 +1861,7 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id, u8 in_port_num, u8 *addr) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_drv_priv *priv = (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev); @@ -1910,7 +1910,7 @@ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id, int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl; @@ -2264,7 +2264,7 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num) */ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) { - u32 i = 0; + u32 i; u32 j; u32 *p = data; u32 reg_tmp; @@ -2768,7 +2768,7 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) struct dsaf_drv_mac_single_dest_entry mask_entry; struct dsaf_drv_tbl_tcam_key temp_key, mask_key; struct dsaf_drv_soft_mac_tbl *soft_mac_entry; - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; struct hns_mac_cb *mac_cb; u8 addr[ETH_ALEN] = {0}; @@ -2870,7 +2870,7 @@ static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port) struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0}; struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0}; struct dsaf_drv_soft_mac_tbl *soft_mac_entry; - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; u8 addr[ETH_ALEN] = {0}; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 173d6966c1a3..325e81d30cfd 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -686,7 +686,7 @@ hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en) obj_args[0].integer.type = ACPI_TYPE_INTEGER; obj_args[0].integer.value = mac_cb->mac_id; obj_args[1].integer.type = ACPI_TYPE_INTEGER; - obj_args[1].integer.value = !!en; + obj_args[1].integer.value = en; argv4.type = ACPI_TYPE_PACKAGE; argv4.package.count = 2; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index d0f8b1fff333..ff03cafccb66 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ 
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -462,33 +462,22 @@ int hns_ppe_get_regs_count(void) */ void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data) { - char *buff = (char *)data; int index = ppe_cb->index; - - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_sw_pkt", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_ok", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_drop_pkt_no_bd", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_fail", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_wait", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_drop_no_buf", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_err_fifo_full", index); - buff = buff + ETH_GSTRING_LEN; - - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_bd", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_ok", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_fifo_empty", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_csum_fail", index); + u8 *buff = data; + + ethtool_sprintf(&buff, "ppe%d_rx_sw_pkt", index); + ethtool_sprintf(&buff, "ppe%d_rx_pkt_ok", index); + ethtool_sprintf(&buff, "ppe%d_rx_drop_pkt_no_bd", index); + ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_fail", index); + ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_wait", index); + ethtool_sprintf(&buff, "ppe%d_rx_pkt_drop_no_buf", index); + ethtool_sprintf(&buff, "ppe%d_rx_pkt_err_fifo_full", index); + + ethtool_sprintf(&buff, "ppe%d_tx_bd", index); + ethtool_sprintf(&buff, "ppe%d_tx_pkt", index); + ethtool_sprintf(&buff, "ppe%d_tx_pkt_ok", index); + ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_fifo_empty", index); + ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_csum_fail", index); } void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index b6c8910cf7ba..5d5dc6942232 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -929,69 +929,42 @@ int hns_rcb_get_ring_regs_count(void) */ void hns_rcb_get_strings(int stringset, u8 *data, int index) { - char *buff = (char *)data; + u8 *buff = data; if (stringset != ETH_SS_STATS) return; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index); - buff = buff + ETH_GSTRING_LEN; - - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, 
ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index); - buff = buff + ETH_GSTRING_LEN; - - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index); - buff = buff + ETH_GSTRING_LEN; - - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index); + ethtool_sprintf(&buff, "tx_ring%d_rcb_pkt_num", index); + ethtool_sprintf(&buff, "tx_ring%d_ppe_tx_pkt_num", index); + ethtool_sprintf(&buff, "tx_ring%d_ppe_drop_pkt_num", index); + ethtool_sprintf(&buff, "tx_ring%d_fbd_num", index); + + ethtool_sprintf(&buff, "tx_ring%d_pkt_num", index); + ethtool_sprintf(&buff, "tx_ring%d_bytes", index); + ethtool_sprintf(&buff, "tx_ring%d_err_cnt", index); + ethtool_sprintf(&buff, "tx_ring%d_io_err", index); + ethtool_sprintf(&buff, "tx_ring%d_sw_err", index); + ethtool_sprintf(&buff, "tx_ring%d_seg_pkt", index); + ethtool_sprintf(&buff, "tx_ring%d_restart_queue", index); + ethtool_sprintf(&buff, "tx_ring%d_tx_busy", index); + + ethtool_sprintf(&buff, "rx_ring%d_rcb_pkt_num", index); + ethtool_sprintf(&buff, "rx_ring%d_ppe_pkt_num", index); + ethtool_sprintf(&buff, "rx_ring%d_ppe_drop_pkt_num", index); + ethtool_sprintf(&buff, "rx_ring%d_fbd_num", index); + + ethtool_sprintf(&buff, "rx_ring%d_pkt_num", index); + ethtool_sprintf(&buff, "rx_ring%d_bytes", index); + ethtool_sprintf(&buff, "rx_ring%d_err_cnt", index); + ethtool_sprintf(&buff, "rx_ring%d_io_err", index); + ethtool_sprintf(&buff, "rx_ring%d_sw_err", index); + ethtool_sprintf(&buff, "rx_ring%d_seg_pkt", index); + ethtool_sprintf(&buff, "rx_ring%d_reuse_pg", index); + ethtool_sprintf(&buff, "rx_ring%d_len_err", index); + ethtool_sprintf(&buff, "rx_ring%d_non_vld_desc_err", index); + ethtool_sprintf(&buff, "rx_ring%d_bd_num_err", index); + ethtool_sprintf(&buff, "rx_ring%d_l2_err", index); + ethtool_sprintf(&buff, "rx_ring%d_l3l4csum_err", index); } void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) @@ -1001,7 +974,7 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) 
bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev); u32 reg_tmp; u32 reg_num_tmp; - u32 i = 0; + u32 i; /*rcb common registers */ regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG); @@ -1072,7 +1045,7 @@ void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data) u32 *regs = data; struct ring_pair_cb *ring_pair = container_of(queue, struct ring_pair_cb, q); - u32 i = 0; + u32 i; /*rcb ring registers */ regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index 7e3609ce112a..be52acd448f9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -267,19 +267,6 @@ static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr) } /** - *hns_xgmac_set_rx_ignore_pause_frames - set rx pause param about xgmac - *@mac_drv: mac driver - *@enable:enable rx pause param - */ -static void hns_xgmac_set_rx_ignore_pause_frames(void *mac_drv, u32 enable) -{ - struct mac_driver *drv = (struct mac_driver *)mac_drv; - - dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG, - XGMAC_PAUSE_CTL_RX_B, !!enable); -} - -/** *hns_xgmac_set_tx_auto_pause_frames - set tx pause param about xgmac *@mac_drv: mac driver *@enable:enable tx pause param @@ -495,7 +482,7 @@ static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat) */ static void hns_xgmac_get_regs(void *mac_drv, void *data) { - u32 i = 0; + u32 i; struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 *regs = data; u64 qtmp; @@ -758,16 +745,14 @@ static void hns_xgmac_get_stats(void *mac_drv, u64 *data) */ static void hns_xgmac_get_strings(u32 stringset, u8 *data) { - char *buff = (char *)data; + u8 *buff = data; u32 i; if (stringset != ETH_SS_STATS) return; - for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) { - snprintf(buff, ETH_GSTRING_LEN, g_xgmac_stats_string[i].desc); - buff = buff + ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) + ethtool_sprintf(&buff, g_xgmac_stats_string[i].desc); } /** @@ -814,9 +799,6 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) mac_drv->set_an_mode = NULL; mac_drv->config_loopback = NULL; mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc; - mac_drv->config_half_duplex = NULL; - mac_drv->set_rx_ignore_pause_frames = - hns_xgmac_set_rx_ignore_pause_frames; mac_drv->mac_free = hns_xgmac_free; mac_drv->adjust_link = NULL; mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index c66a7a51198e..7a1a0a90a1a3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -872,7 +872,7 @@ out: static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; - int num = 0; + int num; bool rx_stopped; hns_update_rx_rate(ring); @@ -1881,7 +1881,7 @@ static void hns_nic_set_rx_mode(struct net_device *ndev) static void hns_nic_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) { - int idx = 0; + int idx; u64 tx_bytes = 0; u64 rx_bytes = 0; u64 tx_pkts = 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index a6e3f07caf99..da48c05435ea 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ 
b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -17,7 +17,6 @@ #define HNS_PHY_CSC_REG 16 /* Copper Specific Control Register */ #define HNS_PHY_CSS_REG 17 /* Copper Specific Status Register */ #define HNS_LED_FC_REG 16 /* LED Function Control Reg. */ -#define HNS_LED_PC_REG 17 /* LED Polarity Control Reg. */ #define HNS_LED_FORCE_ON 9 #define HNS_LED_FORCE_OFF 8 @@ -480,7 +479,7 @@ static int __lb_run_test(struct net_device *ndev, #define NIC_LB_TEST_NO_MEM_ERR 1 #define NIC_LB_TEST_TX_CNT_ERR 2 #define NIC_LB_TEST_RX_CNT_ERR 3 -#define NIC_LB_TEST_RX_PKG_ERR 4 + struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_handle *h = priv->ae_handle; int i, j, lc, good_cnt, ret_val = 0; @@ -895,7 +894,7 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct hns_nic_priv *priv = netdev_priv(netdev); struct hnae_handle *h = priv->ae_handle; - char *buff = (char *)data; + u8 *buff = data; if (!h->dev->ops->get_strings) { netdev_err(netdev, "h->dev->ops->get_strings is null!\n"); @@ -903,74 +902,45 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } if (stringset == ETH_SS_TEST) { - if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) { - memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_MAC], - ETH_GSTRING_LEN); - buff += ETH_GSTRING_LEN; - } - memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES], - ETH_GSTRING_LEN); - buff += ETH_GSTRING_LEN; + if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) + ethtool_sprintf(&buff, + hns_nic_test_strs[MAC_INTERNALLOOP_MAC]); + ethtool_sprintf(&buff, + hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]); if ((netdev->phydev) && (!netdev->phydev->is_c45)) - memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_PHY], - ETH_GSTRING_LEN); + ethtool_sprintf(&buff, + hns_nic_test_strs[MAC_INTERNALLOOP_PHY]); } else { - snprintf(buff, ETH_GSTRING_LEN, "rx_packets"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_packets"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_bytes"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_bytes"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_dropped"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_dropped"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "multicast"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "collisions"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_over_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_crc_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_frame_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_fifo_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_missed_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_aborted_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_carrier_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_fifo_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_heartbeat_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_length_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, 
ETH_GSTRING_LEN, "tx_window_errors"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "rx_compressed"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "tx_compressed"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "netdev_rx_dropped"); - buff = buff + ETH_GSTRING_LEN; - snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_dropped"); - buff = buff + ETH_GSTRING_LEN; - - snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_timeout"); - buff = buff + ETH_GSTRING_LEN; - - h->dev->ops->get_strings(h, stringset, (u8 *)buff); + ethtool_sprintf(&buff, "rx_packets"); + ethtool_sprintf(&buff, "tx_packets"); + ethtool_sprintf(&buff, "rx_bytes"); + ethtool_sprintf(&buff, "tx_bytes"); + ethtool_sprintf(&buff, "rx_errors"); + ethtool_sprintf(&buff, "tx_errors"); + ethtool_sprintf(&buff, "rx_dropped"); + ethtool_sprintf(&buff, "tx_dropped"); + ethtool_sprintf(&buff, "multicast"); + ethtool_sprintf(&buff, "collisions"); + ethtool_sprintf(&buff, "rx_over_errors"); + ethtool_sprintf(&buff, "rx_crc_errors"); + ethtool_sprintf(&buff, "rx_frame_errors"); + ethtool_sprintf(&buff, "rx_fifo_errors"); + ethtool_sprintf(&buff, "rx_missed_errors"); + ethtool_sprintf(&buff, "tx_aborted_errors"); + ethtool_sprintf(&buff, "tx_carrier_errors"); + ethtool_sprintf(&buff, "tx_fifo_errors"); + ethtool_sprintf(&buff, "tx_heartbeat_errors"); + ethtool_sprintf(&buff, "rx_length_errors"); + ethtool_sprintf(&buff, "tx_window_errors"); + ethtool_sprintf(&buff, "rx_compressed"); + ethtool_sprintf(&buff, "tx_compressed"); + ethtool_sprintf(&buff, "netdev_rx_dropped"); + ethtool_sprintf(&buff, "netdev_tx_dropped"); + + ethtool_sprintf(&buff, "netdev_tx_timeout"); + + h->dev->ops->get_strings(h, stringset, buff); } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index e9e60a935f40..01d6bfc0917c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -90,6 +90,7 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_HW_PAD_B, HNAE3_DEV_SUPPORT_STASH_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, + HNAE3_DEV_SUPPORT_PAUSE_B, }; #define hnae3_dev_fd_supported(hdev) \ @@ -134,6 +135,9 @@ enum HNAE3_DEV_CAP_BITS { #define hnae3_dev_stash_supported(hdev) \ test_bit(HNAE3_DEV_SUPPORT_STASH_B, (hdev)->ae_dev->caps) +#define hnae3_dev_pause_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, (hdev)->ae_dev->caps) + #define hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev) \ test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (ae_dev)->caps) @@ -608,8 +612,6 @@ struct hnae3_ae_ops { struct ethtool_rxnfc *cmd); int (*del_fd_entry)(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd); - void (*del_all_fd_entries)(struct hnae3_handle *handle, - bool clear_list); int (*get_fd_rule_cnt)(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd); int (*get_fd_rule_info)(struct hnae3_handle *handle, @@ -649,6 +651,10 @@ struct hnae3_ae_ops { int (*del_cls_flower)(struct hnae3_handle *handle, struct flow_cls_offload *cls_flower); bool (*cls_flower_active)(struct hnae3_handle *handle); + int (*get_phy_link_ksettings)(struct hnae3_handle *handle, + struct ethtool_link_ksettings *cmd); + int (*set_phy_link_ksettings)(struct hnae3_handle *handle, + const struct ethtool_link_ksettings *cmd); }; struct hnae3_dcb_ops { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index dd11c57027bb..9d702bd0c7c1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -362,6 +362,11 @@ static void hns3_dbg_dev_caps(struct hnae3_handle *h) dev_info(&h->pdev->dev, "support UDP tunnel csum: %s\n", test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, caps) ? "yes" : "no"); + dev_info(&h->pdev->dev, "support PAUSE: %s\n", + test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps) ? + "yes" : "no"); + dev_info(&h->pdev->dev, "support imp-controlled PHY: %s\n", + test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, caps) ? "yes" : "no"); } static void hns3_dbg_dev_specs(struct hnae3_handle *h) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index bf4302a5cf95..44b775efd5b9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -4143,14 +4143,6 @@ static void hns3_uninit_phy(struct net_device *netdev) h->ae_algo->ops->mac_disconnect_phy(h); } -static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->del_all_fd_entries) - h->ae_algo->ops->del_all_fd_entries(h, clear_list); -} - static int hns3_client_start(struct hnae3_handle *handle) { if (!handle->ae_algo->ops->client_start) @@ -4337,8 +4329,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_nic_uninit_irq(priv); - hns3_del_all_fd_rules(netdev, true); - hns3_clear_all_ring(handle, true); hns3_nic_uninit_vector_data(priv); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index adcec4ea7cb9..a1d69c56d119 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -642,6 +642,10 @@ static void hns3_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) { struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + + if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps)) + return; if (h->ae_algo->ops->get_pauseparam) h->ae_algo->ops->get_pauseparam(h, ¶m->autoneg, @@ -652,6 +656,10 @@ static int hns3_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) { struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + + if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps)) + return -EOPNOTSUPP; netif_dbg(h, drv, netdev, "set pauseparam: autoneg=%u, rx:%u, tx:%u\n", @@ -692,6 +700,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); const struct hnae3_ae_ops *ops; u8 module_type; u8 media_type; @@ -722,7 +731,10 @@ static int hns3_get_link_ksettings(struct net_device *netdev, break; case HNAE3_MEDIA_TYPE_COPPER: cmd->base.port = PORT_TP; - if (!netdev->phydev) + if (test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps) && + ops->get_phy_link_ksettings) + ops->get_phy_link_ksettings(h, cmd); + else if (!netdev->phydev) hns3_get_ksettings(h, cmd); else phy_ethtool_ksettings_get(netdev->phydev, cmd); @@ -815,6 +827,9 @@ static int hns3_set_link_ksettings(struct net_device *netdev, return -EINVAL; return phy_ethtool_ksettings_set(netdev->phydev, cmd); + } else if (test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps) && + ops->set_phy_link_ksettings) { + return ops->set_phy_link_ksettings(handle, cmd); } if 
(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 1bd0ddfaec4d..3284a2cb52e6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -353,7 +353,10 @@ static void hclge_set_default_capability(struct hclge_dev *hdev) set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps); set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps); - set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); + if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) { + set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); + set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps); + } } static void hclge_parse_capability(struct hclge_dev *hdev, @@ -378,6 +381,12 @@ static void hclge_parse_capability(struct hclge_dev *hdev, set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps); if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B)) set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps); + if (hnae3_get_bit(caps, HCLGE_CAP_FEC_B)) + set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); + if (hnae3_get_bit(caps, HCLGE_CAP_PAUSE_B)) + set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps); + if (hnae3_get_bit(caps, HCLGE_CAP_PHY_IMP_B)) + set_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps); } static __le32 hclge_build_api_caps(void) @@ -467,6 +476,8 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev) hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1); hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1); + if (hnae3_dev_phy_imp_supported(hdev)) + hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1); req->compat = cpu_to_le32(compat); return hclge_cmd_send(&hdev->hw, &desc, 1); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 057dda735492..565c5aa54f88 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -127,7 +127,7 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310, HCLGE_OPC_MAC_TNL_INT_EN = 0x0311, HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312, - HCLGE_OPC_SERDES_LOOPBACK = 0x0315, + HCLGE_OPC_COMMON_LOOPBACK = 0x0315, HCLGE_OPC_CONFIG_FEC_MODE = 0x031A, /* PFC/Pause commands */ @@ -243,6 +243,7 @@ enum hclge_opcode_type { HCLGE_OPC_FD_KEY_CONFIG = 0x1202, HCLGE_OPC_FD_TCAM_OP = 0x1203, HCLGE_OPC_FD_AD_OP = 0x1204, + HCLGE_OPC_FD_USER_DEF_OP = 0x1207, /* MDIO command */ HCLGE_OPC_MDIO_CONFIG = 0x1900, @@ -303,6 +304,10 @@ enum hclge_opcode_type { HCLGE_PPP_CMD1_INT_CMD = 0x2101, HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105, HCLGE_NCSI_INT_EN = 0x2401, + + /* PHY command */ + HCLGE_OPC_PHY_LINK_KSETTING = 0x7025, + HCLGE_OPC_PHY_REG = 0x7026, }; #define HCLGE_TQP_REG_OFFSET 0x80000 @@ -384,6 +389,8 @@ enum HCLGE_CAP_BITS { HCLGE_CAP_HW_PAD_B, HCLGE_CAP_STASH_B, HCLGE_CAP_UDP_TUNNEL_CSUM_B, + HCLGE_CAP_FEC_B = 13, + HCLGE_CAP_PAUSE_B = 14, }; enum HCLGE_API_CAP_BITS { @@ -958,9 +965,10 @@ struct hclge_pf_rst_done_cmd { #define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0) #define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2) -#define HCLGE_CMD_SERDES_DONE_B BIT(0) -#define HCLGE_CMD_SERDES_SUCCESS_B BIT(1) -struct hclge_serdes_lb_cmd { +#define HCLGE_CMD_GE_PHY_INNER_LOOP_B BIT(3) +#define HCLGE_CMD_COMMON_LB_DONE_B BIT(0) +#define HCLGE_CMD_COMMON_LB_SUCCESS_B BIT(1) +struct hclge_common_lb_cmd { u8 mask; u8 enable; u8 result; @@ -1075,6 +1083,19 @@ struct hclge_fd_ad_config_cmd { u8 rsv2[8]; }; 
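The hunks above all follow the same capability-gating pattern: the firmware advertises a feature through a HCLGE_CAP_* bit, hclge_parse_capability() mirrors it into ae_dev->caps as a HNAE3_DEV_SUPPORT_* bit, and each ethtool entry point tests that bit before touching the hardware. A minimal standalone sketch of the pattern follows, with hypothetical names (dev_caps, parse_caps) rather than the driver's real helpers:

#include <stdbool.h>
#include <stdint.h>

/* feature bits mirrored from the firmware capability word */
enum dev_cap_bits {
	DEV_SUPPORT_FEC_B,
	DEV_SUPPORT_PAUSE_B,
};

struct dev_caps {
	uint32_t bits;
};

/* firmware bit 13 -> FEC, bit 14 -> PAUSE, as in the hclge hunk above */
static void parse_caps(struct dev_caps *caps, uint32_t fw_word)
{
	if (fw_word & (1u << 13))
		caps->bits |= 1u << DEV_SUPPORT_FEC_B;
	if (fw_word & (1u << 14))
		caps->bits |= 1u << DEV_SUPPORT_PAUSE_B;
}

/* feature paths gate on the mirrored bit before touching hardware */
static bool dev_pause_supported(const struct dev_caps *caps)
{
	return caps->bits & (1u << DEV_SUPPORT_PAUSE_B);
}

When the bit is clear, an entry point such as hns3_set_pauseparam() above returns -EOPNOTSUPP instead of programming a knob the hardware does not implement.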
+#define HCLGE_FD_USER_DEF_OFT_S 0 +#define HCLGE_FD_USER_DEF_OFT_M GENMASK(14, 0) +#define HCLGE_FD_USER_DEF_EN_B 15 +struct hclge_fd_user_def_cfg_cmd { + __le16 ol2_cfg; + __le16 l2_cfg; + __le16 ol3_cfg; + __le16 l3_cfg; + __le16 ol4_cfg; + __le16 l4_cfg; + u8 rsv[12]; +}; + struct hclge_get_m7_bd_cmd { __le32 bd_num; u8 rsv[20]; @@ -1096,6 +1117,7 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd { #define HCLGE_LINK_EVENT_REPORT_EN_B 0 #define HCLGE_NCSI_ERROR_REPORT_EN_B 1 +#define HCLGE_PHY_IMP_EN_B 2 struct hclge_firmware_compat_cmd { __le32 compat; u8 rsv[20]; @@ -1137,6 +1159,36 @@ struct hclge_dev_specs_1_cmd { u8 rsv1[18]; }; +#define HCLGE_PHY_LINK_SETTING_BD_NUM 2 + +struct hclge_phy_link_ksetting_0_cmd { + __le32 speed; + u8 duplex; + u8 autoneg; + u8 eth_tp_mdix; + u8 eth_tp_mdix_ctrl; + u8 port; + u8 transceiver; + u8 phy_address; + u8 rsv; + __le32 supported; + __le32 advertising; + __le32 lp_advertising; +}; + +struct hclge_phy_link_ksetting_1_cmd { + u8 master_slave_cfg; + u8 master_slave_state; + u8 rsv[22]; +}; + +struct hclge_phy_reg_cmd { + __le16 reg_addr; + u8 rsv0[2]; + __le16 reg_val; + u8 rsv1[18]; +}; + int hclge_cmd_init(struct hclge_dev *hdev); static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 6b1d197df881..1c699131e8df 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -1546,13 +1546,13 @@ static void hclge_dbg_dump_loopback(struct hclge_dev *hdev, { struct phy_device *phydev = hdev->hw.mac.phydev; struct hclge_config_mac_mode_cmd *req_app; - struct hclge_serdes_lb_cmd *req_serdes; + struct hclge_common_lb_cmd *req_common; struct hclge_desc desc; u8 loopback_en; int ret; req_app = (struct hclge_config_mac_mode_cmd *)desc.data; - req_serdes = (struct hclge_serdes_lb_cmd *)desc.data; + req_common = (struct hclge_common_lb_cmd *)desc.data; dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id); @@ -1569,27 +1569,33 @@ static void hclge_dbg_dump_loopback(struct hclge_dev *hdev, dev_info(&hdev->pdev->dev, "app loopback: %s\n", loopback_en ? "on" : "off"); - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true); + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, - "failed to dump serdes loopback status, ret = %d\n", + "failed to dump common loopback status, ret = %d\n", ret); return; } - loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n", loopback_en ? "on" : "off"); - loopback_en = req_serdes->enable & + loopback_en = req_common->enable & HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n", loopback_en ? "on" : "off"); - if (phydev) + if (phydev) { dev_info(&hdev->pdev->dev, "phy loopback: %s\n", phydev->loopback_enabled ? "on" : "off"); + } else if (hnae3_dev_phy_imp_supported(hdev)) { + loopback_en = req_common->enable & + HCLGE_CMD_GE_PHY_INNER_LOOP_B; + dev_info(&hdev->pdev->dev, "phy loopback: %s\n", + loopback_en ? 
"on" : "off"); + } } /* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index e3f81c7e0ce7..058317ce579c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -62,7 +62,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); static void hclge_rfs_filter_expire(struct hclge_dev *hdev); -static void hclge_clear_arfs_rules(struct hnae3_handle *handle); +static int hclge_clear_arfs_rules(struct hclge_dev *hdev); static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, unsigned long *addr); static int hclge_set_default_loopback(struct hclge_dev *hdev); @@ -70,6 +70,7 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev); static void hclge_sync_mac_table(struct hclge_dev *hdev); static void hclge_restore_hw_table(struct hclge_dev *hdev); static void hclge_sync_promisc_mode(struct hclge_dev *hdev); +static void hclge_sync_fd_table(struct hclge_dev *hdev); static struct hnae3_ae_algo ae_algo; @@ -384,36 +385,62 @@ static const struct key_info meta_data_key_info[] = { }; static const struct key_info tuple_key_info[] = { - { OUTER_DST_MAC, 48}, - { OUTER_SRC_MAC, 48}, - { OUTER_VLAN_TAG_FST, 16}, - { OUTER_VLAN_TAG_SEC, 16}, - { OUTER_ETH_TYPE, 16}, - { OUTER_L2_RSV, 16}, - { OUTER_IP_TOS, 8}, - { OUTER_IP_PROTO, 8}, - { OUTER_SRC_IP, 32}, - { OUTER_DST_IP, 32}, - { OUTER_L3_RSV, 16}, - { OUTER_SRC_PORT, 16}, - { OUTER_DST_PORT, 16}, - { OUTER_L4_RSV, 32}, - { OUTER_TUN_VNI, 24}, - { OUTER_TUN_FLOW_ID, 8}, - { INNER_DST_MAC, 48}, - { INNER_SRC_MAC, 48}, - { INNER_VLAN_TAG_FST, 16}, - { INNER_VLAN_TAG_SEC, 16}, - { INNER_ETH_TYPE, 16}, - { INNER_L2_RSV, 16}, - { INNER_IP_TOS, 8}, - { INNER_IP_PROTO, 8}, - { INNER_SRC_IP, 32}, - { INNER_DST_IP, 32}, - { INNER_L3_RSV, 16}, - { INNER_SRC_PORT, 16}, - { INNER_DST_PORT, 16}, - { INNER_L4_RSV, 32}, + { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 }, + { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 }, + { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 }, + { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 }, + { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 }, + { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 }, + { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 }, + { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 }, + { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 }, + { INNER_DST_MAC, 48, KEY_OPT_MAC, + offsetof(struct hclge_fd_rule, tuples.dst_mac), + offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) }, + { INNER_SRC_MAC, 48, KEY_OPT_MAC, + offsetof(struct hclge_fd_rule, tuples.src_mac), + offsetof(struct hclge_fd_rule, tuples_mask.src_mac) }, + { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.vlan_tag1), + offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) }, + { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, + { INNER_ETH_TYPE, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.ether_proto), + offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) }, + { 
INNER_L2_RSV, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.l2_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) }, + { INNER_IP_TOS, 8, KEY_OPT_U8, + offsetof(struct hclge_fd_rule, tuples.ip_tos), + offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) }, + { INNER_IP_PROTO, 8, KEY_OPT_U8, + offsetof(struct hclge_fd_rule, tuples.ip_proto), + offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) }, + { INNER_SRC_IP, 32, KEY_OPT_IP, + offsetof(struct hclge_fd_rule, tuples.src_ip), + offsetof(struct hclge_fd_rule, tuples_mask.src_ip) }, + { INNER_DST_IP, 32, KEY_OPT_IP, + offsetof(struct hclge_fd_rule, tuples.dst_ip), + offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) }, + { INNER_L3_RSV, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.l3_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) }, + { INNER_SRC_PORT, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.src_port), + offsetof(struct hclge_fd_rule, tuples_mask.src_port) }, + { INNER_DST_PORT, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.dst_port), + offsetof(struct hclge_fd_rule, tuples_mask.dst_port) }, + { INNER_L4_RSV, 32, KEY_OPT_LE32, + offsetof(struct hclge_fd_rule, tuples.l4_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) }, }; static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) @@ -751,8 +778,9 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; - if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && - hdev->hw.mac.phydev->drv->set_loopback) { + if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && + hdev->hw.mac.phydev->drv->set_loopback) || + hnae3_dev_phy_imp_supported(hdev)) { count += 1; handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; } @@ -1150,8 +1178,10 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, if (hnae3_dev_fec_supported(hdev)) hclge_convert_setting_fec(mac); + if (hnae3_dev_pause_supported(hdev)) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); - linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); } @@ -1163,8 +1193,11 @@ static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, hclge_convert_setting_kr(mac, speed_ability); if (hnae3_dev_fec_supported(hdev)) hclge_convert_setting_fec(mac); + + if (hnae3_dev_pause_supported(hdev)) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); - linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); } @@ -1193,10 +1226,13 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); } + if (hnae3_dev_pause_supported(hdev)) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); + } + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); - linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); - linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); } static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability) @@ -2889,10 +2925,12 @@ static void 
hclge_update_link_status(struct hclge_dev *hdev) clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); } -static void hclge_update_port_capability(struct hclge_mac *mac) +static void hclge_update_port_capability(struct hclge_dev *hdev, + struct hclge_mac *mac) { - /* update fec ability by speed */ - hclge_convert_setting_fec(mac); + if (hnae3_dev_fec_supported(hdev)) + /* update fec ability by speed */ + hclge_convert_setting_fec(mac); /* firmware can not identify back plane type, the media type * read from configuration can help deal it @@ -2984,6 +3022,141 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) return 0; } +static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle, + struct ethtool_link_ksettings *cmd) +{ + struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_phy_link_ksetting_0_cmd *req0; + struct hclge_phy_link_ksetting_1_cmd *req1; + u32 supported, advertising, lp_advertising; + struct hclge_dev *hdev = vport->back; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, + true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, + true); + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get phy link ksetting, ret = %d.\n", ret); + return ret; + } + + req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; + cmd->base.autoneg = req0->autoneg; + cmd->base.speed = le32_to_cpu(req0->speed); + cmd->base.duplex = req0->duplex; + cmd->base.port = req0->port; + cmd->base.transceiver = req0->transceiver; + cmd->base.phy_address = req0->phy_address; + cmd->base.eth_tp_mdix = req0->eth_tp_mdix; + cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl; + supported = le32_to_cpu(req0->supported); + advertising = le32_to_cpu(req0->advertising); + lp_advertising = le32_to_cpu(req0->lp_advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); + + req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; + cmd->base.master_slave_cfg = req1->master_slave_cfg; + cmd->base.master_slave_state = req1->master_slave_state; + + return 0; +} + +static int +hclge_set_phy_link_ksettings(struct hnae3_handle *handle, + const struct ethtool_link_ksettings *cmd) +{ + struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_phy_link_ksetting_0_cmd *req0; + struct hclge_phy_link_ksetting_1_cmd *req1; + struct hclge_dev *hdev = vport->back; + u32 advertising; + int ret; + + if (cmd->base.autoneg == AUTONEG_DISABLE && + ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) || + (cmd->base.duplex != DUPLEX_HALF && + cmd->base.duplex != DUPLEX_FULL))) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, + false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, + false); + + req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; + req0->autoneg = cmd->base.autoneg; + req0->speed = cpu_to_le32(cmd->base.speed); + req0->duplex = cmd->base.duplex; + ethtool_convert_link_mode_to_legacy_u32(&advertising, + 
cmd->link_modes.advertising); + req0->advertising = cpu_to_le32(advertising); + req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; + + req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; + req1->master_slave_cfg = cmd->base.master_slave_cfg; + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to set phy link ksettings, ret = %d.\n", ret); + return ret; + } + + hdev->hw.mac.autoneg = cmd->base.autoneg; + hdev->hw.mac.speed = cmd->base.speed; + hdev->hw.mac.duplex = cmd->base.duplex; + linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); + + return 0; +} + +static int hclge_update_tp_port_info(struct hclge_dev *hdev) +{ + struct ethtool_link_ksettings cmd; + int ret; + + if (!hnae3_dev_phy_imp_supported(hdev)) + return 0; + + ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); + if (ret) + return ret; + + hdev->hw.mac.autoneg = cmd.base.autoneg; + hdev->hw.mac.speed = cmd.base.speed; + hdev->hw.mac.duplex = cmd.base.duplex; + + return 0; +} + +static int hclge_tp_port_init(struct hclge_dev *hdev) +{ + struct ethtool_link_ksettings cmd; + + if (!hnae3_dev_phy_imp_supported(hdev)) + return 0; + + cmd.base.autoneg = hdev->hw.mac.autoneg; + cmd.base.speed = hdev->hw.mac.speed; + cmd.base.duplex = hdev->hw.mac.duplex; + linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); + + return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); +} + static int hclge_update_port_info(struct hclge_dev *hdev) { struct hclge_mac *mac = &hdev->hw.mac; @@ -2992,7 +3165,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev) /* get the port info from SFP cmd if not copper port */ if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) - return 0; + return hclge_update_tp_port_info(hdev); /* if IMP does not support get SFP/qSFP info, return directly */ if (!hdev->support_sfp_query) @@ -3012,7 +3185,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev) if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { if (mac->speed_type == QUERY_ACTIVE_SPEED) { - hclge_update_port_capability(mac); + hclge_update_port_capability(hdev, mac); return 0; } return hclge_cfg_mac_speed_dup(hdev, mac->speed, @@ -4095,6 +4268,7 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev) hclge_update_link_status(hdev); hclge_sync_mac_table(hdev); hclge_sync_promisc_mode(hdev); + hclge_sync_fd_table(hdev); if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { delta = jiffies - hdev->last_serv_processed; @@ -4996,6 +5170,285 @@ static void hclge_request_update_promisc_mode(struct hnae3_handle *handle) set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); } +static void hclge_sync_fd_state(struct hclge_dev *hdev) +{ + if (hlist_empty(&hdev->fd_rule_list)) + hdev->fd_active_type = HCLGE_FD_RULE_NONE; +} + +static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location) +{ + if (!test_bit(location, hdev->fd_bmap)) { + set_bit(location, hdev->fd_bmap); + hdev->hclge_fd_rule_num++; + } +} + +static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location) +{ + if (test_bit(location, hdev->fd_bmap)) { + clear_bit(location, hdev->fd_bmap); + hdev->hclge_fd_rule_num--; + } +} + +static void hclge_fd_free_node(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + hlist_del(&rule->rule_node); + kfree(rule); + hclge_sync_fd_state(hdev); +} + +static void hclge_update_fd_rule_node(struct hclge_dev *hdev, + struct hclge_fd_rule *old_rule, + struct hclge_fd_rule *new_rule, + enum 
HCLGE_FD_NODE_STATE state) +{ + switch (state) { + case HCLGE_FD_TO_ADD: + case HCLGE_FD_ACTIVE: + /* 1) if the new state is TO_ADD, just replace the old rule + * with the same location, no matter its state, because the + * new rule will be configured to the hardware. + * 2) if the new state is ACTIVE, it means the new rule + * has been configured to the hardware, so just replace + * the old rule node with the same location. + * 3) neither case adds a new node to the list, so it is + * unnecessary to update the rule number and fd_bmap. + */ + new_rule->rule_node.next = old_rule->rule_node.next; + new_rule->rule_node.pprev = old_rule->rule_node.pprev; + memcpy(old_rule, new_rule, sizeof(*old_rule)); + kfree(new_rule); + break; + case HCLGE_FD_DELETED: + hclge_fd_dec_rule_cnt(hdev, old_rule->location); + hclge_fd_free_node(hdev, old_rule); + break; + case HCLGE_FD_TO_DEL: + /* if the new request is TO_DEL and an old rule exists: + * 1) if the state of the old rule is TO_DEL, nothing needs to + * be done, because rules are deleted by location and the + * other rule contents are irrelevant. + * 2) if the state of the old rule is ACTIVE, change its state + * to TO_DEL, so the rule will be deleted when the periodic + * task is scheduled. + * 3) if the state of the old rule is TO_ADD, the rule has not + * been added to the hardware yet, so just delete the rule node + * from fd_rule_list directly. + */ + if (old_rule->state == HCLGE_FD_TO_ADD) { + hclge_fd_dec_rule_cnt(hdev, old_rule->location); + hclge_fd_free_node(hdev, old_rule); + return; + } + old_rule->state = HCLGE_FD_TO_DEL; + break; + } +} + +static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist, + u16 location, + struct hclge_fd_rule **parent) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + + hlist_for_each_entry_safe(rule, node, hlist, rule_node) { + if (rule->location == location) + return rule; + else if (rule->location > location) + return NULL; + /* record the parent node, used to keep the nodes in fd_rule_list + * in ascending order. 
+ */ + *parent = rule; + } + + return NULL; +} + +/* insert fd rule node in ascending order according to rule->location */ +static void hclge_fd_insert_rule_node(struct hlist_head *hlist, + struct hclge_fd_rule *rule, + struct hclge_fd_rule *parent) +{ + INIT_HLIST_NODE(&rule->rule_node); + + if (parent) + hlist_add_behind(&rule->rule_node, &parent->rule_node); + else + hlist_add_head(&rule->rule_node, hlist); +} + +static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev, + struct hclge_fd_user_def_cfg *cfg) +{ + struct hclge_fd_user_def_cfg_cmd *req; + struct hclge_desc desc; + u16 data = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false); + + req = (struct hclge_fd_user_def_cfg_cmd *)desc.data; + + hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0); + hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, + HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset); + req->ol2_cfg = cpu_to_le16(data); + + data = 0; + hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0); + hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, + HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset); + req->ol3_cfg = cpu_to_le16(data); + + data = 0; + hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0); + hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, + HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset); + req->ol4_cfg = cpu_to_le16(data); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set fd user def data, ret = %d\n", ret); + return ret; +} + +static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked) +{ + int ret; + + if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state)) + return; + + if (!locked) + spin_lock_bh(&hdev->fd_rule_lock); + + ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); + if (ret) + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + + if (!locked) + spin_unlock_bh(&hdev->fd_rule_lock); +} + +static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + struct hlist_head *hlist = &hdev->fd_rule_list; + struct hclge_fd_rule *fd_rule, *parent = NULL; + struct hclge_fd_user_def_info *info, *old_info; + struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || + rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) + return 0; + + /* valid layers start from 1, so subtract 1 to index the cfg array */ + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + info = &rule->ep.user_def; + + if (!cfg->ref_cnt || cfg->offset == info->offset) + return 0; + + if (cfg->ref_cnt > 1) + goto error; + + fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent); + if (fd_rule) { + old_info = &fd_rule->ep.user_def; + if (info->layer == old_info->layer) + return 0; + } + +error: + dev_err(&hdev->pdev->dev, + "No available offset for layer%d fd rule, each layer only supports one user def offset.\n", + info->layer + 1); + return -ENOSPC; +} + +static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || + rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) + return; + + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + if (!cfg->ref_cnt) { + cfg->offset = rule->ep.user_def.offset; + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + } + cfg->ref_cnt++; +} + +static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + 
struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || + rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) + return; + + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + if (!cfg->ref_cnt) + return; + + cfg->ref_cnt--; + if (!cfg->ref_cnt) { + cfg->offset = 0; + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + } +} + +static void hclge_update_fd_list(struct hclge_dev *hdev, + enum HCLGE_FD_NODE_STATE state, u16 location, + struct hclge_fd_rule *new_rule) +{ + struct hlist_head *hlist = &hdev->fd_rule_list; + struct hclge_fd_rule *fd_rule, *parent = NULL; + + fd_rule = hclge_find_fd_rule(hlist, location, &parent); + if (fd_rule) { + hclge_fd_dec_user_def_refcnt(hdev, fd_rule); + if (state == HCLGE_FD_ACTIVE) + hclge_fd_inc_user_def_refcnt(hdev, new_rule); + hclge_sync_fd_user_def_cfg(hdev, true); + + hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state); + return; + } + + /* it's unlikely to fail here, because we have checked that the + * rule exists beforehand. + */ + if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) { + dev_warn(&hdev->pdev->dev, + "failed to delete fd rule %u, it does not exist\n", + location); + return; + } + + hclge_fd_inc_user_def_refcnt(hdev, new_rule); + hclge_sync_fd_user_def_cfg(hdev, true); + + hclge_fd_insert_rule_node(hlist, new_rule, parent); + hclge_fd_inc_rule_cnt(hdev, new_rule->location); + + if (state == HCLGE_FD_TO_ADD) { + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + hclge_task_schedule(hdev, 0); + } +} + static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) { struct hclge_get_fd_mode_cmd *req; @@ -5074,6 +5527,17 @@ static int hclge_set_fd_key_config(struct hclge_dev *hdev, return ret; } +static void hclge_fd_disable_user_def(struct hclge_dev *hdev) +{ + struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; + + spin_lock_bh(&hdev->fd_rule_lock); + memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); + spin_unlock_bh(&hdev->fd_rule_lock); + + hclge_fd_set_user_def_cmd(hdev, cfg); +} + static int hclge_init_fd_config(struct hclge_dev *hdev) { #define LOW_2_WORDS 0x03 @@ -5114,9 +5578,12 @@ static int hclge_init_fd_config(struct hclge_dev *hdev) BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); /* If use max 400bit key, we can support tuples for ether type */ - if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) + if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { key_cfg->tuple_active |= BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; + } /* roce_type is used to filter roce frames * dst_vport is used to specify the rule @@ -5225,96 +5692,57 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, struct hclge_fd_rule *rule) { + int offset, moffset, ip_offset; + enum HCLGE_FD_KEY_OPT key_opt; u16 tmp_x_s, tmp_y_s; u32 tmp_x_l, tmp_y_l; + u8 *p = (u8 *)rule; int i; - if (rule->unused_tuple & tuple_bit) + if (rule->unused_tuple & BIT(tuple_bit)) return true; - switch (tuple_bit) { - case BIT(INNER_DST_MAC): - for (i = 0; i < ETH_ALEN; i++) { - calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i], - rule->tuples_mask.dst_mac[i]); - calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i], - rule->tuples_mask.dst_mac[i]); - } + key_opt = tuple_key_info[tuple_bit].key_opt; + offset = tuple_key_info[tuple_bit].offset; + 
moffset = tuple_key_info[tuple_bit].moffset; - return true; - case BIT(INNER_SRC_MAC): - for (i = 0; i < ETH_ALEN; i++) { - calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], - rule->tuples_mask.src_mac[i]); - calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], - rule->tuples_mask.src_mac[i]); - } + switch (key_opt) { + case KEY_OPT_U8: + calc_x(*key_x, p[offset], p[moffset]); + calc_y(*key_y, p[offset], p[moffset]); return true; - case BIT(INNER_VLAN_TAG_FST): - calc_x(tmp_x_s, rule->tuples.vlan_tag1, - rule->tuples_mask.vlan_tag1); - calc_y(tmp_y_s, rule->tuples.vlan_tag1, - rule->tuples_mask.vlan_tag1); + case KEY_OPT_LE16: + calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); + calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); *(__le16 *)key_x = cpu_to_le16(tmp_x_s); *(__le16 *)key_y = cpu_to_le16(tmp_y_s); return true; - case BIT(INNER_ETH_TYPE): - calc_x(tmp_x_s, rule->tuples.ether_proto, - rule->tuples_mask.ether_proto); - calc_y(tmp_y_s, rule->tuples.ether_proto, - rule->tuples_mask.ether_proto); - *(__le16 *)key_x = cpu_to_le16(tmp_x_s); - *(__le16 *)key_y = cpu_to_le16(tmp_y_s); - - return true; - case BIT(INNER_IP_TOS): - calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); - calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); - - return true; - case BIT(INNER_IP_PROTO): - calc_x(*key_x, rule->tuples.ip_proto, - rule->tuples_mask.ip_proto); - calc_y(*key_y, rule->tuples.ip_proto, - rule->tuples_mask.ip_proto); - - return true; - case BIT(INNER_SRC_IP): - calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX], - rule->tuples_mask.src_ip[IPV4_INDEX]); - calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX], - rule->tuples_mask.src_ip[IPV4_INDEX]); - *(__le32 *)key_x = cpu_to_le32(tmp_x_l); - *(__le32 *)key_y = cpu_to_le32(tmp_y_l); - - return true; - case BIT(INNER_DST_IP): - calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX], - rule->tuples_mask.dst_ip[IPV4_INDEX]); - calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX], - rule->tuples_mask.dst_ip[IPV4_INDEX]); + case KEY_OPT_LE32: + calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); + calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); *(__le32 *)key_x = cpu_to_le32(tmp_x_l); *(__le32 *)key_y = cpu_to_le32(tmp_y_l); return true; - case BIT(INNER_SRC_PORT): - calc_x(tmp_x_s, rule->tuples.src_port, - rule->tuples_mask.src_port); - calc_y(tmp_y_s, rule->tuples.src_port, - rule->tuples_mask.src_port); - *(__le16 *)key_x = cpu_to_le16(tmp_x_s); - *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + case KEY_OPT_MAC: + for (i = 0; i < ETH_ALEN; i++) { + calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i], + p[moffset + i]); + calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i], + p[moffset + i]); + } return true; - case BIT(INNER_DST_PORT): - calc_x(tmp_x_s, rule->tuples.dst_port, - rule->tuples_mask.dst_port); - calc_y(tmp_y_s, rule->tuples.dst_port, - rule->tuples_mask.dst_port); - *(__le16 *)key_x = cpu_to_le16(tmp_x_s); - *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + case KEY_OPT_IP: + ip_offset = IPV4_INDEX * sizeof(u32); + calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]), + *(u32 *)(&p[moffset + ip_offset])); + calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]), + *(u32 *)(&p[moffset + ip_offset])); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); return true; default: @@ -5402,12 +5830,12 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage, for (i = 0 ; i < MAX_TUPLE; i++) { bool tuple_valid; - u32 check_tuple; tuple_size = tuple_key_info[i].key_length 
/ 8; - check_tuple = key_cfg->tuple_active & BIT(i); + if (!(key_cfg->tuple_active & BIT(i))) + continue; - tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, + tuple_valid = hclge_fd_convert_tuple(i, cur_key_x, cur_key_y, rule); if (tuple_valid) { cur_key_x += tuple_size; @@ -5538,8 +5966,7 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, if (!spec || !unused_tuple) return -EINVAL; - *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | - BIT(INNER_IP_TOS); + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); /* check whether src/dst ip address used */ if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) @@ -5554,8 +5981,8 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, if (!spec->pdst) *unused_tuple |= BIT(INNER_DST_PORT); - if (spec->tclass) - return -EOPNOTSUPP; + if (!spec->tclass) + *unused_tuple |= BIT(INNER_IP_TOS); return 0; } @@ -5567,7 +5994,7 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, return -EINVAL; *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | - BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); /* check whether src/dst ip address used */ if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) @@ -5579,8 +6006,8 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, if (!spec->l4_proto) *unused_tuple |= BIT(INNER_IP_PROTO); - if (spec->tclass) - return -EOPNOTSUPP; + if (!spec->tclass) + *unused_tuple |= BIT(INNER_IP_TOS); if (spec->l4_4_bytes) return -EOPNOTSUPP; @@ -5650,9 +6077,98 @@ static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, return 0; } +static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple, + struct hclge_fd_user_def_info *info) +{ + switch (flow_type) { + case ETHER_FLOW: + info->layer = HCLGE_FD_USER_DEF_L2; + *unused_tuple &= ~BIT(INNER_L2_RSV); + break; + case IP_USER_FLOW: + case IPV6_USER_FLOW: + info->layer = HCLGE_FD_USER_DEF_L3; + *unused_tuple &= ~BIT(INNER_L3_RSV); + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + info->layer = HCLGE_FD_USER_DEF_L4; + *unused_tuple &= ~BIT(INNER_L4_RSV); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs) +{ + return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0; +} + +static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple, + struct hclge_fd_user_def_info *info) +{ + u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + u16 data, offset, data_mask, offset_mask; + int ret; + + info->layer = HCLGE_FD_USER_DEF_NONE; + *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; + + if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs)) + return 0; + + /* user-def data from ethtool is 64 bit value, the bit0~15 is used + * for data, and bit32~47 is used for offset. 
+ */ + data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; + data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; + offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; + offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; + + if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) { + dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); + return -EOPNOTSUPP; + } + + if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) { + dev_err(&hdev->pdev->dev, + "user-def offset[%u] should be no more than %u\n", + offset, HCLGE_FD_MAX_USER_DEF_OFFSET); + return -EINVAL; + } + + if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) { + dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n"); + return -EINVAL; + } + + ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info); + if (ret) { + dev_err(&hdev->pdev->dev, + "unsupported flow type for user-def bytes, ret = %d\n", + ret); + return ret; + } + + info->data = data; + info->data_mask = data_mask; + info->offset = offset; + + return 0; +} + static int hclge_fd_check_spec(struct hclge_dev *hdev, struct ethtool_rx_flow_spec *fs, - u32 *unused_tuple) + u32 *unused_tuple, + struct hclge_fd_user_def_info *info) { u32 flow_type; int ret; @@ -5665,11 +6181,9 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev, return -EINVAL; } - if ((fs->flow_type & FLOW_EXT) && - (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { - dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); - return -EOPNOTSUPP; - } + ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info); + if (ret) + return ret; flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); switch (flow_type) { @@ -5721,217 +6235,194 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev, return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); } -static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) +static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, u8 ip_proto) { - struct hclge_fd_rule *rule = NULL; - struct hlist_node *node2; + rule->tuples.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); - spin_lock_bh(&hdev->fd_rule_lock); - hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { - if (rule->location >= location) - break; - } + rule->tuples.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); - spin_unlock_bh(&hdev->fd_rule_lock); + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); + rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); - return rule && rule->location == location; -} + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); + rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); -/* make sure being called after lock up with fd_rule_lock */ -static int hclge_fd_update_rule_list(struct hclge_dev *hdev, - struct hclge_fd_rule *new_rule, - u16 location, - bool is_add) -{ - struct hclge_fd_rule *rule = NULL, *parent = NULL; - struct hlist_node *node2; + rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; - if (is_add && !new_rule) - return -EINVAL; + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; - hlist_for_each_entry_safe(rule, node2, - 
&hdev->fd_rule_list, rule_node) { - if (rule->location >= location) - break; - parent = rule; - } - - if (rule && rule->location == location) { - hlist_del(&rule->rule_node); - kfree(rule); - hdev->hclge_fd_rule_num--; - - if (!is_add) { - if (!hdev->hclge_fd_rule_num) - hdev->fd_active_type = HCLGE_FD_RULE_NONE; - clear_bit(location, hdev->fd_bmap); + rule->tuples.ip_proto = ip_proto; + rule->tuples_mask.ip_proto = 0xFF; +} - return 0; - } - } else if (!is_add) { - dev_err(&hdev->pdev->dev, - "delete fail, rule %u is inexistent\n", - location); - return -EINVAL; - } +static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + rule->tuples.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); - INIT_HLIST_NODE(&new_rule->rule_node); + rule->tuples.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); - if (parent) - hlist_add_behind(&new_rule->rule_node, &parent->rule_node); - else - hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); + rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; - set_bit(location, hdev->fd_bmap); - hdev->hclge_fd_rule_num++; - hdev->fd_active_type = new_rule->rule_type; + rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; - return 0; + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; } -static int hclge_fd_get_tuple(struct hclge_dev *hdev, - struct ethtool_rx_flow_spec *fs, - struct hclge_fd_rule *rule) +static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, u8 ip_proto) { - u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src, + IPV6_SIZE); - switch (flow_type) { - case SCTP_V4_FLOW: - case TCP_V4_FLOW: - case UDP_V4_FLOW: - rule->tuples.src_ip[IPV4_INDEX] = - be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); - rule->tuples_mask.src_ip[IPV4_INDEX] = - be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); + be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst, + IPV6_SIZE); - rule->tuples.dst_ip[IPV4_INDEX] = - be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); - rule->tuples_mask.dst_ip[IPV4_INDEX] = - be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); - - rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); - rule->tuples_mask.src_port = - be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); - - rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); - rule->tuples_mask.dst_port = - be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); + rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); - rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; - rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); + rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); - rule->tuples.ether_proto = ETH_P_IP; - rule->tuples_mask.ether_proto = 0xFFFF; + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; - 
break; - case IP_USER_FLOW: - rule->tuples.src_ip[IPV4_INDEX] = - be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); - rule->tuples_mask.src_ip[IPV4_INDEX] = - be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); + rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; - rule->tuples.dst_ip[IPV4_INDEX] = - be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); - rule->tuples_mask.dst_ip[IPV4_INDEX] = - be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); + rule->tuples.ip_proto = ip_proto; + rule->tuples_mask.ip_proto = 0xFF; +} - rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; - rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; +static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src, + IPV6_SIZE); - rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; - rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; + be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst, + IPV6_SIZE); - rule->tuples.ether_proto = ETH_P_IP; - rule->tuples_mask.ether_proto = 0xFFFF; + rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; - break; - case SCTP_V6_FLOW: - case TCP_V6_FLOW: - case UDP_V6_FLOW: - be32_to_cpu_array(rule->tuples.src_ip, - fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE); - be32_to_cpu_array(rule->tuples_mask.src_ip, - fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE); + rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; - be32_to_cpu_array(rule->tuples.dst_ip, - fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); - be32_to_cpu_array(rule->tuples_mask.dst_ip, - fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; +} - rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); - rule->tuples_mask.src_port = - be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); +static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source); + ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source); - rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); - rule->tuples_mask.dst_port = - be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); + ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest); - rule->tuples.ether_proto = ETH_P_IPV6; - rule->tuples_mask.ether_proto = 0xFFFF; + rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto); + rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto); +} +static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info, + struct hclge_fd_rule *rule) +{ + switch (info->layer) { + case HCLGE_FD_USER_DEF_L2: + rule->tuples.l2_user_def = info->data; + rule->tuples_mask.l2_user_def = info->data_mask; break; - case IPV6_USER_FLOW: - be32_to_cpu_array(rule->tuples.src_ip, - fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE); - be32_to_cpu_array(rule->tuples_mask.src_ip, - fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE); - - be32_to_cpu_array(rule->tuples.dst_ip, - fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE); - 
be32_to_cpu_array(rule->tuples_mask.dst_ip, - fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE); - - rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; - rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; - - rule->tuples.ether_proto = ETH_P_IPV6; - rule->tuples_mask.ether_proto = 0xFFFF; - + case HCLGE_FD_USER_DEF_L3: + rule->tuples.l3_user_def = info->data; + rule->tuples_mask.l3_user_def = info->data_mask; break; - case ETHER_FLOW: - ether_addr_copy(rule->tuples.src_mac, - fs->h_u.ether_spec.h_source); - ether_addr_copy(rule->tuples_mask.src_mac, - fs->m_u.ether_spec.h_source); - - ether_addr_copy(rule->tuples.dst_mac, - fs->h_u.ether_spec.h_dest); - ether_addr_copy(rule->tuples_mask.dst_mac, - fs->m_u.ether_spec.h_dest); - - rule->tuples.ether_proto = - be16_to_cpu(fs->h_u.ether_spec.h_proto); - rule->tuples_mask.ether_proto = - be16_to_cpu(fs->m_u.ether_spec.h_proto); - + case HCLGE_FD_USER_DEF_L4: + rule->tuples.l4_user_def = (u32)info->data << 16; + rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16; break; default: - return -EOPNOTSUPP; + break; } + rule->ep.user_def = *info; +} + +static int hclge_fd_get_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, + struct hclge_fd_user_def_info *info) +{ + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + switch (flow_type) { case SCTP_V4_FLOW: - case SCTP_V6_FLOW: - rule->tuples.ip_proto = IPPROTO_SCTP; - rule->tuples_mask.ip_proto = 0xFF; + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP); break; case TCP_V4_FLOW: - case TCP_V6_FLOW: - rule->tuples.ip_proto = IPPROTO_TCP; - rule->tuples_mask.ip_proto = 0xFF; + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP); break; case UDP_V4_FLOW: + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP); + break; + case IP_USER_FLOW: + hclge_fd_get_ip4_tuple(hdev, fs, rule); + break; + case SCTP_V6_FLOW: + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP); + break; + case TCP_V6_FLOW: + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP); + break; case UDP_V6_FLOW: - rule->tuples.ip_proto = IPPROTO_UDP; - rule->tuples_mask.ip_proto = 0xFF; + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP); break; - default: + case IPV6_USER_FLOW: + hclge_fd_get_ip6_tuple(hdev, fs, rule); break; + case ETHER_FLOW: + hclge_fd_get_ether_tuple(hdev, fs, rule); + break; + default: + return -EOPNOTSUPP; } if (fs->flow_type & FLOW_EXT) { rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); + hclge_fd_get_user_def_tuple(info, rule); } if (fs->flow_type & FLOW_MAC_EXT) { @@ -5942,33 +6433,52 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev, return 0; } -/* make sure being called after lock up with fd_rule_lock */ static int hclge_fd_config_rule(struct hclge_dev *hdev, struct hclge_fd_rule *rule) { int ret; - if (!rule) { + ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + if (ret) + return ret; + + return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); +} + +static int hclge_add_fd_entry_common(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + int ret; + + spin_lock_bh(&hdev->fd_rule_lock); + + if (hdev->fd_active_type != rule->rule_type && + (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || + hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) { dev_err(&hdev->pdev->dev, - "The flow director rule is NULL\n"); + "mode conflict(new type %d, active type %d), please delete existent rules first\n", + rule->rule_type, hdev->fd_active_type); + 
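/* ethtool flow-director (EP) rules and tc flower rules share the
+ * stage-1 TCAM, so only one of the two rule types may be active
+ * at a time. */
+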
spin_unlock_bh(&hdev->fd_rule_lock); return -EINVAL; } - /* it will never fail here, so needn't to check return value */ - hclge_fd_update_rule_list(hdev, rule, rule->location, true); + ret = hclge_fd_check_user_def_refcnt(hdev, rule); + if (ret) + goto out; - ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + ret = hclge_clear_arfs_rules(hdev); if (ret) - goto clear_rule; + goto out; - ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); + ret = hclge_fd_config_rule(hdev, rule); if (ret) - goto clear_rule; + goto out; - return 0; + hclge_update_fd_list(hdev, HCLGE_FD_ACTIVE, rule->location, rule); + hdev->fd_active_type = rule->rule_type; -clear_rule: - hclge_fd_update_rule_list(hdev, rule, rule->location, false); +out: + spin_unlock_bh(&hdev->fd_rule_lock); return ret; } @@ -5980,11 +6490,48 @@ static bool hclge_is_cls_flower_active(struct hnae3_handle *handle) return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; } +static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, + u16 *vport_id, u8 *action, u16 *queue_id) +{ + struct hclge_vport *vport = hdev->vport; + + if (ring_cookie == RX_CLS_FLOW_DISC) { + *action = HCLGE_FD_ACTION_DROP_PACKET; + } else { + u32 ring = ethtool_get_flow_spec_ring(ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie); + u16 tqps; + + if (vf > hdev->num_req_vfs) { + dev_err(&hdev->pdev->dev, + "Error: vf id (%u) > max vf num (%u)\n", + vf, hdev->num_req_vfs); + return -EINVAL; + } + + *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; + tqps = hdev->vport[vf].nic.kinfo.num_tqps; + + if (ring >= tqps) { + dev_err(&hdev->pdev->dev, + "Error: queue id (%u) > max tqp num (%u)\n", + ring, tqps - 1); + return -EINVAL; + } + + *action = HCLGE_FD_ACTION_SELECT_QUEUE; + *queue_id = ring; + } + + return 0; +} + static int hclge_add_fd_entry(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + struct hclge_fd_user_def_info info; u16 dst_vport_id = 0, q_index = 0; struct ethtool_rx_flow_spec *fs; struct hclge_fd_rule *rule; @@ -6004,51 +6551,22 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, return -EOPNOTSUPP; } - if (hclge_is_cls_flower_active(handle)) { - dev_err(&hdev->pdev->dev, - "please delete all exist cls flower rules first\n"); - return -EINVAL; - } - fs = (struct ethtool_rx_flow_spec *)&cmd->fs; - ret = hclge_fd_check_spec(hdev, fs, &unused); + ret = hclge_fd_check_spec(hdev, fs, &unused, &info); if (ret) return ret; - if (fs->ring_cookie == RX_CLS_FLOW_DISC) { - action = HCLGE_FD_ACTION_DROP_PACKET; - } else { - u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); - u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); - u16 tqps; - - if (vf > hdev->num_req_vfs) { - dev_err(&hdev->pdev->dev, - "Error: vf id (%u) > max vf num (%u)\n", - vf, hdev->num_req_vfs); - return -EINVAL; - } - - dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; - tqps = vf ? 
hdev->vport[vf].alloc_tqps : vport->alloc_tqps; - - if (ring >= tqps) { - dev_err(&hdev->pdev->dev, - "Error: queue id (%u) > max tqp num (%u)\n", - ring, tqps - 1); - return -EINVAL; - } - - action = HCLGE_FD_ACTION_SELECT_QUEUE; - q_index = ring; - } + ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, + &action, &q_index); + if (ret) + return ret; rule = kzalloc(sizeof(*rule), GFP_KERNEL); if (!rule) return -ENOMEM; - ret = hclge_fd_get_tuple(hdev, fs, rule); + ret = hclge_fd_get_tuple(hdev, fs, rule, &info); if (ret) { kfree(rule); return ret; @@ -6062,15 +6580,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, rule->action = action; rule->rule_type = HCLGE_FD_EP_ACTIVE; - /* to avoid rule conflict, when user configure rule by ethtool, - * we need to clear all arfs rules - */ - spin_lock_bh(&hdev->fd_rule_lock); - hclge_clear_arfs_rules(handle); - - ret = hclge_fd_config_rule(hdev, rule); - - spin_unlock_bh(&hdev->fd_rule_lock); + ret = hclge_add_fd_entry_common(hdev, rule); + if (ret) + kfree(rule); return ret; } @@ -6091,32 +6603,30 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle, if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) return -EINVAL; - if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num || - !hclge_fd_rule_exist(hdev, fs->location)) { + spin_lock_bh(&hdev->fd_rule_lock); + if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || + !test_bit(fs->location, hdev->fd_bmap)) { dev_err(&hdev->pdev->dev, "Delete fail, rule %u is inexistent\n", fs->location); + spin_unlock_bh(&hdev->fd_rule_lock); return -ENOENT; } ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, NULL, false); if (ret) - return ret; + goto out; - spin_lock_bh(&hdev->fd_rule_lock); - ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false); + hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL); +out: spin_unlock_bh(&hdev->fd_rule_lock); - return ret; } -/* make sure being called after lock up with fd_rule_lock */ -static void hclge_del_all_fd_entries(struct hnae3_handle *handle, - bool clear_list) +static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, + bool clear_list) { - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; struct hclge_fd_rule *rule; struct hlist_node *node; u16 location; @@ -6124,6 +6634,8 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, if (!hnae3_dev_fd_supported(hdev)) return; + spin_lock_bh(&hdev->fd_rule_lock); + for_each_set_bit(location, hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, @@ -6140,6 +6652,14 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, bitmap_zero(hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); } + + spin_unlock_bh(&hdev->fd_rule_lock); +} + +static void hclge_del_all_fd_entries(struct hclge_dev *hdev) +{ + hclge_clear_fd_rules_in_list(hdev, true); + hclge_fd_disable_user_def(hdev); } static int hclge_restore_fd_entries(struct hnae3_handle *handle) @@ -6148,7 +6668,6 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle) struct hclge_dev *hdev = vport->back; struct hclge_fd_rule *rule; struct hlist_node *node; - int ret; /* Return ok here, because reset error handling will check this * return value. 
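* (After this change the entries are only marked HCLGE_FD_TO_ADD
* here; the periodic service task reprograms them into hardware.)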
If error is returned here, the reset process will @@ -6163,25 +6682,11 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle) spin_lock_bh(&hdev->fd_rule_lock); hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { - ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); - if (!ret) - ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); - - if (ret) { - dev_warn(&hdev->pdev->dev, - "Restore rule %u failed, remove it\n", - rule->location); - clear_bit(rule->location, hdev->fd_bmap); - hlist_del(&rule->rule_node); - kfree(rule); - hdev->hclge_fd_rule_num--; - } + if (rule->state == HCLGE_FD_ACTIVE) + rule->state = HCLGE_FD_TO_ADD; } - - if (hdev->hclge_fd_rule_num) - hdev->fd_active_type = HCLGE_FD_EP_ACTIVE; - spin_unlock_bh(&hdev->fd_rule_lock); + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); return 0; } @@ -6269,6 +6774,10 @@ static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, IPV6_SIZE); + spec->tclass = rule->tuples.ip_tos; + spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + spec->psrc = cpu_to_be16(rule->tuples.src_port); spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? 0 : cpu_to_be16(rule->tuples_mask.src_port); @@ -6296,6 +6805,10 @@ static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, IPV6_SIZE); + spec->tclass = rule->tuples.ip_tos; + spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + spec->l4_proto = rule->tuples.ip_proto; spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 0 : rule->tuples_mask.ip_proto; @@ -6323,6 +6836,24 @@ static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule, 0 : cpu_to_be16(rule->tuples_mask.ether_proto); } +static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) == + HCLGE_FD_TUPLE_USER_DEF_TUPLES) { + fs->h_ext.data[0] = 0; + fs->h_ext.data[1] = 0; + fs->m_ext.data[0] = 0; + fs->m_ext.data[1] = 0; + } else { + fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset); + fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data); + fs->m_ext.data[0] = + cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK); + fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask); + } +} + static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, struct hclge_fd_rule *rule) { @@ -6331,6 +6862,8 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, fs->m_ext.vlan_tci = rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 
0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); + + hclge_fd_get_user_def_info(fs, rule); } if (fs->flow_type & FLOW_MAC_EXT) { @@ -6442,6 +6975,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle, return -EMSGSIZE; } + if (rule->state == HCLGE_FD_TO_DEL) + continue; + rule_locs[cnt] = rule->location; cnt++; } @@ -6523,9 +7059,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, struct hclge_fd_rule_tuples new_tuples = {}; struct hclge_dev *hdev = vport->back; struct hclge_fd_rule *rule; - u16 tmp_queue_id; u16 bit_id; - int ret; if (!hnae3_dev_fd_supported(hdev)) return -EOPNOTSUPP; @@ -6561,34 +7095,20 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, return -ENOMEM; } - set_bit(bit_id, hdev->fd_bmap); rule->location = bit_id; rule->arfs.flow_id = flow_id; rule->queue_id = queue_id; hclge_fd_build_arfs_rule(&new_tuples, rule); - ret = hclge_fd_config_rule(hdev, rule); - - spin_unlock_bh(&hdev->fd_rule_lock); - - if (ret) - return ret; - - return rule->location; + hclge_update_fd_list(hdev, HCLGE_FD_TO_ADD, rule->location, + rule); + hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE; + } else if (rule->queue_id != queue_id) { + rule->queue_id = queue_id; + rule->state = HCLGE_FD_TO_ADD; + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + hclge_task_schedule(hdev, 0); } - spin_unlock_bh(&hdev->fd_rule_lock); - - if (rule->queue_id == queue_id) - return rule->location; - - tmp_queue_id = rule->queue_id; - rule->queue_id = queue_id; - ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); - if (ret) { - rule->queue_id = tmp_queue_id; - return ret; - } - return rule->location; } @@ -6598,7 +7118,6 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev) struct hnae3_handle *handle = &hdev->vport[0].nic; struct hclge_fd_rule *rule; struct hlist_node *node; - HLIST_HEAD(del_list); spin_lock_bh(&hdev->fd_rule_lock); if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { @@ -6606,33 +7125,50 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev) return; } hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (rule->state != HCLGE_FD_ACTIVE) + continue; if (rps_may_expire_flow(handle->netdev, rule->queue_id, rule->arfs.flow_id, rule->location)) { - hlist_del_init(&rule->rule_node); - hlist_add_head(&rule->rule_node, &del_list); - hdev->hclge_fd_rule_num--; - clear_bit(rule->location, hdev->fd_bmap); + rule->state = HCLGE_FD_TO_DEL; + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); } } spin_unlock_bh(&hdev->fd_rule_lock); - - hlist_for_each_entry_safe(rule, node, &del_list, rule_node) { - hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, - rule->location, NULL, false); - kfree(rule); - } #endif } /* make sure being called after lock up with fd_rule_lock */ -static void hclge_clear_arfs_rules(struct hnae3_handle *handle) +static int hclge_clear_arfs_rules(struct hclge_dev *hdev) { #ifdef CONFIG_RFS_ACCEL - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node; + int ret; - if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE) - hclge_del_all_fd_entries(handle, true); + if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) + return 0; + + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + switch (rule->state) { + case HCLGE_FD_TO_DEL: + case HCLGE_FD_ACTIVE: + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + if (ret) + return ret; + 
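/* the TCAM entry is invalidated above; fall through to drop
+ * the rule node from the software list as well. */
+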
fallthrough; + case HCLGE_FD_TO_ADD: + hclge_fd_dec_rule_cnt(hdev, rule->location); + hlist_del(&rule->rule_node); + kfree(rule); + break; + default: + break; + } + } + hclge_sync_fd_state(hdev); + + return 0; #endif } @@ -6815,12 +7351,6 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle, struct hclge_fd_rule *rule; int ret; - if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { - dev_err(&hdev->pdev->dev, - "please remove all exist fd rules via ethtool first\n"); - return -EINVAL; - } - ret = hclge_check_cls_flower(hdev, cls_flower, tc); if (ret) { dev_err(&hdev->pdev->dev, @@ -6833,8 +7363,10 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle, return -ENOMEM; ret = hclge_parse_cls_flower(hdev, cls_flower, rule); - if (ret) - goto err; + if (ret) { + kfree(rule); + return ret; + } rule->action = HCLGE_FD_ACTION_SELECT_TC; rule->cls_flower.tc = tc; @@ -6843,22 +7375,10 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle, rule->cls_flower.cookie = cls_flower->cookie; rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; - spin_lock_bh(&hdev->fd_rule_lock); - hclge_clear_arfs_rules(handle); - - ret = hclge_fd_config_rule(hdev, rule); - - spin_unlock_bh(&hdev->fd_rule_lock); - - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to add cls flower rule, ret = %d\n", ret); - goto err; - } + ret = hclge_add_fd_entry_common(hdev, rule); + if (ret) + kfree(rule); - return 0; -err: - kfree(rule); return ret; } @@ -6895,25 +7415,66 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle, ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, NULL, false); if (ret) { - dev_err(&hdev->pdev->dev, - "failed to delete cls flower rule %u, ret = %d\n", - rule->location, ret); spin_unlock_bh(&hdev->fd_rule_lock); return ret; } - ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to delete cls flower rule %u in list, ret = %d\n", - rule->location, ret); - spin_unlock_bh(&hdev->fd_rule_lock); - return ret; + hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL); + spin_unlock_bh(&hdev->fd_rule_lock); + + return 0; +} + +static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + int ret = 0; + + if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state)) + return; + + spin_lock_bh(&hdev->fd_rule_lock); + + hlist_for_each_entry_safe(rule, node, hlist, rule_node) { + switch (rule->state) { + case HCLGE_FD_TO_ADD: + ret = hclge_fd_config_rule(hdev, rule); + if (ret) + goto out; + rule->state = HCLGE_FD_ACTIVE; + break; + case HCLGE_FD_TO_DEL: + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + if (ret) + goto out; + hclge_fd_dec_rule_cnt(hdev, rule->location); + hclge_fd_free_node(hdev, rule); + break; + default: + break; + } } +out: + if (ret) + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + spin_unlock_bh(&hdev->fd_rule_lock); +} - return 0; +static void hclge_sync_fd_table(struct hclge_dev *hdev) +{ + if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { + bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; + + hclge_clear_fd_rules_in_list(hdev, clear_list); + } + + hclge_sync_fd_user_def_cfg(hdev, false); + + hclge_sync_fd_list(hdev, &hdev->fd_rule_list); } static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) @@ -6953,18 +7514,15 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) { struct 
hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - bool clear; hdev->fd_en = enable; - clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; - if (!enable) { - spin_lock_bh(&hdev->fd_rule_lock); - hclge_del_all_fd_entries(handle, clear); - spin_unlock_bh(&hdev->fd_rule_lock); - } else { + if (!enable) + set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state); + else hclge_restore_fd_entries(handle); - } + + hclge_task_schedule(hdev, 0); } static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) @@ -7125,19 +7683,19 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) return ret; } -static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en, +static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, enum hnae3_loop loop_mode) { -#define HCLGE_SERDES_RETRY_MS 10 -#define HCLGE_SERDES_RETRY_NUM 100 +#define HCLGE_COMMON_LB_RETRY_MS 10 +#define HCLGE_COMMON_LB_RETRY_NUM 100 - struct hclge_serdes_lb_cmd *req; + struct hclge_common_lb_cmd *req; struct hclge_desc desc; int ret, i = 0; u8 loop_mode_b; - req = (struct hclge_serdes_lb_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); + req = (struct hclge_common_lb_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false); switch (loop_mode) { case HNAE3_LOOP_SERIAL_SERDES: @@ -7146,9 +7704,12 @@ static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en, case HNAE3_LOOP_PARALLEL_SERDES: loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; break; + case HNAE3_LOOP_PHY: + loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B; + break; default: dev_err(&hdev->pdev->dev, - "unsupported serdes loopback mode %d\n", loop_mode); + "unsupported common loopback mode %d\n", loop_mode); return -ENOTSUPP; } @@ -7162,39 +7723,39 @@ static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en, ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, - "serdes loopback set fail, ret = %d\n", ret); + "common loopback set fail, ret = %d\n", ret); return ret; } do { - msleep(HCLGE_SERDES_RETRY_MS); - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, + msleep(HCLGE_COMMON_LB_RETRY_MS); + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, - "serdes loopback get, ret = %d\n", ret); + "common loopback get, ret = %d\n", ret); return ret; } - } while (++i < HCLGE_SERDES_RETRY_NUM && - !(req->result & HCLGE_CMD_SERDES_DONE_B)); + } while (++i < HCLGE_COMMON_LB_RETRY_NUM && + !(req->result & HCLGE_CMD_COMMON_LB_DONE_B)); - if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) { - dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); + if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) { + dev_err(&hdev->pdev->dev, "common loopback set timeout\n"); return -EBUSY; - } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) { - dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); + } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) { + dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n"); return -EIO; } return ret; } -static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, +static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, enum hnae3_loop loop_mode) { int ret; - ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode); + ret = hclge_cfg_common_loopback(hdev, en, loop_mode); if (ret) return ret; @@ -7243,8 +7804,12 @@ static int 
hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) struct phy_device *phydev = hdev->hw.mac.phydev; int ret; - if (!phydev) + if (!phydev) { + if (hnae3_dev_phy_imp_supported(hdev)) + return hclge_set_common_loopback(hdev, en, + HNAE3_LOOP_PHY); return -ENOTSUPP; + } if (en) ret = hclge_enable_phy_loopback(hdev, phydev); @@ -7315,7 +7880,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle, break; case HNAE3_LOOP_SERIAL_SERDES: case HNAE3_LOOP_PARALLEL_SERDES: - ret = hclge_set_serdes_loopback(hdev, en, loop_mode); + ret = hclge_set_common_loopback(hdev, en, loop_mode); break; case HNAE3_LOOP_PHY: ret = hclge_set_phy_loopback(hdev, en); @@ -7348,11 +7913,11 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev) if (ret) return ret; - ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); + ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); if (ret) return ret; - return hclge_cfg_serdes_loopback(hdev, false, + return hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_PARALLEL_SERDES); } @@ -7428,7 +7993,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle) set_bit(HCLGE_STATE_DOWN, &hdev->state); spin_lock_bh(&hdev->fd_rule_lock); - hclge_clear_arfs_rules(handle); + hclge_clear_arfs_rules(hdev); spin_unlock_bh(&hdev->fd_rule_lock); /* If it is not PF reset, the firmware will disable the MAC, @@ -8759,6 +9324,29 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, return 0; } +static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + + if (!hnae3_dev_phy_imp_supported(hdev)) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hdev->hw.mac.phy_addr; + /* this command reads phy id and register at the same time */ + fallthrough; + case SIOCGMIIREG: + data->val_out = hclge_read_phy_reg(hdev, data->reg_num); + return 0; + + case SIOCSMIIREG: + return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); + default: + return -EOPNOTSUPP; + } +} + static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, int cmd) { @@ -8766,7 +9354,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, struct hclge_dev *hdev = vport->back; if (!hdev->hw.mac.phydev) - return -EOPNOTSUPP; + return hclge_mii_ioctl(hdev, ifr, cmd); return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); } @@ -9441,7 +10029,7 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev) hclge_restore_mac_table_common(vport); hclge_restore_vport_vlan_table(vport); set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); - + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); hclge_restore_fd_entries(handle); } @@ -10013,9 +10601,10 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct phy_device *phydev = hdev->hw.mac.phydev; + u8 media_type = hdev->hw.mac.media_type; - *auto_neg = phydev ? hclge_get_autoneg(handle) : 0; + *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ? 
+ hclge_get_autoneg(handle) : 0; if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { *rx_en = 0; @@ -10061,7 +10650,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, struct phy_device *phydev = hdev->hw.mac.phydev; u32 fc_autoneg; - if (phydev) { + if (phydev || hnae3_dev_phy_imp_supported(hdev)) { fc_autoneg = hclge_get_autoneg(handle); if (auto_neg != fc_autoneg) { dev_info(&hdev->pdev->dev, @@ -10080,7 +10669,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, hclge_record_user_pauseparam(hdev, rx_en, tx_en); - if (!auto_neg) + if (!auto_neg || hnae3_dev_phy_imp_supported(hdev)) return hclge_cfg_pauseparam(hdev, rx_en, tx_en); if (phydev) @@ -10638,7 +11227,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) if (ret) goto err_msi_irq_uninit; - if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { + if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER && + !hnae3_dev_phy_imp_supported(hdev)) { ret = hclge_mac_mdio_config(hdev); if (ret) goto err_msi_irq_uninit; @@ -11031,6 +11621,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = hclge_tp_port_init(hdev); + if (ret) { + dev_err(&pdev->dev, "failed to init tp port, ret = %d\n", + ret); + return ret; + } + ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); if (ret) { dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); @@ -11121,6 +11718,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_misc_affinity_teardown(hdev); hclge_state_uninit(hdev); hclge_uninit_mac_table(hdev); + hclge_del_all_fd_entries(hdev); if (mac->phydev) mdiobus_unregister(mac->mdio_bus); @@ -11944,7 +12542,6 @@ static const struct hnae3_ae_ops hclge_ops = { .get_link_mode = hclge_get_link_mode, .add_fd_entry = hclge_add_fd_entry, .del_fd_entry = hclge_del_fd_entry, - .del_all_fd_entries = hclge_del_all_fd_entries, .get_fd_rule_cnt = hclge_get_fd_rule_cnt, .get_fd_rule_info = hclge_get_fd_rule_info, .get_fd_all_rules = hclge_get_all_rules, @@ -11972,6 +12569,8 @@ static const struct hnae3_ae_ops hclge_ops = { .add_cls_flower = hclge_add_cls_flower, .del_cls_flower = hclge_del_cls_flower, .cls_flower_active = hclge_is_cls_flower_active, + .get_phy_link_ksettings = hclge_get_phy_link_ksettings, + .set_phy_link_ksettings = hclge_set_phy_link_ksettings, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 19d7f28773f3..97e77e2f7539 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -223,6 +223,9 @@ enum HCLGE_DEV_STATE { HCLGE_STATE_LINK_UPDATING, HCLGE_STATE_PROMISC_CHANGED, HCLGE_STATE_RST_FAIL, + HCLGE_STATE_FD_TBL_CHANGED, + HCLGE_STATE_FD_CLEAR_ALL, + HCLGE_STATE_FD_USER_DEF_CHANGED, HCLGE_STATE_MAX }; @@ -536,6 +539,9 @@ enum HCLGE_FD_TUPLE { MAX_TUPLE, }; +#define HCLGE_FD_TUPLE_USER_DEF_TUPLES \ + (BIT(INNER_L2_RSV) | BIT(INNER_L3_RSV) | BIT(INNER_L4_RSV)) + enum HCLGE_FD_META_DATA { PACKET_TYPE_ID, IP_FRAGEMENT, @@ -548,9 +554,21 @@ enum HCLGE_FD_META_DATA { MAX_META_DATA, }; +enum HCLGE_FD_KEY_OPT { + KEY_OPT_U8, + KEY_OPT_LE16, + KEY_OPT_LE32, + KEY_OPT_MAC, + KEY_OPT_IP, + KEY_OPT_VNI, +}; + struct key_info { u8 key_type; u8 key_length; /* use bit as unit */ + enum HCLGE_FD_KEY_OPT key_opt; + int offset; + int moffset; }; #define MAX_KEY_LENGTH 400 @@ -558,6 +576,11 @@ struct key_info { #define MAX_KEY_BYTES 
(MAX_KEY_DWORDS * 4) #define MAX_META_DATA_LENGTH 32 +#define HCLGE_FD_MAX_USER_DEF_OFFSET 9000 +#define HCLGE_FD_USER_DEF_DATA GENMASK(15, 0) +#define HCLGE_FD_USER_DEF_OFFSET GENMASK(15, 0) +#define HCLGE_FD_USER_DEF_OFFSET_UNMASK GENMASK(15, 0) + /* assigned by firmware, the real filter number for each pf may be less */ #define MAX_FD_FILTER_NUM 4096 #define HCLGE_ARFS_EXPIRE_INTERVAL 5UL @@ -580,6 +603,33 @@ enum HCLGE_FD_ACTION { HCLGE_FD_ACTION_SELECT_TC, }; +enum HCLGE_FD_NODE_STATE { + HCLGE_FD_TO_ADD, + HCLGE_FD_TO_DEL, + HCLGE_FD_ACTIVE, + HCLGE_FD_DELETED, +}; + +enum HCLGE_FD_USER_DEF_LAYER { + HCLGE_FD_USER_DEF_NONE, + HCLGE_FD_USER_DEF_L2, + HCLGE_FD_USER_DEF_L3, + HCLGE_FD_USER_DEF_L4, +}; + +#define HCLGE_FD_USER_DEF_LAYER_NUM 3 +struct hclge_fd_user_def_cfg { + u16 ref_cnt; + u16 offset; +}; + +struct hclge_fd_user_def_info { + enum HCLGE_FD_USER_DEF_LAYER layer; + u16 data; + u16 data_mask; + u16 offset; +}; + struct hclge_fd_key_cfg { u8 key_sel; u8 inner_sipv6_word_en; @@ -596,6 +646,7 @@ struct hclge_fd_cfg { u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */ u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */ struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM]; + struct hclge_fd_user_def_cfg user_def_cfg[HCLGE_FD_USER_DEF_LAYER_NUM]; }; #define IPV4_INDEX 3 @@ -612,6 +663,9 @@ struct hclge_fd_rule_tuples { u16 dst_port; u16 vlan_tag1; u16 ether_proto; + u16 l2_user_def; + u16 l3_user_def; + u32 l4_user_def; u8 ip_tos; u8 ip_proto; }; @@ -630,11 +684,15 @@ struct hclge_fd_rule { struct { u16 flow_id; /* only used for arfs */ } arfs; + struct { + struct hclge_fd_user_def_info user_def; + } ep; }; u16 queue_id; u16 vf_id; u16 location; enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type; + enum HCLGE_FD_NODE_STATE state; u8 action; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index e89820702540..08e88d9422cd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -268,3 +268,42 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev) phy_stop(phydev); } + +u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr) +{ + struct hclge_phy_reg_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, true); + + req = (struct hclge_phy_reg_cmd *)desc.data; + req->reg_addr = cpu_to_le16(reg_addr); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to read phy reg, ret = %d.\n", ret); + + return le16_to_cpu(req->reg_val); +} + +int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val) +{ + struct hclge_phy_reg_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, false); + + req = (struct hclge_phy_reg_cmd *)desc.data; + req->reg_addr = cpu_to_le16(reg_addr); + req->reg_val = cpu_to_le16(val); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to write phy reg, ret = %d.\n", ret); + + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index dd9a1218a7b0..fd0e20190b90 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -9,5 +9,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle); void hclge_mac_disconnect_phy(struct hnae3_handle *handle); void 
hclge_mac_start_phy(struct hclge_dev *hdev); void hclge_mac_stop_phy(struct hclge_dev *hdev); +u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr); +int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val); #endif diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 883d0d7c6858..3e54017a2a5b 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -279,7 +279,7 @@ static int hns_mdio_write(struct mii_bus *bus, static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum) { int ret; - u16 reg_val = 0; + u16 reg_val; u8 devad = ((regnum >> 16) & 0x1f); u8 is_c45 = !!(regnum & MII_ADDR_C45); u16 reg = (u16)(regnum & 0xffff); @@ -420,7 +420,7 @@ static int hns_mdio_probe(struct platform_device *pdev) { struct hns_mdio_device *mdio_dev; struct mii_bus *new_bus; - int ret = -ENODEV; + int ret; if (!pdev) { dev_err(NULL, "pdev is NULL!\r\n"); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index c340d9acba80..dc024ef521c0 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -34,7 +34,7 @@ #include "hinic_rx.h" #include "hinic_dev.h" -#define SET_LINK_STR_MAX_LEN 128 +#define SET_LINK_STR_MAX_LEN 16 #define GET_SUPPORTED_MODE 0 #define GET_ADVERTISED_MODE 1 @@ -462,24 +462,19 @@ static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev, { struct hinic_link_ksettings_info settings = {0}; char set_link_str[SET_LINK_STR_MAX_LEN] = {0}; + const char *autoneg_str; struct net_device *netdev = nic_dev->netdev; enum nic_speed_level speed_level = 0; int err; - err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, "%s", - (set_settings & HILINK_LINK_SET_AUTONEG) ? - (autoneg ? "autong enable " : "autong disable ") : ""); - if (err < 0 || err >= SET_LINK_STR_MAX_LEN) { - netif_err(nic_dev, drv, netdev, "Failed to snprintf link state, function return(%d) and dest_len(%d)\n", - err, SET_LINK_STR_MAX_LEN); - return -EFAULT; - } + autoneg_str = (set_settings & HILINK_LINK_SET_AUTONEG) ? + (autoneg ? 
"autong enable " : "autong disable ") : ""; if (set_settings & HILINK_LINK_SET_SPEED) { speed_level = hinic_ethtool_to_hw_speed_level(speed); err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, - "%sspeed %d ", set_link_str, speed); - if (err <= 0 || err >= SET_LINK_STR_MAX_LEN) { + "speed %d ", speed); + if (err >= SET_LINK_STR_MAX_LEN) { netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n", err, SET_LINK_STR_MAX_LEN); return -EFAULT; @@ -494,11 +489,11 @@ static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev, err = hinic_set_link_settings(nic_dev->hwdev, &settings); if (err != HINIC_MGMT_CMD_UNSUPPORTED) { if (err) - netif_err(nic_dev, drv, netdev, "Set %s failed\n", - set_link_str); + netif_err(nic_dev, drv, netdev, "Set %s%sfailed\n", + autoneg_str, set_link_str); else - netif_info(nic_dev, drv, netdev, "Set %s successfully\n", - set_link_str); + netif_info(nic_dev, drv, netdev, "Set %s%ssuccessfully\n", + autoneg_str, set_link_str); return err; } @@ -543,8 +538,8 @@ static void hinic_get_drvinfo(struct net_device *netdev, struct hinic_hwif *hwif = hwdev->hwif; int err; - strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver)); - strlcpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info)); + strscpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info)); err = hinic_get_mgmt_version(nic_dev, mgmt_ver); if (err) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c index 4e4029d5c8e1..06586173add7 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c @@ -629,10 +629,8 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, &cmd_paddr, GFP_KERNEL); - if (!cmd_vaddr) { - dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); + if (!cmd_vaddr) return -ENOMEM; - } cell_ctxt = &chain->cell_ctxt[cell_idx]; @@ -679,10 +677,8 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr, GFP_KERNEL); - if (!node) { - dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); + if (!node) return -ENOMEM; - } node->read.hw_wb_resp_paddr = 0; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c index efbaed389440..cab38ff0713c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c @@ -334,7 +334,7 @@ static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx, } /** - * dma_attr_table_init - initialize the the default dma attributes + * dma_attr_table_init - initialize the default dma attributes * @hwif: the HW interface of a pci function device **/ static void dma_attr_init(struct hinic_hwif *hwif) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index 819fa13034c0..817173f1fbb7 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -440,18 +440,14 @@ static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, struct hinic_recv_msg *recv_msg) { struct hinic_mgmt_msg_handle_work *mgmt_work = NULL; - struct pci_dev *pdev = pf_to_mgmt->hwif->pdev; mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL); - if 
(!mgmt_work) { - dev_err(&pdev->dev, "Allocate mgmt work memory failed\n"); + if (!mgmt_work) return; - } if (recv_msg->msg_len) { mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL); if (!mgmt_work->msg) { - dev_err(&pdev->dev, "Allocate mgmt msg memory failed\n"); kfree(mgmt_work); return; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c index fcf7bfe4aa47..dcba4d009bad 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c @@ -414,7 +414,6 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size, &rq->pi_dma_addr, GFP_KERNEL); if (!rq->pi_virt_addr) { - dev_err(&pdev->dev, "Failed to allocate PI address\n"); err = -ENOMEM; goto err_pi_virt; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index 070a7cc6392e..cce08647b9b2 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -137,10 +137,8 @@ static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq, int err; skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz); - if (!skb) { - netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n"); + if (!skb) return NULL; - } addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz, DMA_FROM_DEVICE); @@ -212,10 +210,8 @@ static int rx_alloc_pkts(struct hinic_rxq *rxq) for (i = 0; i < free_wqebbs; i++) { skb = rx_alloc_skb(rxq, &dma_addr); - if (!skb) { - netdev_err(rxq->netdev, "Failed to alloc Rx skb\n"); + if (!skb) goto skb_out; - } hinic_set_sge(&sge, dma_addr, skb->len); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 8da7d46363b2..710c4ff7bc0e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -377,6 +377,7 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info, } else if (ip.v4->version == 6) { unsigned char *exthdr; __be16 frag_off; + l3_type = IPV6_PKT; tunnel_type = TUNNEL_UDP_CSUM; exthdr = ip.hdr + sizeof(*ip.v6); diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 4c0c9433bd60..19cf36360933 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -1183,6 +1183,7 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) break; case e1000_ms_auto: phy_data &= ~CR_1000T_MS_ENABLE; + break; default: break; } diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 88faf05e23ba..0b1e890dd583 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -899,6 +899,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) } else { data &= ~IGP02E1000_PM_D0_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. 
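* (LPLU = Low Power Link Up: the PHY renegotiates a slower link
* to reduce power in low-power states.)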
During driver activity we should enable diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 69a2329ea463..db79c4e6413e 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 1999 - 2018 Intel Corporation. */ -#ifndef _E1000_HW_H_ -#define _E1000_HW_H_ +#ifndef _E1000E_HW_H_ +#define _E1000E_HW_H_ #include "regs.h" #include "defines.h" @@ -714,4 +714,4 @@ struct e1000_hw { #include "80003es2lan.h" #include "ich8lan.h" -#endif +#endif /* _E1000E_HW_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 0ac8d79a7987..590ad110d383 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -2745,7 +2745,7 @@ release: } /** - * e1000_k1_gig_workaround_lv - K1 Si workaround + * e1000_k1_workaround_lv - K1 Si workaround * @hw: pointer to the HW structure * * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps @@ -5220,7 +5220,7 @@ void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, } /** - * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 + * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 * @hw: pointer to the HW structure * * Workaround for 82566 power-down on D3 entry: diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index e9b82c209c2d..88e9035b75cf 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -25,6 +25,7 @@ #include <linux/pm_runtime.h> #include <linux/aer.h> #include <linux/prefetch.h> +#include <linux/suspend.h> #include "e1000.h" @@ -5974,19 +5975,23 @@ static void e1000_reset_task(struct work_struct *work) struct e1000_adapter *adapter; adapter = container_of(work, struct e1000_adapter, reset_task); + rtnl_lock(); /* don't run the task if already down */ - if (test_bit(__E1000_DOWN, &adapter->state)) + if (test_bit(__E1000_DOWN, &adapter->state)) { + rtnl_unlock(); return; + } if (!(adapter->flags & FLAG_RESTART_NOW)) { e1000e_dump(adapter); e_err("Reset adapter unexpectedly\n"); } e1000e_reinit_locked(adapter); + rtnl_unlock(); } /** - * e1000_get_stats64 - Get System Network Statistics + * e1000e_get_stats64 - Get System Network Statistics * @netdev: network interface device structure * @stats: rtnl_link_stats64 pointer * @@ -6159,7 +6164,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, } /** - * e1000e_hwtstamp_ioctl - control hardware time stamping + * e1000e_hwtstamp_set - control hardware time stamping * @netdev: network interface device structure * @ifr: interface request * @@ -6817,7 +6822,7 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) } /** - * e1000e_disable_aspm_locked Disable ASPM states. + * e1000e_disable_aspm_locked - Disable ASPM states. 
* @pdev: pointer to PCI device struct * @state: bit-mask of ASPM states to disable * @@ -6918,6 +6923,12 @@ static int __e1000_resume(struct pci_dev *pdev) return 0; } +static __maybe_unused int e1000e_pm_prepare(struct device *dev) +{ + return pm_runtime_suspended(dev) && + pm_suspend_via_firmware(); +} + static __maybe_unused int e1000e_pm_suspend(struct device *dev) { struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); @@ -7626,9 +7637,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e1000_print_device_info(adapter); - dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE); - if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp) + if (pci_dev_run_wake(pdev) && hw->mac.type != e1000_pch_cnp) pm_runtime_put_noidle(&pdev->dev); return 0; @@ -7851,6 +7862,7 @@ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); static const struct dev_pm_ops e1000_pm_ops = { #ifdef CONFIG_PM_SLEEP + .prepare = e1000e_pm_prepare, .suspend = e1000e_pm_suspend, .resume = e1000e_pm_resume, .freeze = e1000e_pm_freeze, diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index bdd9dc163f15..1db35b2c7750 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -371,7 +371,7 @@ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) } /** - * e1000e_write_phy_reg_igp - Write igp PHY register + * __e1000e_write_phy_reg_igp - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index f3f671311855..9e79d672f4f1 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -142,7 +142,7 @@ static int e1000e_phc_get_syncdevicetime(ktime_t *device, } /** - * e1000e_phc_getsynctime - Reads the current system/device cross timestamp + * e1000e_phc_getcrosststamp - Reads the current system/device cross timestamp * @ptp: ptp clock structure * @xtstamp: structure containing timestamp * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index c45315472245..86397c564dfc 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -105,7 +105,7 @@ static int fm10k_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) } /** - * fm10k_dcbnl_ieee_getdcbx - get the DCBX configuration for the device + * fm10k_dcbnl_getdcbx - get the DCBX configuration for the device * @dev: netdev interface for the device * * Returns that we support only IEEE DCB for this interface @@ -116,7 +116,7 @@ static u8 fm10k_dcbnl_getdcbx(struct net_device __always_unused *dev) } /** - * fm10k_dcbnl_ieee_setdcbx - get the DCBX configuration for the device + * fm10k_dcbnl_setdcbx - get the DCBX configuration for the device * @dev: netdev interface for the device * @mode: new mode for this device * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 1d27b2fb23af..5c77054d67c6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -185,7 +185,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) } /** - * fm10k_dbg_free_q_vector_dir - setup debugfs for the 
q_vectors + * fm10k_dbg_q_vector_exit - setup debugfs for the q_vectors * @q_vector: q_vector to allocate directories for **/ void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 247f44f4cb30..3362f26d7f99 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1774,7 +1774,7 @@ static void fm10k_free_q_vectors(struct fm10k_intfc *interface) } /** - * f10k_reset_msix_capability - reset MSI-X capability + * fm10k_reset_msix_capability - reset MSI-X capability * @interface: board private structure to initialize * * Reset the MSI-X capability back to its starting state @@ -1787,7 +1787,7 @@ static void fm10k_reset_msix_capability(struct fm10k_intfc *interface) } /** - * f10k_init_msix_capability - configure MSI-X capability + * fm10k_init_msix_capability - configure MSI-X capability * @interface: board private structure to initialize * * Attempt to configure the interrupts using the best available diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 8e2e92bf3cd4..30ca9ee1900b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -692,7 +692,7 @@ static bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx) } /** - * fm10k_mbx_deqeueue_rx - Dequeues the message from the head in the Rx FIFO + * fm10k_mbx_dequeue_rx - Dequeues the message from the head in the Rx FIFO * @hw: pointer to hardware structure * @mbx: pointer to mailbox * @@ -1039,6 +1039,7 @@ static s32 fm10k_mbx_create_reply(struct fm10k_hw *hw, case FM10K_STATE_CLOSED: /* generate new header based on data */ fm10k_mbx_create_disconnect_hdr(mbx); + break; default: break; } @@ -2017,6 +2018,7 @@ static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, case FM10K_STATE_CONNECT: /* Update remote value to match local value */ mbx->remote = mbx->local; + break; default: break; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index c0780c3624c8..af1b0cde3670 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1417,7 +1417,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, } /** - * fm10k_update_stats_hw_pf - Updates hardware related statistics of PF + * fm10k_update_hw_stats_pf - Updates hardware related statistics of PF * @hw: pointer to hardware structure * @stats: pointer to the stats structure to update * diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index ec19e18305ec..41b813fe07a5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2332,7 +2332,7 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, } /** - * i40e_get_vsi_params - get VSI configuration info + * i40e_aq_get_vsi_params - get VSI configuration info * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL @@ -2586,7 +2586,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) } /** - * i40e_updatelink_status - update status of the HW network link + * i40e_update_link_info - update status of the HW network link * @hw: pointer to the hw struct **/ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw 
*hw) @@ -5059,7 +5059,7 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) } /** - * i40e_blink_phy_led + * i40e_blink_phy_link_led * @hw: pointer to the HW structure * @time: time how long led will blinks in secs * @interval: gap between LED on and off in msecs diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 243b0d2b7b72..673f341f4c0c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -234,7 +234,7 @@ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv, } /** - * i40e_parse_ieee_etsrec_tlv + * i40e_parse_ieee_tlv * @tlv: IEEE 802.1Qaz TLV * @dcbcfg: Local store to update ETS REC data * @@ -1588,7 +1588,7 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share, } /** - * i40e_dcb_hw_rx_ets_bw_config + * i40e_dcb_hw_rx_up2tc_config * @hw: pointer to the hw struct * @prio_tc: priority to tc assignment indexed by priority * diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 0345132a0ef5..e32c61909b31 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -392,7 +392,7 @@ static void i40e_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, } /** - * i40e_dcbnl_set_pg_tc_cfg_tx - Set CEE PG Tx BW config + * i40e_dcbnl_set_pg_bwg_cfg_tx - Set CEE PG Tx BW config * @netdev: the corresponding netdev * @pgid: the corresponding traffic class * @bw_pct: the BW percentage for the specified traffic class diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c index 5e08f100c413..e1069ae658ad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c @@ -77,7 +77,7 @@ static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new, } /** - * i40e_ddp_does_profiles_ - checks if DDP overlaps with existing one. + * i40e_ddp_does_profile_overlap - checks if DDP overlaps with existing one. 
* @hw: HW data structure * @pinfo: DDP profile information structure * diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index d7c13ca9be7d..e8230da29f05 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -651,7 +651,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf) } /** - * i40e_dbg_dump_stats - handles dump stats write into command datum + * i40e_dbg_dump_eth_stats - handles dump stats write into command datum * @pf: the i40e_pf created in command write * @estats: the eth stats structure to be dumped **/ @@ -1638,7 +1638,7 @@ static const struct file_operations i40e_dbg_command_fops = { static char i40e_dbg_netdev_ops_buf[256] = ""; /** - * i40e_dbg_netdev_ops - read for netdev_ops datum + * i40e_dbg_netdev_ops_read - read for netdev_ops datum * @filp: the opened file * @buffer: where to write the data for the user to read * @count: the size of the user's buffer diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index c70dec65a572..c4c167650b6b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -212,7 +212,7 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], } /** - * 40e_add_stat_strings - copy stat strings into ethtool buffer + * i40e_add_stat_strings - copy stat strings into ethtool buffer * @p: ethtool supplied buffer * @stats: stat definitions array * @@ -2368,21 +2368,15 @@ static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - char *p = (char *)data; unsigned int i; + u8 *p = data; - for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40e_gstrings_priv_flags[i].flag_string); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) + ethtool_sprintf(&p, i40e_gstrings_priv_flags[i].flag_string); if (pf->hw.pf_id != 0) return; - for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40e_gl_gstrings_priv_flags[i].flag_string); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) + ethtool_sprintf(&p, i40e_gl_gstrings_priv_flags[i].flag_string); } static void i40e_get_strings(struct net_device *netdev, u32 stringset, diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index a3da422ab05b..d6e92ecddfbd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -511,7 +511,7 @@ configure_lan_hmc_out: } /** - * i40e_delete_hmc_object - remove hmc objects + * i40e_delete_lan_hmc_object - remove hmc objects * @hw: pointer to the HW structure * @info: pointer to i40e_hmc_delete_obj_info struct * diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 353deae139f9..0f84ed0143e4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2023,7 +2023,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi, } /** - * i40e_next_entry - Get the next non-broadcast filter from a list + * i40e_next_filter - Get the next non-broadcast filter from a list * @next: pointer to filter in list * * Returns the next non-broadcast filter in the list. 
Required so that we @@ -3259,6 +3259,17 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) } /** + * i40e_rx_offset - Return expected offset into page to access data + * @rx_ring: Ring we are requesting offset of + * + * Returns the offset value for ring into the data buffer. + */ +static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; +} + +/** * i40e_configure_rx_ring - Configure a receive ring context * @ring: The Rx ring to configure * @@ -3369,6 +3380,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) else set_ring_build_skb_enabled(ring); + ring->rx_offset = i40e_rx_offset(ring); + /* cache tail for quicker writes, and clear the reg before use */ ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); @@ -5191,7 +5204,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) } /** - * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes + * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes * @pf: PF being queried * * Return a bitmap for enabled traffic classes for this PF. @@ -9454,7 +9467,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) } /** - * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed + * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed * @pf: board private structure **/ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 7164f4ad8120..fe6dca846028 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -4,7 +4,7 @@ #include "i40e_prototype.h" /** - * i40e_init_nvm_ops - Initialize NVM function pointers + * i40e_init_nvm - Initialize NVM function pointers * @hw: pointer to the HW structure * * Setup the function pointers and the NVM info structure. Should be called diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 7a879614ca55..f1f6fc3744e9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -216,7 +216,7 @@ static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp, } /** - * i40e_ptp_update_latch_events - Read I40E_PRTTSYN_STAT_1 and latch events + * i40e_ptp_get_rx_events - Read I40E_PRTTSYN_STAT_1 and latch events * @pf: the PF data structure * * This function reads I40E_PRTTSYN_STAT_1 and updates the corresponding timers diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index e2c5c6d83f25..fc20afc23bfa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1570,17 +1570,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring) } /** - * i40e_rx_offset - Return expected offset into page to access data - * @rx_ring: Ring we are requesting offset of - * - * Returns the offset value for ring into the data buffer. - */ -static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) -{ - return ring_uses_build_skb(rx_ring) ? 
I40E_SKB_PAD : 0; -} - -/** * i40e_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -1608,7 +1597,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - rx_ring->rx_offset = i40e_rx_offset(rx_ring); /* XDP RX-queue info only needed for RX rings exposed to XDP */ if (rx_ring->vsi->type == I40E_VSI_MAIN) { @@ -3345,7 +3333,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, } /** - * i40e_create_tx_ctx Build the Tx context descriptor + * i40e_create_tx_ctx - Build the Tx context descriptor * @tx_ring: ring to create the descriptor on * @cd_type_cmd_tso_mss: Quad Word 1 * @cd_tunneling: Quad Word 0 - bits 0-31 diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index fc32c5019b0f..d89c22347d9d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -160,6 +160,13 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); + if (likely(act == XDP_REDIRECT)) { + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED; + rcu_read_unlock(); + return result; + } + switch (act) { case XDP_PASS: break; @@ -167,10 +174,6 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); break; - case XDP_REDIRECT: - err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED; - break; default: bpf_warn_invalid_xdp_action(act); fallthrough; @@ -625,7 +628,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring) } /** - * i40e_xsk_clean_xdp_ring - Clean the XDP Tx ring on shutdown + * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown * @tx_ring: XDP Tx ring **/ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring) diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile index c997063ed728..121e194ee734 100644 --- a/drivers/net/ethernet/intel/iavf/Makefile +++ b/drivers/net/ethernet/intel/iavf/Makefile @@ -11,5 +11,5 @@ subdir-ccflags-y += -I$(src) obj-$(CONFIG_IAVF) += iavf.o -iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \ +iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \ iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 8a65525a7c0d..bda2a900df8e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -37,6 +37,7 @@ #include "iavf_type.h" #include <linux/avf/virtchnl.h> #include "iavf_txrx.h" +#include "iavf_fdir.h" #define DEFAULT_DEBUG_LEVEL_SHIFT 3 #define PFX "iavf: " @@ -300,6 +301,8 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22) #define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23) #define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24) +#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT(25) +#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26) /* OS defined structs */ struct net_device *netdev; @@ -340,6 +343,8 @@ struct iavf_adapter { VIRTCHNL_VF_OFFLOAD_VLAN) #define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_CAP_ADV_LINK_SPEED) +#define FDIR_FLTR_SUPPORT(_a) 
((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_FDIR_PF) struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ struct virtchnl_version_info pf_version; @@ -362,6 +367,11 @@ struct iavf_adapter { /* lock to protect access to the cloud filter list */ spinlock_t cloud_filter_list_lock; u16 num_cloud_filters; + +#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */ + u16 fdir_active_fltr; + struct list_head fdir_list_head; + spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */ }; @@ -432,6 +442,8 @@ void iavf_enable_channels(struct iavf_adapter *adapter); void iavf_disable_channels(struct iavf_adapter *adapter); void iavf_add_cloud_filter(struct iavf_adapter *adapter); void iavf_del_cloud_filter(struct iavf_adapter *adapter); +void iavf_add_fdir_filter(struct iavf_adapter *adapter); +void iavf_del_fdir_filter(struct iavf_adapter *adapter); struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, const u8 *macaddr); #endif /* _IAVF_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index c93567f4d0f7..3ebfef737f5c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -828,6 +828,623 @@ static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue, } /** + * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool + * flow type values + * @flow: filter type to be converted + * + * Returns the corresponding ethtool flow type. + */ +static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow) +{ + switch (flow) { + case IAVF_FDIR_FLOW_IPV4_TCP: + return TCP_V4_FLOW; + case IAVF_FDIR_FLOW_IPV4_UDP: + return UDP_V4_FLOW; + case IAVF_FDIR_FLOW_IPV4_SCTP: + return SCTP_V4_FLOW; + case IAVF_FDIR_FLOW_IPV4_AH: + return AH_V4_FLOW; + case IAVF_FDIR_FLOW_IPV4_ESP: + return ESP_V4_FLOW; + case IAVF_FDIR_FLOW_IPV4_OTHER: + return IPV4_USER_FLOW; + case IAVF_FDIR_FLOW_IPV6_TCP: + return TCP_V6_FLOW; + case IAVF_FDIR_FLOW_IPV6_UDP: + return UDP_V6_FLOW; + case IAVF_FDIR_FLOW_IPV6_SCTP: + return SCTP_V6_FLOW; + case IAVF_FDIR_FLOW_IPV6_AH: + return AH_V6_FLOW; + case IAVF_FDIR_FLOW_IPV6_ESP: + return ESP_V6_FLOW; + case IAVF_FDIR_FLOW_IPV6_OTHER: + return IPV6_USER_FLOW; + case IAVF_FDIR_FLOW_NON_IP_L2: + return ETHER_FLOW; + default: + /* 0 is undefined ethtool flow */ + return 0; + } +} + +/** + * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum + * @eth: Ethtool flow type to be converted + * + * Returns flow enum + */ +static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth) +{ + switch (eth) { + case TCP_V4_FLOW: + return IAVF_FDIR_FLOW_IPV4_TCP; + case UDP_V4_FLOW: + return IAVF_FDIR_FLOW_IPV4_UDP; + case SCTP_V4_FLOW: + return IAVF_FDIR_FLOW_IPV4_SCTP; + case AH_V4_FLOW: + return IAVF_FDIR_FLOW_IPV4_AH; + case ESP_V4_FLOW: + return IAVF_FDIR_FLOW_IPV4_ESP; + case IPV4_USER_FLOW: + return IAVF_FDIR_FLOW_IPV4_OTHER; + case TCP_V6_FLOW: + return IAVF_FDIR_FLOW_IPV6_TCP; + case UDP_V6_FLOW: + return IAVF_FDIR_FLOW_IPV6_UDP; + case SCTP_V6_FLOW: + return IAVF_FDIR_FLOW_IPV6_SCTP; + case AH_V6_FLOW: + return IAVF_FDIR_FLOW_IPV6_AH; + case ESP_V6_FLOW: + return IAVF_FDIR_FLOW_IPV6_ESP; + case IPV6_USER_FLOW: + return IAVF_FDIR_FLOW_IPV6_OTHER; + case ETHER_FLOW: + return IAVF_FDIR_FLOW_NON_IP_L2; + default: + return IAVF_FDIR_FLOW_NONE; + } +} + +/** + * iavf_is_mask_valid - check mask field set + * @mask: full 
mask to check + * @field: field for which mask should be valid + * + * If the mask is fully set return true. If it is not valid for field return + * false. + */ +static bool iavf_is_mask_valid(u64 mask, u64 field) +{ + return (mask & field) == field; +} + +/** + * iavf_parse_rx_flow_user_data - deconstruct user-defined data + * @fsp: pointer to ethtool Rx flow specification + * @fltr: pointer to Flow Director filter for userdef data storage + * + * Returns 0 on success, negative error value on failure + */ +static int +iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, + struct iavf_fdir_fltr *fltr) +{ + struct iavf_flex_word *flex; + int i, cnt = 0; + + if (!(fsp->flow_type & FLOW_EXT)) + return 0; + + for (i = 0; i < 2; i++) { +#define IAVF_USERDEF_FLEX_WORD_M GENMASK(15, 0) +#define IAVF_USERDEF_FLEX_OFFS_S 16 +#define IAVF_USERDEF_FLEX_OFFS_M GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S) +#define IAVF_USERDEF_FLEX_FLTR_M GENMASK(31, 0) + u32 value = be32_to_cpu(fsp->h_ext.data[i]); + u32 mask = be32_to_cpu(fsp->m_ext.data[i]); + + if (!value || !mask) + continue; + + if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M)) + return -EINVAL; + + /* 504 is the maximum value for offsets, and offset is measured + * from the start of the MAC address. + */ +#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504 + flex = &fltr->flex_words[cnt++]; + flex->word = value & IAVF_USERDEF_FLEX_WORD_M; + flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >> + IAVF_USERDEF_FLEX_OFFS_S; + if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL) + return -EINVAL; + } + + fltr->flex_cnt = cnt; + + return 0; +} + +/** + * iavf_fill_rx_flow_ext_data - fill the additional data + * @fsp: pointer to ethtool Rx flow specification + * @fltr: pointer to Flow Director filter to get additional data + */ +static void +iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp, + struct iavf_fdir_fltr *fltr) +{ + if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1]) + return; + + fsp->flow_type |= FLOW_EXT; + + memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data)); + memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data)); +} + +/** + * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data + * @adapter: the VF adapter structure that contains filter list + * @cmd: ethtool command data structure to receive the filter data + * + * Returns 0 as expected for success by ethtool + */ +static int +iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + struct iavf_fdir_fltr *rule = NULL; + int ret = 0; + + if (!FDIR_FLTR_SUPPORT(adapter)) + return -EOPNOTSUPP; + + spin_lock_bh(&adapter->fdir_fltr_lock); + + rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location); + if (!rule) { + ret = -EINVAL; + goto release_lock; + } + + fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type); + + memset(&fsp->m_u, 0, sizeof(fsp->m_u)); + memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); + + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip; + fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port; + fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos; + fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip; + fsp->m_u.tcp_ip4_spec.ip4dst = 
rule->ip_mask.v4_addrs.dst_ip; + fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port; + fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port; + fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos; + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip; + fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip; + fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi; + fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos; + fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip; + fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip; + fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi; + fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos; + break; + case IPV4_USER_FLOW: + fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip; + fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip; + fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header; + fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto; + fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip; + fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip; + fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header; + fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos; + fsp->m_u.usr_ip4_spec.ip_ver = 0xFF; + fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port; + fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port; + fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass; + memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port; + fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port; + fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass; + break; + case AH_V6_FLOW: + case ESP_V6_FLOW: + memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi; + fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass; + memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi; + fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass; + break; + case IPV6_USER_FLOW: + memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header; + fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass; + fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto; + memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header; + fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass; + 
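+ /* report the stored protocol mask back as the l4_proto mask */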
fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto; + break; + case ETHER_FLOW: + fsp->h_u.ether_spec.h_proto = rule->eth_data.etype; + fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype; + break; + default: + ret = -EINVAL; + break; + } + + iavf_fill_rx_flow_ext_data(fsp, rule); + + if (rule->action == VIRTCHNL_ACTION_DROP) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->q_index; + +release_lock: + spin_unlock_bh(&adapter->fdir_fltr_lock); + return ret; +} + +/** + * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters + * @adapter: the VF adapter structure containing the filter list + * @cmd: ethtool command data structure + * @rule_locs: ethtool array passed in from OS to receive filter IDs + * + * Returns 0 as expected for success by ethtool + */ +static int +iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct iavf_fdir_fltr *fltr; + unsigned int cnt = 0; + int val = 0; + + if (!FDIR_FLTR_SUPPORT(adapter)) + return -EOPNOTSUPP; + + cmd->data = IAVF_MAX_FDIR_FILTERS; + + spin_lock_bh(&adapter->fdir_fltr_lock); + + list_for_each_entry(fltr, &adapter->fdir_list_head, list) { + if (cnt == cmd->rule_cnt) { + val = -EMSGSIZE; + goto release_lock; + } + rule_locs[cnt] = fltr->loc; + cnt++; + } + +release_lock: + spin_unlock_bh(&adapter->fdir_fltr_lock); + if (!val) + cmd->rule_cnt = cnt; + + return val; +} + +/** + * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter + * @adapter: pointer to the VF adapter structure + * @fsp: pointer to ethtool Rx flow specification + * @fltr: filter structure + */ +static int +iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp, + struct iavf_fdir_fltr *fltr) +{ + u32 flow_type, q_index = 0; + enum virtchnl_action act; + int err; + + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + act = VIRTCHNL_ACTION_DROP; + } else { + q_index = fsp->ring_cookie; + if (q_index >= adapter->num_active_queues) + return -EINVAL; + + act = VIRTCHNL_ACTION_QUEUE; + } + + fltr->action = act; + fltr->loc = fsp->location; + fltr->q_index = q_index; + + if (fsp->flow_type & FLOW_EXT) { + memcpy(fltr->ext_data.usr_def, fsp->h_ext.data, + sizeof(fltr->ext_data.usr_def)); + memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data, + sizeof(fltr->ext_mask.usr_def)); + } + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type); + + switch (flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src; + fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst; + fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc; + fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos; + fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src; + fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst; + fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc; + fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos; + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src; + fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst; + fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi; + fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos; + fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src; + fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst; + 
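+ /* the SPI is the only AH/ESP header field ethtool can match on */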
fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi; + fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos; + break; + case IPV4_USER_FLOW: + fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src; + fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst; + fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes; + fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos; + fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto; + fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src; + fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst; + fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes; + fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos; + fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc; + fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst; + fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass; + memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc; + fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst; + fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass; + break; + case AH_V6_FLOW: + case ESP_V6_FLOW: + memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi; + fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass; + memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi; + fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass; + break; + case IPV6_USER_FLOW: + memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes; + fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass; + fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto; + memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes; + fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass; + fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto; + break; + case ETHER_FLOW: + fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto; + fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto; + break; + default: + /* not doing un-parsed flow types */ + return -EINVAL; + } + + if (iavf_fdir_is_dup_fltr(adapter, fltr)) + return -EEXIST; + + err = iavf_parse_rx_flow_user_data(fsp, fltr); + if (err) + return err; + + return iavf_fill_fdir_add_msg(adapter, fltr); +} + +/** + * iavf_add_fdir_ethtool - add Flow Director filter + * @adapter: pointer to the VF adapter structure + * @cmd: command to add Flow Director filter + * + * Returns 0 on success and negative values for failure + 
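+ *
+ * An illustrative command that reaches this handler through
+ * ETHTOOL_SRXCLSRLINS (the interface name is hypothetical):
+ *
+ *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 1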
*/ +static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct iavf_fdir_fltr *fltr; + int count = 50; + int err; + + if (!FDIR_FLTR_SUPPORT(adapter)) + return -EOPNOTSUPP; + + if (fsp->flow_type & FLOW_MAC_EXT) + return -EINVAL; + + if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) { + dev_err(&adapter->pdev->dev, + "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n", + IAVF_MAX_FDIR_FILTERS); + return -ENOSPC; + } + + spin_lock_bh(&adapter->fdir_fltr_lock); + if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) { + dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n"); + spin_unlock_bh(&adapter->fdir_fltr_lock); + return -EEXIST; + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); + if (!fltr) + return -ENOMEM; + + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, + &adapter->crit_section)) { + if (--count == 0) { + kfree(fltr); + return -EINVAL; + } + udelay(1); + } + + err = iavf_add_fdir_fltr_info(adapter, fsp, fltr); + if (err) + goto ret; + + spin_lock_bh(&adapter->fdir_fltr_lock); + iavf_fdir_list_add_fltr(adapter, fltr); + adapter->fdir_active_fltr++; + fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; + spin_unlock_bh(&adapter->fdir_fltr_lock); + + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + +ret: + if (err && fltr) + kfree(fltr); + + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + return err; +} + +/** + * iavf_del_fdir_ethtool - delete Flow Director filter + * @adapter: pointer to the VF adapter structure + * @cmd: command to delete Flow Director filter + * + * Returns 0 on success and negative values for failure + */ +static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + struct iavf_fdir_fltr *fltr = NULL; + int err = 0; + + if (!FDIR_FLTR_SUPPORT(adapter)) + return -EOPNOTSUPP; + + spin_lock_bh(&adapter->fdir_fltr_lock); + fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location); + if (fltr) { + if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) { + fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + } else { + err = -EBUSY; + } + } else if (adapter->fdir_active_fltr) { + err = -EINVAL; + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + + return err; +} + +/** + * iavf_set_rxnfc - command to set Rx flow rules. 
+ * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * + * Returns 0 for success and negative values for errors + */ +static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = iavf_add_fdir_ethtool(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = iavf_del_fdir_ethtool(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +/** * iavf_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -846,6 +1463,19 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, cmd->data = adapter->num_active_queues; ret = 0; break; + case ETHTOOL_GRXCLSRLCNT: + if (!FDIR_FLTR_SUPPORT(adapter)) + break; + cmd->rule_cnt = adapter->fdir_active_fltr; + cmd->data = IAVF_MAX_FDIR_FILTERS; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = iavf_get_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs); + break; case ETHTOOL_GRXFH: netdev_info(netdev, "RSS hash info is not available to vf, use pf.\n"); @@ -1025,6 +1655,7 @@ static const struct ethtool_ops iavf_ethtool_ops = { .set_coalesce = iavf_set_coalesce, .get_per_queue_coalesce = iavf_get_per_queue_coalesce, .set_per_queue_coalesce = iavf_set_per_queue_coalesce, + .set_rxnfc = iavf_set_rxnfc, .get_rxnfc = iavf_get_rxnfc, .get_rxfh_indir_size = iavf_get_rxfh_indir_size, .get_rxfh = iavf_get_rxfh, diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c new file mode 100644 index 000000000000..3e687189d737 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c @@ -0,0 +1,773 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. */ + +/* flow director ethtool support for iavf */ + +#include "iavf.h" + +#define GTPU_PORT 2152 +#define NAT_T_ESP_PORT 4500 +#define PFCP_PORT 8805 + +static const struct in6_addr ipv6_addr_full_mask = { + .in6_u = { + .u6_addr8 = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } + } +}; + +/** + * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload + * @fltr: Flow Director filter data structure + */ +static u16 iavf_pkt_udp_no_pay_len(struct iavf_fdir_fltr *fltr) +{ + return sizeof(struct ethhdr) + + (fltr->ip_ver == 4 ? 
sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + + sizeof(struct udphdr); +} + +/** + * iavf_fill_fdir_gtpu_hdr - fill the GTP-U protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the GTP-U protocol header is set successfully + */ +static int +iavf_fill_fdir_gtpu_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + struct virtchnl_proto_hdr *ghdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct virtchnl_proto_hdr *ehdr = NULL; /* Extension Header if it exists */ + u16 adj_offs, hdr_offs; + int i; + + VIRTCHNL_SET_PROTO_HDR_TYPE(ghdr, GTPU_IP); + + adj_offs = iavf_pkt_udp_no_pay_len(fltr); + + for (i = 0; i < fltr->flex_cnt; i++) { +#define IAVF_GTPU_HDR_TEID_OFFS0 4 +#define IAVF_GTPU_HDR_TEID_OFFS1 6 +#define IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS 10 +#define IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS 13 +#define IAVF_GTPU_PSC_EXTHDR_TYPE 0x85 /* PDU Session Container Extension Header */ + if (fltr->flex_words[i].offset < adj_offs) + return -EINVAL; + + hdr_offs = fltr->flex_words[i].offset - adj_offs; + + switch (hdr_offs) { + case IAVF_GTPU_HDR_TEID_OFFS0: + case IAVF_GTPU_HDR_TEID_OFFS1: { + __be16 *pay_word = (__be16 *)ghdr->buffer; + + pay_word[hdr_offs >> 1] = htons(fltr->flex_words[i].word); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ghdr, GTPU_IP, TEID); + } + break; + case IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS: + if ((fltr->flex_words[i].word & 0xff) != IAVF_GTPU_PSC_EXTHDR_TYPE) + return -EOPNOTSUPP; + if (!ehdr) + ehdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + VIRTCHNL_SET_PROTO_HDR_TYPE(ehdr, GTPU_EH); + break; + case IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS: + if (!ehdr) + return -EINVAL; + ehdr->buffer[1] = fltr->flex_words[i].word & 0x3F; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ehdr, GTPU_EH, QFI); + break; + default: + return -EINVAL; + } + } + + uhdr->field_selector = 0; /* The PF ignores the UDP header fields */ + + return 0; +} + +/** + * iavf_fill_fdir_pfcp_hdr - fill the PFCP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the PFCP protocol header is set successfully + */ +static int +iavf_fill_fdir_pfcp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + u16 adj_offs, hdr_offs; + int i; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP); + + adj_offs = iavf_pkt_udp_no_pay_len(fltr); + + for (i = 0; i < fltr->flex_cnt; i++) { +#define IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS 0 + if (fltr->flex_words[i].offset < adj_offs) + return -EINVAL; + + hdr_offs = fltr->flex_words[i].offset - adj_offs; + + switch (hdr_offs) { + case IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS: + hdr->buffer[0] = (fltr->flex_words[i].word >> 8) & 0xff; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD); + break; + default: + return -EINVAL; + } + } + + uhdr->field_selector = 0; /* The PF ignores the UDP header fields */ + + return 0; +} + +/** + * iavf_fill_fdir_nat_t_esp_hdr - fill the NAT-T-ESP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the NAT-T-ESP protocol header is set successfully + */ +static int 
+iavf_fill_fdir_nat_t_esp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + u16 adj_offs, hdr_offs; + u32 spi = 0; + int i; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP); + + adj_offs = iavf_pkt_udp_no_pay_len(fltr); + + for (i = 0; i < fltr->flex_cnt; i++) { +#define IAVF_NAT_T_ESP_SPI_OFFS0 0 +#define IAVF_NAT_T_ESP_SPI_OFFS1 2 + if (fltr->flex_words[i].offset < adj_offs) + return -EINVAL; + + hdr_offs = fltr->flex_words[i].offset - adj_offs; + + switch (hdr_offs) { + case IAVF_NAT_T_ESP_SPI_OFFS0: + spi |= fltr->flex_words[i].word << 16; + break; + case IAVF_NAT_T_ESP_SPI_OFFS1: + spi |= fltr->flex_words[i].word; + break; + default: + return -EINVAL; + } + } + + if (!spi) + return -EOPNOTSUPP; /* Not support IKE Header Format with SPI 0 */ + + *(__be32 *)hdr->buffer = htonl(spi); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI); + + uhdr->field_selector = 0; /* The PF ignores the UDP header fields */ + + return 0; +} + +/** + * iavf_fill_fdir_udp_flex_pay_hdr - fill the UDP payload header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the UDP payload defined protocol header is set successfully + */ +static int +iavf_fill_fdir_udp_flex_pay_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + int err; + + switch (ntohs(fltr->ip_data.dst_port)) { + case GTPU_PORT: + err = iavf_fill_fdir_gtpu_hdr(fltr, proto_hdrs); + break; + case NAT_T_ESP_PORT: + err = iavf_fill_fdir_nat_t_esp_hdr(fltr, proto_hdrs); + break; + case PFCP_PORT: + err = iavf_fill_fdir_pfcp_hdr(fltr, proto_hdrs); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +/** + * iavf_fill_fdir_ip4_hdr - fill the IPv4 protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the IPv4 protocol header is set successfully + */ +static int +iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct iphdr *iph = (struct iphdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4); + + if (fltr->ip_mask.tos == U8_MAX) { + iph->tos = fltr->ip_data.tos; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP); + } + + if (fltr->ip_mask.proto == U8_MAX) { + iph->protocol = fltr->ip_data.proto; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT); + } + + if (fltr->ip_mask.v4_addrs.src_ip == htonl(U32_MAX)) { + iph->saddr = fltr->ip_data.v4_addrs.src_ip; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC); + } + + if (fltr->ip_mask.v4_addrs.dst_ip == htonl(U32_MAX)) { + iph->daddr = fltr->ip_data.v4_addrs.dst_ip; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST); + } + + fltr->ip_ver = 4; + + return 0; +} + +/** + * iavf_fill_fdir_ip6_hdr - fill the IPv6 protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the IPv6 protocol header is set successfully + */ +static int +iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ipv6hdr *iph = (struct ipv6hdr *)hdr->buffer; + + 
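+ /* hdr->buffer doubles as a template ipv6hdr: a field is copied in
+ * below only when its mask is all-ones, and each copied field is
+ * advertised to the PF through the virtchnl field selector
+ */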
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6); + + if (fltr->ip_mask.tclass == U8_MAX) { + iph->priority = (fltr->ip_data.tclass >> 4) & 0xF; + iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC); + } + + if (fltr->ip_mask.proto == U8_MAX) { + iph->nexthdr = fltr->ip_data.proto; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT); + } + + if (!memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) { + memcpy(&iph->saddr, &fltr->ip_data.v6_addrs.src_ip, + sizeof(struct in6_addr)); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC); + } + + if (!memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) { + memcpy(&iph->daddr, &fltr->ip_data.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST); + } + + fltr->ip_ver = 6; + + return 0; +} + +/** + * iavf_fill_fdir_tcp_hdr - fill the TCP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the TCP protocol header is set successfully + */ +static int +iavf_fill_fdir_tcp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct tcphdr *tcph = (struct tcphdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP); + + if (fltr->ip_mask.src_port == htons(U16_MAX)) { + tcph->source = fltr->ip_data.src_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT); + } + + if (fltr->ip_mask.dst_port == htons(U16_MAX)) { + tcph->dest = fltr->ip_data.dst_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT); + } + + return 0; +} + +/** + * iavf_fill_fdir_udp_hdr - fill the UDP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the UDP protocol header is set successfully + */ +static int +iavf_fill_fdir_udp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct udphdr *udph = (struct udphdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP); + + if (fltr->ip_mask.src_port == htons(U16_MAX)) { + udph->source = fltr->ip_data.src_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT); + } + + if (fltr->ip_mask.dst_port == htons(U16_MAX)) { + udph->dest = fltr->ip_data.dst_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT); + } + + if (!fltr->flex_cnt) + return 0; + + return iavf_fill_fdir_udp_flex_pay_hdr(fltr, proto_hdrs); +} + +/** + * iavf_fill_fdir_sctp_hdr - fill the SCTP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the SCTP protocol header is set successfully + */ +static int +iavf_fill_fdir_sctp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct sctphdr *sctph = (struct sctphdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP); + + if (fltr->ip_mask.src_port == htons(U16_MAX)) { + sctph->source = fltr->ip_data.src_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT); + } + + if (fltr->ip_mask.dst_port == htons(U16_MAX)) { + sctph->dest = fltr->ip_data.dst_port; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT); + } + + return 0; +} 
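Each fill helper above follows the same convention: a filter field is copied into the template header only when its mask is fully set, and the copied field is then flagged in the virtchnl field selector so the PF knows to match on it. A minimal standalone sketch of that convention, with an illustrative helper name that is not part of the driver:

static void example_fill_dst_port(struct tcphdr *tmpl, __be16 port,
				  __be16 mask, struct virtchnl_proto_hdr *hdr)
{
	/* partially-masked fields are skipped; only an all-ones mask
	 * selects the field for hardware matching
	 */
	if (mask == htons(U16_MAX)) {
		tmpl->dest = port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
	}
}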
+ +/** + * iavf_fill_fdir_ah_hdr - fill the AH protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the AH protocol header is set successfully + */ +static int +iavf_fill_fdir_ah_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ip_auth_hdr *ah = (struct ip_auth_hdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH); + + if (fltr->ip_mask.spi == htonl(U32_MAX)) { + ah->spi = fltr->ip_data.spi; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI); + } + + return 0; +} + +/** + * iavf_fill_fdir_esp_hdr - fill the ESP protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the ESP protocol header is set successfully + */ +static int +iavf_fill_fdir_esp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ip_esp_hdr *esph = (struct ip_esp_hdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP); + + if (fltr->ip_mask.spi == htonl(U32_MAX)) { + esph->spi = fltr->ip_data.spi; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI); + } + + return 0; +} + +/** + * iavf_fill_fdir_l4_hdr - fill the L4 protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the L4 protocol header is set successfully + */ +static int +iavf_fill_fdir_l4_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr; + __be32 *l4_4_data; + + if (!fltr->ip_mask.proto) /* IPv4/IPv6 header only */ + return 0; + + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + l4_4_data = (__be32 *)hdr->buffer; + + /* L2TPv3 over IP with 'Session ID' */ + if (fltr->ip_data.proto == 115 && fltr->ip_mask.l4_header == htonl(U32_MAX)) { + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3); + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID); + + *l4_4_data = fltr->ip_data.l4_header; + } else { + return -EOPNOTSUPP; + } + + return 0; +} + +/** + * iavf_fill_fdir_eth_hdr - fill the Ethernet protocol header + * @fltr: Flow Director filter data structure + * @proto_hdrs: Flow Director protocol headers data structure + * + * Returns 0 if the Ethernet protocol header is set successfully + */ +static int +iavf_fill_fdir_eth_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ethhdr *ehdr = (struct ethhdr *)hdr->buffer; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH); + + if (fltr->eth_mask.etype == htons(U16_MAX)) { + if (fltr->eth_data.etype == htons(ETH_P_IP) || + fltr->eth_data.etype == htons(ETH_P_IPV6)) + return -EOPNOTSUPP; + + ehdr->h_proto = fltr->eth_data.etype; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE); + } + + return 0; +} + +/** + * iavf_fill_fdir_add_msg - fill the Flow Director filter into virtchnl message + * @adapter: pointer to the VF adapter structure + * @fltr: Flow Director filter data structure + * + * Returns 0 if the add Flow Director virtchnl message is filled successfully + */ +int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + struct virtchnl_fdir_add *vc_msg = &fltr->vc_add_msg; + struct virtchnl_proto_hdrs *proto_hdrs; 
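+ /* headers are filled in packet order: ETH, then IPv4/IPv6, then
+ * the L4 header; each fill helper claims one proto_hdr slot
+ */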
+ int err; + + proto_hdrs = &vc_msg->rule_cfg.proto_hdrs; + + err = iavf_fill_fdir_eth_hdr(fltr, proto_hdrs); /* L2 always exists */ + if (err) + return err; + + switch (fltr->flow_type) { + case IAVF_FDIR_FLOW_IPV4_TCP: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_UDP: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_udp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_SCTP: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_AH: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_ah_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_ESP: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_esp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV4_OTHER: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_l4_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_TCP: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_UDP: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_udp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_SCTP: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_AH: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_ah_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_ESP: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_esp_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_IPV6_OTHER: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_l4_hdr(fltr, proto_hdrs); + break; + case IAVF_FDIR_FLOW_NON_IP_L2: + break; + default: + err = -EINVAL; + break; + } + + if (err) + return err; + + vc_msg->vsi_id = adapter->vsi.id; + vc_msg->rule_cfg.action_set.count = 1; + vc_msg->rule_cfg.action_set.actions[0].type = fltr->action; + vc_msg->rule_cfg.action_set.actions[0].act_conf.queue.index = fltr->q_index; + + return 0; +} + +/** + * iavf_fdir_flow_proto_name - get the flow protocol name + * @flow_type: Flow Director filter flow type + **/ +static const char *iavf_fdir_flow_proto_name(enum iavf_fdir_flow_type flow_type) +{ + switch (flow_type) { + case IAVF_FDIR_FLOW_IPV4_TCP: + case IAVF_FDIR_FLOW_IPV6_TCP: + return "TCP"; + case IAVF_FDIR_FLOW_IPV4_UDP: + case IAVF_FDIR_FLOW_IPV6_UDP: + return "UDP"; + case IAVF_FDIR_FLOW_IPV4_SCTP: + case IAVF_FDIR_FLOW_IPV6_SCTP: + return "SCTP"; + case IAVF_FDIR_FLOW_IPV4_AH: + case IAVF_FDIR_FLOW_IPV6_AH: + return "AH"; + case IAVF_FDIR_FLOW_IPV4_ESP: + case IAVF_FDIR_FLOW_IPV6_ESP: + return "ESP"; + case IAVF_FDIR_FLOW_IPV4_OTHER: + case IAVF_FDIR_FLOW_IPV6_OTHER: + return "Other"; + case IAVF_FDIR_FLOW_NON_IP_L2: + return "Ethernet"; + default: + return NULL; + } +} + +/** + * iavf_print_fdir_fltr + * @adapter: adapter structure + * @fltr: Flow Director filter to print + * + * Print the Flow Director filter + **/ +void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + const char *proto = iavf_fdir_flow_proto_name(fltr->flow_type); + + if (!proto) + return; + + switch (fltr->flow_type) { + case IAVF_FDIR_FLOW_IPV4_TCP: + case IAVF_FDIR_FLOW_IPV4_UDP: + case IAVF_FDIR_FLOW_IPV4_SCTP: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: dst_port %hu src_port %hu\n", + fltr->loc, + 
&fltr->ip_data.v4_addrs.dst_ip, + &fltr->ip_data.v4_addrs.src_ip, + proto, + ntohs(fltr->ip_data.dst_port), + ntohs(fltr->ip_data.src_port)); + break; + case IAVF_FDIR_FLOW_IPV4_AH: + case IAVF_FDIR_FLOW_IPV4_ESP: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: SPI %u\n", + fltr->loc, + &fltr->ip_data.v4_addrs.dst_ip, + &fltr->ip_data.v4_addrs.src_ip, + proto, + ntohl(fltr->ip_data.spi)); + break; + case IAVF_FDIR_FLOW_IPV4_OTHER: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 proto: %u L4_bytes: 0x%x\n", + fltr->loc, + &fltr->ip_data.v4_addrs.dst_ip, + &fltr->ip_data.v4_addrs.src_ip, + fltr->ip_data.proto, + ntohl(fltr->ip_data.l4_header)); + break; + case IAVF_FDIR_FLOW_IPV6_TCP: + case IAVF_FDIR_FLOW_IPV6_UDP: + case IAVF_FDIR_FLOW_IPV6_SCTP: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: dst_port %hu src_port %hu\n", + fltr->loc, + &fltr->ip_data.v6_addrs.dst_ip, + &fltr->ip_data.v6_addrs.src_ip, + proto, + ntohs(fltr->ip_data.dst_port), + ntohs(fltr->ip_data.src_port)); + break; + case IAVF_FDIR_FLOW_IPV6_AH: + case IAVF_FDIR_FLOW_IPV6_ESP: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: SPI %u\n", + fltr->loc, + &fltr->ip_data.v6_addrs.dst_ip, + &fltr->ip_data.v6_addrs.src_ip, + proto, + ntohl(fltr->ip_data.spi)); + break; + case IAVF_FDIR_FLOW_IPV6_OTHER: + dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 proto: %u L4_bytes: 0x%x\n", + fltr->loc, + &fltr->ip_data.v6_addrs.dst_ip, + &fltr->ip_data.v6_addrs.src_ip, + fltr->ip_data.proto, + ntohl(fltr->ip_data.l4_header)); + break; + case IAVF_FDIR_FLOW_NON_IP_L2: + dev_info(&adapter->pdev->dev, "Rule ID: %u eth_type: 0x%x\n", + fltr->loc, + ntohs(fltr->eth_data.etype)); + break; + default: + break; + } +} + +/** + * iavf_fdir_is_dup_fltr - test if filter is already in list + * @adapter: pointer to the VF adapter structure + * @fltr: Flow Director filter data structure + * + * Returns true if the filter is found in the list + */ +bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + struct iavf_fdir_fltr *tmp; + bool ret = false; + + list_for_each_entry(tmp, &adapter->fdir_list_head, list) { + if (tmp->flow_type != fltr->flow_type) + continue; + + if (!memcmp(&tmp->eth_data, &fltr->eth_data, + sizeof(fltr->eth_data)) && + !memcmp(&tmp->ip_data, &fltr->ip_data, + sizeof(fltr->ip_data)) && + !memcmp(&tmp->ext_data, &fltr->ext_data, + sizeof(fltr->ext_data))) { + ret = true; + break; + } + } + + return ret; +} + +/** + * iavf_find_fdir_fltr_by_loc - find filter with location + * @adapter: pointer to the VF adapter structure + * @loc: location to find. 
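+ *
+ * The ethtool handlers above take fdir_fltr_lock before calling this,
+ * so the lookup itself does no locking.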
+ * + * Returns pointer to Flow Director filter if found or null + */ +struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc) +{ + struct iavf_fdir_fltr *rule; + + list_for_each_entry(rule, &adapter->fdir_list_head, list) + if (rule->loc == loc) + return rule; + + return NULL; +} + +/** + * iavf_fdir_list_add_fltr - add a new node to the flow director filter list + * @adapter: pointer to the VF adapter structure + * @fltr: filter node to add to structure + */ +void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + struct iavf_fdir_fltr *rule, *parent = NULL; + + list_for_each_entry(rule, &adapter->fdir_list_head, list) { + if (rule->loc >= fltr->loc) + break; + parent = rule; + } + + if (parent) + list_add(&fltr->list, &parent->list); + else + list_add(&fltr->list, &adapter->fdir_list_head); +} diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h new file mode 100644 index 000000000000..2439c970b657 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021, Intel Corporation. */ + +#ifndef _IAVF_FDIR_H_ +#define _IAVF_FDIR_H_ + +struct iavf_adapter; + +/* State of Flow Director filter */ +enum iavf_fdir_fltr_state_t { + IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */ + IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */ + IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */ + IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */ + IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */ +}; + +enum iavf_fdir_flow_type { + /* NONE - used for undef/error */ + IAVF_FDIR_FLOW_NONE = 0, + IAVF_FDIR_FLOW_IPV4_TCP, + IAVF_FDIR_FLOW_IPV4_UDP, + IAVF_FDIR_FLOW_IPV4_SCTP, + IAVF_FDIR_FLOW_IPV4_AH, + IAVF_FDIR_FLOW_IPV4_ESP, + IAVF_FDIR_FLOW_IPV4_OTHER, + IAVF_FDIR_FLOW_IPV6_TCP, + IAVF_FDIR_FLOW_IPV6_UDP, + IAVF_FDIR_FLOW_IPV6_SCTP, + IAVF_FDIR_FLOW_IPV6_AH, + IAVF_FDIR_FLOW_IPV6_ESP, + IAVF_FDIR_FLOW_IPV6_OTHER, + IAVF_FDIR_FLOW_NON_IP_L2, + /* MAX - this must be last and add anything new just above it */ + IAVF_FDIR_FLOW_PTYPE_MAX, +}; + +struct iavf_flex_word { + u16 offset; + u16 word; +}; + +struct iavf_ipv4_addrs { + __be32 src_ip; + __be32 dst_ip; +}; + +struct iavf_ipv6_addrs { + struct in6_addr src_ip; + struct in6_addr dst_ip; +}; + +struct iavf_fdir_eth { + __be16 etype; +}; + +struct iavf_fdir_ip { + union { + struct iavf_ipv4_addrs v4_addrs; + struct iavf_ipv6_addrs v6_addrs; + }; + __be16 src_port; + __be16 dst_port; + __be32 l4_header; /* first 4 bytes of the layer 4 header */ + __be32 spi; /* security parameter index for AH/ESP */ + union { + u8 tos; + u8 tclass; + }; + u8 proto; +}; + +struct iavf_fdir_extra { + u32 usr_def[2]; +}; + +/* bookkeeping of Flow Director filters */ +struct iavf_fdir_fltr { + enum iavf_fdir_fltr_state_t state; + struct list_head list; + + enum iavf_fdir_flow_type flow_type; + + struct iavf_fdir_eth eth_data; + struct iavf_fdir_eth eth_mask; + + struct iavf_fdir_ip ip_data; + struct iavf_fdir_ip ip_mask; + + struct iavf_fdir_extra ext_data; + struct iavf_fdir_extra ext_mask; + + enum virtchnl_action action; + + /* flex byte filter data */ + u8 ip_ver; /* used to adjust the flex offset, 4 : IPv4, 6 : IPv6 */ + u8 flex_cnt; + struct iavf_flex_word flex_words[2]; + + u32 flow_id; + + u32 loc; /* Rule location inside the flow table */ + u32 q_index; + + struct virtchnl_fdir_add vc_add_msg; +}; + +int 
iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc); +#endif /* _IAVF_FDIR_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index dc5b3c06d1e0..a3268c894d85 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -959,8 +959,9 @@ void iavf_down(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct iavf_vlan_filter *vlf; - struct iavf_mac_filter *f; struct iavf_cloud_filter *cf; + struct iavf_fdir_fltr *fdir; + struct iavf_mac_filter *f; if (adapter->state <= __IAVF_DOWN_PENDING) return; @@ -996,6 +997,13 @@ void iavf_down(struct iavf_adapter *adapter) } spin_unlock_bh(&adapter->cloud_filter_list_lock); + /* remove all Flow Director filters */ + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { + fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && adapter->state != __IAVF_RESETTING) { /* cancel any current operation */ @@ -1007,6 +1015,7 @@ void iavf_down(struct iavf_adapter *adapter) adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; } @@ -1629,6 +1638,14 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_add_cloud_filter(adapter); return 0; } + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) { + iavf_add_fdir_filter(adapter); + return IAVF_SUCCESS; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) { + iavf_del_fdir_filter(adapter); + return IAVF_SUCCESS; + } return -EAGAIN; } @@ -2529,7 +2546,7 @@ validate_bw: } /** - * iavf_validate_channel_config - validate queue mapping info + * iavf_validate_ch_config - validate queue mapping info * @adapter: board private structure * @mqprio_qopt: queue parameters * @@ -3738,10 +3755,12 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->mac_vlan_list_lock); spin_lock_init(&adapter->cloud_filter_list_lock); + spin_lock_init(&adapter->fdir_fltr_lock); INIT_LIST_HEAD(&adapter->mac_filter_list); INIT_LIST_HEAD(&adapter->vlan_filter_list); INIT_LIST_HEAD(&adapter->cloud_filter_list); + INIT_LIST_HEAD(&adapter->fdir_list_head); INIT_WORK(&adapter->reset_task, iavf_reset_task); INIT_WORK(&adapter->adminq_task, iavf_adminq_task); @@ -3845,6 +3864,7 @@ static void iavf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_fdir_fltr *fdir, *fdirtmp; struct iavf_vlan_filter *vlf, *vlftmp; struct iavf_mac_filter *f, *ftmp; struct iavf_cloud_filter *cf, *cftmp; @@ -3926,6 +3946,13 @@ static void iavf_remove(struct pci_dev *pdev) } spin_unlock_bh(&adapter->cloud_filter_list_lock); + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) { + 
list_del(&fdir->list); + kfree(fdir); + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index ffaf2742a2e0..d6cba53a3a21 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -2098,7 +2098,7 @@ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, } /** - * iavf_create_tx_ctx Build the Tx context descriptor + * iavf_create_tx_ctx - Build the Tx context descriptor * @tx_ring: ring to create the descriptor on * @cd_type_cmd_tso_mss: Quad Word 1 * @cd_tunneling: Quad Word 0 - bits 0-31 diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 647e7fde11b4..3069092468b2 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -140,6 +140,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | VIRTCHNL_VF_OFFLOAD_ADQ | + VIRTCHNL_VF_OFFLOAD_FDIR_PF | VIRTCHNL_VF_CAP_ADV_LINK_SPEED; adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; @@ -1005,7 +1006,7 @@ iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter, } /** - * iavf_enable_channel + * iavf_enable_channels * @adapter: adapter structure * * Request that the PF enable channels as specified by @@ -1046,7 +1047,7 @@ void iavf_enable_channels(struct iavf_adapter *adapter) } /** - * iavf_disable_channel + * iavf_disable_channels * @adapter: adapter structure * * Request that the PF disable channels that are configured @@ -1198,6 +1199,101 @@ void iavf_del_cloud_filter(struct iavf_adapter *adapter) } /** + * iavf_add_fdir_filter + * @adapter: the VF adapter structure + * + * Request that the PF add Flow Director filters as specified + * by the user via ethtool. + **/ +void iavf_add_fdir_filter(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir; + struct virtchnl_fdir_add *f; + bool process_fltr = false; + int len; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_fdir_add); + f = kzalloc(len, GFP_KERNEL); + if (!f) + return; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { + process_fltr = true; + fdir->state = IAVF_FDIR_FLTR_ADD_PENDING; + memcpy(f, &fdir->vc_add_msg, len); + break; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (!process_fltr) { + /* prevent iavf_add_fdir_filter() from being called when there + * are no filters to add + */ + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER; + kfree(f); + return; + } + adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len); + kfree(f); +} + +/** + * iavf_del_fdir_filter + * @adapter: the VF adapter structure + * + * Request that the PF delete Flow Director filters as specified + * by the user via ethtool. 
+ **/ +void iavf_del_fdir_filter(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir; + struct virtchnl_fdir_del f; + bool process_fltr = false; + int len; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_fdir_del); + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { + if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) { + process_fltr = true; + memset(&f, 0, len); + f.vsi_id = fdir->vc_add_msg.vsi_id; + f.flow_id = fdir->flow_id; + fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; + break; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (!process_fltr) { + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER; + return; + } + + adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len); +} + +/** * iavf_request_reset * @adapter: adapter structure * @@ -1357,6 +1453,50 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } } break; + case VIRTCHNL_OP_ADD_FDIR_FILTER: { + struct iavf_fdir_fltr *fdir, *fdir_tmp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdir_tmp, + &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { + dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n", + iavf_stat_str(&adapter->hw, + v_retval)); + iavf_print_fdir_fltr(adapter, fdir); + if (msglen) + dev_err(&adapter->pdev->dev, + "%s\n", msg); + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case VIRTCHNL_OP_DEL_FDIR_FILTER: { + struct iavf_fdir_fltr *fdir; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { + fdir->state = IAVF_FDIR_FLTR_ACTIVE; + dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", + iavf_stat_str(&adapter->hw, + v_retval)); + iavf_print_fdir_fltr(adapter, fdir); + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); + break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", v_retval, iavf_stat_str(&adapter->hw, v_retval), @@ -1490,6 +1630,58 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } } break; + case VIRTCHNL_OP_ADD_FDIR_FILTER: { + struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg; + struct iavf_fdir_fltr *fdir, *fdir_tmp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdir_tmp, + &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { + if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) { + dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", + fdir->loc); + fdir->state = IAVF_FDIR_FLTR_ACTIVE; + fdir->flow_id = add_fltr->flow_id; + } else { + dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n", + add_fltr->status); + iavf_print_fdir_fltr(adapter, fdir); + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } + } + } + 
spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case VIRTCHNL_OP_DEL_FDIR_FILTER: { + struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg; + struct iavf_fdir_fltr *fdir, *fdir_tmp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { + if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) { + dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", + fdir->loc); + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } else { + fdir->state = IAVF_FDIR_FLTR_ACTIVE; + dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n", + del_fltr->status); + iavf_print_fdir_fltr(adapter, fdir); + } + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; default: if (adapter->current_op && (v_opcode != adapter->current_op)) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 73da4f71f530..f391691e2c7e 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -26,7 +26,7 @@ ice-y := ice_main.o \ ice_fw_update.o \ ice_lag.o \ ice_ethtool.o -ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o +ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 357706444dd5..9bf346133cbd 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -73,7 +73,7 @@ #define ICE_MIN_LAN_TXRX_MSIX 1 #define ICE_MIN_LAN_OICR_MSIX 1 #define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX) -#define ICE_FDIR_MSIX 1 +#define ICE_FDIR_MSIX 2 #define ICE_NO_VSI 0xffff #define ICE_VSI_MAP_CONTIG 0 #define ICE_VSI_MAP_SCATTER 1 @@ -84,6 +84,8 @@ #define ICE_MAX_LG_RSS_QS 256 #define ICE_RES_VALID_BIT 0x8000 #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) +/* All VF control VSIs share the same IRQ, so assign a unique ID for them */ +#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_MISC_VEC_ID - 1) #define ICE_INVAL_Q_INDEX 0xffff #define ICE_INVAL_VFID 256 @@ -229,6 +231,7 @@ enum ice_state { __ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */ __ICE_LINK_DEFAULT_OVERRIDE_PENDING, __ICE_PHY_INIT_COMPLETE, + __ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */ __ICE_STATE_NBITS /* must be last */ }; diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 3124a3bf519a..1148d768f8ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -275,6 +275,22 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) } /** + * ice_rx_offset - Return expected offset into page to access data + * @rx_ring: Ring we are requesting offset of + * + * Returns the offset value for ring into the data buffer. 
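+ * + * Rings that use build_skb reserve ICE_SKB_PAD of headroom and XDP rings reserve XDP_PACKET_HEADROOM; all other rings start the data at offset zero.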
+ */ +static unsigned int ice_rx_offset(struct ice_ring *rx_ring) +{ + if (ice_ring_uses_build_skb(rx_ring)) + return ICE_SKB_PAD; + else if (ice_is_xdp_ena_vsi(rx_ring->vsi)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +/** * ice_setup_rx_ctx - Configure a receive ring context * @ring: The Rx ring to configure * @@ -413,11 +429,15 @@ int ice_setup_rx_ctx(struct ice_ring *ring) else ice_set_ring_build_skb_ena(ring); + ring->rx_offset = ice_rx_offset(ring); + /* init queue specific tail register */ ring->tail = hw->hw_addr + QRX_TAIL(pf_q); writel(0, ring->tail); if (ring->xsk_pool) { + bool ok; + if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) { dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n", num_bufs, ring->q_index); @@ -426,8 +446,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring) return 0; } - err = ice_alloc_rx_bufs_zc(ring, num_bufs); - if (err) + ok = ice_alloc_rx_bufs_zc(ring, num_bufs); + if (!ok) dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n", ring->q_index, pf_q); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 3d9475e222cd..1898325e62b5 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -4373,7 +4373,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, } /** - * ice_fw_supports_lldp_fltr - check NVM version supports lldp_fltr_ctrl + * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl * @hw: pointer to HW struct */ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index e42727941ef5..85c9eccfdae8 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -834,7 +834,7 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, } /** - * ice_get_ieee_dcb_cfg + * ice_get_ieee_or_cee_dcb_cfg * @pi: port information structure * @dcbx_mode: mode of DCBX (IEEE or CEE) * diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 2dcfa685b763..4f738425fb44 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -871,68 +871,47 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - char *p = (char *)data; unsigned int i; + u8 *p = data; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < ICE_VSI_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - ice_gstrings_vsi_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ICE_VSI_STATS_LEN; i++) + ethtool_sprintf(&p, + ice_gstrings_vsi_stats[i].stat_string); ice_for_each_alloc_txq(vsi, i) { - snprintf(p, ETH_GSTRING_LEN, - "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); } ice_for_each_alloc_rxq(vsi, i) { - snprintf(p, ETH_GSTRING_LEN, - "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); } if (vsi->type != 
ICE_VSI_PF) return; - for (i = 0; i < ICE_PF_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - ice_gstrings_pf_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ICE_PF_STATS_LEN; i++) + ethtool_sprintf(&p, + ice_gstrings_pf_stats[i].stat_string); for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { - snprintf(p, ETH_GSTRING_LEN, - "tx_priority_%u_xon.nic", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "tx_priority_%u_xoff.nic", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i); + ethtool_sprintf(&p, "tx_priority_%u_xoff.nic", i); } for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { - snprintf(p, ETH_GSTRING_LEN, - "rx_priority_%u_xon.nic", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "rx_priority_%u_xoff.nic", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "rx_priority_%u_xon.nic", i); + ethtool_sprintf(&p, "rx_priority_%u_xoff.nic", i); } break; case ETH_SS_TEST: memcpy(data, ice_gstrings_test, ICE_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_PRIV_FLAGS: - for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - ice_gstrings_priv_flags[i].name); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) + ethtool_sprintf(&p, ice_gstrings_priv_flags[i].name); break; default: break; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 192729546bbf..440964defa4a 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -1679,6 +1679,10 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) input->flex_offset = userdata.flex_offset; } + input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS; + input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE; + input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL; + /* input struct is added to the HW filter list */ ice_fdir_update_list_entry(pf, input, fsp->location); diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index 59c0c6a0f8c5..59ef68f072c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -40,6 +40,204 @@ static const u8 ice_fdir_ipv4_pkt[] = { 0x00, 0x00 }; +static const u8 ice_fdir_udp4_gtpu4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x58, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x28, 0x00, 0x00, 0x40, 0x00, 0x40, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_icmp4_gtpu4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_gtpu4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x44, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_l2tpv3_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x73, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv6_l2tpv3_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x73, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_esp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x32, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00 +}; + +static const u8 ice_fdir_ipv6_esp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x32, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_ah_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x33, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00 +}; + +static const u8 ice_fdir_ipv6_ah_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x33, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_nat_t_esp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x1C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x11, 0x94, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv6_nat_t_esp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x11, 0x94, 0x00, 0x00, 0x00, 0x08, +}; + +static const u8 ice_fdir_ipv4_pfcp_node_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x2C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x22, 0x65, 0x22, 0x65, 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_pfcp_session_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x2C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x22, 0x65, 0x22, 0x65, 0x00, 0x00, + 0x00, 0x00, 0x21, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv6_pfcp_node_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x18, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x65, + 0x22, 0x65, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, + 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv6_pfcp_session_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x18, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x65, + 0x22, 0x65, 0x00, 0x00, 0x00, 0x00, 0x21, 0x00, + 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_non_ip_l2_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + static const u8 ice_fdir_tcpv6_pkt[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, @@ -239,6 +437,111 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { sizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt, }, { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP, + sizeof(ice_fdir_udp4_gtpu4_pkt), + ice_fdir_udp4_gtpu4_pkt, + sizeof(ice_fdir_udp4_gtpu4_pkt), + ice_fdir_udp4_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP, + 
sizeof(ice_fdir_tcp4_gtpu4_pkt), + ice_fdir_tcp4_gtpu4_pkt, + sizeof(ice_fdir_tcp4_gtpu4_pkt), + ice_fdir_tcp4_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP, + sizeof(ice_fdir_icmp4_gtpu4_pkt), + ice_fdir_icmp4_gtpu4_pkt, + sizeof(ice_fdir_icmp4_gtpu4_pkt), + ice_fdir_icmp4_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER, + sizeof(ice_fdir_ipv4_gtpu4_pkt), + ice_fdir_ipv4_gtpu4_pkt, + sizeof(ice_fdir_ipv4_gtpu4_pkt), + ice_fdir_ipv4_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3, + sizeof(ice_fdir_ipv4_l2tpv3_pkt), ice_fdir_ipv4_l2tpv3_pkt, + sizeof(ice_fdir_ipv4_l2tpv3_pkt), ice_fdir_ipv4_l2tpv3_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3, + sizeof(ice_fdir_ipv6_l2tpv3_pkt), ice_fdir_ipv6_l2tpv3_pkt, + sizeof(ice_fdir_ipv6_l2tpv3_pkt), ice_fdir_ipv6_l2tpv3_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_ESP, + sizeof(ice_fdir_ipv4_esp_pkt), ice_fdir_ipv4_esp_pkt, + sizeof(ice_fdir_ipv4_esp_pkt), ice_fdir_ipv4_esp_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_ESP, + sizeof(ice_fdir_ipv6_esp_pkt), ice_fdir_ipv6_esp_pkt, + sizeof(ice_fdir_ipv6_esp_pkt), ice_fdir_ipv6_esp_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_AH, + sizeof(ice_fdir_ipv4_ah_pkt), ice_fdir_ipv4_ah_pkt, + sizeof(ice_fdir_ipv4_ah_pkt), ice_fdir_ipv4_ah_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_AH, + sizeof(ice_fdir_ipv6_ah_pkt), ice_fdir_ipv6_ah_pkt, + sizeof(ice_fdir_ipv6_ah_pkt), ice_fdir_ipv6_ah_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP, + sizeof(ice_fdir_ipv4_nat_t_esp_pkt), + ice_fdir_ipv4_nat_t_esp_pkt, + sizeof(ice_fdir_ipv4_nat_t_esp_pkt), + ice_fdir_ipv4_nat_t_esp_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP, + sizeof(ice_fdir_ipv6_nat_t_esp_pkt), + ice_fdir_ipv6_nat_t_esp_pkt, + sizeof(ice_fdir_ipv6_nat_t_esp_pkt), + ice_fdir_ipv6_nat_t_esp_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE, + sizeof(ice_fdir_ipv4_pfcp_node_pkt), + ice_fdir_ipv4_pfcp_node_pkt, + sizeof(ice_fdir_ipv4_pfcp_node_pkt), + ice_fdir_ipv4_pfcp_node_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION, + sizeof(ice_fdir_ipv4_pfcp_session_pkt), + ice_fdir_ipv4_pfcp_session_pkt, + sizeof(ice_fdir_ipv4_pfcp_session_pkt), + ice_fdir_ipv4_pfcp_session_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE, + sizeof(ice_fdir_ipv6_pfcp_node_pkt), + ice_fdir_ipv6_pfcp_node_pkt, + sizeof(ice_fdir_ipv6_pfcp_node_pkt), + ice_fdir_ipv6_pfcp_node_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION, + sizeof(ice_fdir_ipv6_pfcp_session_pkt), + ice_fdir_ipv6_pfcp_session_pkt, + sizeof(ice_fdir_ipv6_pfcp_session_pkt), + ice_fdir_ipv6_pfcp_session_pkt, + }, + { + ICE_FLTR_PTYPE_NON_IP_L2, + sizeof(ice_fdir_non_ip_l2_pkt), ice_fdir_non_ip_l2_pkt, + sizeof(ice_fdir_non_ip_l2_pkt), ice_fdir_non_ip_l2_pkt, + }, + { ICE_FLTR_PTYPE_NONF_IPV6_TCP, sizeof(ice_fdir_tcpv6_pkt), ice_fdir_tcpv6_pkt, sizeof(ice_fdir_tcp6_tun_pkt), ice_fdir_tcp6_tun_pkt, @@ -374,21 +677,31 @@ ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) { fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_YES; fdir_fltr_ctx.qindex = 0; + } else if (input->dest_ctl == + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER) { + fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO; + fdir_fltr_ctx.qindex = 0; } else { + if (input->dest_ctl == + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP) + fdir_fltr_ctx.toq = input->q_region; fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO; fdir_fltr_ctx.qindex = input->q_index; } - fdir_fltr_ctx.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS; + fdir_fltr_ctx.cnt_ena = input->cnt_ena; 
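+ /* the counter enable, completion reporting and FDID priority come from the filter itself, so each rule can request its own reporting behavior */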
fdir_fltr_ctx.cnt_index = input->cnt_index; fdir_fltr_ctx.fd_vsi = ice_get_hw_vsi_num(hw, input->dest_vsi); fdir_fltr_ctx.evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE; - fdir_fltr_ctx.toq_prio = 3; + if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER) + fdir_fltr_ctx.toq_prio = 0; + else + fdir_fltr_ctx.toq_prio = 3; fdir_fltr_ctx.pcmd = add ? ICE_FXD_FLTR_QW1_PCMD_ADD : ICE_FXD_FLTR_QW1_PCMD_REMOVE; fdir_fltr_ctx.swap = ICE_FXD_FLTR_QW1_SWAP_NOT_SET; fdir_fltr_ctx.comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO; - fdir_fltr_ctx.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL; - fdir_fltr_ctx.fdid_prio = 3; + fdir_fltr_ctx.comp_report = input->comp_report; + fdir_fltr_ctx.fdid_prio = input->fdid_prio; fdir_fltr_ctx.desc_prof = 1; fdir_fltr_ctx.desc_prof_prio = 3; ice_set_fd_desc_val(&fdir_fltr_ctx, fdesc); @@ -471,6 +784,55 @@ static void ice_pkt_insert_ipv6_addr(u8 *pkt, int offset, __be32 *addr) } /** + * ice_pkt_insert_u6_qfi - insert a 6-bit QFI value into a memory buffer for GTPU + * @pkt: packet buffer + * @offset: offset into buffer + * @data: 8 bit value to convert and insert into pkt at offset + * + * This function is designed for inserting QFI (6 bits) for GTPU. + */ +static void ice_pkt_insert_u6_qfi(u8 *pkt, int offset, u8 data) +{ + u8 ret; + + ret = (data & 0x3F) + (*(pkt + offset) & 0xC0); + memcpy(pkt + offset, &ret, sizeof(ret)); +} + +/** + * ice_pkt_insert_u8 - insert a u8 value into a memory buffer. + * @pkt: packet buffer + * @offset: offset into buffer + * @data: 8 bit value to convert and insert into pkt at offset + */ +static void ice_pkt_insert_u8(u8 *pkt, int offset, u8 data) +{ + memcpy(pkt + offset, &data, sizeof(data)); +} + +/** + * ice_pkt_insert_u8_tc - insert a u8 value into a memory buffer for TC ipv6. + * @pkt: packet buffer + * @offset: offset into buffer + * @data: 8 bit value to convert and insert into pkt at offset + * + * This function is designed for inserting the Traffic Class (TC) for IPv6, + * since the TC field is not byte aligned. The value is split into two + * nibbles, each nibble is merged with the adjacent bits already in pkt, + * and the two resulting bytes are written back one by one. + */ +static void ice_pkt_insert_u8_tc(u8 *pkt, int offset, u8 data) +{ + u8 high, low; + + high = (data >> 4) + (*(pkt + offset) & 0xF0); + memcpy(pkt + offset, &high, sizeof(high)); + + low = (*(pkt + offset + 1) & 0x0F) + ((data & 0x0F) << 4); + memcpy(pkt + offset + 1, &low, sizeof(low)); +} + +/** * ice_pkt_insert_u16 - insert a be16 value into a memory buffer * @pkt: packet buffer * @offset: offset into buffer @@ -493,6 +855,16 @@ static void ice_pkt_insert_u32(u8 *pkt, int offset, __be32 data) } /** + * ice_pkt_insert_mac_addr - insert a MAC addr into a memory buffer. 
+ * @pkt: packet buffer + * @addr: MAC address to convert and insert into pkt at offset + */ +static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr) +{ + ether_addr_copy(pkt, addr); +} + +/** * ice_fdir_get_gen_prgm_pkt - generate a training packet * @hw: pointer to the hardware structure * @input: flow director filter data structure @@ -520,11 +892,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, case IPPROTO_SCTP: flow = ICE_FLTR_PTYPE_NONF_IPV4_SCTP; break; - case IPPROTO_IP: + default: flow = ICE_FLTR_PTYPE_NONF_IPV4_OTHER; break; - default: - return ICE_ERR_PARAM; } } else if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) { switch (input->ip.v6.proto) { @@ -537,11 +907,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, case IPPROTO_SCTP: flow = ICE_FLTR_PTYPE_NONF_IPV6_SCTP; break; - case IPPROTO_IP: + default: flow = ICE_FLTR_PTYPE_NONF_IPV6_OTHER; break; - default: - return ICE_ERR_PARAM; } } else { flow = input->flow_type; @@ -580,6 +948,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v4.dst_ip); ice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET, input->ip.v4.dst_port); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); if (frag) loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF; break; @@ -592,6 +963,11 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v4.dst_ip); ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET, input->ip.v4.dst_port); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + ice_pkt_insert_mac_addr(loc + ETH_ALEN, + input->ext_data.src_mac); break; case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, @@ -602,13 +978,87 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v4.dst_ip); ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET, input->ip.v4.dst_port); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, input->ip.v4.src_ip); ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, input->ip.v4.dst_ip); - ice_pkt_insert_u16(loc, ICE_IPV4_PROTO_OFFSET, 0); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_u8(loc, ICE_IPV4_PROTO_OFFSET, + input->ip.v4.proto); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER: + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_GTPU_TEID_OFFSET, + input->gtpu_data.teid); + ice_pkt_insert_u6_qfi(loc, ICE_IPV4_GTPU_QFI_OFFSET, + input->gtpu_data.qfi); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3: + ice_pkt_insert_u32(loc, ICE_IPV4_L2TPV3_SESS_ID_OFFSET, + input->l2tpv3_data.session_id); + break; + case 
ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3: + ice_pkt_insert_u32(loc, ICE_IPV6_L2TPV3_SESS_ID_OFFSET, + input->l2tpv3_data.session_id); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_ESP: + ice_pkt_insert_u32(loc, ICE_IPV4_ESP_SPI_OFFSET, + input->ip.v4.sec_parm_idx); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_ESP: + ice_pkt_insert_u32(loc, ICE_IPV6_ESP_SPI_OFFSET, + input->ip.v6.sec_parm_idx); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_AH: + ice_pkt_insert_u32(loc, ICE_IPV4_AH_SPI_OFFSET, + input->ip.v4.sec_parm_idx); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_AH: + ice_pkt_insert_u32(loc, ICE_IPV6_AH_SPI_OFFSET, + input->ip.v6.sec_parm_idx); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP: + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_NAT_T_ESP_SPI_OFFSET, + input->ip.v4.sec_parm_idx); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_u32(loc, ICE_IPV6_NAT_T_ESP_SPI_OFFSET, + input->ip.v6.sec_parm_idx); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE: + case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION: + ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET, + input->ip.v4.dst_port); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE: + case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION: + ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET, + input->ip.v6.dst_port); + break; + case ICE_FLTR_PTYPE_NON_IP_L2: + ice_pkt_insert_u16(loc, ICE_MAC_ETHTYPE_OFFSET, + input->ext_data.ether_type); break; case ICE_FLTR_PTYPE_NONF_IPV6_TCP: ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, @@ -619,6 +1069,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v6.src_port); ice_pkt_insert_u16(loc, ICE_IPV6_TCP_SRC_PORT_OFFSET, input->ip.v6.dst_port); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; case ICE_FLTR_PTYPE_NONF_IPV6_UDP: ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, @@ -629,6 +1082,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v6.src_port); ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET, input->ip.v6.dst_port); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, @@ -639,12 +1095,20 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v6.src_port); ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_SRC_PORT_OFFSET, input->ip.v6.dst_port); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, input->ip.v6.src_ip); ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, input->ip.v6.dst_ip); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim); + ice_pkt_insert_u8(loc, ICE_IPV6_PROTO_OFFSET, + 
input->ip.v6.proto); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; default: return ICE_ERR_PARAM; @@ -671,7 +1135,7 @@ bool ice_fdir_has_frag(enum ice_fltr_ptype flow) } /** - * ice_fdir_find_by_idx - find filter with idx + * ice_fdir_find_fltr_by_idx - find filter with idx * @hw: pointer to hardware structure * @fltr_idx: index to find. * diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index 1c587766daab..d2d40e18ae8a 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -25,6 +25,25 @@ #define ICE_IPV6_UDP_DST_PORT_OFFSET 56 #define ICE_IPV6_SCTP_SRC_PORT_OFFSET 54 #define ICE_IPV6_SCTP_DST_PORT_OFFSET 56 +#define ICE_MAC_ETHTYPE_OFFSET 12 +#define ICE_IPV4_TOS_OFFSET 15 +#define ICE_IPV4_TTL_OFFSET 22 +#define ICE_IPV6_TC_OFFSET 14 +#define ICE_IPV6_HLIM_OFFSET 21 +#define ICE_IPV6_PROTO_OFFSET 20 +#define ICE_IPV4_GTPU_TEID_OFFSET 46 +#define ICE_IPV4_GTPU_QFI_OFFSET 56 +#define ICE_IPV4_L2TPV3_SESS_ID_OFFSET 34 +#define ICE_IPV6_L2TPV3_SESS_ID_OFFSET 54 +#define ICE_IPV4_ESP_SPI_OFFSET 34 +#define ICE_IPV6_ESP_SPI_OFFSET 54 +#define ICE_IPV4_AH_SPI_OFFSET 38 +#define ICE_IPV6_AH_SPI_OFFSET 58 +#define ICE_IPV4_NAT_T_ESP_SPI_OFFSET 42 +#define ICE_IPV6_NAT_T_ESP_SPI_OFFSET 62 + +#define ICE_FDIR_MAX_FLTRS 16384 + /* IP v4 has 2 flag bits that enable fragment processing: DF and MF. DF * requests that the packet not be fragmented. MF indicates that a packet has * been fragmented. @@ -34,6 +53,8 @@ enum ice_fltr_prgm_desc_dest { ICE_FLTR_PRGM_DESC_DEST_DROP_PKT, ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX, + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP, + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER, }; enum ice_fltr_prgm_desc_fd_status { @@ -86,6 +107,7 @@ struct ice_fdir_v4 { u8 tos; u8 ip_ver; u8 proto; + u8 ttl; }; #define ICE_IPV6_ADDR_LEN_AS_U32 4 @@ -99,10 +121,35 @@ struct ice_fdir_v6 { __be32 sec_parm_idx; /* security parameter index */ u8 tc; u8 proto; + u8 hlim; +}; + +struct ice_fdir_udp_gtp { + u8 flags; + u8 msg_type; + __be16 rsrvd_len; + __be32 teid; + __be16 rsrvd_seq_nbr; + u8 rsrvd_n_pdu_nbr; + u8 rsrvd_next_ext_type; + u8 rsvrd_ext_len; + u8 pdu_type:4, + spare:4; + u8 ppp:1, + rqi:1, + qfi:6; + u32 rsvrd; + u8 next_ext; +}; + +struct ice_fdir_l2tpv3 { + __be32 session_id; }; struct ice_fdir_extra { u8 dst_mac[ETH_ALEN]; /* dest MAC address */ + u8 src_mac[ETH_ALEN]; /* src MAC address */ + __be16 ether_type; /* for NON_IP_L2 */ u32 usr_def[2]; /* user data */ __be16 vlan_type; /* VLAN ethertype */ __be16 vlan_tag; /* VLAN tag info */ @@ -117,11 +164,19 @@ struct ice_fdir_fltr { struct ice_fdir_v6 v6; } ip, mask; + struct ice_fdir_udp_gtp gtpu_data; + struct ice_fdir_udp_gtp gtpu_mask; + + struct ice_fdir_l2tpv3 l2tpv3_data; + struct ice_fdir_l2tpv3 l2tpv3_mask; + struct ice_fdir_extra ext_data; struct ice_fdir_extra ext_mask; /* flex byte filter data */ __be16 flex_word; + /* queue region size (=2^q_region) */ + u8 q_region; u16 flex_offset; u16 flex_fltr; @@ -129,9 +184,12 @@ struct ice_fdir_fltr { u16 q_index; u16 dest_vsi; u8 dest_ctl; + u8 cnt_ena; u8 fltr_status; u16 cnt_index; u32 fltr_id; + u8 fdid_prio; + u8 comp_report; }; /* Dummy packet filter definition structure */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 5e1fd30c0a0f..afe77f7a3199 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -2361,18 +2361,82 @@ 
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) } /** - * ice_find_prof_id - find profile ID for a given field vector + * ice_prof_has_mask_idx - determine if profile index masking is identical + * @hw: pointer to the hardware structure + * @blk: HW block + * @prof: profile to check + * @idx: profile index to check + * @mask: mask to match + */ +static bool +ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx, + u16 mask) +{ + bool expect_no_mask = false; + bool found = false; + bool match = false; + u16 i; + + /* If mask is 0x0000 or 0xffff, then there is no masking */ + if (mask == 0 || mask == 0xffff) + expect_no_mask = true; + + /* Scan the enabled masks on this profile, for the specified idx */ + for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first + + hw->blk[blk].masks.count; i++) + if (hw->blk[blk].es.mask_ena[prof] & BIT(i)) + if (hw->blk[blk].masks.masks[i].in_use && + hw->blk[blk].masks.masks[i].idx == idx) { + found = true; + if (hw->blk[blk].masks.masks[i].mask == mask) + match = true; + break; + } + + if (expect_no_mask) { + if (found) + return false; + } else { + if (!match) + return false; + } + + return true; +} + +/** + * ice_prof_has_mask - determine if profile masking is identical + * @hw: pointer to the hardware structure + * @blk: HW block + * @prof: profile to check + * @masks: masks to match + */ +static bool +ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks) +{ + u16 i; + + /* es->mask_ena[prof] will have the mask */ + for (i = 0; i < hw->blk[blk].es.fvw; i++) + if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i])) + return false; + + return true; +} + +/** + * ice_find_prof_id_with_mask - find profile ID for a given field vector + * @hw: pointer to the hardware structure + * @blk: HW block + * @fv: field vector to search for + * @masks: masks for FV + * @prof_id: receives the profile ID */ static enum ice_status -ice_find_prof_id(struct ice_hw *hw, enum ice_block blk, - struct ice_fv_word *fv, u8 *prof_id) +ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, + struct ice_fv_word *fv, u16 *masks, u8 *prof_id) { struct ice_es *es = &hw->blk[blk].es; - u16 off; u8 i; /* For FD, we don't want to re-use an existing profile with the same @@ -2382,11 +2446,15 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk, return ICE_ERR_DOES_NOT_EXIST; for (i = 0; i < (u8)es->count; i++) { - off = i * es->fvw; + u16 off = i * es->fvw; if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv))) continue; + /* check if mask settings are the same for this profile */ + if (masks && !ice_prof_has_mask(hw, blk, i, masks)) + continue; + *prof_id = i; return 0; } @@ -2438,20 +2506,22 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) * ice_alloc_tcam_ent - allocate hardware TCAM entry * @hw: pointer to the HW struct * @blk: the block to allocate the TCAM for + * @btm: true to allocate from bottom of table, false to allocate from top * @tcam_idx: pointer to variable to receive the TCAM entry * * This function allocates a new entry in a Profile ID TCAM for a specific * block. 
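+ * Entries allocated from the bottom of the table (@btm true) have lower priority than entries allocated from the top.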
*/ static enum ice_status -ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx) +ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, + u16 *tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) return ICE_ERR_PARAM; - return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx); + return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx); } /** @@ -2537,6 +2607,330 @@ ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) } /** + * ice_write_prof_mask_reg - write profile mask register + * @hw: pointer to the HW struct + * @blk: hardware block + * @mask_idx: mask index + * @idx: index of the FV which will use the mask + * @mask: the 16-bit mask + */ +static void +ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx, + u16 idx, u16 mask) +{ + u32 offset; + u32 val; + + switch (blk) { + case ICE_BLK_RSS: + offset = GLQF_HMASK(mask_idx); + val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M; + val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; + break; + case ICE_BLK_FD: + offset = GLQF_FDMASK(mask_idx); + val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M; + val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M; + break; + default: + ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n", + blk); + return; + } + + wr32(hw, offset, val); + ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n", + blk, idx, offset, val); +} + +/** + * ice_write_prof_mask_enable_res - write profile mask enable register + * @hw: pointer to the HW struct + * @blk: hardware block + * @prof_id: profile ID + * @enable_mask: enable mask + */ +static void +ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk, + u16 prof_id, u32 enable_mask) +{ + u32 offset; + + switch (blk) { + case ICE_BLK_RSS: + offset = GLQF_HMASK_SEL(prof_id); + break; + case ICE_BLK_FD: + offset = GLQF_FDMASK_SEL(prof_id); + break; + default: + ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n", + blk); + return; + } + + wr32(hw, offset, enable_mask); + ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n", + blk, prof_id, offset, enable_mask); +} + +/** + * ice_init_prof_masks - initialize profile masks + * @hw: pointer to the HW struct + * @blk: hardware block + */ +static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk) +{ + u16 per_pf; + u16 i; + + mutex_init(&hw->blk[blk].masks.lock); + + per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs; + + hw->blk[blk].masks.count = per_pf; + hw->blk[blk].masks.first = hw->pf_id * per_pf; + + memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks)); + + for (i = hw->blk[blk].masks.first; + i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) + ice_write_prof_mask_reg(hw, blk, i, 0, 0); +} + +/** + * ice_init_all_prof_masks - initialize all profile masks + * @hw: pointer to the HW struct + */ +static void ice_init_all_prof_masks(struct ice_hw *hw) +{ + ice_init_prof_masks(hw, ICE_BLK_RSS); + ice_init_prof_masks(hw, ICE_BLK_FD); +} + +/** + * ice_alloc_prof_mask - allocate profile mask + * @hw: pointer to the HW struct + * @blk: hardware block + * @idx: index of FV which will use the mask + * @mask: the 16-bit mask + * @mask_idx: variable to receive the mask index + */ +static enum ice_status +ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask, + u16 *mask_idx) +{ + bool found_unused = false, found_copy = false; + enum ice_status status = ICE_ERR_MAX_LIMIT; + u16 unused_idx = 0, 
copy_idx = 0; + u16 i; + + if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) + return ICE_ERR_PARAM; + + mutex_lock(&hw->blk[blk].masks.lock); + + for (i = hw->blk[blk].masks.first; + i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) + if (hw->blk[blk].masks.masks[i].in_use) { + /* if mask is in use and it exactly duplicates the + * desired mask and index, then it can be reused + */ + if (hw->blk[blk].masks.masks[i].mask == mask && + hw->blk[blk].masks.masks[i].idx == idx) { + found_copy = true; + copy_idx = i; + break; + } + } else { + /* save off unused index, but keep searching in case + * there is an exact match later on + */ + if (!found_unused) { + found_unused = true; + unused_idx = i; + } + } + + if (found_copy) + i = copy_idx; + else if (found_unused) + i = unused_idx; + else + goto err_ice_alloc_prof_mask; + + /* update mask for a new entry */ + if (found_unused) { + hw->blk[blk].masks.masks[i].in_use = true; + hw->blk[blk].masks.masks[i].mask = mask; + hw->blk[blk].masks.masks[i].idx = idx; + hw->blk[blk].masks.masks[i].ref = 0; + ice_write_prof_mask_reg(hw, blk, i, idx, mask); + } + + hw->blk[blk].masks.masks[i].ref++; + *mask_idx = i; + status = 0; + +err_ice_alloc_prof_mask: + mutex_unlock(&hw->blk[blk].masks.lock); + + return status; +} + +/** + * ice_free_prof_mask - free profile mask + * @hw: pointer to the HW struct + * @blk: hardware block + * @mask_idx: index of mask + */ +static enum ice_status +ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx) +{ + if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) + return ICE_ERR_PARAM; + + if (!(mask_idx >= hw->blk[blk].masks.first && + mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count)) + return ICE_ERR_DOES_NOT_EXIST; + + mutex_lock(&hw->blk[blk].masks.lock); + + if (!hw->blk[blk].masks.masks[mask_idx].in_use) + goto exit_ice_free_prof_mask; + + if (hw->blk[blk].masks.masks[mask_idx].ref > 1) { + hw->blk[blk].masks.masks[mask_idx].ref--; + goto exit_ice_free_prof_mask; + } + + /* remove mask */ + hw->blk[blk].masks.masks[mask_idx].in_use = false; + hw->blk[blk].masks.masks[mask_idx].mask = 0; + hw->blk[blk].masks.masks[mask_idx].idx = 0; + + /* update mask as unused entry */ + ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk, + mask_idx); + ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0); + +exit_ice_free_prof_mask: + mutex_unlock(&hw->blk[blk].masks.lock); + + return 0; +} + +/** + * ice_free_prof_masks - free all profile masks for a profile + * @hw: pointer to the HW struct + * @blk: hardware block + * @prof_id: profile ID + */ +static enum ice_status +ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id) +{ + u32 mask_bm; + u16 i; + + if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) + return ICE_ERR_PARAM; + + mask_bm = hw->blk[blk].es.mask_ena[prof_id]; + for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++) + if (mask_bm & BIT(i)) + ice_free_prof_mask(hw, blk, i); + + return 0; +} + +/** + * ice_shutdown_prof_masks - releases lock for masking + * @hw: pointer to the HW struct + * @blk: hardware block + * + * This should be called before unloading the driver + */ +static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk) +{ + u16 i; + + mutex_lock(&hw->blk[blk].masks.lock); + + for (i = hw->blk[blk].masks.first; + i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) { + ice_write_prof_mask_reg(hw, blk, i, 0, 0); + + hw->blk[blk].masks.masks[i].in_use = false; + hw->blk[blk].masks.masks[i].idx = 0; + hw->blk[blk].masks.masks[i].mask = 0;
+ } + + mutex_unlock(&hw->blk[blk].masks.lock); + mutex_destroy(&hw->blk[blk].masks.lock); +} + +/** + * ice_shutdown_all_prof_masks - releases all locks for masking + * @hw: pointer to the HW struct + * + * This should be called before unloading the driver + */ +static void ice_shutdown_all_prof_masks(struct ice_hw *hw) +{ + ice_shutdown_prof_masks(hw, ICE_BLK_RSS); + ice_shutdown_prof_masks(hw, ICE_BLK_FD); +} + +/** + * ice_update_prof_masking - set registers according to masking + * @hw: pointer to the HW struct + * @blk: hardware block + * @prof_id: profile ID + * @masks: masks + */ +static enum ice_status +ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, + u16 *masks) +{ + bool err = false; + u32 ena_mask = 0; + u16 idx; + u16 i; + + /* Only support FD and RSS masking, otherwise nothing to be done */ + if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) + return 0; + + for (i = 0; i < hw->blk[blk].es.fvw; i++) + if (masks[i] && masks[i] != 0xFFFF) { + if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) { + ena_mask |= BIT(idx); + } else { + /* not enough bitmaps */ + err = true; + break; + } + } + + if (err) { + /* free any bitmaps we have allocated */ + for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++) + if (ena_mask & BIT(i)) + ice_free_prof_mask(hw, blk, i); + + return ICE_ERR_OUT_OF_RANGE; + } + + /* enable the masks for this profile */ + ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask); + + /* store enabled masks with profile so that they can be freed later */ + hw->blk[blk].es.mask_ena[prof_id] = ena_mask; + + return 0; +} + +/** * ice_write_es - write an extraction sequence to hardware * @hw: pointer to the HW struct * @blk: the block in which to write the extraction sequence @@ -2575,6 +2969,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) if (hw->blk[blk].es.ref_count[prof_id] > 0) { if (!--hw->blk[blk].es.ref_count[prof_id]) { ice_write_es(hw, blk, prof_id, NULL); + ice_free_prof_masks(hw, blk, prof_id); return ice_free_prof_id(hw, blk, prof_id); } } @@ -2937,6 +3332,7 @@ void ice_free_hw_tbls(struct ice_hw *hw) devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena); } list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) { @@ -2944,6 +3340,7 @@ void ice_free_hw_tbls(struct ice_hw *hw) devm_kfree(ice_hw_to_dev(hw), r); } mutex_destroy(&hw->rss_locks); + ice_shutdown_all_prof_masks(hw); memset(hw->blk, 0, sizeof(hw->blk)); } @@ -2997,6 +3394,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw) memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw); memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); memset(es->written, 0, es->count * sizeof(*es->written)); + memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena)); } } @@ -3010,6 +3408,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) mutex_init(&hw->rss_locks); INIT_LIST_HEAD(&hw->rss_list_head); + ice_init_all_prof_masks(hw); for (i = 0; i < ICE_BLK_COUNT; i++) { struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; struct ice_prof_tcam *prof = &hw->blk[i].prof; @@ -3112,6 +3511,11 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) sizeof(*es->written), GFP_KERNEL); if (!es->written) goto err; + + es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->mask_ena), GFP_KERNEL); + if (!es->mask_ena) + goto err; } return 0; @@ -3711,22 +4115,79 @@ 
ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) return 0; } +/* The entries here need to match the order of enum ice_ptype_attrib */ +static const struct ice_ptype_attrib_info ice_ptype_attributes[] = { + { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK }, + { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK }, + { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK }, + { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK }, +}; + +/** + * ice_get_ptype_attrib_info - get PTYPE attribute information + * @type: attribute type + * @info: pointer to the variable to receive the attribute information + */ +static void +ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type, + struct ice_ptype_attrib_info *info) +{ + *info = ice_ptype_attributes[type]; +} + +/** + * ice_add_prof_attrib - add any PTG with attributes to profile + * @prof: pointer to the profile to which PTG entries will be added + * @ptg: PTG to be added + * @ptype: PTYPE that needs to be looked up + * @attr: array of attributes that will be considered + * @attr_cnt: number of elements in the attribute array + */ +static enum ice_status +ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, + const struct ice_ptype_attributes *attr, u16 attr_cnt) +{ + bool found = false; + u16 i; + + for (i = 0; i < attr_cnt; i++) + if (attr[i].ptype == ptype) { + found = true; + + prof->ptg[prof->ptg_cnt] = ptg; + ice_get_ptype_attrib_info(attr[i].attrib, + &prof->attr[prof->ptg_cnt]); + + if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) + return ICE_ERR_MAX_LIMIT; + } + + if (!found) + return ICE_ERR_DOES_NOT_EXIST; + + return 0; +} + /** * ice_add_prof - add profile * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits) + * @attr: array of attributes + * @attr_cnt: number of elements in attr array * @es: extraction sequence (length of array is determined by the block) + * @masks: mask for extraction sequence * - * This function registers a profile, which matches a set of PTGs with a + * This function registers a profile, which matches a set of PTYPES with a * particular extraction sequence. While the hardware profile is allocated * it will not be written until the first call to ice_add_flow that specifies * the ID value used here.
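Further below, both ice_alloc_tcam_ent() call sites pass a new priority argument: entries with an empty attribute mask are taken from the bottom of the TCAM table, while attribute-carrying entries come from the top so they take precedence. A standalone sketch of such a two-ended index allocator (the driver's allocator works against a shared firmware resource pool; this simplified version is purely illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TCAM_SIZE 512

static bool used[TCAM_SIZE];

/* Allocate a TCAM index: low-priority entries grow up from the bottom
 * of the table, high-priority entries grow down from the top, so the
 * two classes never interleave. Returns -1 when the table is full.
 */
static int alloc_tcam_idx(bool low_prio)
{
	int i;

	if (low_prio) {
		for (i = 0; i < TCAM_SIZE; i++)
			if (!used[i]) {
				used[i] = true;
				return i;
			}
	} else {
		for (i = TCAM_SIZE - 1; i >= 0; i--)
			if (!used[i]) {
				used[i] = true;
				return i;
			}
	}
	return -1;
}

int main(void)
{
	/* attr.mask == 0 -> low priority, from the bottom */
	printf("plain entry at %d\n", alloc_tcam_idx(true));
	/* attribute-carrying entry -> from the top */
	printf("attr entry at %d\n", alloc_tcam_idx(false));
	return 0;
}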
*/ enum ice_status ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], - struct ice_fv_word *es) + const struct ice_ptype_attributes *attr, u16 attr_cnt, + struct ice_fv_word *es, u16 *masks) { u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); @@ -3740,7 +4201,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], mutex_lock(&hw->blk[blk].es.prof_map_lock); /* search for existing profile */ - status = ice_find_prof_id(hw, blk, es, &prof_id); + status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id); if (status) { /* allocate profile ID */ status = ice_alloc_prof_id(hw, blk, &prof_id); @@ -3758,6 +4219,9 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], if (status) goto err_ice_add_prof; } + status = ice_update_prof_masking(hw, blk, prof_id, masks); + if (status) + goto err_ice_add_prof; /* and write new es */ ice_write_es(hw, blk, prof_id, es); @@ -3792,7 +4256,6 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], BITS_PER_BYTE) { u16 ptype; u8 ptg; - u8 m; ptype = byte * BITS_PER_BYTE + bit; @@ -3807,15 +4270,25 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], continue; set_bit(ptg, ptgs_used); - prof->ptg[prof->ptg_cnt] = ptg; - - if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) + /* Check to see if there are any attributes for + * this PTYPE, and add them if found. + */ + status = ice_add_prof_attrib(prof, ptg, ptype, + attr, attr_cnt); + if (status == ICE_ERR_MAX_LIMIT) break; + if (status) { + /* This is simply a PTYPE/PTG with no + * attribute + */ + prof->ptg[prof->ptg_cnt] = ptg; + prof->attr[prof->ptg_cnt].flags = 0; + prof->attr[prof->ptg_cnt].mask = 0; - /* nothing left in byte, then exit */ - m = ~(u8)((1 << (bit + 1)) - 1); - if (!(ptypes[byte] & m)) - break; + if (++prof->ptg_cnt >= + ICE_MAX_PTG_PER_PROFILE) + break; + } } bytes--; @@ -4326,7 +4799,12 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, } /* for re-enabling, reallocate a TCAM */ - status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx); + /* for entries with empty attribute masks, allocate entry from + * the bottom of the TCAM table; otherwise, allocate from the + * top of the table in order to give it higher priority + */ + status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0, + &tcam->tcam_idx); if (status) return status; @@ -4336,8 +4814,8 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, return ICE_ERR_NO_MEMORY; status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, - tcam->ptg, vsig, 0, 0, vl_msk, dc_msk, - nm_msk); + tcam->ptg, vsig, 0, tcam->attr.flags, + vl_msk, dc_msk, nm_msk); if (status) goto err_ice_prof_tcam_ena_dis; @@ -4485,7 +4963,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, } /* allocate the TCAM entry index */ - status = ice_alloc_tcam_ent(hw, blk, &tcam_idx); + /* for entries with empty attribute masks, allocate entry from + * the bottom of the TCAM table; otherwise, allocate from the + * top of the table in order to give it higher priority + */ + status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0, + &tcam_idx); if (status) { devm_kfree(ice_hw_to_dev(hw), p); goto err_ice_add_prof_id_vsig; } @@ -4494,6 +4977,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, t->tcam[i].ptg = map->ptg[i]; t->tcam[i].prof_id = map->prof_id; t->tcam[i].tcam_idx = tcam_idx; + 
t->tcam[i].attr = map->attr[i]; t->tcam[i].in_use = true; p->type = ICE_TCAM_ADD; diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 20deddb807c5..8a58e79729b9 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -27,7 +27,8 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, enum ice_status ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], - struct ice_fv_word *es); + const struct ice_ptype_attributes *attr, u16 attr_cnt, + struct ice_fv_word *es, u16 *masks); enum ice_status ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); enum ice_status diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 24063c1351b2..abc156ce9d8c 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -190,6 +190,64 @@ enum ice_sect { ICE_SECT_COUNT }; +#define ICE_MAC_IPV4_GTPU_IPV4_FRAG 331 +#define ICE_MAC_IPV4_GTPU_IPV4_PAY 332 +#define ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY 333 +#define ICE_MAC_IPV4_GTPU_IPV4_TCP 334 +#define ICE_MAC_IPV4_GTPU_IPV4_ICMP 335 +#define ICE_MAC_IPV6_GTPU_IPV4_FRAG 336 +#define ICE_MAC_IPV6_GTPU_IPV4_PAY 337 +#define ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY 338 +#define ICE_MAC_IPV6_GTPU_IPV4_TCP 339 +#define ICE_MAC_IPV6_GTPU_IPV4_ICMP 340 +#define ICE_MAC_IPV4_GTPU_IPV6_FRAG 341 +#define ICE_MAC_IPV4_GTPU_IPV6_PAY 342 +#define ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY 343 +#define ICE_MAC_IPV4_GTPU_IPV6_TCP 344 +#define ICE_MAC_IPV4_GTPU_IPV6_ICMPV6 345 +#define ICE_MAC_IPV6_GTPU_IPV6_FRAG 346 +#define ICE_MAC_IPV6_GTPU_IPV6_PAY 347 +#define ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY 348 +#define ICE_MAC_IPV6_GTPU_IPV6_TCP 349 +#define ICE_MAC_IPV6_GTPU_IPV6_ICMPV6 350 + +/* Attributes that can modify PTYPE definitions. + * + * These values will represent special attributes for PTYPEs, which will + * resolve into metadata packet flags definitions that can be used in the TCAM + * for identifying a PTYPE with specific characteristics. 
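The attribute table defined just below encodes each GTP-U variant as a (flags, mask) pair over two TCAM flag bits, ICE_GTP_PDU (BIT(14)) and ICE_GTP_PDU_LINK (BIT(13)); a packet matches an attribute when its metadata flags, ANDed with the mask, equal the flags, which is how "don't care" bits (the link bit for the EH attribute) fall out. A small sketch of that match rule (a hypothetical check, not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GTP_PDU      (1u << 14) /* PDU session extension header present */
#define GTP_PDU_LINK (1u << 13) /* uplink vs. downlink direction */

struct attrib { uint16_t flags, mask; };

static const struct attrib session  = { 0, GTP_PDU | GTP_PDU_LINK };
static const struct attrib downlink = { GTP_PDU, GTP_PDU | GTP_PDU_LINK };
static const struct attrib uplink   = { GTP_PDU | GTP_PDU_LINK,
					GTP_PDU | GTP_PDU_LINK };
static const struct attrib with_eh  = { GTP_PDU, GTP_PDU }; /* link: don't care */

/* TCAM-style match: masked packet flags must equal the attribute flags */
static bool match(uint16_t pkt_flags, const struct attrib *a)
{
	return (pkt_flags & a->mask) == a->flags;
}

int main(void)
{
	uint16_t pkt = GTP_PDU | GTP_PDU_LINK; /* an uplink PDU-session packet */

	/* prints: session 0 downlink 0 uplink 1 eh 1 */
	printf("session %d downlink %d uplink %d eh %d\n",
	       match(pkt, &session), match(pkt, &downlink),
	       match(pkt, &uplink), match(pkt, &with_eh));
	return 0;
}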
+ */ +enum ice_ptype_attrib_type { + /* GTP PTYPEs */ + ICE_PTYPE_ATTR_GTP_PDU_EH, + ICE_PTYPE_ATTR_GTP_SESSION, + ICE_PTYPE_ATTR_GTP_DOWNLINK, + ICE_PTYPE_ATTR_GTP_UPLINK, +}; + +struct ice_ptype_attrib_info { + u16 flags; + u16 mask; +}; + +/* TCAM flag definitions */ +#define ICE_GTP_PDU BIT(14) +#define ICE_GTP_PDU_LINK BIT(13) + +/* GTP attributes */ +#define ICE_GTP_PDU_FLAG_MASK (ICE_GTP_PDU) +#define ICE_GTP_PDU_EH ICE_GTP_PDU + +#define ICE_GTP_FLAGS_MASK (ICE_GTP_PDU | ICE_GTP_PDU_LINK) +#define ICE_GTP_SESSION 0 +#define ICE_GTP_DOWNLINK ICE_GTP_PDU +#define ICE_GTP_UPLINK (ICE_GTP_PDU | ICE_GTP_PDU_LINK) + +struct ice_ptype_attributes { + u16 ptype; + enum ice_ptype_attrib_type attrib; +}; + /* package labels */ struct ice_label { __le16 value; @@ -335,6 +393,7 @@ struct ice_es { u16 count; u16 fvw; u16 *ref_count; + u32 *mask_ena; struct list_head prof_map; struct ice_fv_word *t; struct mutex prof_map_lock; /* protect access to profiles list */ @@ -372,12 +431,14 @@ struct ice_prof_map { u8 prof_id; u8 ptg_cnt; u8 ptg[ICE_MAX_PTG_PER_PROFILE]; + struct ice_ptype_attrib_info attr[ICE_MAX_PTG_PER_PROFILE]; }; #define ICE_INVALID_TCAM 0xFFFF struct ice_tcam_inf { u16 tcam_idx; + struct ice_ptype_attrib_info attr; u8 ptg; u8 prof_id; u8 in_use; @@ -478,6 +539,21 @@ struct ice_prof_redir { u16 count; }; +struct ice_mask { + u16 mask; /* 16-bit mask */ + u16 idx; /* index */ + u16 ref; /* reference count */ + u8 in_use; /* non-zero if used */ +}; + +struct ice_masks { + struct mutex lock; /* lock to protect this structure */ + u16 first; /* first mask owned by the PF */ + u16 count; /* number of masks owned by the PF */ +#define ICE_PROF_MASK_COUNT 32 + struct ice_mask masks[ICE_PROF_MASK_COUNT]; +}; + /* Tables per block */ struct ice_blk_info { struct ice_xlt1 xlt1; @@ -485,6 +561,7 @@ struct ice_blk_info { struct ice_prof_tcam prof; struct ice_prof_redir prof_redir; struct ice_es es; + struct ice_masks masks; u8 overwrite; /* set to true to allow overwrite of table entries */ u8 is_list_init; }; @@ -513,6 +590,7 @@ struct ice_chs_chg { u16 vsig; u16 orig_vsig; u16 tcam_idx; + struct ice_ptype_attrib_info attr; }; #define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index 89a0cef20506..8e8bfc6fa2b4 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -9,18 +9,50 @@ struct ice_flow_field_info { enum ice_flow_seg_hdr hdr; s16 off; /* Offset from start of a protocol header, in bits */ u16 size; /* Size of fields in bits */ + u16 mask; /* 16-bit mask for field */ }; #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \ .hdr = _hdr, \ .off = (_offset_bytes) * BITS_PER_BYTE, \ .size = (_size_bytes) * BITS_PER_BYTE, \ + .mask = 0, \ +} + +#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \ + .hdr = _hdr, \ + .off = (_offset_bytes) * BITS_PER_BYTE, \ + .size = (_size_bytes) * BITS_PER_BYTE, \ + .mask = _mask, \ } /* Table containing properties of supported protocol header fields */ static const struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { + /* Ether */ + /* ICE_FLOW_FIELD_IDX_ETH_DA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN), + /* ICE_FLOW_FIELD_IDX_ETH_SA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN), + /* ICE_FLOW_FIELD_IDX_S_VLAN */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, sizeof(__be16)), + /* ICE_FLOW_FIELD_IDX_C_VLAN */ + 
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, sizeof(__be16)), + /* ICE_FLOW_FIELD_IDX_ETH_TYPE */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, sizeof(__be16)), /* IPv4 / IPv6 */ + /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc), + /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, 1, 0x0ff0), + /* ICE_FLOW_FIELD_IDX_IPV4_TTL */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0xff00), + /* ICE_FLOW_FIELD_IDX_IPV4_PROT */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0x00ff), + /* ICE_FLOW_FIELD_IDX_IPV6_TTL */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0x00ff), + /* ICE_FLOW_FIELD_IDX_IPV6_PROT */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0xff00), /* ICE_FLOW_FIELD_IDX_IPV4_SA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)), /* ICE_FLOW_FIELD_IDX_IPV4_DA */ @@ -42,21 +74,111 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)), /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)), + /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1), + /* ARP */ + /* ICE_FLOW_FIELD_IDX_ARP_SIP */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)), + /* ICE_FLOW_FIELD_IDX_ARP_DIP */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, sizeof(struct in_addr)), + /* ICE_FLOW_FIELD_IDX_ARP_SHA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN), + /* ICE_FLOW_FIELD_IDX_ARP_DHA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN), + /* ICE_FLOW_FIELD_IDX_ARP_OP */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, sizeof(__be16)), + /* ICMP */ + /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, 1), + /* ICE_FLOW_FIELD_IDX_ICMP_CODE */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, 1), /* GRE */ /* ICE_FLOW_FIELD_IDX_GRE_KEYID */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, sizeof_field(struct gre_full_hdr, key)), + /* GTP */ + /* ICE_FLOW_FIELD_IDX_GTPC_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12, sizeof(__be32)), + /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12, sizeof(__be32)), + /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12, sizeof(__be32)), + /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16), + 0x3f00), + /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)), + /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)), + /* PPPoE */ + /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)), + /* PFCP */ + /* ICE_FLOW_FIELD_IDX_PFCP_SEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12, sizeof(__be64)), + /* L2TPv3 */ + /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0, sizeof(__be32)), + /* ESP */ + /* ICE_FLOW_FIELD_IDX_ESP_SPI */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0, sizeof(__be32)), + /* AH */ + /* ICE_FLOW_FIELD_IDX_AH_SPI */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)), + /* NAT_T_ESP */ + /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)), }; /* Bitmaps indicating relevant packet types for a particular protocol header * - * Packet types for packets with 
an Outer/First/Single IPv4 header + * Packet types for packets with an Outer/First/Single MAC header + */ +static const u32 ice_ptypes_mac_ofos[] = { + 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB, + 0x0000077E, 0x00000000, 0x00000000, 0x00000000, + 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Innermost/Last MAC VLAN header */ +static const u32 ice_ptypes_macvlan_il[] = { + 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000, + 0x0000077E, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT + * include IPv4 other PTYPEs */ static const u32 ice_ptypes_ipv4_ofos[] = { 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, + 0x00000000, 0x00000155, 0x00000000, 0x00000000, + 0x00000000, 0x000FC000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Outer/First/Single IPv4 header, includes + * IPv4 other PTYPEs + */ +static const u32 ice_ptypes_ipv4_ofos_all[] = { + 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, + 0x00000000, 0x00000155, 0x00000000, 0x00000000, + 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -67,7 +189,7 @@ static const u32 ice_ptypes_ipv4_ofos[] = { static const u32 ice_ptypes_ipv4_il[] = { 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B, 0x0000000E, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x001FF800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -75,12 +197,28 @@ static const u32 ice_ptypes_ipv4_il[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; -/* Packet types for packets with an Outer/First/Single IPv6 header */ +/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT + * include IPv6 other PTYPEs + */ static const u32 ice_ptypes_ipv6_ofos[] = { 0x00000000, 0x00000000, 0x77000000, 0x10002000, + 0x00000000, 0x000002AA, 0x00000000, 0x00000000, + 0x00000000, 0x03F00000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Outer/First/Single IPv6 header, includes + * IPv6 other PTYPEs + */ +static const u32 ice_ptypes_ipv6_ofos_all[] = { + 0x00000000, 0x00000000, 0x77000000, 0x10002000, + 0x00000000, 0x000002AA, 
0x00000000, 0x00000000, + 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -91,7 +229,7 @@ static const u32 ice_ptypes_ipv6_ofos[] = { static const u32 ice_ptypes_ipv6_il[] = { 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000, 0x00000770, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x7FE00000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -111,6 +249,18 @@ static const u32 ice_ipv4_ofos_no_l4[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; +/* Packet types for packets with an Outermost/First ARP header */ +static const u32 ice_ptypes_arp_of[] = { + 0x00000800, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */ static const u32 ice_ipv4_il_no_l4[] = { 0x60000000, 0x18043008, 0x80000002, 0x6010c021, @@ -153,7 +303,7 @@ static const u32 ice_ipv6_il_no_l4[] = { static const u32 ice_ptypes_udp_il[] = { 0x81000000, 0x20204040, 0x04000010, 0x80810102, 0x00000040, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00410000, 0x90842000, 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -165,7 +315,7 @@ static const u32 ice_ptypes_udp_il[] = { static const u32 ice_ptypes_tcp_il[] = { 0x04000000, 0x80810102, 0x10000040, 0x02040408, 0x00000102, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00820000, 0x21084000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -177,6 +327,18 @@ static const u32 ice_ptypes_tcp_il[] = { static const u32 ice_ptypes_sctp_il[] = { 0x08000000, 0x01020204, 0x20000081, 0x04080810, 0x00000204, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x01040000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Outermost/First ICMP header */ +static const u32 ice_ptypes_icmp_of[] = { + 0x10000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -185,6 +347,18 @@ static const u32 ice_ptypes_sctp_il[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; +/* Packet types for packets with an Innermost/Last ICMP header */ +static const u32 ice_ptypes_icmp_il[] = { + 0x00000000, 0x02040408, 0x40000102, 0x08101020, + 0x00000408, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x42108000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + /* Packet types for packets with an Outermost/First GRE header */ static const u32 ice_ptypes_gre_of[] = { 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000, @@ -197,6 +371,218 @@ static const u32 ice_ptypes_gre_of[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; +/* Packet types for packets with an Innermost/Last MAC header */ +static const u32 ice_ptypes_mac_il[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for GTPC */ +static const u32 ice_ptypes_gtpc[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000180, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for GTPC with TEID */ +static const u32 ice_ptypes_gtpc_tid[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000060, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for GTPU */ +static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = { + { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH }, +}; + +static const struct ice_ptype_attributes ice_attr_gtpu_down[] = { + { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK 
}, + { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK }, +}; + +static const struct ice_ptype_attributes ice_attr_gtpu_up[] = { + { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK }, +}; + +static const u32 ice_ptypes_gtpu[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for PPPoE */ +static const u32 ice_ptypes_pppoe[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x03ffe000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with PFCP NODE header */ +static const u32 
ice_ptypes_pfcp_node[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x80000000, 0x00000002, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with PFCP SESSION header */ +static const u32 ice_ptypes_pfcp_session[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000005, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for L2TPv3 */ +static const u32 ice_ptypes_l2tpv3[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000300, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for ESP */ +static const u32 ice_ptypes_esp[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000003, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for AH */ +static const u32 ice_ptypes_ah[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x0000000C, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with NAT_T ESP header */ +static const u32 ice_ptypes_nat_t_esp[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000030, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static const u32 ice_ptypes_mac_non_ip_ofos[] = { + 0x00000846, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00400000, 0x03FFF000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + /* Manage parameters and info. used during the creation of a flow profile */ struct ice_flow_prof_params { enum ice_block blk; @@ -208,12 +594,30 @@ struct ice_flow_prof_params { * This will give us the direction flags. 
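Each of the PTYPE tables above is a 1024-bit set stored as 32 u32 words (reinterpreted as an unsigned long bitmap on little-endian builds); profile construction starts from the full PTYPE set and repeatedly intersects it with the table for every header in the segment. A standalone sketch of that narrowing step, using plain word-wise AND instead of the kernel's bitmap_and() (the table contents here reuse only the first two words of ice_ptypes_ipv4_il and ice_ptypes_udp_il; the rest default to zero):

#include <stdint.h>
#include <stdio.h>

#define PTYPE_WORDS 32 /* 32 x 32 bits = 1024 PTYPEs */

/* result &= table, word by word; returns the surviving bit count */
static int intersect(uint32_t *result, const uint32_t *table)
{
	int i, count = 0;

	for (i = 0; i < PTYPE_WORDS; i++) {
		result[i] &= table[i];
		count += __builtin_popcount(result[i]);
	}
	return count;
}

int main(void)
{
	uint32_t ptypes[PTYPE_WORDS];
	uint32_t ipv4[PTYPE_WORDS] = { 0xE0000000, 0xB807700E };
	uint32_t udp[PTYPE_WORDS]  = { 0x81000000, 0x20204040 };
	int i;

	for (i = 0; i < PTYPE_WORDS; i++)
		ptypes[i] = 0xFFFFFFFF; /* start from "every PTYPE" */

	/* each header narrows the candidate set further */
	printf("after IPv4: %d ptypes\n", intersect(ptypes, ipv4));
	printf("after UDP:  %d ptypes\n", intersect(ptypes, udp));
	return 0;
}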
*/ struct ice_fv_word es[ICE_MAX_FV_WORDS]; + /* attributes can be used to add attributes to a particular PTYPE */ + const struct ice_ptype_attributes *attr; + u16 attr_cnt; + + u16 mask[ICE_MAX_FV_WORDS]; DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); }; +#define ICE_FLOW_RSS_HDRS_INNER_MASK \ + (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \ + ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \ + ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \ + ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \ + ICE_FLOW_SEG_HDR_NAT_T_ESP) + +#define ICE_FLOW_SEG_HDRS_L2_MASK \ + (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) #define ICE_FLOW_SEG_HDRS_L3_MASK \ - (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6) + (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP) #define ICE_FLOW_SEG_HDRS_L4_MASK \ + (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \ + ICE_FLOW_SEG_HDR_SCTP) +/* mask for L4 protocols that are NOT part of IPv4/6 OTHER PTYPE groups */ +#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP) /** @@ -243,8 +647,11 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) /* Sizes of fixed known protocol headers without header options */ #define ICE_FLOW_PROT_HDR_SZ_MAC 14 +#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2) #define ICE_FLOW_PROT_HDR_SZ_IPV4 20 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40 +#define ICE_FLOW_PROT_HDR_SZ_ARP 28 +#define ICE_FLOW_PROT_HDR_SZ_ICMP 8 #define ICE_FLOW_PROT_HDR_SZ_TCP 20 #define ICE_FLOW_PROT_HDR_SZ_UDP 8 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12 @@ -256,16 +663,27 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) */ static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg) { - u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC; + u16 sz; + + /* L2 headers */ + sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ? + ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC; /* L3 headers */ if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) sz += ICE_FLOW_PROT_HDR_SZ_IPV4; else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6) sz += ICE_FLOW_PROT_HDR_SZ_IPV6; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP) + sz += ICE_FLOW_PROT_HDR_SZ_ARP; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK) + /* An L3 header is required if L4 is specified */ + return 0; /* L4 headers */ - if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP) + if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP) + sz += ICE_FLOW_PROT_HDR_SZ_ICMP; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP) sz += ICE_FLOW_PROT_HDR_SZ_TCP; else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP) sz += ICE_FLOW_PROT_HDR_SZ_UDP; @@ -298,8 +716,39 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) hdrs = prof->segs[i].hdrs; + if (hdrs & ICE_FLOW_SEG_HDR_ETH) { + src = !i ? 
(const unsigned long *)ice_ptypes_mac_ofos : + (const unsigned long *)ice_ptypes_mac_il; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } + + if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) { + src = (const unsigned long *)ice_ptypes_macvlan_il; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } + + if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) { + bitmap_and(params->ptypes, params->ptypes, + (const unsigned long *)ice_ptypes_arp_of, + ICE_FLOW_PTYPE_MAX); + } + if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) && - !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) { + (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) { + src = i ? (const unsigned long *)ice_ptypes_ipv4_il : + (const unsigned long *)ice_ptypes_ipv4_ofos_all; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) && + (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) { + src = i ? (const unsigned long *)ice_ptypes_ipv6_il : + (const unsigned long *)ice_ptypes_ipv6_ofos_all; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) && + !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) { src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 : (const unsigned long *)ice_ipv4_il_no_l4; bitmap_and(params->ptypes, params->ptypes, src, @@ -310,7 +759,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) bitmap_and(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) && - !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) { + !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) { src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 : (const unsigned long *)ice_ipv6_il_no_l4; bitmap_and(params->ptypes, params->ptypes, src, @@ -322,6 +771,20 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) ICE_FLOW_PTYPE_MAX); } + if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) { + src = (const unsigned long *)ice_ptypes_mac_non_ip_ofos; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) { + src = (const unsigned long *)ice_ptypes_pppoe; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else { + src = (const unsigned long *)ice_ptypes_pppoe; + bitmap_andnot(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } + if (hdrs & ICE_FLOW_SEG_HDR_UDP) { src = (const unsigned long *)ice_ptypes_udp_il; bitmap_and(params->ptypes, params->ptypes, src, @@ -334,12 +797,89 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) src = (const unsigned long *)ice_ptypes_sctp_il; bitmap_and(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); + } + + if (hdrs & ICE_FLOW_SEG_HDR_ICMP) { + src = !i ? 
(const unsigned long *)ice_ptypes_icmp_of : + (const unsigned long *)ice_ptypes_icmp_il; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) { if (!i) { src = (const unsigned long *)ice_ptypes_gre_of; bitmap_and(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) { + src = (const unsigned long *)ice_ptypes_gtpc; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) { + src = (const unsigned long *)ice_ptypes_gtpc_tid; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) { + src = (const unsigned long *)ice_ptypes_gtpu; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + + /* Attributes for GTP packet with downlink */ + params->attr = ice_attr_gtpu_down; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) { + src = (const unsigned long *)ice_ptypes_gtpu; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + + /* Attributes for GTP packet with uplink */ + params->attr = ice_attr_gtpu_up; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) { + src = (const unsigned long *)ice_ptypes_gtpu; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + + /* Attributes for GTP packet with Extension Header */ + params->attr = ice_attr_gtpu_eh; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) { + src = (const unsigned long *)ice_ptypes_gtpu; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) { + src = (const unsigned long *)ice_ptypes_l2tpv3; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) { + src = (const unsigned long *)ice_ptypes_esp; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_AH) { + src = (const unsigned long *)ice_ptypes_ah; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) { + src = (const unsigned long *)ice_ptypes_nat_t_esp; + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } + + if (hdrs & ICE_FLOW_SEG_HDR_PFCP) { + if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE) + src = (const unsigned long *)ice_ptypes_pfcp_node; + else + src = (const unsigned long *)ice_ptypes_pfcp_session; + + bitmap_and(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else { + src = (const unsigned long *)ice_ptypes_pfcp_node; + bitmap_andnot(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + + src = (const unsigned long *)ice_ptypes_pfcp_session; + bitmap_andnot(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); } } @@ -352,6 +892,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) * @params: information about the flow to be processed * @seg: packet segment index of the field to be extracted * @fld: ID of field to be extracted + * @match: bit field of all fields * * This function determines the protocol ID, offset, and size of the given * field. 
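The bookkeeping in ice_flow_xtract_fld() converts a field's bit offset and size into extraction-sequence entries: the displacement is the bit offset modulo the extraction-word width, and the entry count is the number of words the displaced field straddles. A worked sketch, assuming the driver's 2-byte extraction words (ICE_FLOW_FV_EXTRACT_SZ == 2 is an assumption here, as is every name below):

#include <stdio.h>

#define EXTRACT_SZ_BYTES 2                    /* assumed FV word size */
#define ESE_BITS (EXTRACT_SZ_BYTES * 8)       /* 16 bits per entry */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void layout(const char *name, int off_bits, int size_bits)
{
	int disp = off_bits % ESE_BITS;          /* bit shift inside the word */
	int start = (off_bits / ESE_BITS) * EXTRACT_SZ_BYTES; /* word byte off */
	int cnt = DIV_ROUND_UP(disp + size_bits, ESE_BITS);   /* words used */

	printf("%-10s word-off %2d, disp %2d, entries %d\n",
	       name, start, disp, cnt);
}

int main(void)
{
	layout("tcp_flags", 13 * 8, 8);  /* byte 13 of TCP: one word, disp 8 */
	layout("ipv4_sa", 12 * 8, 32);   /* bytes 12-15: two aligned words */
	layout("gtpu_qfi", 22 * 8, 16);  /* byte 22: one aligned word */
	return 0;
}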
It then allocates one or more extraction sequence entries for the @@ -359,17 +900,73 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) */ static enum ice_status ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, - u8 seg, enum ice_flow_field fld) + u8 seg, enum ice_flow_field fld, u64 match) { + enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX; enum ice_prot_id prot_id = ICE_PROT_ID_INVAL; u8 fv_words = hw->blk[params->blk].es.fvw; struct ice_flow_fld_info *flds; u16 cnt, ese_bits, i; + u16 sib_mask = 0; + u16 mask; u16 off; flds = params->prof->segs[seg].fields; switch (fld) { + case ICE_FLOW_FIELD_IDX_ETH_DA: + case ICE_FLOW_FIELD_IDX_ETH_SA: + case ICE_FLOW_FIELD_IDX_S_VLAN: + case ICE_FLOW_FIELD_IDX_C_VLAN: + prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL; + break; + case ICE_FLOW_FIELD_IDX_ETH_TYPE: + prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL; + break; + case ICE_FLOW_FIELD_IDX_IPV4_DSCP: + prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; + break; + case ICE_FLOW_FIELD_IDX_IPV6_DSCP: + prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; + break; + case ICE_FLOW_FIELD_IDX_IPV4_TTL: + case ICE_FLOW_FIELD_IDX_IPV4_PROT: + prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; + + /* TTL and PROT share the same extraction seq. entry. + * Each is considered a sibling to the other in terms of sharing + * the same extraction sequence entry. + */ + if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL) + sib = ICE_FLOW_FIELD_IDX_IPV4_PROT; + else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT) + sib = ICE_FLOW_FIELD_IDX_IPV4_TTL; + + /* If the sibling field is also included, that field's + * mask needs to be included. + */ + if (match & BIT(sib)) + sib_mask = ice_flds_info[sib].mask; + break; + case ICE_FLOW_FIELD_IDX_IPV6_TTL: + case ICE_FLOW_FIELD_IDX_IPV6_PROT: + prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; + + /* TTL and PROT share the same extraction seq. entry. + * Each is considered a sibling to the other in terms of sharing + * the same extraction sequence entry. + */ + if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL) + sib = ICE_FLOW_FIELD_IDX_IPV6_PROT; + else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT) + sib = ICE_FLOW_FIELD_IDX_IPV6_TTL; + + /* If the sibling field is also included, that field's + * mask needs to be included. + */ + if (match & BIT(sib)) + sib_mask = ice_flds_info[sib].mask; + break; case ICE_FLOW_FIELD_IDX_IPV4_SA: case ICE_FLOW_FIELD_IDX_IPV4_DA: prot_id = seg == 0 ? 
ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; @@ -380,6 +977,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, break; case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT: case ICE_FLOW_FIELD_IDX_TCP_DST_PORT: + case ICE_FLOW_FIELD_IDX_TCP_FLAGS: prot_id = ICE_PROT_TCP_IL; break; case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT: @@ -390,6 +988,49 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT: prot_id = ICE_PROT_SCTP_IL; break; + case ICE_FLOW_FIELD_IDX_GTPC_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI: + /* GTP is accessed through UDP OF protocol */ + prot_id = ICE_PROT_UDP_OF; + break; + case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID: + prot_id = ICE_PROT_PPPOE; + break; + case ICE_FLOW_FIELD_IDX_PFCP_SEID: + prot_id = ICE_PROT_UDP_IL_OR_S; + break; + case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID: + prot_id = ICE_PROT_L2TPV3; + break; + case ICE_FLOW_FIELD_IDX_ESP_SPI: + prot_id = ICE_PROT_ESP_F; + break; + case ICE_FLOW_FIELD_IDX_AH_SPI: + prot_id = ICE_PROT_ESP_2; + break; + case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI: + prot_id = ICE_PROT_UDP_IL_OR_S; + break; + case ICE_FLOW_FIELD_IDX_ARP_SIP: + case ICE_FLOW_FIELD_IDX_ARP_DIP: + case ICE_FLOW_FIELD_IDX_ARP_SHA: + case ICE_FLOW_FIELD_IDX_ARP_DHA: + case ICE_FLOW_FIELD_IDX_ARP_OP: + prot_id = ICE_PROT_ARP_OF; + break; + case ICE_FLOW_FIELD_IDX_ICMP_TYPE: + case ICE_FLOW_FIELD_IDX_ICMP_CODE: + /* ICMP type and code share the same extraction seq. entry */ + prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ? + ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL; + sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ? + ICE_FLOW_FIELD_IDX_ICMP_CODE : + ICE_FLOW_FIELD_IDX_ICMP_TYPE; + break; case ICE_FLOW_FIELD_IDX_GRE_KEYID: prot_id = ICE_PROT_GRE_OF; break; @@ -407,6 +1048,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, ICE_FLOW_FV_EXTRACT_SZ; flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits); flds[fld].xtrct.idx = params->es_cnt; + flds[fld].xtrct.mask = ice_flds_info[fld].mask; /* Adjust the next field-entry index after accommodating the number of * entries this field consumes @@ -416,24 +1058,34 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, /* Fill in the extraction sequence entries needed for this field */ off = flds[fld].xtrct.off; + mask = flds[fld].xtrct.mask; for (i = 0; i < cnt; i++) { - u8 idx; - - /* Make sure the number of extraction sequence required - * does not exceed the block's capability + /* Only consume an extraction sequence entry if there is no + * sibling field associated with this field or the sibling entry + * already extracts the word shared with this field. 
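The sibling logic above avoids allocating a second extraction entry when two fields share one 16-bit word (IPv4 TTL and protocol, for instance); when both siblings are requested, their masks are ORed so the single shared word covers both. A compact sketch of the merge, using the masks from ice_flds_info above (0xff00 for the TTL byte, 0x00ff for the protocol byte of the same word):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t ttl_mask  = 0xff00; /* TTL byte of the shared word */
	uint16_t prot_mask = 0x00ff; /* protocol byte of the same word */
	int entries = 0;

	/* first sibling claims the extraction word and its mask */
	uint16_t word_mask = ttl_mask;
	entries++;

	/* second sibling finds the word already extracted: no new
	 * entry, just widen the mask to cover both fields
	 */
	word_mask |= prot_mask;

	/* prints: entries used 1, merged mask 0xffff */
	printf("entries used %d, merged mask 0x%04x\n", entries, word_mask);
	return 0;
}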
*/ - if (params->es_cnt >= fv_words) - return ICE_ERR_MAX_LIMIT; + if (sib == ICE_FLOW_FIELD_IDX_MAX || + flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL || + flds[sib].xtrct.off != off) { + u8 idx; - /* some blocks require a reversed field vector layout */ - if (hw->blk[params->blk].es.reverse) - idx = fv_words - params->es_cnt - 1; - else - idx = params->es_cnt; + /* Make sure the number of extraction sequence required + * does not exceed the block's capability + */ + if (params->es_cnt >= fv_words) + return ICE_ERR_MAX_LIMIT; - params->es[idx].prot_id = prot_id; - params->es[idx].off = off; - params->es_cnt++; + /* some blocks require a reversed field vector layout */ + if (hw->blk[params->blk].es.reverse) + idx = fv_words - params->es_cnt - 1; + else + idx = params->es_cnt; + + params->es[idx].prot_id = prot_id; + params->es[idx].off = off; + params->mask[idx] = mask | sib_mask; + params->es_cnt++; + } off += ICE_FLOW_FV_EXTRACT_SZ; } @@ -533,14 +1185,15 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw, u8 i; for (i = 0; i < prof->segs_cnt; i++) { - u8 j; + u64 match = params->prof->segs[i].match; + enum ice_flow_field j; - for_each_set_bit(j, (unsigned long *)&prof->segs[i].match, + for_each_set_bit(j, (unsigned long *)&match, ICE_FLOW_FIELD_IDX_MAX) { - status = ice_flow_xtract_fld(hw, params, i, - (enum ice_flow_field)j); + status = ice_flow_xtract_fld(hw, params, i, j, match); if (status) return status; + clear_bit(j, (unsigned long *)&match); } /* Process raw matching bytes */ @@ -751,7 +1404,8 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, /* Add a HW profile for this flow profile */ status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, - params->es); + params->attr, params->attr_cnt, params->es, + params->mask); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); goto out; @@ -1158,6 +1812,9 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, seg->raws_cnt++; } +#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \ + (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) + #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6) @@ -1165,7 +1822,8 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP) #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \ - (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \ + (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \ + ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \ ICE_FLOW_RSS_SEG_HDR_L4_MASKS) /** @@ -1193,7 +1851,8 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, ICE_FLOW_SET_HDRS(segs, flow_hdr); - if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS) + if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & + ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER) return ICE_ERR_PARAM; val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 829f90b1e998..eec9def8ffca 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -30,6 +30,80 @@ #define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT) #define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT) +#define ICE_FLOW_HASH_GTP_TEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)) + +#define ICE_FLOW_HASH_GTP_IPV4_TEID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID) +#define ICE_FLOW_HASH_GTP_IPV6_TEID \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID) + +#define 
ICE_FLOW_HASH_GTP_U_TEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)) + +#define ICE_FLOW_HASH_GTP_U_IPV4_TEID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID) +#define ICE_FLOW_HASH_GTP_U_IPV6_TEID \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID) + +#define ICE_FLOW_HASH_GTP_U_EH_TEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID)) + +#define ICE_FLOW_HASH_GTP_U_EH_QFI \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI)) + +#define ICE_FLOW_HASH_GTP_U_IPV4_EH \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \ + ICE_FLOW_HASH_GTP_U_EH_QFI) +#define ICE_FLOW_HASH_GTP_U_IPV6_EH \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \ + ICE_FLOW_HASH_GTP_U_EH_QFI) + +#define ICE_FLOW_HASH_PPPOE_SESS_ID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)) + +#define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \ + (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID) +#define ICE_FLOW_HASH_PPPOE_TCP_ID \ + (ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID) +#define ICE_FLOW_HASH_PPPOE_UDP_ID \ + (ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID) + +#define ICE_FLOW_HASH_PFCP_SEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)) +#define ICE_FLOW_HASH_PFCP_IPV4_SEID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID) +#define ICE_FLOW_HASH_PFCP_IPV6_SEID \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID) + +#define ICE_FLOW_HASH_L2TPV3_SESS_ID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)) +#define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID) +#define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID) + +#define ICE_FLOW_HASH_ESP_SPI \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)) +#define ICE_FLOW_HASH_ESP_IPV4_SPI \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI) +#define ICE_FLOW_HASH_ESP_IPV6_SPI \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI) + +#define ICE_FLOW_HASH_AH_SPI \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)) +#define ICE_FLOW_HASH_AH_IPV4_SPI \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI) +#define ICE_FLOW_HASH_AH_IPV6_SPI \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI) + +#define ICE_FLOW_HASH_NAT_T_ESP_SPI \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI)) +#define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI) +#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI) + /* Protocol header fields within a packet segment. A segment consists of one or * more protocol headers that make up a logical group of protocol headers. 
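Each ICE_FLOW_HASH_* value here is a 64-bit field set built by ORing BIT_ULL() of the relevant enum ice_flow_field indexes, so the composite hashes (GTP-U EH over IPv4, say) are simply unions of the primitive sets. A small sketch with an abbreviated, purely illustrative field enum (the real indexes come from enum ice_flow_field further below):

#include <stdint.h>
#include <stdio.h>

/* abbreviated stand-in for enum ice_flow_field */
enum field { IPV4_SA, IPV4_DA, GTPU_EH_TEID, GTPU_EH_QFI, FIELD_MAX };

#define BIT_ULL(n) (1ULL << (n))

#define HASH_IPV4         (BIT_ULL(IPV4_SA) | BIT_ULL(IPV4_DA))
#define HASH_GTPU_EH      (BIT_ULL(GTPU_EH_TEID) | BIT_ULL(GTPU_EH_QFI))
#define HASH_GTPU_IPV4_EH (HASH_IPV4 | HASH_GTPU_EH)

int main(void)
{
	uint64_t match = HASH_GTPU_IPV4_EH;
	int i;

	/* the driver walks such a bitmap with for_each_set_bit() to
	 * build the extraction sequence one field at a time
	 */
	for (i = 0; i < FIELD_MAX; i++)
		if (match & BIT_ULL(i))
			printf("extract field %d\n", i);
	return 0;
}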
Each * logical group of protocol headers encapsulates or is encapsulated using/by @@ -38,16 +112,66 @@ */ enum ice_flow_seg_hdr { ICE_FLOW_SEG_HDR_NONE = 0x00000000, + ICE_FLOW_SEG_HDR_ETH = 0x00000001, + ICE_FLOW_SEG_HDR_VLAN = 0x00000002, ICE_FLOW_SEG_HDR_IPV4 = 0x00000004, ICE_FLOW_SEG_HDR_IPV6 = 0x00000008, + ICE_FLOW_SEG_HDR_ARP = 0x00000010, + ICE_FLOW_SEG_HDR_ICMP = 0x00000020, ICE_FLOW_SEG_HDR_TCP = 0x00000040, ICE_FLOW_SEG_HDR_UDP = 0x00000080, ICE_FLOW_SEG_HDR_SCTP = 0x00000100, ICE_FLOW_SEG_HDR_GRE = 0x00000200, + ICE_FLOW_SEG_HDR_GTPC = 0x00000400, + ICE_FLOW_SEG_HDR_GTPC_TEID = 0x00000800, + ICE_FLOW_SEG_HDR_GTPU_IP = 0x00001000, + ICE_FLOW_SEG_HDR_GTPU_EH = 0x00002000, + ICE_FLOW_SEG_HDR_GTPU_DWN = 0x00004000, + ICE_FLOW_SEG_HDR_GTPU_UP = 0x00008000, + ICE_FLOW_SEG_HDR_PPPOE = 0x00010000, + ICE_FLOW_SEG_HDR_PFCP_NODE = 0x00020000, + ICE_FLOW_SEG_HDR_PFCP_SESSION = 0x00040000, + ICE_FLOW_SEG_HDR_L2TPV3 = 0x00080000, + ICE_FLOW_SEG_HDR_ESP = 0x00100000, + ICE_FLOW_SEG_HDR_AH = 0x00200000, + ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000, + ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000, + /* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and + * ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs + */ + ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000, }; +/* These segments all have the same PTYPES, but are otherwise distinguished by + * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags: + * + * gtp_eh_pdu gtp_eh_pdu_link + * ICE_FLOW_SEG_HDR_GTPU_IP 0 0 + * ICE_FLOW_SEG_HDR_GTPU_EH 1 don't care + * ICE_FLOW_SEG_HDR_GTPU_DWN 1 0 + * ICE_FLOW_SEG_HDR_GTPU_UP 1 1 + */ +#define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \ + ICE_FLOW_SEG_HDR_GTPU_EH | \ + ICE_FLOW_SEG_HDR_GTPU_DWN | \ + ICE_FLOW_SEG_HDR_GTPU_UP) +#define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \ + ICE_FLOW_SEG_HDR_PFCP_SESSION) + enum ice_flow_field { + /* L2 */ + ICE_FLOW_FIELD_IDX_ETH_DA, + ICE_FLOW_FIELD_IDX_ETH_SA, + ICE_FLOW_FIELD_IDX_S_VLAN, + ICE_FLOW_FIELD_IDX_C_VLAN, + ICE_FLOW_FIELD_IDX_ETH_TYPE, /* L3 */ + ICE_FLOW_FIELD_IDX_IPV4_DSCP, + ICE_FLOW_FIELD_IDX_IPV6_DSCP, + ICE_FLOW_FIELD_IDX_IPV4_TTL, + ICE_FLOW_FIELD_IDX_IPV4_PROT, + ICE_FLOW_FIELD_IDX_IPV6_TTL, + ICE_FLOW_FIELD_IDX_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV4_SA, ICE_FLOW_FIELD_IDX_IPV4_DA, ICE_FLOW_FIELD_IDX_IPV6_SA, @@ -59,9 +183,42 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, + ICE_FLOW_FIELD_IDX_TCP_FLAGS, + /* ARP */ + ICE_FLOW_FIELD_IDX_ARP_SIP, + ICE_FLOW_FIELD_IDX_ARP_DIP, + ICE_FLOW_FIELD_IDX_ARP_SHA, + ICE_FLOW_FIELD_IDX_ARP_DHA, + ICE_FLOW_FIELD_IDX_ARP_OP, + /* ICMP */ + ICE_FLOW_FIELD_IDX_ICMP_TYPE, + ICE_FLOW_FIELD_IDX_ICMP_CODE, /* GRE */ ICE_FLOW_FIELD_IDX_GRE_KEYID, - /* The total number of enums must not exceed 64 */ + /* GTPC_TEID */ + ICE_FLOW_FIELD_IDX_GTPC_TEID, + /* GTPU_IP */ + ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, + /* GTPU_EH */ + ICE_FLOW_FIELD_IDX_GTPU_EH_TEID, + ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, + /* GTPU_UP */ + ICE_FLOW_FIELD_IDX_GTPU_UP_TEID, + /* GTPU_DWN */ + ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID, + /* PPPoE */ + ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID, + /* PFCP */ + ICE_FLOW_FIELD_IDX_PFCP_SEID, + /* L2TPv3 */ + ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, + /* ESP */ + ICE_FLOW_FIELD_IDX_ESP_SPI, + /* AH */ + ICE_FLOW_FIELD_IDX_AH_SPI, + /* NAT_T ESP */ + ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI, + /* The total number of enums must not exceed 64 */ ICE_FLOW_FIELD_IDX_MAX }; @@ -138,6 +295,7 @@ struct ice_flow_seg_xtrct { u16 off; /* Starting offset of 
the field in header in bytes */ u8 idx; /* Index of FV entry used */ u8 disp; /* Displacement of field in bits fr. FV entry's start */ + u16 mask; /* Mask for field */ }; enum ice_flow_fld_match_type { diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 093a1818a392..67b5b9b9d009 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -306,8 +306,23 @@ #define GLQF_FD_SIZE_FD_BSIZE_S 16 #define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16) #define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512)) +#define GLQF_FDMASK(_i) (0x00410800 + ((_i) * 4)) +#define GLQF_FDMASK_MAX_INDEX 31 +#define GLQF_FDMASK_MSK_INDEX_S 0 +#define GLQF_FDMASK_MSK_INDEX_M ICE_M(0x1F, 0) +#define GLQF_FDMASK_MASK_S 16 +#define GLQF_FDMASK_MASK_M ICE_M(0xFFFF, 16) #define GLQF_FDMASK_SEL(_i) (0x00410400 + ((_i) * 4)) #define GLQF_FDSWAP(_i, _j) (0x00413000 + ((_i) * 4 + (_j) * 512)) +#define GLQF_HMASK(_i) (0x0040FC00 + ((_i) * 4)) +#define GLQF_HMASK_MAX_INDEX 31 +#define GLQF_HMASK_MSK_INDEX_S 0 +#define GLQF_HMASK_MSK_INDEX_M ICE_M(0x1F, 0) +#define GLQF_HMASK_MASK_S 16 +#define GLQF_HMASK_MASK_M ICE_M(0xFFFF, 16) +#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4)) +#define GLQF_HMASK_SEL_MAX_INDEX 127 +#define GLQF_HMASK_SEL_MASK_SEL_S 0 #define PFQF_FD_ENA 0x0043A000 #define PFQF_FD_ENA_FD_ENA_M BIT(0) #define PFQF_FD_SIZE 0x00460100 @@ -369,6 +384,9 @@ #define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) #define VSIQF_FD_CNT_FD_GCNT_S 0 #define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0) +#define VSIQF_FD_CNT_FD_BCNT_S 16 +#define VSIQF_FD_CNT_FD_BCNT_M ICE_M(0x3FFF, 16) +#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4)) #define VSIQF_HKEY_MAX_INDEX 12 #define VSIQF_HLUT_MAX_INDEX 15 #define PFPM_APM 0x000B8080 diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 4ec24c3e813f..21329ed3087e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -55,6 +55,7 @@ struct ice_fltr_desc { #define ICE_FXD_FLTR_QW0_COMP_REPORT_M \ (0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S) #define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL 0x1ULL +#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW 0x2ULL #define ICE_FXD_FLTR_QW0_FD_SPACE_S 14 #define ICE_FXD_FLTR_QW0_FD_SPACE_M (0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S) @@ -128,6 +129,7 @@ struct ice_fltr_desc { #define ICE_FXD_FLTR_QW1_FDID_PRI_S 25 #define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S) #define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL +#define ICE_FXD_FLTR_QW1_FDID_PRI_THREE 0x3ULL #define ICE_FXD_FLTR_QW1_FDID_MDID_S 28 #define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S) @@ -138,6 +140,26 @@ struct ice_fltr_desc { (0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S) #define ICE_FXD_FLTR_QW1_FDID_ZERO 0x0ULL +/* definition for FD filter programming status descriptor WB format */ +#define ICE_FXD_FLTR_WB_QW1_DD_S 0 +#define ICE_FXD_FLTR_WB_QW1_DD_M (0x1ULL << ICE_FXD_FLTR_WB_QW1_DD_S) +#define ICE_FXD_FLTR_WB_QW1_DD_YES 0x1ULL + +#define ICE_FXD_FLTR_WB_QW1_PROG_ID_S 1 +#define ICE_FXD_FLTR_WB_QW1_PROG_ID_M \ + (0x3ULL << ICE_FXD_FLTR_WB_QW1_PROG_ID_S) +#define ICE_FXD_FLTR_WB_QW1_PROG_ADD 0x0ULL +#define ICE_FXD_FLTR_WB_QW1_PROG_DEL 0x1ULL + +#define ICE_FXD_FLTR_WB_QW1_FAIL_S 4 +#define ICE_FXD_FLTR_WB_QW1_FAIL_M (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_S) +#define ICE_FXD_FLTR_WB_QW1_FAIL_YES 0x1ULL + 
+#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S 5 +#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M \ + (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S) +#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL + struct ice_rx_ptype_decoded { u32 ptype:10; u32 known:1; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 8d4e2ad4328d..c345432fac72 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -343,6 +343,9 @@ static int ice_vsi_clear(struct ice_vsi *vsi) pf->vsi[vsi->idx] = NULL; if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL) pf->next_vsi = vsi->idx; + if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && + vsi->vf_id != ICE_INVAL_VFID) + pf->next_vsi = vsi->idx; ice_vsi_free_arrays(vsi); mutex_unlock(&pf->sw_mutex); @@ -454,8 +457,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) goto unlock_pf; } - if (vsi->type == ICE_VSI_CTRL) { - /* Use the last VSI slot as the index for the control VSI */ + if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) { + /* Use the last VSI slot as the index for PF control VSI */ vsi->idx = pf->num_alloc_vsi - 1; pf->ctrl_vsi_idx = vsi->idx; pf->vsi[vsi->idx] = vsi; @@ -468,6 +471,9 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, pf->next_vsi); } + + if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID) + pf->vf[vf_id].ctrl_vsi_idx = vsi->idx; goto unlock_pf; err_rings: @@ -506,7 +512,7 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi) if (!b_val) return -EPERM; - if (vsi->type != ICE_VSI_PF) + if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF)) return -EPERM; if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) @@ -517,6 +523,13 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi) /* each VSI gets same "best_effort" quota */ vsi->num_bfltr = b_val; + if (vsi->type == ICE_VSI_VF) { + vsi->num_gfltr = 0; + + /* each VSI gets same "best_effort" quota */ + vsi->num_bfltr = b_val; + } + return 0; } @@ -856,7 +869,8 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) u8 dflt_q_group, dflt_q_prio; u16 dflt_q, report_q, val; - if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL) + if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL && + vsi->type != ICE_VSI_VF) return; val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID; @@ -1179,7 +1193,24 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) num_q_vectors = vsi->num_q_vectors; /* reserve slots from OS requested IRQs */ - base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx); + if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) { + struct ice_vf *vf; + int i; + + ice_for_each_vf(pf, i) { + vf = &pf->vf[i]; + if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) { + base = pf->vsi[vf->ctrl_vsi_idx]->base_vector; + break; + } + } + if (i == pf->num_alloc_vfs) + base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, + ICE_RES_VF_CTRL_VEC_ID); + } else { + base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, + vsi->idx); + } if (base < 0) { dev_err(dev, "%d MSI-X interrupts available. 
%s %d failed to get %d MSI-X vectors\n", @@ -2308,7 +2339,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, struct ice_vsi *vsi; int ret, i; - if (vsi_type == ICE_VSI_VF) + if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL) vsi = ice_vsi_alloc(pf, vsi_type, vf_id); else vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID); @@ -2323,7 +2354,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (vsi->type == ICE_VSI_PF) vsi->ethtype = ETH_P_PAUSE; - if (vsi->type == ICE_VSI_VF) + if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL) vsi->vf_id = vf_id; ice_alloc_fd_res(vsi); @@ -2770,7 +2801,24 @@ int ice_vsi_release(struct ice_vsi *vsi) * many interrupts each VF needs. SR-IOV MSIX resources are also * cleared in the same manner. */ - if (vsi->type != ICE_VSI_VF) { + if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) { + struct ice_vf *vf; + int i; + + ice_for_each_vf(pf, i) { + vf = &pf->vf[i]; + if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) + break; + } + if (i == pf->num_alloc_vfs) { + /* No other VFs left that have control VSI, reclaim SW + * interrupts back to the common pool + */ + ice_free_res(pf->irq_tracker, vsi->base_vector, + ICE_RES_VF_CTRL_VEC_ID); + pf->num_avail_sw_msix += vsi->num_q_vectors; + } + } else if (vsi->type != ICE_VSI_VF) { /* reclaim SW interrupts back to the common pool */ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); pf->num_avail_sw_msix += vsi->num_q_vectors; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 2c23c8f468a5..f318d7f607e4 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1044,7 +1044,7 @@ struct ice_aq_task { }; /** - * ice_wait_for_aq_event - Wait for an AdminQ event from firmware + * ice_aq_wait_for_event - Wait for an AdminQ event from firmware * @pf: pointer to the PF private structure * @opcode: the opcode to wait for * @timeout: how long to wait, in jiffies @@ -2071,6 +2071,7 @@ static void ice_service_task(struct work_struct *work) ice_process_vflr_event(pf); ice_clean_mailboxq_subtask(pf); ice_sync_arfs_fltrs(pf); + ice_flush_fdir_ctx(pf); /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ ice_service_task_complete(pf); @@ -2082,6 +2083,7 @@ static void ice_service_task(struct work_struct *work) test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) || + test_bit(__ICE_FD_VF_FLUSH_CTX, pf->state) || test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) mod_timer(&pf->serv_tmr, jiffies); } @@ -2220,8 +2222,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) /* skip this unused q_vector */ continue; } - err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0, - q_vector->name, q_vector); + if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) + err = devm_request_irq(dev, irq_num, vsi->irq_handler, + IRQF_SHARED, q_vector->name, + q_vector); + else + err = devm_request_irq(dev, irq_num, vsi->irq_handler, + 0, q_vector->name, q_vector); if (err) { netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", err); @@ -4314,7 +4321,7 @@ static void ice_set_wake(struct ice_pf *pf) } /** - * ice_setup_magic_mc_wake - setup device to wake on multicast magic packet + * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet * @pf: pointer to the PF struct * * Issue firmware command to enable multicast magic wake, 
making diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index 7f4c1ec1eff2..199aa5b71540 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -13,6 +13,9 @@ enum ice_prot_id { ICE_PROT_ID_INVAL = 0, ICE_PROT_MAC_OF_OR_S = 1, + ICE_PROT_MAC_IL = 4, + ICE_PROT_ETYPE_OL = 9, + ICE_PROT_ETYPE_IL = 10, ICE_PROT_IPV4_OF_OR_S = 32, ICE_PROT_IPV4_IL = 33, ICE_PROT_IPV6_OF_OR_S = 40, @@ -21,7 +24,14 @@ enum ice_prot_id { ICE_PROT_UDP_OF = 52, ICE_PROT_UDP_IL_OR_S = 53, ICE_PROT_GRE_OF = 64, + ICE_PROT_ESP_F = 88, + ICE_PROT_ESP_2 = 89, ICE_PROT_SCTP_IL = 96, + ICE_PROT_ICMP_IL = 98, + ICE_PROT_ICMPV6_IL = 100, + ICE_PROT_PPPOE = 103, + ICE_PROT_L2TPV3 = 104, + ICE_PROT_ARP_OF = 118, ICE_PROT_META_ID = 255, /* when offset == metadata */ ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */ }; diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2403cb38b93c..f890337cc24a 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -1857,7 +1857,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, } /** - * ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry + * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry * @pi: port information structure * @vsi_handle: software VSI handle * diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index d4bfa7905652..c71f2fbbb262 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -444,22 +444,6 @@ void ice_free_rx_ring(struct ice_ring *rx_ring) } /** - * ice_rx_offset - Return expected offset into page to access data - * @rx_ring: Ring we are requesting offset of - * - * Returns the offset value for ring into the data buffer. 
- */ -static unsigned int ice_rx_offset(struct ice_ring *rx_ring) -{ - if (ice_ring_uses_build_skb(rx_ring)) - return ICE_SKB_PAD; - else if (ice_is_xdp_ena_vsi(rx_ring->vsi)) - return XDP_PACKET_HEADROOM; - - return 0; -} - -/** * ice_setup_rx_ring - Allocate the Rx descriptors * @rx_ring: the Rx ring to set up * @@ -493,7 +477,6 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring) rx_ring->next_to_use = 0; rx_ring->next_to_clean = 0; - rx_ring->rx_offset = ice_rx_offset(rx_ring); if (ice_is_xdp_ena_vsi(rx_ring->vsi)) WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); @@ -1114,6 +1097,11 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) dma_rmb(); if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { + struct ice_vsi *ctrl_vsi = rx_ring->vsi; + + if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && + ctrl_vsi->vf_id != ICE_INVAL_VFID) + ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); ice_put_rx_buf(rx_ring, NULL, 0); cleaned_count++; continue; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 02b12736ea80..207f6ee3a7f6 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -143,6 +143,7 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, case ICE_RX_PTYPE_INNER_PROT_UDP: case ICE_RX_PTYPE_INNER_PROT_SCTP: skb->ip_summed = CHECKSUM_UNNECESSARY; + break; default: break; } diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index a6cb0c35748c..2893143d9e62 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -192,6 +192,24 @@ enum ice_fltr_ptype { ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_SCTP, ICE_FLTR_PTYPE_NONF_IPV4_OTHER, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER, + ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER, + ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3, + ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3, + ICE_FLTR_PTYPE_NONF_IPV4_ESP, + ICE_FLTR_PTYPE_NONF_IPV6_ESP, + ICE_FLTR_PTYPE_NONF_IPV4_AH, + ICE_FLTR_PTYPE_NONF_IPV6_AH, + ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP, + ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP, + ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE, + ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION, + ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE, + ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION, + ICE_FLTR_PTYPE_NON_IP_L2, ICE_FLTR_PTYPE_FRAG_IPV4, ICE_FLTR_PTYPE_NONF_IPV6_UDP, ICE_FLTR_PTYPE_NONF_IPV6_TCP, diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c new file mode 100644 index 000000000000..1f4ba38b1599 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -0,0 +1,2204 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021, Intel Corporation. 
*/ + +#include "ice.h" +#include "ice_base.h" +#include "ice_lib.h" +#include "ice_flow.h" + +#define to_fltr_conf_from_desc(p) \ + container_of(p, struct virtchnl_fdir_fltr_conf, input) + +#define ICE_FLOW_PROF_TYPE_S 0 +#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S) +#define ICE_FLOW_PROF_VSI_S 32 +#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S) + +/* Flow profile ID format: + * [0:31] - flow type, flow + tun_offs + * [32:63] - VSI index + */ +#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \ + ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \ + (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M))) + +#define GTPU_TEID_OFFSET 4 +#define GTPU_EH_QFI_OFFSET 1 +#define GTPU_EH_QFI_MASK 0x3F +#define PFCP_S_OFFSET 0 +#define PFCP_S_MASK 0x1 +#define PFCP_PORT_NR 8805 + +#define FDIR_INSET_FLAG_ESP_S 0 +#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S) +#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S) +#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S) + +enum ice_fdir_tunnel_type { + ICE_FDIR_TUNNEL_TYPE_NONE = 0, + ICE_FDIR_TUNNEL_TYPE_GTPU, + ICE_FDIR_TUNNEL_TYPE_GTPU_EH, +}; + +struct virtchnl_fdir_fltr_conf { + struct ice_fdir_fltr input; + enum ice_fdir_tunnel_type ttype; + u64 inset_flag; + u32 flow_id; +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ether[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_TCP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_SCTP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_TCP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_SCTP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_GTPU_IP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_GTPU_IP, + VIRTCHNL_PROTO_HDR_GTPU_EH, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_L2TPV3, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + 
VIRTCHNL_PROTO_HDR_L2TPV3, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_ESP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_ESP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_AH, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_AH, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_ESP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_ESP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_PFCP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = { + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_PFCP, + VIRTCHNL_PROTO_HDR_NONE, +}; + +struct virtchnl_fdir_pattern_match_item { + enum virtchnl_proto_hdr_type *list; + u64 input_set; + u64 *meta; +}; + +static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = { + {vc_pattern_ipv4, 0, NULL}, + {vc_pattern_ipv4_tcp, 0, NULL}, + {vc_pattern_ipv4_udp, 0, NULL}, + {vc_pattern_ipv4_sctp, 0, NULL}, + {vc_pattern_ipv6, 0, NULL}, + {vc_pattern_ipv6_tcp, 0, NULL}, + {vc_pattern_ipv6_udp, 0, NULL}, + {vc_pattern_ipv6_sctp, 0, NULL}, +}; + +static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = { + {vc_pattern_ipv4, 0, NULL}, + {vc_pattern_ipv4_tcp, 0, NULL}, + {vc_pattern_ipv4_udp, 0, NULL}, + {vc_pattern_ipv4_sctp, 0, NULL}, + {vc_pattern_ipv6, 0, NULL}, + {vc_pattern_ipv6_tcp, 0, NULL}, + {vc_pattern_ipv6_udp, 0, NULL}, + {vc_pattern_ipv6_sctp, 0, NULL}, + {vc_pattern_ether, 0, NULL}, + {vc_pattern_ipv4_gtpu, 0, NULL}, + {vc_pattern_ipv4_gtpu_eh, 0, NULL}, + {vc_pattern_ipv4_l2tpv3, 0, NULL}, + {vc_pattern_ipv6_l2tpv3, 0, NULL}, + {vc_pattern_ipv4_esp, 0, NULL}, + {vc_pattern_ipv6_esp, 0, NULL}, + {vc_pattern_ipv4_ah, 0, NULL}, + {vc_pattern_ipv6_ah, 0, NULL}, + {vc_pattern_ipv4_nat_t_esp, 0, NULL}, + {vc_pattern_ipv6_nat_t_esp, 0, NULL}, + {vc_pattern_ipv4_pfcp, 0, NULL}, + {vc_pattern_ipv6_pfcp, 0, NULL}, +}; + +struct virtchnl_fdir_inset_map { + enum virtchnl_proto_hdr_field field; + enum ice_flow_field fld; + u64 flag; + u64 mask; +}; + +static const struct virtchnl_fdir_inset_map fdir_inset_map[] = { + {VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0}, + 
{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0}, + {VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0}, + {VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0}, + {VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0}, + {VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0}, + {VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0}, + {VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0}, + {VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0}, + {VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0}, + {VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0}, + {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI, + FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M}, + {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI, + FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M}, + {VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0}, + {VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0}, + {VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0}, +}; + +/** + * ice_vc_fdir_param_check + * @vf: pointer to the VF structure + * @vsi_id: VF relative VSI ID + * + * Check for the valid VSI ID, PF's state and VF's state + * + * Return: 0 on success, and -EINVAL on error. + */ +static int +ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id) +{ + struct ice_pf *pf = vf->pf; + + if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) + return -EINVAL; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + return -EINVAL; + + if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)) + return -EINVAL; + + if (vsi_id != vf->lan_vsi_num) + return -EINVAL; + + if (!ice_vc_isvalid_vsi_id(vf, vsi_id)) + return -EINVAL; + + if (!pf->vsi[vf->lan_vsi_idx]) + return -EINVAL; + + return 0; +} + +/** + * ice_vf_start_ctrl_vsi + * @vf: pointer to the VF structure + * + * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF + * + * Return: 0 on success, and other on error. + */ +static int ice_vf_start_ctrl_vsi(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *ctrl_vsi; + struct device *dev; + int err; + + dev = ice_pf_to_dev(pf); + if (vf->ctrl_vsi_idx != ICE_NO_VSI) + return -EEXIST; + + ctrl_vsi = ice_vf_ctrl_vsi_setup(vf); + if (!ctrl_vsi) { + dev_dbg(dev, "Could not setup control VSI for VF %d\n", + vf->vf_id); + return -ENOMEM; + } + + err = ice_vsi_open_ctrl(ctrl_vsi); + if (err) { + dev_dbg(dev, "Could not open control VSI for VF %d\n", + vf->vf_id); + goto err_vsi_open; + } + + return 0; + +err_vsi_open: + ice_vsi_release(ctrl_vsi); + if (vf->ctrl_vsi_idx != ICE_NO_VSI) { + pf->vsi[vf->ctrl_vsi_idx] = NULL; + vf->ctrl_vsi_idx = ICE_NO_VSI; + } + return err; +} + +/** + * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type + * @vf: pointer to the VF structure + * @flow: filter flow type + * + * Return: 0 on success, and other on error. 
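+ *
+ * Editorial sketch, not part of the original patch: callers pair this
+ * helper with ice_vc_fdir_free_prof() defined below, e.g.
+ *
+ *	ret = ice_vc_fdir_alloc_prof(vf, flow);
+ *	if (ret)
+ *		return ret;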
+ */ +static int +ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + + if (!fdir->fdir_prof) { + fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf), + ICE_FLTR_PTYPE_MAX, + sizeof(*fdir->fdir_prof), + GFP_KERNEL); + if (!fdir->fdir_prof) + return -ENOMEM; + } + + if (!fdir->fdir_prof[flow]) { + fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf), + sizeof(**fdir->fdir_prof), + GFP_KERNEL); + if (!fdir->fdir_prof[flow]) + return -ENOMEM; + } + + return 0; +} + +/** + * ice_vc_fdir_free_prof - free profile for this filter flow type + * @vf: pointer to the VF structure + * @flow: filter flow type + */ +static void +ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + + if (!fdir->fdir_prof) + return; + + if (!fdir->fdir_prof[flow]) + return; + + devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]); + fdir->fdir_prof[flow] = NULL; +} + +/** + * ice_vc_fdir_free_prof_all - free all the profile for this VF + * @vf: pointer to the VF structure + */ +static void ice_vc_fdir_free_prof_all(struct ice_vf *vf) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + enum ice_fltr_ptype flow; + + if (!fdir->fdir_prof) + return; + + for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++) + ice_vc_fdir_free_prof(vf, flow); + + devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof); + fdir->fdir_prof = NULL; +} + +/** + * ice_vc_fdir_parse_flow_fld + * @proto_hdr: virtual channel protocol filter header + * @conf: FDIR configuration for each filter + * @fld: field type array + * @fld_cnt: field counter + * + * Parse the virtual channel filter header and store them into field type array + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr, + struct virtchnl_fdir_fltr_conf *conf, + enum ice_flow_field *fld, int *fld_cnt) +{ + struct virtchnl_proto_hdr hdr; + u32 i; + + memcpy(&hdr, proto_hdr, sizeof(hdr)); + + for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) && + VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++) + if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) { + if (fdir_inset_map[i].mask && + ((fdir_inset_map[i].mask & conf->inset_flag) != + fdir_inset_map[i].flag)) + continue; + + fld[*fld_cnt] = fdir_inset_map[i].fld; + *fld_cnt += 1; + if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX) + return -EINVAL; + VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr, + fdir_inset_map[i].field); + } + + return 0; +} + +/** + * ice_vc_fdir_set_flow_fld + * @vf: pointer to the VF structure + * @fltr: virtual channel add cmd buffer + * @conf: FDIR configuration for each filter + * @seg: array of one or more packet segments that describe the flow + * + * Parse the virtual channel add msg buffer's field vector and store them into + * flow's packet segment field + * + * Return: 0 on success, and other on error. 
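+ *
+ * Editorial sketch, not part of the original patch: every field collected
+ * here is applied as a match-any entry, e.g. for the IPv4 source address:
+ *
+ *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
+ *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ *			 ICE_FLOW_FLD_OFF_INVAL, false);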
+ */ +static int +ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, + struct virtchnl_fdir_fltr_conf *conf, + struct ice_flow_seg_info *seg) +{ + struct virtchnl_fdir_rule *rule = &fltr->rule_cfg; + enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX]; + struct device *dev = ice_pf_to_dev(vf->pf); + struct virtchnl_proto_hdrs *proto; + int fld_cnt = 0; + int i; + + proto = &rule->proto_hdrs; + for (i = 0; i < proto->count; i++) { + struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i]; + int ret; + + ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt); + if (ret) + return ret; + } + + if (fld_cnt == 0) { + dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id); + return -EINVAL; + } + + for (i = 0; i < fld_cnt; i++) + ice_flow_set_fld(seg, fld[i], + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + + return 0; +} + +/** + * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header + * @vf: pointer to the VF structure + * @conf: FDIR configuration for each filter + * @seg: array of one or more packet segments that describe the flow + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, + struct ice_flow_seg_info *seg) +{ + enum ice_fltr_ptype flow = conf->input.flow_type; + enum ice_fdir_tunnel_type ttype = conf->ttype; + struct device *dev = ice_pf_to_dev(vf->pf); + + switch (flow) { + case ICE_FLTR_PTYPE_NON_IP_L2: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_ESP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_AH: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) { + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + } else { + dev_dbg(dev, "Invalid tunnel type 0x%x for VF 
%d\n", + flow, vf->vf_id); + return -EINVAL; + } + break; + case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_ESP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_AH: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + default: + dev_dbg(dev, "Invalid flow type 0x%x for VF %d failed\n", + flow, vf->vf_id); + return -EINVAL; + } + + return 0; +} + +/** + * ice_vc_fdir_rem_prof - remove profile for this filter flow type + * @vf: pointer to the VF structure + * @flow: filter flow type + * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter + */ +static void +ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + struct ice_fd_hw_prof *vf_prof; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vf_vsi; + struct device *dev; + struct ice_hw *hw; + u64 prof_id; + int i; + + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + if (!fdir->fdir_prof || !fdir->fdir_prof[flow]) + return; + + vf_prof = fdir->fdir_prof[flow]; + + vf_vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vf_vsi) { + dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id); + return; + } + + if (!fdir->prof_entry_cnt[flow][tun]) + return; + + prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, + flow, tun ? 
ICE_FLTR_PTYPE_MAX : 0); + + for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++) + if (vf_prof->entry_h[i][tun]) { + u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]); + + ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id); + ice_flow_rem_entry(hw, ICE_BLK_FD, + vf_prof->entry_h[i][tun]); + vf_prof->entry_h[i][tun] = 0; + } + + ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); + devm_kfree(dev, vf_prof->fdir_seg[tun]); + vf_prof->fdir_seg[tun] = NULL; + + for (i = 0; i < vf_prof->cnt; i++) + vf_prof->vsi_h[i] = 0; + + fdir->prof_entry_cnt[flow][tun] = 0; +} + +/** + * ice_vc_fdir_rem_prof_all - remove profile for this VF + * @vf: pointer to the VF structure + */ +static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf) +{ + enum ice_fltr_ptype flow; + + for (flow = ICE_FLTR_PTYPE_NONF_NONE; + flow < ICE_FLTR_PTYPE_MAX; flow++) { + ice_vc_fdir_rem_prof(vf, flow, 0); + ice_vc_fdir_rem_prof(vf, flow, 1); + } +} + +/** + * ice_vc_fdir_write_flow_prof + * @vf: pointer to the VF structure + * @flow: filter flow type + * @seg: array of one or more packet segments that describe the flow + * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter + * + * Write the flow's profile config and packet segment into the hardware + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, + struct ice_flow_seg_info *seg, int tun) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + struct ice_vsi *vf_vsi, *ctrl_vsi; + struct ice_flow_seg_info *old_seg; + struct ice_flow_prof *prof = NULL; + struct ice_fd_hw_prof *vf_prof; + enum ice_status status; + struct device *dev; + struct ice_pf *pf; + struct ice_hw *hw; + u64 entry1_h = 0; + u64 entry2_h = 0; + u64 prof_id; + int ret; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + vf_vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vf_vsi) + return -EINVAL; + + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + if (!ctrl_vsi) + return -EINVAL; + + vf_prof = fdir->fdir_prof[flow]; + old_seg = vf_prof->fdir_seg[tun]; + if (old_seg) { + if (!memcmp(old_seg, seg, sizeof(*seg))) { + dev_dbg(dev, "Duplicated profile for VF %d!\n", + vf->vf_id); + return -EEXIST; + } + + if (fdir->fdir_fltr_cnt[flow][tun]) { + ret = -EINVAL; + dev_dbg(dev, "Input set conflicts for VF %d\n", + vf->vf_id); + goto err_exit; + } + + /* remove previously allocated profile */ + ice_vc_fdir_rem_prof(vf, flow, tun); + } + + prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow, + tun ? 
ICE_FLTR_PTYPE_MAX : 0); + + status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, + tun + 1, &prof); + ret = ice_status_to_errno(status); + if (ret) { + dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n", + flow, vf->vf_id); + goto err_exit; + } + + status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry1_h); + ret = ice_status_to_errno(status); + if (ret) { + dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n", + flow, vf->vf_id); + goto err_prof; + } + + status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry2_h); + ret = ice_status_to_errno(status); + if (ret) { + dev_dbg(dev, + "Could not add flow 0x%x Ctrl VSI entry for VF %d\n", + flow, vf->vf_id); + goto err_entry_1; + } + + vf_prof->fdir_seg[tun] = seg; + vf_prof->cnt = 0; + fdir->prof_entry_cnt[flow][tun] = 0; + + vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h; + vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx; + vf_prof->cnt++; + fdir->prof_entry_cnt[flow][tun]++; + + vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h; + vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx; + vf_prof->cnt++; + fdir->prof_entry_cnt[flow][tun]++; + + return 0; + +err_entry_1: + ice_rem_prof_id_flow(hw, ICE_BLK_FD, + ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id); + ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h); +err_prof: + ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); +err_exit: + return ret; +} + +/** + * ice_vc_fdir_config_input_set + * @vf: pointer to the VF structure + * @fltr: virtual channel add cmd buffer + * @conf: FDIR configuration for each filter + * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter + * + * Config the input set type and value for virtual channel add msg buffer + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, + struct virtchnl_fdir_fltr_conf *conf, int tun) +{ + struct ice_fdir_fltr *input = &conf->input; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_flow_seg_info *seg; + enum ice_fltr_ptype flow; + int ret; + + flow = input->flow_type; + ret = ice_vc_fdir_alloc_prof(vf, flow); + if (ret) { + dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id); + return ret; + } + + seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL); + if (!seg) + return -ENOMEM; + + ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg); + if (ret) { + dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg); + if (ret) { + dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun); + if (ret == -EEXIST) { + devm_kfree(dev, seg); + } else if (ret) { + dev_dbg(dev, "Write flow profile for VF %d failed\n", + vf->vf_id); + goto err_exit; + } + + return 0; + +err_exit: + devm_kfree(dev, seg); + return ret; +} + +/** + * ice_vc_fdir_match_pattern + * @fltr: virtual channel add cmd buffer + * @type: virtual channel protocol filter header type + * + * Matching the header type by comparing fltr and type's value. + * + * Return: true on success, and false on error. 
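+ *
+ * Editorial sketch, not part of the original patch: a filter whose
+ * proto_hdrs list is {ETH, IPV4, TCP, NONE} matches vc_pattern_ipv4_tcp,
+ * since both lists agree element-wise and terminate at
+ * VIRTCHNL_PROTO_HDR_NONE:
+ *
+ *	if (ice_vc_fdir_match_pattern(fltr, vc_pattern_ipv4_tcp))
+ *		return 0;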
+ */ +static bool +ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr, + enum virtchnl_proto_hdr_type *type) +{ + struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; + int i = 0; + + while ((i < proto->count) && + (*type == proto->proto_hdr[i].type) && + (*type != VIRTCHNL_PROTO_HDR_NONE)) { + type++; + i++; + } + + return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE)); +} + +/** + * ice_vc_fdir_get_pattern - get the allowed pattern list + * @vf: pointer to the VF info + * @len: filter list length + * + * Return: pointer to allowed filter list + */ +static const struct virtchnl_fdir_pattern_match_item * +ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len) +{ + const struct virtchnl_fdir_pattern_match_item *item; + struct ice_pf *pf = vf->pf; + struct ice_hw *hw; + + hw = &pf->hw; + if (!strncmp(hw->active_pkg_name, "ICE COMMS Package", + sizeof(hw->active_pkg_name))) { + item = vc_fdir_pattern_comms; + *len = ARRAY_SIZE(vc_fdir_pattern_comms); + } else { + item = vc_fdir_pattern_os; + *len = ARRAY_SIZE(vc_fdir_pattern_os); + } + + return item; +} + +/** + * ice_vc_fdir_search_pattern + * @vf: pointer to the VF info + * @fltr: virtual channel add cmd buffer + * + * Search the supported pattern list for a pattern that matches the filter + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr) +{ + const struct virtchnl_fdir_pattern_match_item *pattern; + int len, i; + + pattern = ice_vc_fdir_get_pattern(vf, &len); + + for (i = 0; i < len; i++) + if (ice_vc_fdir_match_pattern(fltr, pattern[i].list)) + return 0; + + return -EINVAL; +} + +/** + * ice_vc_fdir_parse_pattern + * @vf: pointer to the VF info + * @fltr: virtual channel add cmd buffer + * @conf: FDIR configuration for each filter + * + * Parse the virtual channel filter's pattern and store it in conf + * + * Return: 0 on success, and other on error.
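+ *
+ * Editorial sketch, not part of the original patch: for an IPv4/TCP rule
+ * the header walk below leaves l3 == VIRTCHNL_PROTO_HDR_IPV4 and, when the
+ * TCP field selector is set, fills
+ *
+ *	input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+ *	input->ip.v4.src_port = tcph->source;
+ *	input->ip.v4.dst_port = tcph->dest;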
+ */ +static int +ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, + struct virtchnl_fdir_fltr_conf *conf) +{ + struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; + enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE; + enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_fdir_fltr *input = &conf->input; + int i; + + if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) { + dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n", + proto->count, vf->vf_id); + return -EINVAL; + } + + for (i = 0; i < proto->count; i++) { + struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i]; + struct ip_esp_hdr *esph; + struct ip_auth_hdr *ah; + struct sctphdr *sctph; + struct ipv6hdr *ip6h; + struct udphdr *udph; + struct tcphdr *tcph; + struct ethhdr *eth; + struct iphdr *iph; + u8 s_field; + u8 *rawh; + + switch (hdr->type) { + case VIRTCHNL_PROTO_HDR_ETH: + eth = (struct ethhdr *)hdr->buffer; + input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2; + + if (hdr->field_selector) + input->ext_data.ether_type = eth->h_proto; + break; + case VIRTCHNL_PROTO_HDR_IPV4: + iph = (struct iphdr *)hdr->buffer; + l3 = VIRTCHNL_PROTO_HDR_IPV4; + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER; + + if (hdr->field_selector) { + input->ip.v4.src_ip = iph->saddr; + input->ip.v4.dst_ip = iph->daddr; + input->ip.v4.tos = iph->tos; + input->ip.v4.proto = iph->protocol; + } + break; + case VIRTCHNL_PROTO_HDR_IPV6: + ip6h = (struct ipv6hdr *)hdr->buffer; + l3 = VIRTCHNL_PROTO_HDR_IPV6; + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER; + + if (hdr->field_selector) { + memcpy(input->ip.v6.src_ip, + ip6h->saddr.in6_u.u6_addr8, + sizeof(ip6h->saddr)); + memcpy(input->ip.v6.dst_ip, + ip6h->daddr.in6_u.u6_addr8, + sizeof(ip6h->daddr)); + input->ip.v6.tc = ((u8)(ip6h->priority) << 4) | + (ip6h->flow_lbl[0] >> 4); + input->ip.v6.proto = ip6h->nexthdr; + } + break; + case VIRTCHNL_PROTO_HDR_TCP: + tcph = (struct tcphdr *)hdr->buffer; + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP; + + if (hdr->field_selector) { + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) { + input->ip.v4.src_port = tcph->source; + input->ip.v4.dst_port = tcph->dest; + } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) { + input->ip.v6.src_port = tcph->source; + input->ip.v6.dst_port = tcph->dest; + } + } + break; + case VIRTCHNL_PROTO_HDR_UDP: + udph = (struct udphdr *)hdr->buffer; + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP; + + if (hdr->field_selector) { + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) { + input->ip.v4.src_port = udph->source; + input->ip.v4.dst_port = udph->dest; + } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) { + input->ip.v6.src_port = udph->source; + input->ip.v6.dst_port = udph->dest; + } + } + break; + case VIRTCHNL_PROTO_HDR_SCTP: + sctph = (struct sctphdr *)hdr->buffer; + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->flow_type = + ICE_FLTR_PTYPE_NONF_IPV4_SCTP; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->flow_type = + ICE_FLTR_PTYPE_NONF_IPV6_SCTP; + + if (hdr->field_selector) { + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) { + input->ip.v4.src_port = sctph->source; + input->ip.v4.dst_port = sctph->dest; + } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) { + input->ip.v6.src_port = sctph->source; + input->ip.v6.dst_port = 
sctph->dest; + } + } + break; + case VIRTCHNL_PROTO_HDR_L2TPV3: + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3; + + if (hdr->field_selector) + input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer); + break; + case VIRTCHNL_PROTO_HDR_ESP: + esph = (struct ip_esp_hdr *)hdr->buffer; + if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && + l4 == VIRTCHNL_PROTO_HDR_UDP) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && + l4 == VIRTCHNL_PROTO_HDR_UDP) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && + l4 == VIRTCHNL_PROTO_HDR_NONE) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && + l4 == VIRTCHNL_PROTO_HDR_NONE) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP; + + if (l4 == VIRTCHNL_PROTO_HDR_UDP) + conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP; + else + conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC; + + if (hdr->field_selector) { + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->ip.v4.sec_parm_idx = esph->spi; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->ip.v6.sec_parm_idx = esph->spi; + } + break; + case VIRTCHNL_PROTO_HDR_AH: + ah = (struct ip_auth_hdr *)hdr->buffer; + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH; + + if (hdr->field_selector) { + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->ip.v4.sec_parm_idx = ah->spi; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->ip.v6.sec_parm_idx = ah->spi; + } + break; + case VIRTCHNL_PROTO_HDR_PFCP: + rawh = (u8 *)hdr->buffer; + s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK; + if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE; + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1) + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION; + + if (hdr->field_selector) { + if (l3 == VIRTCHNL_PROTO_HDR_IPV4) + input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR); + else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) + input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR); + } + break; + case VIRTCHNL_PROTO_HDR_GTPU_IP: + rawh = (u8 *)hdr->buffer; + input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER; + + if (hdr->field_selector) + input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]); + conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU; + break; + case VIRTCHNL_PROTO_HDR_GTPU_EH: + rawh = (u8 *)hdr->buffer; + + if (hdr->field_selector) + input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK; + conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH; + break; + default: + dev_dbg(dev, "Invalid header type 0x:%x for VF %d\n", + hdr->type, vf->vf_id); + return -EINVAL; + } + } + + return 0; +} + +/** + * ice_vc_fdir_parse_action + * @vf: pointer to the VF info + * @fltr: virtual channel add cmd buffer + * @conf: FDIR configuration for each filter + * + * Parse the virtual channel filter's action and store them into conf + * + * Return: 0 on success, and other on error. 
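+ *
+ * Editorial sketch, not part of the original patch: a valid action set
+ * carries exactly one destination action and at most one mark action; a
+ * QUEUE plus MARK pair ends up as
+ *
+ *	input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+ *	input->q_index = action->act_conf.queue.index;
+ *	input->fltr_id = action->act_conf.mark_id;
+ *	input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;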
+ */ +static int +ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, + struct virtchnl_fdir_fltr_conf *conf) +{ + struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_fdir_fltr *input = &conf->input; + u32 dest_num = 0; + u32 mark_num = 0; + int i; + + if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) { + dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n", + as->count, vf->vf_id); + return -EINVAL; + } + + for (i = 0; i < as->count; i++) { + struct virtchnl_filter_action *action = &as->actions[i]; + + switch (action->type) { + case VIRTCHNL_ACTION_PASSTHRU: + dest_num++; + input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER; + break; + case VIRTCHNL_ACTION_DROP: + dest_num++; + input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT; + break; + case VIRTCHNL_ACTION_QUEUE: + dest_num++; + input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX; + input->q_index = action->act_conf.queue.index; + break; + case VIRTCHNL_ACTION_Q_REGION: + dest_num++; + input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP; + input->q_index = action->act_conf.queue.index; + input->q_region = action->act_conf.queue.region; + break; + case VIRTCHNL_ACTION_MARK: + mark_num++; + input->fltr_id = action->act_conf.mark_id; + input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE; + break; + default: + dev_dbg(dev, "Invalid action type:0x%x for VF %d\n", + action->type, vf->vf_id); + return -EINVAL; + } + } + + if (dest_num == 0 || dest_num >= 2) { + dev_dbg(dev, "Invalid destination action for VF %d\n", + vf->vf_id); + return -EINVAL; + } + + if (mark_num >= 2) { + dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id); + return -EINVAL; + } + + return 0; +} + +/** + * ice_vc_validate_fdir_fltr - validate the virtual channel filter + * @vf: pointer to the VF info + * @fltr: virtual channel add cmd buffer + * @conf: FDIR configuration for each filter + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, + struct virtchnl_fdir_fltr_conf *conf) +{ + int ret; + + ret = ice_vc_fdir_search_pattern(vf, fltr); + if (ret) + return ret; + + ret = ice_vc_fdir_parse_pattern(vf, fltr, conf); + if (ret) + return ret; + + return ice_vc_fdir_parse_action(vf, fltr, conf); +} + +/** + * ice_vc_fdir_comp_rules - compare if two filter rules have the same value + * @conf_a: FDIR configuration for filter a + * @conf_b: FDIR configuration for filter b + * + * Return: true if the two rules have the same value, false otherwise.
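+ *
+ * Editorial sketch, not part of the original patch: duplicate detection in
+ * ice_vc_fdir_is_dup_fltr() below reduces to running
+ *
+ *	if (ice_vc_fdir_comp_rules(node, conf))
+ *		return true;
+ *
+ * against every node already on the VF's fdir_rule_list.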
+ */ +static bool +ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a, + struct virtchnl_fdir_fltr_conf *conf_b) +{ + struct ice_fdir_fltr *a = &conf_a->input; + struct ice_fdir_fltr *b = &conf_b->input; + + if (conf_a->ttype != conf_b->ttype) + return false; + if (a->flow_type != b->flow_type) + return false; + if (memcmp(&a->ip, &b->ip, sizeof(a->ip))) + return false; + if (memcmp(&a->mask, &b->mask, sizeof(a->mask))) + return false; + if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data))) + return false; + if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask))) + return false; + if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data))) + return false; + if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask))) + return false; + if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data))) + return false; + if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask))) + return false; + + return true; +} + +/** + * ice_vc_fdir_is_dup_fltr + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * + * Check if there is a duplicated rule with the same conf value + * + * Return: true if a duplicate rule exists, false otherwise. + */ +static bool +ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf) +{ + struct ice_fdir_fltr *desc; + bool ret; + + list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) { + struct virtchnl_fdir_fltr_conf *node = + to_fltr_conf_from_desc(desc); + + ret = ice_vc_fdir_comp_rules(node, conf); + if (ret) + return true; + } + + return false; +} + +/** + * ice_vc_fdir_insert_entry + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * @id: pointer to ID value allocated by driver + * + * Insert FDIR conf entry into list and allocate ID for this filter + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_insert_entry(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, u32 *id) +{ + struct ice_fdir_fltr *input = &conf->input; + int i; + + /* alloc ID corresponding with conf */ + i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0, + ICE_FDIR_MAX_FLTRS, GFP_KERNEL); + if (i < 0) + return -EINVAL; + *id = i; + + list_add(&input->fltr_node, &vf->fdir.fdir_rule_list); + return 0; +} + +/** + * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * @id: filter rule's ID + */ +static void +ice_vc_fdir_remove_entry(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, u32 id) +{ + struct ice_fdir_fltr *input = &conf->input; + + idr_remove(&vf->fdir.fdir_rule_idr, id); + list_del(&input->fltr_node); +} + +/** + * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value + * @vf: pointer to the VF info + * @id: filter rule's ID + * + * Return: pointer to the filter configuration on success, NULL otherwise.
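+ *
+ * Editorial sketch, not part of the original patch; the caller shape is
+ * assumed rather than shown in this hunk: a delete/query handler would
+ * resolve a VF-supplied flow ID with
+ *
+ *	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
+ *	if (!conf)
+ *		return -EINVAL;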
+ */ +static struct virtchnl_fdir_fltr_conf * +ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id) +{ + return idr_find(&vf->fdir.fdir_rule_idr, id); +} + +/** + * ice_vc_fdir_flush_entry - remove all FDIR conf entry + * @vf: pointer to the VF info + */ +static void ice_vc_fdir_flush_entry(struct ice_vf *vf) +{ + struct virtchnl_fdir_fltr_conf *conf; + struct ice_fdir_fltr *desc, *temp; + + list_for_each_entry_safe(desc, temp, + &vf->fdir.fdir_rule_list, fltr_node) { + conf = to_fltr_conf_from_desc(desc); + list_del(&desc->fltr_node); + devm_kfree(ice_pf_to_dev(vf->pf), conf); + } +} + +/** + * ice_vc_fdir_write_fltr - write filter rule into hardware + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * @add: true implies add rule, false implies del rules + * @is_tun: false implies non-tunnel type filter, true implies tunnel filter + * + * Return: 0 on success, and other on error. + */ +static int ice_vc_fdir_write_fltr(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, + bool add, bool is_tun) +{ + struct ice_fdir_fltr *input = &conf->input; + struct ice_vsi *vsi, *ctrl_vsi; + struct ice_fltr_desc desc; + enum ice_status status; + struct device *dev; + struct ice_pf *pf; + struct ice_hw *hw; + int ret; + u8 *pkt; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id); + return -EINVAL; + } + + input->dest_vsi = vsi->idx; + input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW; + + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + if (!ctrl_vsi) { + dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id); + return -EINVAL; + } + + pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL); + if (!pkt) + return -ENOMEM; + + ice_fdir_get_prgm_desc(hw, input, &desc, add); + status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); + ret = ice_status_to_errno(status); + if (ret) { + dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n", + vf->vf_id, input->flow_type); + goto err_free_pkt; + } + + ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt); + if (ret) + goto err_free_pkt; + + return 0; + +err_free_pkt: + devm_kfree(dev, pkt); + return ret; +} + +/** + * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler + * @t: pointer to timer_list + */ +static void ice_vf_fdir_timer(struct timer_list *t) +{ + struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr); + struct ice_vf_fdir_ctx *ctx_done; + struct ice_vf_fdir *fdir; + unsigned long flags; + struct ice_vf *vf; + struct ice_pf *pf; + + fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq); + vf = container_of(fdir, struct ice_vf, fdir); + ctx_done = &fdir->ctx_done; + pf = vf->pf; + spin_lock_irqsave(&fdir->ctx_lock, flags); + if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) { + spin_unlock_irqrestore(&fdir->ctx_lock, flags); + WARN_ON_ONCE(1); + return; + } + + ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID; + + ctx_done->flags |= ICE_VF_FDIR_CTX_VALID; + ctx_done->conf = ctx_irq->conf; + ctx_done->stat = ICE_FDIR_CTX_TIMEOUT; + ctx_done->v_opcode = ctx_irq->v_opcode; + spin_unlock_irqrestore(&fdir->ctx_lock, flags); + + set_bit(__ICE_FD_VF_FLUSH_CTX, pf->state); + ice_service_task_schedule(pf); +} + +/** + * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler + * @ctrl_vsi: pointer to a VF's CTRL VSI + * @rx_desc: pointer to FDIR Rx queue descriptor + */ +void +ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, + union ice_32b_rx_flex_desc *rx_desc) +{ + struct ice_pf 
*pf = ctrl_vsi->back; + struct ice_vf_fdir_ctx *ctx_done; + struct ice_vf_fdir_ctx *ctx_irq; + struct ice_vf_fdir *fdir; + unsigned long flags; + struct device *dev; + struct ice_vf *vf; + int ret; + + vf = &pf->vf[ctrl_vsi->vf_id]; + + fdir = &vf->fdir; + ctx_done = &fdir->ctx_done; + ctx_irq = &fdir->ctx_irq; + dev = ice_pf_to_dev(pf); + spin_lock_irqsave(&fdir->ctx_lock, flags); + if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) { + spin_unlock_irqrestore(&fdir->ctx_lock, flags); + WARN_ON_ONCE(1); + return; + } + + ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID; + + ctx_done->flags |= ICE_VF_FDIR_CTX_VALID; + ctx_done->conf = ctx_irq->conf; + ctx_done->stat = ICE_FDIR_CTX_IRQ; + ctx_done->v_opcode = ctx_irq->v_opcode; + memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc)); + spin_unlock_irqrestore(&fdir->ctx_lock, flags); + + ret = del_timer(&ctx_irq->rx_tmr); + if (!ret) + dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id); + + set_bit(__ICE_FD_VF_FLUSH_CTX, pf->state); + ice_service_task_schedule(pf); +} + +/** + * ice_vf_fdir_dump_info - dump FDIR information for diagnosis + * @vf: pointer to the VF info + */ +static void ice_vf_fdir_dump_info(struct ice_vf *vf) +{ + struct ice_vsi *vf_vsi; + u32 fd_size, fd_cnt; + struct device *dev; + struct ice_pf *pf; + struct ice_hw *hw; + u16 vsi_num; + + pf = vf->pf; + hw = &pf->hw; + dev = ice_pf_to_dev(pf); + vf_vsi = pf->vsi[vf->lan_vsi_idx]; + vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx); + + fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num)); + fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num)); + dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x", + vf->vf_id, + (fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S, + (fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S, + (fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S, + (fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S); +} + +/** + * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor + * @vf: pointer to the VF info + * @ctx: FDIR context info for post processing + * @status: virtchnl FDIR program status + * + * Return: 0 on success, and other on error. 
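+ *
+ * Editorial note, not part of the original patch: the checks below map the
+ * writeback fields onto a virtchnl status roughly as follows:
+ *
+ *	DD clear              -> VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
+ *	FAIL set on PROG_ADD  -> VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
+ *	FAIL set on PROG_DEL  -> VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
+ *	FAIL_PROF set         -> VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
+ *	otherwise             -> VIRTCHNL_FDIR_SUCCESS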
+ */ +static int +ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, + enum virtchnl_fdir_prgm_status *status) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + u32 stat_err, error, prog_id; + int ret; + + stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0); + if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >> + ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) { + *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id); + ret = -EINVAL; + goto err_exit; + } + + prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >> + ICE_FXD_FLTR_WB_QW1_PROG_ID_S; + if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD && + ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) { + dev_err(dev, "VF %d: Desc show add, but ctx not", + vf->vf_id); + *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID; + ret = -EINVAL; + goto err_exit; + } + + if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL && + ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) { + dev_err(dev, "VF %d: Desc show del, but ctx not", + vf->vf_id); + *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID; + ret = -EINVAL; + goto err_exit; + } + + error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >> + ICE_FXD_FLTR_WB_QW1_FAIL_S; + if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) { + if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) { + dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table", + vf->vf_id); + *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + } else { + dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry", + vf->vf_id); + *status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST; + } + ret = -EINVAL; + goto err_exit; + } + + error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >> + ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S; + if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) { + dev_err(dev, "VF %d: Profile matching error", vf->vf_id); + *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + ret = -EINVAL; + goto err_exit; + } + + *status = VIRTCHNL_FDIR_SUCCESS; + + return 0; + +err_exit: + ice_vf_fdir_dump_info(vf); + return ret; +} + +/** + * ice_vc_add_fdir_fltr_post + * @vf: pointer to the VF structure + * @ctx: FDIR context info for post processing + * @status: virtchnl FDIR program status + * @success: true implies success, false implies failure + * + * Post process for flow director add command. If success, then do post process + * and send back success msg by virtchnl. Otherwise, do context reversion and + * send back failure msg by virtchnl. + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, + enum virtchnl_fdir_prgm_status status, + bool success) +{ + struct virtchnl_fdir_fltr_conf *conf = ctx->conf; + struct device *dev = ice_pf_to_dev(vf->pf); + enum virtchnl_status_code v_ret; + struct virtchnl_fdir_add *resp; + int ret, len, is_tun; + + v_ret = VIRTCHNL_STATUS_SUCCESS; + len = sizeof(*resp); + resp = kzalloc(len, GFP_KERNEL); + if (!resp) { + len = 0; + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id); + goto err_exit; + } + + if (!success) + goto err_exit; + + is_tun = 0; + resp->status = status; + resp->flow_id = conf->flow_id; + vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++; + + ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret, + (u8 *)resp, len); + kfree(resp); + + dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n", + vf->vf_id, conf->flow_id, + (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ? 
+ "add" : "del"); + return ret; + +err_exit: + if (resp) + resp->status = status; + ice_vc_fdir_remove_entry(vf, conf, conf->flow_id); + devm_kfree(dev, conf); + + ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret, + (u8 *)resp, len); + kfree(resp); + return ret; +} + +/** + * ice_vc_del_fdir_fltr_post + * @vf: pointer to the VF structure + * @ctx: FDIR context info for post processing + * @status: virtchnl FDIR program status + * @success: true implies success, false implies failure + * + * Post process for flow director del command. If success, then do post process + * and send back success msg by virtchnl. Otherwise, do context reversion and + * send back failure msg by virtchnl. + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, + enum virtchnl_fdir_prgm_status status, + bool success) +{ + struct virtchnl_fdir_fltr_conf *conf = ctx->conf; + struct device *dev = ice_pf_to_dev(vf->pf); + enum virtchnl_status_code v_ret; + struct virtchnl_fdir_del *resp; + int ret, len, is_tun; + + v_ret = VIRTCHNL_STATUS_SUCCESS; + len = sizeof(*resp); + resp = kzalloc(len, GFP_KERNEL); + if (!resp) { + len = 0; + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id); + goto err_exit; + } + + if (!success) + goto err_exit; + + is_tun = 0; + resp->status = status; + ice_vc_fdir_remove_entry(vf, conf, conf->flow_id); + vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--; + + ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret, + (u8 *)resp, len); + kfree(resp); + + dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n", + vf->vf_id, conf->flow_id, + (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ? + "add" : "del"); + devm_kfree(dev, conf); + return ret; + +err_exit: + if (resp) + resp->status = status; + if (success) + devm_kfree(dev, conf); + + ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret, + (u8 *)resp, len); + kfree(resp); + return ret; +} + +/** + * ice_flush_fdir_ctx + * @pf: pointer to the PF structure + * + * Flush all the pending event on ctx_done list and process them. 
+ */ +void ice_flush_fdir_ctx(struct ice_pf *pf) +{ + int i; + + if (!test_and_clear_bit(__ICE_FD_VF_FLUSH_CTX, pf->state)) + return; + + ice_for_each_vf(pf, i) { + struct device *dev = ice_pf_to_dev(pf); + enum virtchnl_fdir_prgm_status status; + struct ice_vf *vf = &pf->vf[i]; + struct ice_vf_fdir_ctx *ctx; + unsigned long flags; + int ret; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + continue; + + if (vf->ctrl_vsi_idx == ICE_NO_VSI) + continue; + + ctx = &vf->fdir.ctx_done; + spin_lock_irqsave(&vf->fdir.ctx_lock, flags); + if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) { + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); + continue; + } + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); + + WARN_ON(ctx->stat == ICE_FDIR_CTX_READY); + if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) { + status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT; + dev_err(dev, "VF %d: ctrl_vsi irq timeout\n", + vf->vf_id); + goto err_exit; + } + + ret = ice_vf_verify_rx_desc(vf, ctx, &status); + if (ret) + goto err_exit; + + if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) + ice_vc_add_fdir_fltr_post(vf, ctx, status, true); + else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER) + ice_vc_del_fdir_fltr_post(vf, ctx, status, true); + else + dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id); + + spin_lock_irqsave(&vf->fdir.ctx_lock, flags); + ctx->flags &= ~ICE_VF_FDIR_CTX_VALID; + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); + continue; +err_exit: + if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) + ice_vc_add_fdir_fltr_post(vf, ctx, status, false); + else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER) + ice_vc_del_fdir_fltr_post(vf, ctx, status, false); + else + dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id); + + spin_lock_irqsave(&vf->fdir.ctx_lock, flags); + ctx->flags &= ~ICE_VF_FDIR_CTX_VALID; + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); + } +} + +/** + * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler + * @vf: pointer to the VF structure + * @conf: FDIR configuration for each filter + * @v_opcode: virtual channel operation code + * + * Return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf, + enum virtchnl_ops v_opcode) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vf_fdir_ctx *ctx; + unsigned long flags; + + ctx = &vf->fdir.ctx_irq; + spin_lock_irqsave(&vf->fdir.ctx_lock, flags); + if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) || + (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) { + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); + dev_dbg(dev, "VF %d: Last request is still in progress\n", + vf->vf_id); + return -EBUSY; + } + ctx->flags |= ICE_VF_FDIR_CTX_VALID; + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); + + ctx->conf = conf; + ctx->v_opcode = v_opcode; + ctx->stat = ICE_FDIR_CTX_READY; + timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0); + + mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies)); + + return 0; +} + +/** + * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler + * @vf: pointer to the VF structure
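/*
 * For intuition, the ctx_irq/ctx_done pair managed by the timer, the IRQ
 * handler and the flush routine above acts as a two-slot hand-off: the
 * completion path moves the armed request into the done slot, and the
 * service task drains it later. A rough single-threaded userspace analogue
 * of that state machine; locking and the real driver types are deliberately
 * omitted, and the names are simplified.
 */
#include <stdbool.h>
#include <stdio.h>

#define CTX_VALID 0x1

struct ctx { unsigned int flags; int stat; };

static struct ctx ctx_irq, ctx_done;

static bool complete_request(int stat) /* IRQ/timer side */
{
    if (!(ctx_irq.flags & CTX_VALID))
        return false; /* nothing armed: spurious event */
    ctx_irq.flags &= ~CTX_VALID;
    ctx_done = (struct ctx){ .flags = CTX_VALID, .stat = stat };
    return true; /* caller would now schedule the flush */
}

static void flush_done(void) /* service-task side */
{
    if (!(ctx_done.flags & CTX_VALID))
        return;
    printf("post-processing, stat=%d\n", ctx_done.stat);
    ctx_done.flags &= ~CTX_VALID;
}

int main(void)
{
    ctx_irq.flags = CTX_VALID; /* a request has been armed */
    if (complete_request(1))
        flush_done();
    return 0;
}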
+ */ +static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf) +{ + struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq; + unsigned long flags; + + del_timer(&ctx->rx_tmr); + spin_lock_irqsave(&vf->fdir.ctx_lock, flags); + ctx->flags &= ~ICE_VF_FDIR_CTX_VALID; + spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); +} + +/** + * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Return: 0 on success, and other on error. + */ +int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg; + struct virtchnl_fdir_add *stat = NULL; + struct virtchnl_fdir_fltr_conf *conf; + enum virtchnl_status_code v_ret; + struct device *dev; + struct ice_pf *pf; + int is_tun = 0; + int len = 0; + int ret; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + ret = ice_vc_fdir_param_check(vf, fltr->vsi_id); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + ret = ice_vf_start_ctrl_vsi(vf); + if (ret && (ret != -EEXIST)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n", + vf->vf_id, ret); + goto err_exit; + } + + stat = kzalloc(sizeof(*stat), GFP_KERNEL); + if (!stat) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL); + if (!conf) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + len = sizeof(*stat); + ret = ice_vc_validate_fdir_fltr(vf, fltr, conf); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID; + dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id); + goto err_free_conf; + } + + if (fltr->validate_only) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_SUCCESS; + devm_kfree(dev, conf); + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, + v_ret, (u8 *)stat, len); + goto exit; + } + + ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT; + dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n", + vf->vf_id, ret); + goto err_free_conf; + } + + ret = ice_vc_fdir_is_dup_fltr(vf, conf); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST; + dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n", + vf->vf_id); + goto err_free_conf; + } + + ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id); + goto err_free_conf; + } + + ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id); + goto err_free_conf; + } + + ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n", + vf->vf_id, ret); + goto err_rem_entry; + } + +exit: + kfree(stat); + return ret; + +err_rem_entry: + ice_vc_fdir_clear_irq_ctx(vf); + 
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id); +err_free_conf: + devm_kfree(dev, conf); +err_exit: + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret, + (u8 *)stat, len); + kfree(stat); + return ret; +} + +/** + * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Return: 0 on success, and other on error. + */ +int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg; + struct virtchnl_fdir_del *stat = NULL; + struct virtchnl_fdir_fltr_conf *conf; + enum virtchnl_status_code v_ret; + struct device *dev; + struct ice_pf *pf; + int is_tun = 0; + int len = 0; + int ret; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + ret = ice_vc_fdir_param_check(vf, fltr->vsi_id); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + stat = kzalloc(sizeof(*stat), GFP_KERNEL); + if (!stat) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id); + goto err_exit; + } + + len = sizeof(*stat); + + conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id); + if (!conf) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST; + dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n", + vf->vf_id, fltr->flow_id); + goto err_exit; + } + + /* Just return failure when ctrl_vsi idx is invalid */ + if (vf->ctrl_vsi_idx == ICE_NO_VSI) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id); + goto err_exit; + } + + ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id); + goto err_exit; + } + + ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun); + if (ret) { + v_ret = VIRTCHNL_STATUS_SUCCESS; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n", + vf->vf_id, ret); + goto err_del_tmr; + } + + kfree(stat); + + return ret; + +err_del_tmr: + ice_vc_fdir_clear_irq_ctx(vf); +err_exit: + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret, + (u8 *)stat, len); + kfree(stat); + return ret; +} + +/** + * ice_vf_fdir_init - init FDIR resource for VF + * @vf: pointer to the VF info + */ +void ice_vf_fdir_init(struct ice_vf *vf) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + + idr_init(&fdir->fdir_rule_idr); + INIT_LIST_HEAD(&fdir->fdir_rule_list); + + spin_lock_init(&fdir->ctx_lock); + fdir->ctx_irq.flags = 0; + fdir->ctx_done.flags = 0; +} + +/** + * ice_vf_fdir_exit - destroy FDIR resource for VF + * @vf: pointer to the VF info + */ +void ice_vf_fdir_exit(struct ice_vf *vf) +{ + ice_vc_fdir_flush_entry(vf); + idr_destroy(&vf->fdir.fdir_rule_idr); + ice_vc_fdir_rem_prof_all(vf); + ice_vc_fdir_free_prof_all(vf); +} diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h new file mode 100644 index 000000000000..f4e629f4c09b --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021, Intel Corporation. 
*/ + +#ifndef _ICE_VIRTCHNL_FDIR_H_ +#define _ICE_VIRTCHNL_FDIR_H_ + +struct ice_vf; +struct ice_pf; + +enum ice_fdir_ctx_stat { + ICE_FDIR_CTX_READY, + ICE_FDIR_CTX_IRQ, + ICE_FDIR_CTX_TIMEOUT, +}; + +struct ice_vf_fdir_ctx { + struct timer_list rx_tmr; + enum virtchnl_ops v_opcode; + enum ice_fdir_ctx_stat stat; + union ice_32b_rx_flex_desc rx_desc; +#define ICE_VF_FDIR_CTX_VALID BIT(0) + u32 flags; + + void *conf; +}; + +/* VF FDIR information structure */ +struct ice_vf_fdir { + u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; + int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; + struct ice_fd_hw_prof **fdir_prof; + + struct idr fdir_rule_idr; + struct list_head fdir_rule_list; + + spinlock_t ctx_lock; /* protects FDIR context info */ + struct ice_vf_fdir_ctx ctx_irq; + struct ice_vf_fdir_ctx ctx_done; +}; + +#ifdef CONFIG_PCI_IOV +int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg); +int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg); +void ice_vf_fdir_init(struct ice_vf *vf); +void ice_vf_fdir_exit(struct ice_vf *vf); +void +ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, + union ice_32b_rx_flex_desc *rx_desc); +void ice_flush_fdir_ctx(struct ice_pf *pf); +#else +static inline void +ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, union ice_32b_rx_flex_desc *rx_desc) { } +static inline void ice_flush_fdir_ctx(struct ice_pf *pf) { } +#endif /* CONFIG_PCI_IOV */ +#endif /* _ICE_VIRTCHNL_FDIR_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 1f38a8d0c525..78679ece2e08 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -202,6 +202,25 @@ static void ice_vf_vsi_release(struct ice_vf *vf) } /** + * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access + * @vf: VF that control VSI is being invalidated on + */ +static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf) +{ + vf->ctrl_vsi_idx = ICE_NO_VSI; +} + +/** + * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it + * @vf: VF that control VSI is being released on + */ +static void ice_vf_ctrl_vsi_release(struct ice_vf *vf) +{ + ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]); + ice_vf_ctrl_invalidate_vsi(vf); +} + +/** * ice_free_vf_res - Free a VF's resources * @vf: pointer to the VF info */ @@ -214,6 +233,10 @@ static void ice_free_vf_res(struct ice_vf *vf) * accessing the VF's VSI after it's freed or invalidated. */ clear_bit(ICE_VF_STATE_INIT, vf->vf_states); + ice_vf_fdir_exit(vf); + /* free VF control VSI */ + if (vf->ctrl_vsi_idx != ICE_NO_VSI) + ice_vf_ctrl_vsi_release(vf); /* free VSI and disconnect it from the parent uplink */ if (vf->lan_vsi_idx != ICE_NO_VSI) { @@ -560,6 +583,28 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) } /** + * ice_vf_ctrl_vsi_setup - Set up a VF control VSI + * @vf: VF to setup control VSI for + * + * Returns pointer to the successfully allocated VSI struct on success, + * otherwise returns NULL on failure. 
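/*
 * ice_virtchnl_fdir.h above uses the stock kernel idiom for optional
 * features: real prototypes under CONFIG_PCI_IOV, empty static inline stubs
 * otherwise, so call sites never need their own #ifdefs. A tiny standalone
 * illustration of the idiom; FEATURE_X stands in for a real config option.
 */
#include <stdio.h>

#ifdef FEATURE_X
void feature_init(void) { printf("feature enabled\n"); }
#else
static inline void feature_init(void) { } /* compiles away to nothing */
#endif

int main(void)
{
    feature_init(); /* the call site stays unconditional either way */
    return 0;
}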
+ */ +struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) +{ + struct ice_port_info *pi = ice_vf_get_port_info(vf); + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id); + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n"); + ice_vf_ctrl_invalidate_vsi(vf); + } + + return vsi; +} + +/** * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space * @pf: pointer to PF structure * @vf: pointer to VF that the first MSIX vector index is being calculated for @@ -1256,6 +1301,13 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_for_each_vf(pf, v) { vf = &pf->vf[v]; + ice_vf_fdir_exit(vf); + /* clean VF control VSI when resetting VFs since it should be + * setup only when VF creates its first FDIR rule. + */ + if (vf->ctrl_vsi_idx != ICE_NO_VSI) + ice_vf_ctrl_invalidate_vsi(vf); + ice_vf_pre_vsi_rebuild(vf); ice_vf_rebuild_vsi(vf); ice_vf_post_vsi_rebuild(vf); @@ -1374,6 +1426,13 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) dev_err(dev, "disabling promiscuous mode failed\n"); } + ice_vf_fdir_exit(vf); + /* clean VF control VSI when resetting VF since it should be setup + * only when VF creates its first FDIR rule. + */ + if (vf->ctrl_vsi_idx != ICE_NO_VSI) + ice_vf_ctrl_vsi_release(vf); + ice_vf_pre_vsi_rebuild(vf); ice_vf_rebuild_vsi_with_release(vf); ice_vf_post_vsi_rebuild(vf); @@ -1532,7 +1591,7 @@ teardown: } /** - * ice_set_dflt_settings - set VF defaults during initialization/creation + * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation * @pf: PF holding reference to all VFs for default configuration */ static void ice_set_dflt_settings_vfs(struct ice_pf *pf) @@ -1549,6 +1608,12 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf) set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps); vf->spoofchk = true; vf->num_vf_qs = pf->num_qps_per_vf; + + /* ctrl_vsi_idx will be set to a valid value only when VF + * creates its first fdir rule. 
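/*
 * The comment above describes a sentinel-plus-lazy-init scheme:
 * ctrl_vsi_idx stays at ICE_NO_VSI until the VF's first FDIR rule forces
 * the control VSI into existence, and every teardown path re-invalidates it
 * so a stale index is never dereferenced. A minimal userspace sketch of the
 * same pattern; NO_RES and the fake allocation are illustrative only.
 */
#include <stdio.h>

#define NO_RES -1

static int res_idx = NO_RES; /* invalidated until first use */

static int lazy_get(void)
{
    if (res_idx == NO_RES)
        res_idx = 42; /* stands in for the real allocation */
    return res_idx;
}

static void release(void)
{
    if (res_idx != NO_RES) {
        /* free the real resource here */
        res_idx = NO_RES; /* re-invalidate after freeing */
    }
}

int main(void)
{
    printf("first use: %d\n", lazy_get());
    release();
    printf("after release: %d\n", res_idx); /* back to -1 */
    return 0;
}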
+ */ + ice_vf_ctrl_invalidate_vsi(vf); + ice_vf_fdir_init(vf); } } @@ -1848,7 +1913,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) * * send msg to VF */ -static int +int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { @@ -1996,6 +2061,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; } + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; @@ -2084,7 +2152,7 @@ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) * * check for the valid VSI ID */ -static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) +bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) { struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; @@ -3816,6 +3884,12 @@ error_handler: case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: err = ice_vc_dis_vlan_stripping(vf); break; + case VIRTCHNL_OP_ADD_FDIR_FILTER: + err = ice_vc_add_fdir_fltr(vf, msg); + break; + case VIRTCHNL_OP_DEL_FDIR_FILTER: + err = ice_vc_del_fdir_fltr(vf, msg); + break; case VIRTCHNL_OP_UNKNOWN: default: dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode, @@ -4108,7 +4182,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf) } /** - * ice_print_vfs_mdd_event - print VFs malicious driver detect event + * ice_print_vfs_mdd_events - print VFs malicious driver detect event * @pf: pointer to the PF structure * * Called from ice_handle_mdd_event to rate limit and print VFs MDD events. diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 0f519fba3770..46abc5388fc7 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -4,6 +4,7 @@ #ifndef _ICE_VIRTCHNL_PF_H_ #define _ICE_VIRTCHNL_PF_H_ #include "ice.h" +#include "ice_virtchnl_fdir.h" /* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */ #define ICE_MAX_VLAN_PER_VF 8 @@ -70,6 +71,8 @@ struct ice_vf { u16 vf_id; /* VF ID in the PF space */ u16 lan_vsi_idx; /* index into PF struct */ + u16 ctrl_vsi_idx; + struct ice_vf_fdir fdir; /* first vector index of this VF in the PF space */ int first_vector_idx; struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ @@ -138,6 +141,11 @@ void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_print_vfs_mdd_events(struct ice_pf *pf); void ice_print_vf_rx_mdd_event(struct ice_vf *vf); +struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf); +int +ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen); +bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id); #else /* CONFIG_PCI_IOV */ #define ice_process_vflr_event(pf) do {} while (0) #define ice_free_vfs(pf) do {} while (0) diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 83f3c9574ed1..17ab8ef024ad 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -358,18 +358,18 @@ xsk_pool_if_up: * This function allocates a number of Rx buffers from the fill ring * or the internal recycle mechanism and places them on the Rx ring. * - * Returns false if all allocations were successful, true if any fail. 
+ * Returns true if all allocations were successful, false if any fail. */ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) { union ice_32b_rx_flex_desc *rx_desc; u16 ntu = rx_ring->next_to_use; struct ice_rx_buf *rx_buf; - bool ret = false; + bool ok = true; dma_addr_t dma; if (!count) - return false; + return true; rx_desc = ICE_RX_DESC(rx_ring, ntu); rx_buf = &rx_ring->rx_buf[ntu]; @@ -377,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) do { rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool); if (!rx_buf->xdp) { - ret = true; + ok = false; break; } @@ -402,7 +402,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) ice_release_rx_desc(rx_ring, ntu); } - return ret; + return ok; } /** @@ -473,6 +473,14 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); + + if (likely(act == XDP_REDIRECT)) { + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED; + rcu_read_unlock(); + return result; + } + switch (act) { case XDP_PASS: break; @@ -480,10 +488,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index]; result = ice_xmit_xdp_buff(xdp, xdp_ring); break; - case XDP_REDIRECT: - err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED; - break; default: bpf_warn_invalid_xdp_action(act); fallthrough; diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 5d87957b2627..44111f65afc7 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ -#ifndef _E1000_HW_H_ -#define _E1000_HW_H_ +#ifndef _E1000_IGB_HW_H_ +#define _E1000_IGB_HW_H_ #include <linux/types.h> #include <linux/delay.h> @@ -551,4 +551,4 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); -#endif /* _E1000_HW_H_ */ +#endif /* _E1000_IGB_HW_H_ */ diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 33cceb77e960..29383112bc19 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -441,7 +441,7 @@ out_no_read: } /** - * e1000_init_mbx_params_pf - set initial values for pf mailbox + * igb_init_mbx_params_pf - set initial values for pf mailbox * @hw: pointer to the HW structure * * Initializes the hw->mbx struct to correct values for pf mailbox diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 8c8eb82e6272..a018000f7db9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -836,6 +836,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw) break; case e1000_ms_auto: data &= ~CR_1000T_MS_ENABLE; + break; default: break; } diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index aaa954aae574..7bda8c5edea5 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -748,8 +748,8 @@ void igb_ptp_suspend(struct igb_adapter *adapter); void igb_ptp_rx_hang(struct igb_adapter *adapter); void igb_ptp_tx_hang(struct igb_adapter *adapter); void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); -void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, - struct sk_buff *skb); +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + struct sk_buff *skb); int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 28baf203459a..7545da216d8b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2347,35 +2347,23 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) IGB_TEST_LEN*ETH_GSTRING_LEN); break; case ETH_SS_STATS: - for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { - memcpy(p, igb_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) { - memcpy(p, igb_gstrings_net_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, + igb_gstrings_stats[i].stat_string); + for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igb_gstrings_net_stats[i].stat_string); for (i = 0; i < adapter->num_tx_queues; i++) { - sprintf(p, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_restart", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); } for (i = 0; i < adapter->num_rx_queues; i++) { - sprintf(p, 
"rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_drops", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_csum_err", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_alloc_failed", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); } /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ break; @@ -3022,6 +3010,7 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) break; case ETHTOOL_SRXCLSRLDEL: ret = igb_del_ethtool_nfc_entry(adapter, cmd); + break; default: break; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index cb0d07ff2492..c9e8c65a3cfe 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2037,7 +2037,7 @@ static void igb_power_down_link(struct igb_adapter *adapter) } /** - * Detect and switch function for Media Auto Sense + * igb_check_swap_media - Detect and switch function for Media Auto Sense * @adapter: address of the board private structure **/ static void igb_check_swap_media(struct igb_adapter *adapter) @@ -3114,7 +3114,7 @@ static s32 igb_init_i2c(struct igb_adapter *adapter) return 0; /* Initialize the i2c bus which is controlled by the registers. - * This bus will use the i2c_algo_bit structue that implements + * This bus will use the i2c_algo_bit structure that implements * the protocol through toggling of the 4 bits in the register. */ adapter->i2c_adap.owner = THIS_MODULE; @@ -4019,7 +4019,7 @@ static int igb_sw_init(struct igb_adapter *adapter) } /** - * igb_open - Called when a network interface is made active + * __igb_open - Called when a network interface is made active * @netdev: network interface device structure * @resuming: indicates whether we are in a resume call * @@ -4137,7 +4137,7 @@ int igb_open(struct net_device *netdev) } /** - * igb_close - Disables a network interface + * __igb_close - Disables a network interface * @netdev: network interface device structure * @suspending: indicates we are in a suspend call * @@ -5855,7 +5855,7 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, */ if (tx_ring->launchtime_enable) { ts = ktime_to_timespec64(first->skb->tstamp); - first->skb->tstamp = ktime_set(0, 0); + skb_txtime_consumed(first->skb); context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); } else { context_desc->seqnum_seed = 0; @@ -8213,7 +8213,8 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring, new_buff->pagecnt_bias = old_buff->pagecnt_bias; } -static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer) +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, + int rx_buf_pgcnt) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; @@ -8224,7 +8225,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer) #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) + if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) return false; #else #define IGB_LAST_OFFSET \ @@ -8300,9 +8301,10 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, return NULL; if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { - 
igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb); - xdp->data += IGB_TS_HDR_LEN; - size -= IGB_TS_HDR_LEN; + if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) { + xdp->data += IGB_TS_HDR_LEN; + size -= IGB_TS_HDR_LEN; + } } /* Determine available headroom for copy */ @@ -8363,8 +8365,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, /* pull timestamp out of packet data */ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); - __skb_pull(skb, IGB_TS_HDR_LEN); + if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb)) + __skb_pull(skb, IGB_TS_HDR_LEN); } /* update buffer offset */ @@ -8613,11 +8615,17 @@ static unsigned int igb_rx_offset(struct igb_ring *rx_ring) } static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, - const unsigned int size) + const unsigned int size, int *rx_buf_pgcnt) { struct igb_rx_buffer *rx_buffer; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buf_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif prefetchw(rx_buffer->page); /* we are reusing so sync this buffer for CPU use */ @@ -8633,9 +8641,9 @@ static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, } static void igb_put_rx_buffer(struct igb_ring *rx_ring, - struct igb_rx_buffer *rx_buffer) + struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt) { - if (igb_can_reuse_rx_page(rx_buffer)) { + if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) { /* hand second half of page back to the ring */ igb_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -8663,6 +8671,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) unsigned int xdp_xmit = 0; struct xdp_buff xdp; u32 frame_sz = 0; + int rx_buf_pgcnt; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) @@ -8692,7 +8701,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) */ dma_rmb(); - rx_buffer = igb_get_rx_buffer(rx_ring, size); + rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt); /* retrieve a buffer from the ring */ if (!skb) { @@ -8735,7 +8744,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) break; } - igb_put_rx_buffer(rx_ring, rx_buffer); + igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt); cleaned_count++; /* fetch next buffer in frame if non-eop */ diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 7cc5428c3b3d..ba61fe9bfaf4 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -856,6 +856,9 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) dev_kfree_skb_any(skb); } +#define IGB_RET_PTP_DISABLED 1 +#define IGB_RET_PTP_INVALID 2 + /** * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp * @q_vector: Pointer to interrupt specific structure @@ -864,19 +867,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) * * This function is meant to retrieve a timestamp from the first buffer of an * incoming frame. The value is stored in little endian format starting on - * byte 8. 
+ * byte 8 + * + * Returns: 0 if success, nonzero if failure **/ -void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, - struct sk_buff *skb) +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + struct sk_buff *skb) { - __le64 *regval = (__le64 *)va; struct igb_adapter *adapter = q_vector->adapter; + __le64 *regval = (__le64 *)va; int adjust = 0; + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return IGB_RET_PTP_DISABLED; + /* The timestamp is recorded in little endian format. * DWORD: 0 1 2 3 * Field: Reserved Reserved SYSTIML SYSTIMH */ + + /* check reserved dwords are zero, be/le doesn't matter for zero */ + if (regval[0]) + return IGB_RET_PTP_INVALID; + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), le64_to_cpu(regval[1])); @@ -896,6 +909,8 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, } skb_hwtstamps(skb)->hwtstamp = ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); + + return 0; } /** @@ -906,13 +921,15 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, * This function is meant to retrieve a timestamp from the internal registers * of the adapter and store it in the skb. **/ -void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, - struct sk_buff *skb) +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) { struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; - u64 regval; int adjust = 0; + u64 regval; + + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return; /* If this bit is set, then the RX registers contain the time stamp. No * other packet will be time stamped until we read these registers, so @@ -1008,6 +1025,7 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; + break; case HWTSTAMP_TX_ON: break; default: diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 5d2809dfd06a..1b08a7dc7bc4 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -547,7 +547,7 @@ void igc_ptp_init(struct igc_adapter *adapter); void igc_ptp_reset(struct igc_adapter *adapter); void igc_ptp_suspend(struct igc_adapter *adapter); void igc_ptp_stop(struct igc_adapter *adapter); -void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va, +void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, struct sk_buff *skb); int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index b909f00a79e6..35ed997af075 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -441,11 +441,6 @@ #define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ #define MII_CR_POWER_DOWN 0x0800 /* Power down */ #define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ -#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ -#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ -#define MII_CR_SPEED_1000 0x0040 -#define MII_CR_SPEED_100 0x2000 -#define MII_CR_SPEED_10 0x0000 /* PHY Status Register */ #define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 824a6c454bca..8722294ab90c 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c 
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1711,6 +1711,9 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev, Autoneg); } + /* Set pause flow control settings */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + switch (hw->fc.requested_mode) { case igc_fc_full: ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); @@ -1725,9 +1728,7 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev, Asym_Pause); break; default: - ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Asym_Pause); + break; } status = pm_runtime_suspended(&adapter->pdev->dev) ? diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index 7ec04e48860c..cc83bb5c15e8 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -6,7 +6,7 @@ #include "igc_hw.h" /** - * igc_get_hw_semaphore_i225 - Acquire hardware semaphore + * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM * @hw: pointer to the HW structure * * Acquire the necessary semaphores for exclusive access to the EEPROM. diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 7ac9597ddb84..baa45a1f3a65 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -941,7 +941,7 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); ktime_t txtime = first->skb->tstamp; - first->skb->tstamp = ktime_set(0, 0); + skb_txtime_consumed(first->skb); context_desc->launch_time = igc_tx_launchtime(adapter, txtime); } else { @@ -3580,7 +3580,7 @@ void igc_up(struct igc_adapter *adapter) netif_tx_start_all_queues(adapter->netdev); /* start the watchdog. */ - hw->mac.get_link_status = 1; + hw->mac.get_link_status = true; schedule_work(&adapter->watchdog_task); } @@ -3831,10 +3831,19 @@ static void igc_reset_task(struct work_struct *work) adapter = container_of(work, struct igc_adapter, reset_task); + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGC_DOWN, &adapter->state) || + test_bit(__IGC_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + igc_rings_dump(adapter); igc_regs_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); igc_reinit_locked(adapter); + rtnl_unlock(); } /** @@ -4000,7 +4009,7 @@ static irqreturn_t igc_msix_other(int irq, void *data) } if (icr & IGC_ICR_LSC) { - hw->mac.get_link_status = 1; + hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__IGC_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); @@ -4378,7 +4387,7 @@ static irqreturn_t igc_intr_msi(int irq, void *data) } if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { - hw->mac.get_link_status = 1; + hw->mac.get_link_status = true; if (!test_bit(__IGC_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); } @@ -4420,7 +4429,7 @@ static irqreturn_t igc_intr(int irq, void *data) } if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { - hw->mac.get_link_status = 1; + hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__IGC_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); @@ -4574,7 +4583,7 @@ static int __igc_open(struct net_device *netdev, bool resuming) netif_tx_start_all_queues(netdev); /* start the watchdog. 
*/ - hw->mac.get_link_status = 1; + hw->mac.get_link_status = true; schedule_work(&adapter->watchdog_task); return IGC_SUCCESS; @@ -4915,7 +4924,7 @@ int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) { struct igc_mac_info *mac = &adapter->hw.mac; - mac->autoneg = 0; + mac->autoneg = false; /* Make sure dplx is at most 1 bit and lsb of speed is not set * for the switch() below to work @@ -4937,13 +4946,13 @@ int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) mac->forced_speed_duplex = ADVERTISE_100_FULL; break; case SPEED_1000 + DUPLEX_FULL: - mac->autoneg = 1; + mac->autoneg = true; adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ goto err_inval; case SPEED_2500 + DUPLEX_FULL: - mac->autoneg = 1; + mac->autoneg = true; adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; break; case SPEED_2500 + DUPLEX_HALF: /* not supported */ diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index ac0b9c85da7c..545f4d0e67cf 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -152,46 +152,54 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, } /** - * igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp + * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer * @q_vector: Pointer to interrupt specific structure * @va: Pointer to address containing Rx buffer * @skb: Buffer containing timestamp and packet * - * This function is meant to retrieve the first timestamp from the - * first buffer of an incoming frame. The value is stored in little - * endian format starting on byte 0. There's a second timestamp - * starting on byte 8. - **/ -void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va, + * This function retrieves the timestamp saved in the beginning of packet + * buffer. While two timestamps are available, one in timer0 reference and the + * other in timer1 reference, this function considers only the timestamp in + * timer0 reference. + */ +void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, struct sk_buff *skb) { struct igc_adapter *adapter = q_vector->adapter; - __le64 *regval = (__le64 *)va; - int adjust = 0; - - /* The timestamp is recorded in little endian format. - * DWORD: | 0 | 1 | 2 | 3 - * Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High + u64 regval; + int adjust; + + /* Timestamps are saved in little endian at the beginning of the packet + * buffer following the layout: + * + * DWORD: | 0 | 1 | 2 | 3 | + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | + * + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds + * part of the timestamp. 
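/*
 * The layout described above is why the igc hunk below reads va[2] and
 * va[3]: the Timer0 stamp is two little-endian dwords, low word (SYSTIML)
 * first. A self-contained sketch of the reconstruction, assuming glibc's
 * <endian.h> helpers; the sample values are made up.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* dwords 0-1: Timer1 (ignored); dwords 2-3: Timer0 low/high */
    uint32_t va[4] = { 0, 0, htole32(0x11223344), htole32(0x55667788) };
    uint64_t regval;

    regval = le32toh(va[2]);
    regval |= (uint64_t)le32toh(va[3]) << 32;
    printf("timer0 = 0x%016llx\n", (unsigned long long)regval);
    /* prints timer0 = 0x5566778811223344 */
    return 0;
}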
*/ - igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), - le64_to_cpu(regval[0])); - - /* adjust timestamp for the RX latency based on link speed */ - if (adapter->hw.mac.type == igc_i225) { - switch (adapter->link_speed) { - case SPEED_10: - adjust = IGC_I225_RX_LATENCY_10; - break; - case SPEED_100: - adjust = IGC_I225_RX_LATENCY_100; - break; - case SPEED_1000: - adjust = IGC_I225_RX_LATENCY_1000; - break; - case SPEED_2500: - adjust = IGC_I225_RX_LATENCY_2500; - break; - } + regval = le32_to_cpu(va[2]); + regval |= (u64)le32_to_cpu(va[3]) << 32; + igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + + /* Adjust timestamp for the RX latency based on link speed */ + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_RX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_RX_LATENCY_2500; + break; + default: + adjust = 0; + netdev_warn_once(adapter->netdev, "Imprecise timestamp\n"); + break; } skb_hwtstamps(skb)->hwtstamp = ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 8d3798a32f0e..e324e42fab2d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1351,7 +1351,7 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, } /** - * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter + * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter * @hw: pointer to hardware structure * @input: unique input dword * @common: compressed common input dword @@ -1542,6 +1542,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, switch (input_mask->formatted.vm_pool & 0x7F) { case 0x0: fdirm |= IXGBE_FDIRM_POOL; + break; case 0x7F: break; default: @@ -1557,6 +1558,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, hw_dbg(hw, " Error on src/dst port mask\n"); return IXGBE_ERR_CONFIG; } + break; case IXGBE_ATR_L4TYPE_MASK: break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 62ddb452f862..03ccbe6b66d2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -93,6 +93,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) default: break; } + break; default: break; } @@ -2707,7 +2708,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) } /** - * ixgbe_enable_rx_buff - Enables the receive data path + * ixgbe_enable_rx_buff_generic - Enables the receive data path * @hw: pointer to hardware structure * * Enables the receive data path @@ -3029,14 +3030,14 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) } /** + * ixgbe_set_vmdq_san_mac_generic - Associate VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @vmdq: VMDq pool index + * * This function should only be involved in the IOV mode. * In IOV mode, Default pool is next pool after the number of * VFs advertized and not 0. 
* MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] - * - * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address - * @hw: pointer to hardware struct - * @vmdq: VMDq pool index **/ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) { @@ -3896,7 +3897,7 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, } /** - * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data + * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data * @hw: pointer to hardware structure * * Returns the thermal sensor data structure @@ -4054,8 +4055,7 @@ void ixgbe_get_orom_version(struct ixgbe_hw *hw, } /** - * ixgbe_get_oem_prod_version Etrack ID from EEPROM - * + * ixgbe_get_oem_prod_version - Etrack ID from EEPROM * @hw: pointer to hardware structure * @nvm_ver: pointer to output structure * diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index a280aa34ca1d..4ceaca0f6ce3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1368,45 +1368,33 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { - char *p = (char *)data; unsigned int i; + u8 *p = data; switch (stringset) { case ETH_SS_TEST: - for (i = 0; i < IXGBE_TEST_LEN; i++) { - memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } + for (i = 0; i < IXGBE_TEST_LEN; i++) + ethtool_sprintf(&p, ixgbe_gstrings_test[i]); break; case ETH_SS_STATS: - for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - memcpy(p, ixgbe_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, + ixgbe_gstrings_stats[i].stat_string); for (i = 0; i < netdev->num_tx_queues; i++) { - sprintf(p, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); } for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { - sprintf(p, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); } for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { - sprintf(p, "tx_pb_%u_pxon", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_pb_%u_pxoff", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "tx_pb_%u_pxon", i); + ethtool_sprintf(&p, "tx_pb_%u_pxoff", i); } for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { - sprintf(p, "rx_pb_%u_pxon", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_pb_%u_pxoff", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "rx_pb_%u_pxon", i); + ethtool_sprintf(&p, "rx_pb_%u_pxoff", i); } /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index df389a11d3af..0218f6c9b925 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -132,6 +132,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, else *tx = (tc + 4) << 4; /* 96, 112 */ } + break; default: break; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 
730c28a1a204..7ba1c2985ef7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -225,7 +225,7 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) } /** - * ixgbe_check_from_parent - Determine whether PCIe info should come from parent + * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent * @hw: hw specific details * * This function is used by probe to determine whether a device's PCI-Express @@ -4118,6 +4118,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, #endif } + ring->rx_offset = ixgbe_rx_offset(ring); + if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) { u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); @@ -6156,7 +6158,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) } /** - * ixgbe_eee_capable - helper function to determine EEE support on X550 + * ixgbe_set_eee_capable - helper function to determine EEE support on X550 * @adapter: board private structure */ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) @@ -6578,7 +6580,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - rx_ring->rx_offset = ixgbe_rx_offset(rx_ring); /* XDP RX-queue info */ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index fc389eecdd2b..73bc170d1ae9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -461,12 +461,13 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) } /** - * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without - * the SWFW lock + * ixgbe_read_phy_reg_mdi - read PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read * @device_type: 5 bit device type * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register without the SWFW lock **/ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 22a874eee2e8..23ddfd79fc8b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -999,6 +999,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; + break; case HWTSTAMP_TX_ON: break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 4b93ba149ec5..d5cfb51ff648 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -701,7 +701,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) } /** - * ixgbe_release_nvm_semaphore - Release hardware semaphore + * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore * @hw: pointer to hardware structure * * This function clears hardware semaphore bits. 
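/*
 * Two hunks in this series restructure the AF_XDP zero-copy verdict
 * handling the same way: the ice_xsk.c hunk earlier and the ixgbe_xsk.c
 * hunk just below. XDP_REDIRECT is hoisted out of the switch and tested
 * first under likely(), since redirect dominates on zero-copy queues. The
 * shape of that change in miniature; the verdict names and return codes
 * are placeholders, not the drivers' real ones.
 */
#include <stdio.h>

#define likely(x) __builtin_expect(!!(x), 1)

enum act { ACT_PASS, ACT_TX, ACT_REDIRECT, ACT_DROP };

static int handle(enum act act)
{
    if (likely(act == ACT_REDIRECT))
        return 1; /* hot path taken without entering the switch */

    switch (act) {
    case ACT_PASS:
        return 0;
    case ACT_TX:
        return 2;
    default:
        return -1; /* unknown verdict: drop */
    }
}

int main(void)
{
    printf("%d %d %d\n", handle(ACT_REDIRECT), handle(ACT_PASS),
           handle(ACT_DROP)); /* prints 1 0 -1 */
    return 0;
}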
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 5e339afa682a..9724ffb16518 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1248,7 +1248,7 @@ static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) } /** - * ixgbe_fw_recovery_mode - Check FW NVM recovery mode + * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode * @hw: pointer t hardware structure * * Returns true if in FW NVM recovery mode. diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 3771857cf887..91ad5b902673 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -104,6 +104,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); + if (likely(act == XDP_REDIRECT)) { + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED; + rcu_read_unlock(); + return result; + } + switch (act) { case XDP_PASS: break; @@ -115,10 +122,6 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, } result = ixgbe_xmit_xdp_ring(adapter, xdpf); break; - case XDP_REDIRECT: - err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED; - break; default: bpf_warn_invalid_xdp_action(act); fallthrough; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 449d7d5b280d..ba2ed8a43d2d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2633,6 +2633,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) adapter->num_rx_queues = rss; adapter->num_tx_queues = rss; adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0; + break; default: break; } } diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index bfe6dfcec4ab..5fc347abab3c 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -121,9 +121,11 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) } /** + * ixgbevf_hv_reset_hw_vf - reset via Hyper-V + * @hw: pointer to private hardware struct + * * Hyper-V variant; the VF/PF communication is through the PCI * config space. - * @hw: pointer to private hardware struct */ static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw) { @@ -513,9 +515,11 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, } /** - * Hyper-V variant - just a stub. + * ixgbevf_hv_update_mc_addr_list_vf - stub * @hw: unused * @netdev: unused + * + * Hyper-V variant - just a stub. */ static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, struct net_device *netdev) @@ -564,9 +568,11 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) } /** - * Hyper-V variant - just a stub. + * ixgbevf_hv_update_xcast_mode - stub * @hw: unused * @xcast_mode: unused + * + * Hyper-V variant - just a stub. */ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) { @@ -608,7 +614,7 @@ mbx_err: } /** - * Hyper-V variant - just a stub. + * ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub. * @hw: unused * @vlan: unused * @vind: unused @@ -726,11 +732,13 @@ out: } /** - * Hyper-V variant; there is no mailbox communication. 
+ * ixgbevf_hv_check_mac_link_vf - check link * @hw: pointer to private hardware struct * @speed: pointer to link speed * @link_up: true if link is up, false otherwise * @autoneg_wait_to_complete: unused + * + * Hyper-V variant; there is no mailbox communication. */ static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 7fe15a3286f4..fe0989c0fc25 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -6,7 +6,7 @@ config NET_VENDOR_MARVELL bool "Marvell devices" default y - depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET || COMPILE_TEST + depends on PCI || CPU_PXA168 || PPC32 || PLAT_ORION || INET || COMPILE_TEST help If you have a network (Ethernet) card belonging to this class, say Y. @@ -19,7 +19,7 @@ if NET_VENDOR_MARVELL config MV643XX_ETH tristate "Marvell Discovery (643XX) and Orion ethernet support" - depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST + depends on PPC32 || PLAT_ORION || COMPILE_TEST depends on INET select PHYLIB select MVMDIO diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 90e6111ce534..3bfb659b5c99 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2684,7 +2684,7 @@ static const struct of_device_id mv643xx_eth_shared_ids[] = { MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); #endif -#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60) +#ifdef CONFIG_OF_IRQ #define mv643xx_eth_property(_np, _name, _v) \ do { \ u32 tmp; \ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 68deae529bc9..fac6474ad694 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -30,10 +30,35 @@ static LIST_HEAD(cgx_list); /* Convert firmware speed encoding to user format (Mbps) */ -static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX]; +static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = { + [CGX_LINK_NONE] = 0, + [CGX_LINK_10M] = 10, + [CGX_LINK_100M] = 100, + [CGX_LINK_1G] = 1000, + [CGX_LINK_2HG] = 2500, + [CGX_LINK_5G] = 5000, + [CGX_LINK_10G] = 10000, + [CGX_LINK_20G] = 20000, + [CGX_LINK_25G] = 25000, + [CGX_LINK_40G] = 40000, + [CGX_LINK_50G] = 50000, + [CGX_LINK_80G] = 80000, + [CGX_LINK_100G] = 100000, +}; /* Convert firmware lmac type encoding to string */ -static char *cgx_lmactype_string[LMAC_MODE_MAX]; +static const char *cgx_lmactype_string[LMAC_MODE_MAX] = { + [LMAC_MODE_SGMII] = "SGMII", + [LMAC_MODE_XAUI] = "XAUI", + [LMAC_MODE_RXAUI] = "RXAUI", + [LMAC_MODE_10G_R] = "10G_R", + [LMAC_MODE_40G_R] = "40G_R", + [LMAC_MODE_QSGMII] = "QSGMII", + [LMAC_MODE_25G_R] = "25G_R", + [LMAC_MODE_50G_R] = "50G_R", + [LMAC_MODE_100G_R] = "100G_R", + [LMAC_MODE_USXGMII] = "USXGMII", +}; /* CGX PHY management internal APIs */ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en); @@ -659,34 +684,6 @@ int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id) return err; } -static inline void cgx_link_usertable_init(void) -{ - cgx_speed_mbps[CGX_LINK_NONE] = 0; - cgx_speed_mbps[CGX_LINK_10M] = 10; - cgx_speed_mbps[CGX_LINK_100M] = 100; - cgx_speed_mbps[CGX_LINK_1G] = 1000; - cgx_speed_mbps[CGX_LINK_2HG] = 2500; - cgx_speed_mbps[CGX_LINK_5G] = 5000; - cgx_speed_mbps[CGX_LINK_10G] = 10000; - cgx_speed_mbps[CGX_LINK_20G] = 20000; -
cgx_speed_mbps[CGX_LINK_25G] = 25000; - cgx_speed_mbps[CGX_LINK_40G] = 40000; - cgx_speed_mbps[CGX_LINK_50G] = 50000; - cgx_speed_mbps[CGX_LINK_80G] = 80000; - cgx_speed_mbps[CGX_LINK_100G] = 100000; - - cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII"; - cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI"; - cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI"; - cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R"; - cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R"; - cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII"; - cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R"; - cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R"; - cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R"; - cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII"; -} - static int cgx_link_usertable_index_map(int speed) { switch (speed) { @@ -828,7 +825,7 @@ static inline void link_status_user_format(u64 lstat, struct cgx_link_user_info *linfo, struct cgx *cgx, u8 lmac_id) { - char *lmac_string; + const char *lmac_string; linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat); linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat); @@ -1377,7 +1374,6 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) list_add(&cgx->cgx_list, &cgx_list); - cgx_link_usertable_init(); cgx_populate_features(cgx); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index ea456099b33c..55629c66586e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -74,13 +74,13 @@ struct otx2_mbox { struct otx2_mbox_dev *dev; }; -/* Header which preceeds all mbox messages */ +/* Header which precedes all mbox messages */ struct mbox_hdr { u64 msg_size; /* Total msgs size embedded */ u16 num_msgs; /* No of msgs embedded */ }; -/* Header which preceeds every msg and is also part of it */ +/* Header which precedes every msg and is also part of it */ struct mbox_msghdr { u16 pcifunc; /* Who's sending this msg */ u16 id; /* Mbox message ID */ @@ -216,6 +216,9 @@ M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \ npc_mcam_read_entry_rsp) \ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \ msg_req, npc_mcam_read_base_rule_rsp) \ +M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \ + npc_mcam_get_stats_req, \ + npc_mcam_get_stats_rsp) \ /* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \ M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \ nix_lf_alloc_req, nix_lf_alloc_rsp) \ @@ -277,8 +280,8 @@ struct msg_req { struct mbox_msghdr hdr; }; -/* Generic rsponse msg used a ack or response for those mbox - * messages which doesn't have a specific rsp msg format. +/* Generic response msg used as an ack or response for those mbox + * messages which don't have a specific rsp msg format. */ struct msg_rsp { struct mbox_msghdr hdr; }; @@ -299,7 +302,7 @@ struct ready_msg_rsp { /* Structure for requesting resource provisioning. * 'modify' flag to be used when either requesting more - * or to detach partial of a cetain resource type. + * or to detach part of a certain resource type. * Rest of the fields specify how many of what type to * be attached.
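For example, a requester attaching one NPA and one NIX LF on top of what it already owns would fill this structure roughly as below (a sketch modeled on the nic driver's attach path; the generated mbox helper name and the rsrc_attach field names are assumed here, not quoted from this diff):

	struct rsrc_attach *attach;
	int err = -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);
	attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox); /* assumed generated helper */
	if (attach) {
		attach->modify = 1;	/* keep resources that are already attached */
		attach->npalf = 1;	/* request one NPA LF */
		attach->nixlf = 1;	/* request one NIX LF */
		/* sso/ssow/timlfs/cptlfs stay 0, so nothing else is touched */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
	}
	mutex_unlock(&pfvf->mbox.lock);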
* To request LFs from two blocks of same type this mailbox @@ -489,7 +492,7 @@ struct cgx_set_link_mode_rsp { }; #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ -#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precison time protocol */ +#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */ #define RVU_MAC_VERSION BIT_ULL(2) #define RVU_MAC_CGX BIT_ULL(3) #define RVU_MAC_RPM BIT_ULL(4) @@ -605,6 +608,7 @@ enum nix_af_status { NIX_AF_INVAL_SSO_PF_FUNC = -420, NIX_AF_ERR_TX_VTAG_NOSPC = -421, NIX_AF_ERR_RX_VTAG_INUSE = -422, + NIX_AF_ERR_NPC_KEY_NOT_SUPP = -423, }; /* For NIX RX vtag action */ @@ -1141,6 +1145,7 @@ struct npc_install_flow_req { u64 features; u16 entry; u16 channel; + u16 chan_mask; u8 intf; u8 set_cntr; /* If counter is available, set counter for this entry? */ u8 default_rule; @@ -1193,6 +1198,17 @@ struct npc_mcam_read_base_rule_rsp { struct mcam_entry entry; }; +struct npc_mcam_get_stats_req { + struct mbox_msghdr hdr; + u16 entry; /* mcam entry */ +}; + +struct npc_mcam_get_stats_rsp { + struct mbox_msghdr hdr; + u64 stat; /* counter stats */ + u8 stat_ena; /* enabled */ +}; + enum ptp_op { PTP_OP_ADJFINE = 0, PTP_OP_GET_CLOCK = 1, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 3c640f6aba92..1e012e787260 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -167,6 +167,8 @@ enum key_fields { NPC_IPPROTO_SCTP, NPC_IPPROTO_AH, NPC_IPPROTO_ESP, + NPC_IPPROTO_ICMP, + NPC_IPPROTO_ICMP6, NPC_SPORT_TCP, NPC_DPORT_TCP, NPC_SPORT_UDP, @@ -420,6 +422,11 @@ struct nix_tx_action { #define TX_VTAG1_LID_MASK GENMASK_ULL(42, 40) #define TX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32) +/* NPC MCAM reserved entry index per nixlf */ +#define NIXLF_UCAST_ENTRY 0 +#define NIXLF_BCAST_ENTRY 1 +#define NIXLF_PROMISC_ENTRY 2 + struct npc_mcam_kex { /* MKEX Profile Header */ u64 mkex_sign; /* "mcam-kex-profile" (8 bytes/ASCII characters) */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index b192692b4fc4..5c372d2c24a1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -13499,8 +13499,6 @@ static struct npc_mcam_kex npc_mkex_default = { [NPC_LT_LC_IP] = { /* SIP+DIP: 8 bytes, KW2[63:0] */ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10), - /* TOS: 1 byte, KW1[63:56] */ - KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf), }, /* Layer C: IPv6 */ [NPC_LT_LC_IP6] = { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index d9a1a71c7ccc..ab24a5e8ee8a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2462,8 +2462,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu) INTR_MASK(rvu->hw->total_pfs) & ~1ULL); for (irq = 0; irq < rvu->num_vec; irq++) { - if (rvu->irq_allocated[irq]) + if (rvu->irq_allocated[irq]) { free_irq(pci_irq_vector(rvu->pdev, irq), rvu); + rvu->irq_allocated[irq] = false; + } } pci_free_irq_vectors(rvu->pdev); @@ -2975,8 +2977,8 @@ static void rvu_remove(struct pci_dev *pdev) struct rvu *rvu = pci_get_drvdata(pdev); rvu_dbg_exit(rvu); - rvu_unregister_interrupts(rvu); rvu_unregister_dl(rvu); + rvu_unregister_interrupts(rvu); rvu_flr_wq_destroy(rvu); rvu_cgx_exit(rvu); rvu_fwdata_exit(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index fa6e46e36ae4..c2cc4806d13c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -548,6 +548,12 @@ static inline int is_afvf(u16 pcifunc) return !(pcifunc & ~RVU_PFVF_FUNC_MASK); } +/* check if PF_FUNC is AF */ +static inline bool is_pffunc_af(u16 pcifunc) +{ + return !pcifunc; +} + static inline bool is_rvu_fwdata_valid(struct rvu *rvu) { return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) && @@ -640,7 +646,8 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en); void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan, u8 *mac_addr); void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, - int nixlf, u64 chan, bool allmulti); + int nixlf, u64 chan, u8 chan_cnt, + bool allmulti); void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, @@ -665,9 +672,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena); int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel); int npc_flow_steering_init(struct rvu *rvu, int blkaddr); const char *npc_get_field_name(u8 hdr); -bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf, - u16 pcifunc, u8 intf, struct mcam_entry *entry, - int *entry_index); int npc_get_bank(struct npc_mcam *mcam, int index); void npc_mcam_enable_flows(struct rvu *rvu, u16 target); void npc_mcam_disable_flows(struct rvu *rvu, u16 target); @@ -678,6 +682,12 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, u8 *intf, u8 *ena); bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); +void *rvu_first_cgx_pdata(struct rvu *rvu); + +int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, + int type); +bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, + int index); /* CPT APIs */ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index e668e482383a..6e2bf4fcd29c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -89,6 +89,21 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) return rvu->cgx_idmap[cgx_id]; } +/* Return the first enabled CGX instance; if none are enabled, return NULL */ +void *rvu_first_cgx_pdata(struct rvu *rvu) +{ + int first_enabled_cgx = 0; + void *cgxd = NULL; + + for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) { + cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu); + if (cgxd) + break; + } + + return cgxd; +} + /* Based on P2X connectivity find mapped NIX block for a PF */ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf, int cgx_id, int lmac_id) @@ -711,10 +726,9 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu, u32 rvu_cgx_get_fifolen(struct rvu *rvu) { struct mac_ops *mac_ops; - int rvu_def_cgx_id = 0; u32 fifo_len; - mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); + mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); fifo_len = mac_ops ?
mac_ops->fifo_len : 0; return fifo_len; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index aa2ca8780b9c..9bf8eaabf9ab 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -234,12 +234,14 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { - int index, off = 0, flag = 0, go_back = 0, off_prev; + int index, off = 0, flag = 0, go_back = 0, len = 0; struct rvu *rvu = filp->private_data; int lf, pf, vf, pcifunc; struct rvu_block block; int bytes_not_copied; + int lf_str_size = 12; int buf_size = 2048; + char *lfs; char *buf; /* don't allow partial reads */ @@ -249,12 +251,20 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOSPC; - off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t"); + + lfs = kzalloc(lf_str_size, GFP_KERNEL); + if (!lfs) { + kfree(buf); + return -ENOMEM; + } + off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size, + "pcifunc"); for (index = 0; index < BLK_COUNT; index++) - if (strlen(rvu->hw->block[index].name)) - off += scnprintf(&buf[off], buf_size - 1 - off, - "%*s\t", (index - 1) * 2, - rvu->hw->block[index].name); + if (strlen(rvu->hw->block[index].name)) { + off += scnprintf(&buf[off], buf_size - 1 - off, + "%-*s", lf_str_size, + rvu->hw->block[index].name); + } off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { @@ -263,14 +273,15 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, continue; if (vf) { + sprintf(lfs, "PF%d:VF%d", pf, vf - 1); go_back = scnprintf(&buf[off], buf_size - 1 - off, - "PF%d:VF%d\t\t", pf, - vf - 1); + "%-*s", lf_str_size, lfs); } else { + sprintf(lfs, "PF%d", pf); go_back = scnprintf(&buf[off], buf_size - 1 - off, - "PF%d\t\t", pf); + "%-*s", lf_str_size, lfs); } off += go_back; @@ -278,20 +289,22 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, block = rvu->hw->block[index]; if (!strlen(block.name)) continue; - off_prev = off; + len = 0; + lfs[len] = '\0'; for (lf = 0; lf < block.lf.max; lf++) { if (block.fn_map[lf] != pcifunc) continue; flag = 1; - off += scnprintf(&buf[off], buf_size - 1 - - off, "%3d,", lf); + len += sprintf(&lfs[len], "%d,", lf); } - if (flag && off_prev != off) - off--; - else - go_back++; + + if (flag) + len--; + lfs[len] = '\0'; off += scnprintf(&buf[off], buf_size - 1 - off, - "\t"); + "%-*s", lf_str_size, lfs); + if (!strlen(lfs)) + go_back += lf_str_size; } if (!flag) off -= go_back; @@ -303,6 +316,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, } bytes_not_copied = copy_to_user(buffer, buf, off); + kfree(lfs); kfree(buf); if (bytes_not_copied) @@ -319,7 +333,6 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) struct rvu *rvu = filp->private; struct pci_dev *pdev = NULL; struct mac_ops *mac_ops; - int rvu_def_cgx_id = 0; char cgx[10], lmac[10]; struct rvu_pfvf *pfvf; int pf, domain, blkid; @@ -327,7 +340,10 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) u16 pcifunc; domain = 2; - mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); + mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); + /* There can be no CGX devices at all */ + if (!mac_ops) + return 0; seq_printf(filp, "PCI dev\t\tRVU PF 
Func\tNIX block\t%s\tLMAC\n", mac_ops->name); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { @@ -1818,7 +1834,6 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) { struct mac_ops *mac_ops; unsigned long lmac_bmap; - int rvu_def_cgx_id = 0; int i, lmac_id; char dname[20]; void *cgx; @@ -1826,7 +1841,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) if (!cgx_get_cgxcnt_max()) return; - mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); + mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); if (!mac_ops) return; @@ -2002,7 +2017,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype)); break; case NPC_OUTER_VID: - seq_printf(s, "%d ", ntohs(rule->packet.vlan_tci)); + seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci)); seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.vlan_tci)); break; @@ -2145,7 +2160,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) seq_printf(s, "\tmcam entry: %d\n", iter->entry); rvu_dbg_npc_mcam_show_flows(s, iter); - if (iter->intf == NIX_INTF_RX) { + if (is_npc_intf_rx(iter->intf)) { target = iter->rx_action.pf_func; pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; seq_printf(s, "\tForward to: PF%d ", pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index d3000194e2d3..0a8bd667cb11 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -273,7 +273,8 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, - pfvf->rx_chan_base, false); + pfvf->rx_chan_base, + pfvf->rx_chan_cnt, false); break; } @@ -2629,7 +2630,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) struct nix_rx_flowkey_alg *field; struct nix_rx_flowkey_alg tmp; u32 key_type, valid_key; - int l4_key_offset; + int l4_key_offset = 0; if (!alg) return -EINVAL; @@ -3088,7 +3089,8 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); else rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, - pfvf->rx_chan_base, allmulti); + pfvf->rx_chan_base, + pfvf->rx_chan_cnt, allmulti); return 0; } @@ -3635,9 +3637,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, if (err) return err; - rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); - - npc_mcam_disable_flows(rvu, pcifunc); + rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); return rvu_cgx_start_stop_io(rvu, pcifunc, false); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 04bb0803a5c5..0bc4529691ec 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -22,10 +22,6 @@ #define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */ #define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */ -#define NIXLF_UCAST_ENTRY 0 -#define NIXLF_BCAST_ENTRY 1 -#define NIXLF_PROMISC_ENTRY 2 - #define NPC_PARSE_RESULT_DMAC_OFFSET 8 #define NPC_HW_TSTAMP_OFFSET 8 #define NPC_KEX_CHAN_MASK 0xFFFULL @@ -96,6 +92,10 @@ int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel) if (is_npc_intf_tx(intf)) return 0; + /* return in case of AF installed rules */ + if (is_pffunc_af(pcifunc)) + return 0; + if (is_afvf(pcifunc)) { end = 
rvu_get_num_lbk_chans(); if (end < 0) @@ -196,8 +196,8 @@ static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc, return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF; } -static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, - u16 pcifunc, int nixlf, int type) +int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, + u16 pcifunc, int nixlf, int type) { int pf = rvu_get_pf(pcifunc); int index; @@ -230,8 +230,8 @@ int npc_get_bank(struct npc_mcam *mcam, int index) return bank; } -static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, - int blkaddr, int index) +bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index) { int bank = npc_get_bank(mcam, index); u64 cfg; @@ -647,13 +647,17 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, } void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, - int nixlf, u64 chan, bool allmulti) + int nixlf, u64 chan, u8 chan_cnt, + bool allmulti) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; - int blkaddr, ucast_idx, index, kwi; - struct mcam_entry entry = { {0} }; - struct nix_rx_action action = { }; + int blkaddr, ucast_idx, index; + u8 mac_addr[ETH_ALEN] = { 0 }; + struct nix_rx_action action; + u64 relaxed_mask; /* Only PF or AF VF can add a promiscuous entry */ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc)) @@ -663,24 +667,15 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, if (blkaddr < 0) return; + *(u64 *)&action = 0x00; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_PROMISC_ENTRY); - entry.kw[0] = chan; - entry.kw_mask[0] = 0xFFFULL; - - if (allmulti) { - kwi = NPC_KEXOF_DMAC / sizeof(u64); - entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */ - entry.kw_mask[kwi] = BIT_ULL(40); - } - - ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, - nixlf, NIXLF_UCAST_ENTRY); - /* If the corresponding PF's ucast action is RSS, * use the same action for promisc also */ + ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx)) *(u64 *)&action = npc_get_mcam_action(rvu, mcam, blkaddr, ucast_idx); @@ -691,9 +686,36 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, action.pf_func = pcifunc; } - entry.action = *(u64 *)&action; - npc_config_mcam_entry(rvu, mcam, blkaddr, index, - pfvf->nix_rx_intf, &entry, true); + if (allmulti) { + mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */ + ether_addr_copy(req.packet.dmac, mac_addr); + ether_addr_copy(req.mask.dmac, mac_addr); + req.features = BIT_ULL(NPC_DMAC); + } + + req.chan_mask = 0xFFFU; + if (chan_cnt > 1) { + if (!is_power_of_2(chan_cnt)) { + dev_err(rvu->dev, + "%s: channel count more than 1, must be power of 2\n", __func__); + return; + } + relaxed_mask = GENMASK_ULL(BITS_PER_LONG_LONG - 1, + ilog2(chan_cnt)); + req.chan_mask &= relaxed_mask; + } + + req.channel = chan; + req.intf = pfvf->nix_rx_intf; + req.entry = index; + req.op = action.op; + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = pcifunc; + req.index = action.index; + req.match_id = action.match_id; + req.flow_key_alg = action.flow_key_alg; + + rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc, @@ -728,12 +750,14 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 
pcifunc, int nixlf) void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan) { + struct rvu_pfvf *pfvf; + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; - struct mcam_entry entry = { {0} }; struct rvu_hwinfo *hw = rvu->hw; - struct nix_rx_action action; - struct rvu_pfvf *pfvf; int blkaddr, index; + u32 req_index = 0; + u8 op; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) @@ -755,32 +779,29 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_BCAST_ENTRY); - /* Match ingress channel */ - entry.kw[0] = chan; - entry.kw_mask[0] = 0xfffull; - - /* Match broadcast MAC address. - * DMAC is extracted at 0th bit of PARSE_KEX::KW1 - */ - entry.kw[1] = 0xffffffffffffull; - entry.kw_mask[1] = 0xffffffffffffull; - - *(u64 *)&action = 0x00; if (!hw->cap.nix_rx_multicast) { /* Early silicon doesn't support pkt replication, * so install entry with UCAST action, so that PF * receives all broadcast packets. */ - action.op = NIX_RX_ACTIONOP_UCAST; - action.pf_func = pcifunc; + op = NIX_RX_ACTIONOP_UCAST; } else { - action.index = pfvf->bcast_mce_idx; - action.op = NIX_RX_ACTIONOP_MCAST; + op = NIX_RX_ACTIONOP_MCAST; + req_index = pfvf->bcast_mce_idx; } - entry.action = *(u64 *)&action; - npc_config_mcam_entry(rvu, mcam, blkaddr, index, - pfvf->nix_rx_intf, &entry, true); + eth_broadcast_addr((u8 *)&req.packet.dmac); + eth_broadcast_addr((u8 *)&req.mask.dmac); + req.features = BIT_ULL(NPC_DMAC); + req.channel = chan; + req.intf = pfvf->nix_rx_intf; + req.entry = index; + req.op = op; + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = pcifunc; + req.index = req_index; + + rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable) @@ -967,7 +988,7 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; - struct rvu_npc_mcam_rule *rule; + struct rvu_npc_mcam_rule *rule, *tmp; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -977,15 +998,18 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) mutex_lock(&mcam->lock); /* Disable MCAM entries directing traffic to this 'pcifunc' */ - list_for_each_entry(rule, &mcam->mcam_rules, list) { + list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) { if (is_npc_intf_rx(rule->intf) && rule->rx_action.pf_func == pcifunc) { npc_enable_mcam_entry(rvu, mcam, blkaddr, rule->entry, false); rule->enable = false; /* Indicate that default rule is disabled */ - if (rule->default_rule) + if (rule->default_rule) { pfvf->def_ucast_rule = NULL; + list_del(&rule->list); + kfree(rule); + } } } @@ -1674,6 +1698,9 @@ void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, static int npc_mcam_verify_entry(struct npc_mcam *mcam, u16 pcifunc, int entry) { + /* verify AF installed entries */ + if (is_pffunc_af(pcifunc)) + return 0; /* Verify if entry is valid and if it is indeed * allocated to the requesting PFFUNC. 
*/ @@ -2268,6 +2295,10 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, goto exit; } + /* For AF installed rules, the nix_intf should be set to target NIX */ + if (is_pffunc_af(req->hdr.pcifunc)) + nix_intf = req->intf; + npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf, &req->entry_data, req->enable_entry); @@ -2490,10 +2521,10 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); if (index >= mcam->bmap_entries) break; + entry = index + 1; if (mcam->entry2cntr_map[index] != req->cntr) continue; - entry = index + 1; npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, index, req->cntr); } @@ -2730,30 +2761,6 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, return 0; } -bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf, - u16 pcifunc, u8 intf, struct mcam_entry *entry, - int *index) -{ - struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); - struct npc_mcam *mcam = &rvu->hw->mcam; - bool enable; - u8 nix_intf; - - if (is_npc_intf_tx(intf)) - nix_intf = pfvf->nix_tx_intf; - else - nix_intf = pfvf->nix_rx_intf; - - *index = npc_get_nixlf_mcam_index(mcam, pcifunc, - nixlf, NIXLF_UCAST_ENTRY); - /* dont force enable unicast entry */ - enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, *index); - npc_config_mcam_entry(rvu, mcam, blkaddr, *index, nix_intf, - entry, enable); - - return enable; -} - int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu, struct msg_req *req, struct npc_mcam_read_base_rule_rsp *rsp) @@ -2799,3 +2806,42 @@ read_entry: out: return rc; } + +int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, + struct npc_mcam_get_stats_req *req, + struct npc_mcam_get_stats_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 index, cntr; + int blkaddr; + u64 regval; + u32 bank; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + + index = req->entry & (mcam->banksize - 1); + bank = npc_get_bank(mcam, req->entry); + + /* read MCAM entry STAT_ACT register */ + regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank)); + + if (!(regval & BIT_ULL(9))) { + rsp->stat_ena = 0; + mutex_unlock(&mcam->lock); + return 0; + } + + cntr = regval & 0x1FF; + + rsp->stat_ena = 1; + rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr)); + rsp->stat &= BIT_ULL(48) - 1; + + mutex_unlock(&mcam->lock); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 4ba9d54ce4e3..7f35b62eea13 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -29,6 +29,8 @@ static const char * const npc_flow_names[] = { [NPC_IPPROTO_TCP] = "ip proto tcp", [NPC_IPPROTO_UDP] = "ip proto udp", [NPC_IPPROTO_SCTP] = "ip proto sctp", + [NPC_IPPROTO_ICMP] = "ip proto icmp", + [NPC_IPPROTO_ICMP6] = "ip proto icmp6", [NPC_IPPROTO_AH] = "ip proto AH", [NPC_IPPROTO_ESP] = "ip proto ESP", [NPC_SPORT_TCP] = "tcp source port", @@ -427,6 +429,7 @@ do { \ * packet header fields below. 
* Example: Source IP is 4 bytes and starts at 12th byte of IP header */ + NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1); NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4); NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4); NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16); @@ -477,9 +480,12 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) BIT_ULL(NPC_IPPROTO_SCTP); } - /* for AH, check if corresponding layer type is present in the key */ - if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) + /* for AH/ICMP/ICMPv6, check if corresponding layer type is present in the key */ + if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) { *features |= BIT_ULL(NPC_IPPROTO_AH); + *features |= BIT_ULL(NPC_IPPROTO_ICMP); + *features |= BIT_ULL(NPC_IPPROTO_ICMP6); + } /* for ESP, check if corresponding layer type is present in the key */ if (npc_check_field(rvu, blkaddr, NPC_LE, intf)) @@ -597,7 +603,7 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf) dev_info(rvu->dev, "Unsupported flow(s):\n"); for_each_set_bit(bit, (unsigned long *)&unsupported, 64) dev_info(rvu->dev, "%s ", npc_get_field_name(bit)); - return -EOPNOTSUPP; + return NIX_AF_ERR_NPC_KEY_NOT_SUPP; } return 0; @@ -769,6 +775,12 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, if (features & BIT_ULL(NPC_IPPROTO_SCTP)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP, 0, ~0ULL, 0, intf); + if (features & BIT_ULL(NPC_IPPROTO_ICMP)) + npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP, + 0, ~0ULL, 0, intf); + if (features & BIT_ULL(NPC_IPPROTO_ICMP6)) + npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6, + 0, ~0ULL, 0, intf); if (features & BIT_ULL(NPC_OUTER_VID)) npc_update_entry(rvu, NPC_LB, entry, @@ -798,6 +810,7 @@ do { \ NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0); NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0, ntohs(mask->etype), 0); + NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0); NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0, ntohl(mask->ip4src), 0); NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0, @@ -903,9 +916,11 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, struct npc_install_flow_req *req, u16 target) { struct nix_rx_action action; + u64 chan_mask; - npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, - ~0ULL, 0, NIX_INTF_RX); + chan_mask = req->chan_mask ?
req->chan_mask : ~0ULL; + npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, chan_mask, 0, + NIX_INTF_RX); *(u64 *)&action = 0x00; action.pf_func = target; @@ -998,33 +1013,21 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, if (is_npc_intf_tx(req->intf)) goto find_rule; - if (def_ucast_rule) + if (req->default_rule) { + entry_index = npc_get_nixlf_mcam_index(mcam, target, nixlf, + NIXLF_UCAST_ENTRY); + enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, entry_index); + } + + /* update mcam entry with default unicast rule attributes */ + if (def_ucast_rule && (msg_from_vf || (req->default_rule && req->append))) { missing_features = (def_ucast_rule->features ^ features) & def_ucast_rule->features; - - if (req->default_rule && req->append) { - /* add to default rule */ if (missing_features) npc_update_flow(rvu, entry, missing_features, &def_ucast_rule->packet, &def_ucast_rule->mask, &dummy, req->intf); - enable = rvu_npc_write_default_rule(rvu, blkaddr, - nixlf, target, - pfvf->nix_rx_intf, entry, - &entry_index); - installed_features = req->features | missing_features; - } else if (req->default_rule && !req->append) { - /* overwrite default rule */ - enable = rvu_npc_write_default_rule(rvu, blkaddr, - nixlf, target, - pfvf->nix_rx_intf, entry, - &entry_index); - } else if (msg_from_vf) { - /* normal rule - include default rule also to it for VF */ - npc_update_flow(rvu, entry, missing_features, - &def_ucast_rule->packet, &def_ucast_rule->mask, - &dummy, req->intf); installed_features = req->features | missing_features; } @@ -1036,12 +1039,9 @@ find_rule: return -ENOMEM; new = true; } - /* no counter for default rule */ - if (req->default_rule) - goto update_rule; /* allocate new counter if rule has no counter */ - if (req->set_cntr && !rule->has_cntr) + if (!req->default_rule && req->set_cntr && !rule->has_cntr) rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp); /* if user wants to delete an existing counter for a rule then @@ -1051,7 +1051,14 @@ find_rule: rvu_mcam_remove_counter_from_rule(rvu, owner, rule); write_req.hdr.pcifunc = owner; - write_req.entry = req->entry; + + /* AF owns the default rules so change the owner just to relax + * the checks in rvu_mbox_handler_npc_mcam_write_entry + */ + if (req->default_rule) + write_req.hdr.pcifunc = 0; + + write_req.entry = entry_index; write_req.intf = req->intf; write_req.enable_entry = (u8)enable; /* if counter is available then clear and use it */ @@ -1069,7 +1076,7 @@ find_rule: kfree(rule); return err; } -update_rule: + /* update rule */ memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet)); memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask)); rule->entry = entry_index; @@ -1145,8 +1152,13 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, else target = req->hdr.pcifunc; - if (npc_check_unsupported_flows(rvu, req->features, req->intf)) - return -EOPNOTSUPP; + /* ignore chan_mask in case pf func is not AF, revisit later */ + if (!is_pffunc_af(req->hdr.pcifunc)) + req->chan_mask = 0xFFF; + + err = npc_check_unsupported_flows(rvu, req->features, req->intf); + if (err) + return err; if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) return -EINVAL; @@ -1278,6 +1290,7 @@ static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr, write_req.hdr.pcifunc = rule->owner; write_req.entry = rule->entry; + write_req.intf = pfvf->nix_rx_intf; mutex_unlock(&mcam->lock); err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp); diff --git 
a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 745aa8a19499..457c94793e63 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_ptp.o otx2_flows.o cn10k.o + otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o rvu_nicvf-y := otx2_vf.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index a518c2283f18..45730d0d92f2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -18,6 +18,7 @@ #include <linux/ptp_clock_kernel.h> #include <linux/timecounter.h> #include <linux/soc/marvell/octeontx2/asm.h> +#include <net/pkt_cls.h> #include <mbox.h> #include <npc.h> @@ -264,6 +265,7 @@ struct otx2_flow_config { #define OTX2_MAX_NTUPLE_FLOWS 32 #define OTX2_MAX_UNICAST_FLOWS 8 #define OTX2_MAX_VLAN_FLOWS 1 +#define OTX2_MAX_TC_FLOWS OTX2_MAX_NTUPLE_FLOWS #define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \ OTX2_MAX_UNICAST_FLOWS + \ OTX2_MAX_VLAN_FLOWS) @@ -274,10 +276,20 @@ struct otx2_flow_config { #define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */ #define OTX2_VF_VLAN_RX_INDEX 0 #define OTX2_VF_VLAN_TX_INDEX 1 + u32 tc_flower_offset; u32 ntuple_max_flows; + u32 tc_max_flows; struct list_head flow_list; }; +struct otx2_tc_info { + /* hash table to store TC offloaded flows */ + struct rhashtable flow_table; + struct rhashtable_params flow_ht_params; + DECLARE_BITMAP(tc_entries_bitmap, OTX2_MAX_TC_FLOWS); + unsigned long num_entries; +}; + struct dev_hw_ops { int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura); void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq, @@ -305,6 +317,8 @@ struct otx2_nic { #define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8) #define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9) #define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10) +#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11) +#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12) u64 flags; struct otx2_qset qset; @@ -347,6 +361,7 @@ struct otx2_nic { struct hwtstamp_config tstamp; struct otx2_flow_config *flow_cfg; + struct otx2_tc_info tc_info; }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) @@ -802,4 +817,9 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac); int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); u16 otx2_get_max_mtu(struct otx2_nic *pfvf); +/* tc support */ +int otx2_init_tc(struct otx2_nic *nic); +void otx2_shutdown_tc(struct otx2_nic *nic); +int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data); #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 0dbbf38e0597..0b4fa92ba821 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -57,10 +57,13 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) flow_cfg->ntuple_max_flows = rsp->count; flow_cfg->ntuple_offset = 0; pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT; + flow_cfg->tc_max_flows = flow_cfg->ntuple_max_flows; + pfvf->flags |= 
OTX2_FLAG_TC_FLOWER_SUPPORT; } else { flow_cfg->vf_vlan_offset = 0; flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset + vf_vlan_max_flows; + flow_cfg->tc_flower_offset = flow_cfg->ntuple_offset; flow_cfg->unicast_offset = flow_cfg->ntuple_offset + OTX2_MAX_NTUPLE_FLOWS; flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset + @@ -69,6 +72,7 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT; pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT; pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT; + pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; } for (i = 0; i < rsp->count; i++) @@ -93,6 +97,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) INIT_LIST_HEAD(&pf->flow_cfg->flow_list); pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS; + pf->flow_cfg->tc_max_flows = pf->flow_cfg->ntuple_max_flows; err = otx2_alloc_mcam_entries(pf); if (err) @@ -257,17 +262,19 @@ int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, u32 *rule_locs) { + u32 rule_cnt = nfc->rule_cnt; u32 location = 0; int idx = 0; int err = 0; nfc->data = pfvf->flow_cfg->ntuple_max_flows; - while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) { + while ((!err || err == -ENOENT) && idx < rule_cnt) { err = otx2_get_flow(pfvf, nfc, location); if (!err) rule_locs[idx++] = location; location++; } + nfc->rule_cnt = rule_cnt; return err; } @@ -301,6 +308,35 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp, sizeof(pmask->ip4dst)); req->features |= BIT_ULL(NPC_DIP_IPV4); } + if (ipv4_usr_mask->tos) { + pkt->tos = ipv4_usr_hdr->tos; + pmask->tos = ipv4_usr_mask->tos; + req->features |= BIT_ULL(NPC_TOS); + } + if (ipv4_usr_mask->proto) { + switch (ipv4_usr_hdr->proto) { + case IPPROTO_ICMP: + req->features |= BIT_ULL(NPC_IPPROTO_ICMP); + break; + case IPPROTO_TCP: + req->features |= BIT_ULL(NPC_IPPROTO_TCP); + break; + case IPPROTO_UDP: + req->features |= BIT_ULL(NPC_IPPROTO_UDP); + break; + case IPPROTO_SCTP: + req->features |= BIT_ULL(NPC_IPPROTO_SCTP); + break; + case IPPROTO_AH: + req->features |= BIT_ULL(NPC_IPPROTO_AH); + break; + case IPPROTO_ESP: + req->features |= BIT_ULL(NPC_IPPROTO_ESP); + break; + default: + return -EOPNOTSUPP; + } + } pkt->etype = cpu_to_be16(ETH_P_IP); pmask->etype = cpu_to_be16(0xFFFF); req->features |= BIT_ULL(NPC_ETYPE); @@ -325,6 +361,11 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp, sizeof(pmask->ip4dst)); req->features |= BIT_ULL(NPC_DIP_IPV4); } + if (ipv4_l4_mask->tos) { + pkt->tos = ipv4_l4_hdr->tos; + pmask->tos = ipv4_l4_mask->tos; + req->features |= BIT_ULL(NPC_TOS); + } if (ipv4_l4_mask->psrc) { memcpy(&pkt->sport, &ipv4_l4_hdr->psrc, sizeof(pkt->sport)); @@ -375,10 +416,14 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp, sizeof(pmask->ip4dst)); req->features |= BIT_ULL(NPC_DIP_IPV4); } + if (ah_esp_mask->tos) { + pkt->tos = ah_esp_hdr->tos; + pmask->tos = ah_esp_mask->tos; + req->features |= BIT_ULL(NPC_TOS); + } /* NPC profile doesn't extract AH/ESP header fields */ - if ((ah_esp_mask->spi & ah_esp_hdr->spi) || - (ah_esp_mask->tos & ah_esp_mask->tos)) + if (ah_esp_mask->spi & ah_esp_hdr->spi) return -EOPNOTSUPP; if (flow_type == AH_V4_FLOW) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 53ab1814d74b..03004fdac0c6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ 
-1672,6 +1672,7 @@ int otx2_stop(struct net_device *netdev) struct otx2_nic *pf = netdev_priv(netdev); struct otx2_cq_poll *cq_poll = NULL; struct otx2_qset *qset = &pf->qset; + struct otx2_rss_info *rss; int qidx, vec, wrk; netif_carrier_off(netdev); @@ -1684,6 +1685,10 @@ int otx2_stop(struct net_device *netdev) /* First stop packet Rx/Tx */ otx2_rxtx_enable(pf, false); + /* Clear RSS enable flag */ + rss = &pf->hw.rss_info; + rss->enable = false; + /* Cleanup Queue IRQ */ vec = pci_irq_vector(pf->pdev, pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); @@ -1760,6 +1765,24 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } +static netdev_features_t otx2_fix_features(struct net_device *dev, + netdev_features_t features) +{ + /* check if n-tuple filters are ON */ + if ((features & NETIF_F_HW_TC) && (dev->features & NETIF_F_NTUPLE)) { + netdev_info(dev, "Disabling n-tuple filters\n"); + features &= ~NETIF_F_NTUPLE; + } + + /* check if tc hw offload is ON */ + if ((features & NETIF_F_NTUPLE) && (dev->features & NETIF_F_HW_TC)) { + netdev_info(dev, "Disabling TC hardware offload\n"); + features &= ~NETIF_F_HW_TC; + } + + return features; +} + static void otx2_set_rx_mode(struct net_device *netdev) { struct otx2_nic *pf = netdev_priv(netdev); @@ -1822,6 +1845,12 @@ static int otx2_set_features(struct net_device *netdev, if ((changed & NETIF_F_NTUPLE) && !ntuple) otx2_destroy_ntuple_flows(pf); + if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && + pf->tc_info.num_entries) { + netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n"); + return -EBUSY; + } + return 0; } @@ -2220,6 +2249,7 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_open = otx2_open, .ndo_stop = otx2_stop, .ndo_start_xmit = otx2_xmit, + .ndo_fix_features = otx2_fix_features, .ndo_set_mac_address = otx2_set_mac_address, .ndo_change_mtu = otx2_change_mtu, .ndo_set_rx_mode = otx2_set_rx_mode, @@ -2230,6 +2260,7 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_set_vf_mac = otx2_set_vf_mac, .ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_get_vf_config = otx2_get_vf_config, + .ndo_setup_tc = otx2_setup_tc, }; static int otx2_wq_init(struct otx2_nic *pf) @@ -2449,6 +2480,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) NETIF_F_HW_VLAN_STAG_RX; netdev->features |= netdev->hw_features; + /* HW supports tc offload but mutually exclusive with n-tuple filters */ + if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT) + netdev->hw_features |= NETIF_F_HW_TC; + netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; netdev->watchdog_timeo = OTX2_TX_TIMEOUT; @@ -2470,6 +2505,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) otx2_set_ethtool_ops(netdev); + err = otx2_init_tc(pf); + if (err) + goto err_mcam_flow_del; + /* Enable link notifications */ otx2_cgx_config_linkevents(pf, true); @@ -2479,6 +2518,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; +err_mcam_flow_del: + otx2_mcam_flow_del(pf); err_unreg_netdev: unregister_netdev(netdev); err_del_mcam_entries: @@ -2646,6 +2687,7 @@ static void otx2_remove(struct pci_dev *pdev) otx2_ptp_destroy(pf); otx2_mcam_flow_del(pf); + otx2_shutdown_tc(pf); otx2_detach_resources(&pf->mbox); if (pf->hw.lmt_base) iounmap(pf->hw.lmt_base); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h index 21b811c6ee0f..f4fd72ee9a25 100644 --- 
a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h @@ -152,6 +152,7 @@ #define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16) #define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16) #define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16) +#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16) #define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16) #define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16) #define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c new file mode 100644 index 000000000000..51157b283f6f --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -0,0 +1,787 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Physical Function ethernet driver + * + * Copyright (C) 2021 Marvell. + */ +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/inetdevice.h> +#include <linux/rhashtable.h> +#include <linux/bitfield.h> +#include <net/flow_dissector.h> +#include <net/pkt_cls.h> +#include <net/tc_act/tc_gact.h> +#include <net/tc_act/tc_mirred.h> +#include <net/tc_act/tc_vlan.h> +#include <net/ipv6.h> + +#include "otx2_common.h" + +/* Egress rate limiting definitions */ +#define MAX_BURST_EXPONENT 0x0FULL +#define MAX_BURST_MANTISSA 0xFFULL +#define MAX_BURST_SIZE 130816ULL +#define MAX_RATE_DIVIDER_EXPONENT 12ULL +#define MAX_RATE_EXPONENT 0x0FULL +#define MAX_RATE_MANTISSA 0xFFULL + +/* Bitfields in NIX_TLX_PIR register */ +#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1) +#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9) +#define TLX_RATE_DIVIDER_EXPONENT GENMASK_ULL(16, 13) +#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29) +#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37) + +struct otx2_tc_flow_stats { + u64 bytes; + u64 pkts; + u64 used; +}; + +struct otx2_tc_flow { + struct rhash_head node; + unsigned long cookie; + u16 entry; + unsigned int bitpos; + struct rcu_head rcu; + struct otx2_tc_flow_stats stats; + spinlock_t lock; /* lock for stats */ +}; + +static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp, + u32 *burst_mantissa) +{ + unsigned int tmp; + + /* Burst is calculated as + * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256 + * Max supported burst size is 130,816 bytes. + */ + burst = min_t(u32, burst, MAX_BURST_SIZE); + if (burst) { + *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0; + tmp = burst - rounddown_pow_of_two(burst); + if (burst < MAX_BURST_MANTISSA) + *burst_mantissa = tmp * 2; + else + *burst_mantissa = tmp / (1ULL << (*burst_exp - 7)); + } else { + *burst_exp = MAX_BURST_EXPONENT; + *burst_mantissa = MAX_BURST_MANTISSA; + } +} + +static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp, + u32 *mantissa, u32 *div_exp) +{ + unsigned int tmp; + + /* Rate calculation by hardware + * + * PIR_ADD = ((256 + mantissa) << exp) / 256 + * rate = (2 * PIR_ADD) / ( 1 << div_exp) + * The resultant rate is in Mbps. + */ + + /* 2Mbps to 100Gbps can be expressed with div_exp = 0. + * Setting this to '0' will ease the calculation of + * exponent and mantissa. + */ + *div_exp = 0; + + if (maxrate) { + *exp = ilog2(maxrate) ?
ilog2(maxrate) - 1 : 0; + tmp = maxrate - rounddown_pow_of_two(maxrate); + if (maxrate < MAX_RATE_MANTISSA) + *mantissa = tmp * 2; + else + *mantissa = tmp / (1ULL << (*exp - 7)); + } else { + /* Instead of disabling rate limiting, set all values to max */ + *exp = MAX_RATE_EXPONENT; + *mantissa = MAX_RATE_MANTISSA; + } +} + +static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate) +{ + struct otx2_hw *hw = &nic->hw; + struct nix_txschq_config *req; + u32 burst_exp, burst_mantissa; + u32 exp, mantissa, div_exp; + int txschq, err; + + /* All SQs share the same TL4, so pick the first scheduler */ + txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; + + /* Get exponent and mantissa values from the desired rate */ + otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa); + otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); + + mutex_lock(&nic->mbox.lock); + req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox); + if (!req) { + mutex_unlock(&nic->mbox.lock); + return -ENOMEM; + } + + req->lvl = NIX_TXSCH_LVL_TL4; + req->num_regs = 1; + req->reg[0] = NIX_AF_TL4X_PIR(txschq); + req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) | + FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) | + FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | + FIELD_PREP(TLX_RATE_EXPONENT, exp) | + FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); + + err = otx2_sync_mbox_msg(&nic->mbox); + mutex_unlock(&nic->mbox.lock); + return err; +} + +static int otx2_tc_validate_flow(struct otx2_nic *nic, + struct flow_action *actions, + struct netlink_ext_ack *extack) +{ + if (nic->flags & OTX2_FLAG_INTF_DOWN) { + NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); + return -EINVAL; + } + + if (!flow_action_has_entries(actions)) { + NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action"); + return -EINVAL; + } + + if (!flow_offload_has_one_action(actions)) { + NL_SET_ERR_MSG_MOD(extack, + "Egress MATCHALL offload supports only 1 policing action"); + return -EINVAL; + } + return 0; +} + +static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct flow_action *actions = &cls->rule->action; + struct flow_action_entry *entry; + u32 rate; + int err; + + err = otx2_tc_validate_flow(nic, actions, extack); + if (err) + return err; + + if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) { + NL_SET_ERR_MSG_MOD(extack, + "Only one Egress MATCHALL ratelimiter can be offloaded"); + return -ENOMEM; + } + + entry = &cls->rule->action.entries[0]; + switch (entry->id) { + case FLOW_ACTION_POLICE: + if (entry->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second"); + return -EOPNOTSUPP; + } + /* Convert bytes per second to Mbps */ + rate = entry->police.rate_bytes_ps * 8; + rate = max_t(u32, rate / 1000000, 1); + err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate); + if (err) + return err; + nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Only police action is supported with Egress MATCHALL offload"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + int err; + + if (nic->flags & OTX2_FLAG_INTF_DOWN) { + NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); + return -EINVAL; + } + + err =
otx2_set_matchall_egress_rate(nic, 0, 0); + nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; + return err; +} + +static int otx2_tc_parse_actions(struct otx2_nic *nic, + struct flow_action *flow_action, + struct npc_install_flow_req *req) +{ + struct flow_action_entry *act; + struct net_device *target; + struct otx2_nic *priv; + int i; + + if (!flow_action_has_entries(flow_action)) { + netdev_info(nic->netdev, "no tc actions specified"); + return -EINVAL; + } + + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: + req->op = NIX_RX_ACTIONOP_DROP; + return 0; + case FLOW_ACTION_ACCEPT: + req->op = NIX_RX_ACTION_DEFAULT; + return 0; + case FLOW_ACTION_REDIRECT_INGRESS: + target = act->dev; + priv = netdev_priv(target); + /* npc_install_flow_req doesn't support passing a target pcifunc */ + if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { + netdev_info(nic->netdev, + "can't redirect to other pf/vf\n"); + return -EOPNOTSUPP; + } + req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; + req->op = NIX_RX_ACTION_DEFAULT; + return 0; + case FLOW_ACTION_VLAN_POP: + req->vtag0_valid = true; + /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */ + req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; + break; + default: + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int otx2_tc_prepare_flow(struct otx2_nic *nic, + struct flow_cls_offload *f, + struct npc_install_flow_req *req) +{ + struct flow_msg *flow_spec = &req->packet; + struct flow_msg *flow_mask = &req->mask; + struct flow_dissector *dissector; + struct flow_rule *rule; + u8 ip_proto = 0; + + rule = flow_cls_offload_flow_rule(f); + dissector = rule->match.dissector; + + if ((dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_IP)))) { + netdev_info(nic->netdev, "unsupported flow used key 0x%x", + dissector->used_keys); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + + /* All EtherTypes can be matched, no hw limitation */ + flow_spec->etype = match.key->n_proto; + flow_mask->etype = match.mask->n_proto; + req->features |= BIT_ULL(NPC_ETYPE); + + if (match.mask->ip_proto && + (match.key->ip_proto != IPPROTO_TCP && + match.key->ip_proto != IPPROTO_UDP && + match.key->ip_proto != IPPROTO_SCTP && + match.key->ip_proto != IPPROTO_ICMP && + match.key->ip_proto != IPPROTO_ICMPV6)) { + netdev_info(nic->netdev, + "ip_proto=0x%x not supported\n", + match.key->ip_proto); + return -EOPNOTSUPP; + } + if (match.mask->ip_proto) + ip_proto = match.key->ip_proto; + + if (ip_proto == IPPROTO_UDP) + req->features |= BIT_ULL(NPC_IPPROTO_UDP); + else if (ip_proto == IPPROTO_TCP) + req->features |= BIT_ULL(NPC_IPPROTO_TCP); + else if (ip_proto == IPPROTO_SCTP) + req->features |= BIT_ULL(NPC_IPPROTO_SCTP); + else if (ip_proto == IPPROTO_ICMP) + req->features |= BIT_ULL(NPC_IPPROTO_ICMP); + else if (ip_proto == IPPROTO_ICMPV6) + req->features |= BIT_ULL(NPC_IPPROTO_ICMP6); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + if (!is_zero_ether_addr(match.mask->src)) { + netdev_err(nic->netdev, "src mac match not supported\n"); + return -EOPNOTSUPP; + 
} + + if (!is_zero_ether_addr(match.mask->dst)) { + ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst); + ether_addr_copy(flow_mask->dmac, + (u8 *)&match.mask->dst); + req->features |= BIT_ULL(NPC_DMAC); + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + struct flow_match_ip match; + + flow_rule_match_ip(rule, &match); + if ((ntohs(flow_spec->etype) != ETH_P_IP) && + match.mask->tos) { + netdev_err(nic->netdev, "tos not supported\n"); + return -EOPNOTSUPP; + } + if (match.mask->ttl) { + netdev_err(nic->netdev, "ttl not supported\n"); + return -EOPNOTSUPP; + } + flow_spec->tos = match.key->tos; + flow_mask->tos = match.mask->tos; + req->features |= BIT_ULL(NPC_TOS); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + u16 vlan_tci, vlan_tci_mask; + + flow_rule_match_vlan(rule, &match); + + if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) { + netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n", + ntohs(match.key->vlan_tpid)); + return -EOPNOTSUPP; + } + + if (match.mask->vlan_id || + match.mask->vlan_dei || + match.mask->vlan_priority) { + vlan_tci = match.key->vlan_id | + match.key->vlan_dei << 12 | + match.key->vlan_priority << 13; + + vlan_tci_mask = match.mask->vlan_id | + match.mask->vlan_dei << 12 | + match.mask->vlan_priority << 13; + + flow_spec->vlan_tci = htons(vlan_tci); + flow_mask->vlan_tci = htons(vlan_tci_mask); + req->features |= BIT_ULL(NPC_OUTER_VID); + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + + flow_spec->ip4dst = match.key->dst; + flow_mask->ip4dst = match.mask->dst; + req->features |= BIT_ULL(NPC_DIP_IPV4); + + flow_spec->ip4src = match.key->src; + flow_mask->ip4src = match.mask->src; + req->features |= BIT_ULL(NPC_SIP_IPV4); + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); + + if (ipv6_addr_loopback(&match.key->dst) || + ipv6_addr_loopback(&match.key->src)) { + netdev_err(nic->netdev, + "Flow matching on IPv6 loopback addr is not supported\n"); + return -EOPNOTSUPP; + } + + if (!ipv6_addr_any(&match.mask->dst)) { + memcpy(&flow_spec->ip6dst, + (struct in6_addr *)&match.key->dst, + sizeof(flow_spec->ip6dst)); + memcpy(&flow_mask->ip6dst, + (struct in6_addr *)&match.mask->dst, + sizeof(flow_spec->ip6dst)); + req->features |= BIT_ULL(NPC_DIP_IPV6); + } + + if (!ipv6_addr_any(&match.mask->src)) { + memcpy(&flow_spec->ip6src, + (struct in6_addr *)&match.key->src, + sizeof(flow_spec->ip6src)); + memcpy(&flow_mask->ip6src, + (struct in6_addr *)&match.mask->src, + sizeof(flow_spec->ip6src)); + req->features |= BIT_ULL(NPC_SIP_IPV6); + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + + flow_spec->dport = match.key->dst; + flow_mask->dport = match.mask->dst; + if (ip_proto == IPPROTO_UDP) + req->features |= BIT_ULL(NPC_DPORT_UDP); + else if (ip_proto == IPPROTO_TCP) + req->features |= BIT_ULL(NPC_DPORT_TCP); + else if (ip_proto == IPPROTO_SCTP) + req->features |= BIT_ULL(NPC_DPORT_SCTP); + + flow_spec->sport = match.key->src; + flow_mask->sport = match.mask->src; + if (ip_proto == IPPROTO_UDP) + req->features |= BIT_ULL(NPC_SPORT_UDP); + else if (ip_proto == IPPROTO_TCP) + req->features |= BIT_ULL(NPC_SPORT_TCP); + else if (ip_proto == IPPROTO_SCTP) + req->features |= 
BIT_ULL(NPC_SPORT_SCTP); + } + + return otx2_tc_parse_actions(nic, &rule->action, req); +} + +static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry) +{ + struct npc_delete_flow_req *req; + int err; + + mutex_lock(&nic->mbox.lock); + req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox); + if (!req) { + mutex_unlock(&nic->mbox.lock); + return -ENOMEM; + } + + req->entry = entry; + + /* Send message to AF */ + err = otx2_sync_mbox_msg(&nic->mbox); + if (err) { + netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n", + entry); + mutex_unlock(&nic->mbox.lock); + return -EFAULT; + } + mutex_unlock(&nic->mbox.lock); + + return 0; +} + +static int otx2_tc_del_flow(struct otx2_nic *nic, + struct flow_cls_offload *tc_flow_cmd) +{ + struct otx2_tc_info *tc_info = &nic->tc_info; + struct otx2_tc_flow *flow_node; + + flow_node = rhashtable_lookup_fast(&tc_info->flow_table, + &tc_flow_cmd->cookie, + tc_info->flow_ht_params); + if (!flow_node) { + netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n", + tc_flow_cmd->cookie); + return -EINVAL; + } + + otx2_del_mcam_flow_entry(nic, flow_node->entry); + + WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table, + &flow_node->node, + nic->tc_info.flow_ht_params)); + kfree_rcu(flow_node, rcu); + + clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap); + tc_info->num_entries--; + + return 0; +} + +static int otx2_tc_add_flow(struct otx2_nic *nic, + struct flow_cls_offload *tc_flow_cmd) +{ + struct otx2_tc_info *tc_info = &nic->tc_info; + struct otx2_tc_flow *new_node, *old_node; + struct npc_install_flow_req *req; + int rc; + + if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) + return -ENOMEM; + + /* allocate memory for the new flow and its node */ + new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); + if (!new_node) + return -ENOMEM; + spin_lock_init(&new_node->lock); + new_node->cookie = tc_flow_cmd->cookie; + + mutex_lock(&nic->mbox.lock); + req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); + if (!req) { + mutex_unlock(&nic->mbox.lock); + return -ENOMEM; + } + + rc = otx2_tc_prepare_flow(nic, tc_flow_cmd, req); + if (rc) { + otx2_mbox_reset(&nic->mbox.mbox, 0); + mutex_unlock(&nic->mbox.lock); + return rc; + } + + /* If a flow exists with the same cookie, delete it */ + old_node = rhashtable_lookup_fast(&tc_info->flow_table, + &tc_flow_cmd->cookie, + tc_info->flow_ht_params); + if (old_node) + otx2_tc_del_flow(nic, tc_flow_cmd); + + if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) { + netdev_err(nic->netdev, "Not enough MCAM space to add the flow\n"); + otx2_mbox_reset(&nic->mbox.mbox, 0); + mutex_unlock(&nic->mbox.lock); + return -ENOMEM; + } + + new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap, + nic->flow_cfg->tc_max_flows); + req->channel = nic->hw.rx_chan_base; + req->entry = nic->flow_cfg->entry[nic->flow_cfg->tc_flower_offset + + nic->flow_cfg->tc_max_flows - new_node->bitpos]; + req->intf = NIX_INTF_RX; + req->set_cntr = 1; + new_node->entry = req->entry; + + /* Send message to AF */ + rc = otx2_sync_mbox_msg(&nic->mbox); + if (rc) { + netdev_err(nic->netdev, "Failed to install MCAM flow entry\n"); + mutex_unlock(&nic->mbox.lock); + goto out; + } + mutex_unlock(&nic->mbox.lock); + + /* add new flow to flow-table */ + rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node, + nic->tc_info.flow_ht_params); + if (rc) { + otx2_del_mcam_flow_entry(nic, req->entry); + kfree_rcu(new_node, rcu); + goto out; + } + + set_bit(new_node->bitpos, 
tc_info->tc_entries_bitmap); + tc_info->num_entries++; +out: + return rc; +} + +static int otx2_tc_get_flow_stats(struct otx2_nic *nic, + struct flow_cls_offload *tc_flow_cmd) +{ + struct otx2_tc_info *tc_info = &nic->tc_info; + struct npc_mcam_get_stats_req *req; + struct npc_mcam_get_stats_rsp *rsp; + struct otx2_tc_flow_stats *stats; + struct otx2_tc_flow *flow_node; + int err; + + flow_node = rhashtable_lookup_fast(&tc_info->flow_table, + &tc_flow_cmd->cookie, + tc_info->flow_ht_params); + if (!flow_node) { + netdev_info(nic->netdev, "tc flow not found for cookie %lx", + tc_flow_cmd->cookie); + return -EINVAL; + } + + mutex_lock(&nic->mbox.lock); + + req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox); + if (!req) { + mutex_unlock(&nic->mbox.lock); + return -ENOMEM; + } + + req->entry = flow_node->entry; + + err = otx2_sync_mbox_msg(&nic->mbox); + if (err) { + netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n", + req->entry); + mutex_unlock(&nic->mbox.lock); + return -EFAULT; + } + + rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp + (&nic->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + mutex_unlock(&nic->mbox.lock); + return PTR_ERR(rsp); + } + + mutex_unlock(&nic->mbox.lock); + + if (!rsp->stat_ena) + return -EINVAL; + + stats = &flow_node->stats; + + spin_lock(&flow_node->lock); + flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0, + FLOW_ACTION_HW_STATS_IMMEDIATE); + stats->pkts = rsp->stat; + spin_unlock(&flow_node->lock); + + return 0; +} + +static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, + struct flow_cls_offload *cls_flower) +{ + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return otx2_tc_add_flow(nic, cls_flower); + case FLOW_CLS_DESTROY: + return otx2_tc_del_flow(nic, cls_flower); + case FLOW_CLS_STATS: + return otx2_tc_get_flow_stats(nic, cls_flower); + default: + return -EOPNOTSUPP; + } +} + +static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct otx2_nic *nic = cb_priv; + + if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return otx2_setup_tc_cls_flower(nic, type_data); + default: + break; + } + + return -EOPNOTSUPP; +} + +static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic, + struct tc_cls_matchall_offload *cls_matchall) +{ + switch (cls_matchall->command) { + case TC_CLSMATCHALL_REPLACE: + return otx2_tc_egress_matchall_install(nic, cls_matchall); + case TC_CLSMATCHALL_DESTROY: + return otx2_tc_egress_matchall_delete(nic, cls_matchall); + case TC_CLSMATCHALL_STATS: + default: + break; + } + + return -EOPNOTSUPP; +} + +static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct otx2_nic *nic = cb_priv; + + if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSMATCHALL: + return otx2_setup_tc_egress_matchall(nic, type_data); + default: + break; + } + + return -EOPNOTSUPP; +} + +static LIST_HEAD(otx2_block_cb_list); + +static int otx2_setup_tc_block(struct net_device *netdev, + struct flow_block_offload *f) +{ + struct otx2_nic *nic = netdev_priv(netdev); + flow_setup_cb_t *cb; + bool ingress; + + if (f->block_shared) + return -EOPNOTSUPP; + + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { + cb = otx2_setup_tc_block_ingress_cb; + ingress = true; + } else if (f->binder_type == 
FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { + cb = otx2_setup_tc_block_egress_cb; + ingress = false; + } else { + return -EOPNOTSUPP; + } + + return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb, + nic, nic, ingress); +} + +int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: + return otx2_setup_tc_block(netdev, type_data); + default: + return -EOPNOTSUPP; + } +} + +static const struct rhashtable_params tc_flow_ht_params = { + .head_offset = offsetof(struct otx2_tc_flow, node), + .key_offset = offsetof(struct otx2_tc_flow, cookie), + .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie), + .automatic_shrinking = true, +}; + +int otx2_init_tc(struct otx2_nic *nic) +{ + struct otx2_tc_info *tc = &nic->tc_info; + + tc->flow_ht_params = tc_flow_ht_params; + return rhashtable_init(&tc->flow_table, &tc->flow_ht_params); +} + +void otx2_shutdown_tc(struct otx2_nic *nic) +{ + struct otx2_tc_info *tc = &nic->tc_info; + + rhashtable_destroy(&tc->flow_table); +} diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index d1e4d42e497d..3712e1786091 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1544,8 +1544,8 @@ static int pxa168_eth_remove(struct platform_device *pdev) clk_disable_unprepare(pep->clk); mdiobus_unregister(pep->smi_bus); mdiobus_free(pep->smi_bus); - unregister_netdev(dev); cancel_work_sync(&pep->tx_timeout_task); + unregister_netdev(dev); free_netdev(dev); return 0; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index dbec8e187a68..2a752fb6b758 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4135,7 +4135,7 @@ static int sky2_set_coalesce(struct net_device *dev, /* * Hardware is limited to min of 128 and max of 2048 for ring size * and rounded up to next power of two - * to avoid division in modulus calclation + * to avoid division in modulus calculation */ static unsigned long roundup_ring_size(unsigned long pending) { diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile index 3a777b4a6cd3..79d4cdbbcbf5 100644 --- a/drivers/net/ethernet/mediatek/Makefile +++ b/drivers/net/ethernet/mediatek/Makefile @@ -4,5 +4,5 @@ # obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o -mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o +mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 01d3ee4b5829..0396f0db855f 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -19,6 +19,7 @@ #include <linux/interrupt.h> #include <linux/pinctrl/devinfo.h> #include <linux/phylink.h> +#include <net/dsa.h> #include "mtk_eth_soc.h" @@ -1264,13 +1265,12 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, break; /* find out which mac the packet comes from. 
values start at 1 */ - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) || + (trxd.rxd4 & RX_DMA_SPECIAL_TAG)) mac = 0; - } else { - mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & - RX_DMA_FPORT_MASK; - mac--; - } + else + mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & + RX_DMA_FPORT_MASK) - 1; if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || !eth->netdev[mac])) @@ -2233,6 +2233,9 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config) val |= config; + if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0])) + val |= MTK_GDMA_SPECIAL_TAG; + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); } /* Reset and enable PSE */ @@ -2255,12 +2258,17 @@ static int mtk_open(struct net_device *dev) /* we run 2 netdevs on the same dma ring so we only bring it up once */ if (!refcount_read(&eth->dma_refcnt)) { - int err = mtk_start_dma(eth); + u32 gdm_config = MTK_GDMA_TO_PDMA; + int err; + err = mtk_start_dma(eth); if (err) return err; - mtk_gdm_config(eth, MTK_GDMA_TO_PDMA); + if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0) + gdm_config = MTK_GDMA_TO_PPE; + + mtk_gdm_config(eth, gdm_config); napi_enable(&eth->tx_napi); napi_enable(&eth->rx_napi); @@ -2327,6 +2335,9 @@ static int mtk_stop(struct net_device *dev) mtk_dma_free(eth); + if (eth->soc->offload_version) + mtk_ppe_stop(&eth->ppe); + return 0; } @@ -2832,6 +2843,7 @@ static const struct net_device_ops mtk_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mtk_poll_controller, #endif + .ndo_setup_tc = mtk_eth_setup_tc, }; static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) @@ -3088,6 +3100,17 @@ static int mtk_probe(struct platform_device *pdev) goto err_free_dev; } + if (eth->soc->offload_version) { + err = mtk_ppe_init(&eth->ppe, eth->dev, + eth->base + MTK_ETH_PPE_BASE, 2); + if (err) + goto err_free_dev; + + err = mtk_eth_offload_init(eth); + if (err) + goto err_free_dev; + } + for (i = 0; i < MTK_MAX_DEVS; i++) { if (!eth->netdev[i]) continue; @@ -3162,6 +3185,7 @@ static const struct mtk_soc_data mt7621_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7621_CLKS_BITMAP, .required_pctl = false, + .offload_version = 2, }; static const struct mtk_soc_data mt7622_data = { @@ -3170,6 +3194,7 @@ static const struct mtk_soc_data mt7622_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7622_CLKS_BITMAP, .required_pctl = false, + .offload_version = 2, }; static const struct mtk_soc_data mt7623_data = { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index fd3cec8f06ba..1a6750c08bb9 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -15,6 +15,8 @@ #include <linux/u64_stats_sync.h> #include <linux/refcount.h> #include <linux/phylink.h> +#include <linux/rhashtable.h> +#include "mtk_ppe.h" #define MTK_QDMA_PAGE_SIZE 2048 #define MTK_MAX_RX_LENGTH 1536 @@ -40,7 +42,8 @@ NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_SG | NETIF_F_TSO | \ NETIF_F_TSO6 | \ - NETIF_F_IPV6_CSUM) + NETIF_F_IPV6_CSUM |\ + NETIF_F_HW_TC) #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM) #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1)) @@ -82,10 +85,12 @@ /* GDM Egress Control Register */ #define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000)) +#define MTK_GDMA_SPECIAL_TAG BIT(24) #define MTK_GDMA_ICS_EN BIT(22) #define MTK_GDMA_TCS_EN BIT(21) #define MTK_GDMA_UCS_EN BIT(20) #define MTK_GDMA_TO_PDMA 0x0 +#define MTK_GDMA_TO_PPE 0x4444 #define MTK_GDMA_DROP_ALL 0x7777 /* 
Unicast Filter MAC Address Register - Low */ @@ -301,10 +306,17 @@ #define RX_DMA_VID(_x) ((_x) & 0xfff) /* QDMA descriptor rxd4 */ +#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0) +#define MTK_RXD4_PPE_CPU_REASON GENMASK(18, 14) +#define MTK_RXD4_SRC_PORT GENMASK(21, 19) +#define MTK_RXD4_ALG GENMASK(31, 22) + +/* QDMA descriptor rxd4 */ #define RX_DMA_L4_VALID BIT(24) #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */ #define RX_DMA_FPORT_SHIFT 19 #define RX_DMA_FPORT_MASK 0x7 +#define RX_DMA_SPECIAL_TAG BIT(22) /* PHY Indirect Access Control registers */ #define MTK_PHY_IAC 0x10004 @@ -802,6 +814,7 @@ struct mtk_soc_data { u32 caps; u32 required_clks; bool required_pctl; + u8 offload_version; netdev_features_t hw_features; }; @@ -901,6 +914,9 @@ struct mtk_eth { u32 tx_int_status_reg; u32 rx_dma_l4_valid; int ip_align; + + struct mtk_ppe ppe; + struct rhashtable flow_table; }; /* struct mtk_mac - the structure that holds the info about the MACs of the @@ -945,4 +961,9 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); +int mtk_eth_offload_init(struct mtk_eth *eth); +int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data); + + #endif /* MTK_ETH_H */ diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c new file mode 100644 index 000000000000..a1a9959a2461 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -0,0 +1,511 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#include <linux/kernel.h> +#include <linux/jiffies.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include "mtk_ppe.h" +#include "mtk_ppe_regs.h" + +static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) +{ + writel(val, ppe->base + reg); +} + +static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg) +{ + return readl(ppe->base + reg); +} + +static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set) +{ + u32 val; + + val = ppe_r32(ppe, reg); + val &= ~mask; + val |= set; + ppe_w32(ppe, reg, val); + + return val; +} + +static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val) +{ + return ppe_m32(ppe, reg, 0, val); +} + +static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val) +{ + return ppe_m32(ppe, reg, val, 0); +} + +static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) +{ + unsigned long timeout = jiffies + HZ; + + while (time_is_after_jiffies(timeout)) { + if (!(ppe_r32(ppe, MTK_PPE_GLO_CFG) & MTK_PPE_GLO_CFG_BUSY)) + return 0; + + usleep_range(10, 20); + } + + dev_err(ppe->dev, "PPE table busy"); + + return -ETIMEDOUT; +} + +static void mtk_ppe_cache_clear(struct mtk_ppe *ppe) +{ + ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR); + ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR); +} + +static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable) +{ + mtk_ppe_cache_clear(ppe); + + ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN, + enable * MTK_PPE_CACHE_CTL_EN); +} + +static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e) +{ + u32 hv1, hv2, hv3; + u32 hash; + + switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) { + case MTK_PPE_PKT_TYPE_BRIDGE: + hv1 = e->bridge.src_mac_lo; + hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16); + hv2 = e->bridge.src_mac_hi >> 16; + hv2 ^= e->bridge.dest_mac_lo; + hv3 = e->bridge.dest_mac_hi; + break; + 
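+ /* the routed/NAT packet types below fold addresses and ports into the three hash words (hv1-hv3) that feed the common combine step after this switch */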
case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + hv1 = e->ipv4.orig.ports; + hv2 = e->ipv4.orig.dest_ip; + hv3 = e->ipv4.orig.src_ip; + break; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: + hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3]; + hv1 ^= e->ipv6.ports; + + hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2]; + hv2 ^= e->ipv6.dest_ip[0]; + + hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1]; + hv3 ^= e->ipv6.src_ip[0]; + break; + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + case MTK_PPE_PKT_TYPE_IPV6_6RD: + default: + WARN_ON_ONCE(1); + return MTK_PPE_HASH_MASK; + } + + hash = (hv1 & hv2) | ((~hv1) & hv3); + hash = (hash >> 24) | ((hash & 0xffffff) << 8); + hash ^= hv1 ^ hv2 ^ hv3; + hash ^= hash >> 16; + hash <<= 1; + hash &= MTK_PPE_ENTRIES - 1; + + return hash; +} + +static inline struct mtk_foe_mac_info * +mtk_foe_entry_l2(struct mtk_foe_entry *entry) +{ + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) + return &entry->ipv6.l2; + + return &entry->ipv4.l2; +} + +static inline u32 * +mtk_foe_entry_ib2(struct mtk_foe_entry *entry) +{ + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) + return &entry->ipv6.ib2; + + return &entry->ipv4.ib2; +} + +int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto, + u8 pse_port, u8 *src_mac, u8 *dest_mac) +{ + struct mtk_foe_mac_info *l2; + u32 ports_pad, val; + + memset(entry, 0, sizeof(*entry)); + + val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) | + FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) | + FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) | + MTK_FOE_IB1_BIND_TTL | + MTK_FOE_IB1_BIND_CACHE; + entry->ib1 = val; + + val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) | + FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) | + FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port); + + if (is_multicast_ether_addr(dest_mac)) + val |= MTK_FOE_IB2_MULTICAST; + + ports_pad = 0xa5a5a500 | (l4proto & 0xff); + if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE) + entry->ipv4.orig.ports = ports_pad; + if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) + entry->ipv6.ports = ports_pad; + + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { + entry->ipv6.ib2 = val; + l2 = &entry->ipv6.l2; + } else { + entry->ipv4.ib2 = val; + l2 = &entry->ipv4.l2; + } + + l2->dest_mac_hi = get_unaligned_be32(dest_mac); + l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4); + l2->src_mac_hi = get_unaligned_be32(src_mac); + l2->src_mac_lo = get_unaligned_be16(src_mac + 4); + + if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) + l2->etype = ETH_P_IPV6; + else + l2->etype = ETH_P_IP; + + return 0; +} + +int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port) +{ + u32 *ib2 = mtk_foe_entry_ib2(entry); + u32 val; + + val = *ib2; + val &= ~MTK_FOE_IB2_DEST_PORT; + val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port); + *ib2 = val; + + return 0; +} + +int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress, + __be32 src_addr, __be16 src_port, + __be32 dest_addr, __be16 dest_port) +{ + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + struct mtk_ipv4_tuple *t; + + switch (type) { + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + if (egress) { + t = &entry->ipv4.new; + break; + } + fallthrough; + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + t = &entry->ipv4.orig; + break; + case MTK_PPE_PKT_TYPE_IPV6_6RD: + entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr); + entry->ipv6_6rd.tunnel_dest_ip = 
be32_to_cpu(dest_addr); + return 0; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + t->src_ip = be32_to_cpu(src_addr); + t->dest_ip = be32_to_cpu(dest_addr); + + if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE) + return 0; + + t->src_port = be16_to_cpu(src_port); + t->dest_port = be16_to_cpu(dest_port); + + return 0; +} + +int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry, + __be32 *src_addr, __be16 src_port, + __be32 *dest_addr, __be16 dest_port) +{ + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + u32 *src, *dest; + int i; + + switch (type) { + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + src = entry->dslite.tunnel_src_ip; + dest = entry->dslite.tunnel_dest_ip; + break; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: + case MTK_PPE_PKT_TYPE_IPV6_6RD: + entry->ipv6.src_port = be16_to_cpu(src_port); + entry->ipv6.dest_port = be16_to_cpu(dest_port); + fallthrough; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: + src = entry->ipv6.src_ip; + dest = entry->ipv6.dest_ip; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + for (i = 0; i < 4; i++) + src[i] = be32_to_cpu(src_addr[i]); + for (i = 0; i < 4; i++) + dest[i] = be32_to_cpu(dest_addr[i]); + + return 0; +} + +int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); + + l2->etype = BIT(port); + + if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER)) + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1); + else + l2->etype |= BIT(8); + + entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG; + + return 0; +} + +int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); + + switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) { + case 0: + entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG | + FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1); + l2->vlan1 = vid; + return 0; + case 1: + if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) { + l2->vlan1 = vid; + l2->etype |= BIT(8); + } else { + l2->vlan2 = vid; + entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1); + } + return 0; + default: + return -ENOSPC; + } +} + +int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); + + if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) || + (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) + l2->etype = ETH_P_PPP_SES; + + entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE; + l2->pppoe_id = sid; + + return 0; +} + +static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) +{ + return !(entry->ib1 & MTK_FOE_IB1_STATIC) && + FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; +} + +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, + u16 timestamp) +{ + struct mtk_foe_entry *hwe; + u32 hash; + + timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP; + entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp); + + hash = mtk_ppe_hash_entry(entry); + hwe = &ppe->foe_table[hash]; + if (!mtk_foe_entry_usable(hwe)) { + hwe++; + hash++; + + if (!mtk_foe_entry_usable(hwe)) + return -ENOSPC; + } + + memcpy(&hwe->data, &entry->data, sizeof(hwe->data)); + wmb(); + hwe->ib1 = entry->ib1; + + dma_wmb(); + + mtk_ppe_cache_clear(ppe); + + return hash; +} + +int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, + int version) +{ + struct mtk_foe_entry *foe; + + /* need to allocate a separate device, since the PPE's DMA access is + * not coherent. 
+ */ + ppe->base = base; + ppe->dev = dev; + ppe->version = version; + + foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe), + &ppe->foe_phys, GFP_KERNEL); + if (!foe) + return -ENOMEM; + + ppe->foe_table = foe; + + mtk_ppe_debugfs_init(ppe); + + return 0; +} + +static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe) +{ + static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 }; + int i, k; + + memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table)); + + if (!IS_ENABLED(CONFIG_SOC_MT7621)) + return; + + /* skip all entries that cross the 1024 byte boundary */ + for (i = 0; i < MTK_PPE_ENTRIES; i += 128) + for (k = 0; k < ARRAY_SIZE(skip); k++) + ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC; +} + +int mtk_ppe_start(struct mtk_ppe *ppe) +{ + u32 val; + + mtk_ppe_init_foe_table(ppe); + ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys); + + val = MTK_PPE_TB_CFG_ENTRY_80B | + MTK_PPE_TB_CFG_AGE_NON_L4 | + MTK_PPE_TB_CFG_AGE_UNBIND | + MTK_PPE_TB_CFG_AGE_TCP | + MTK_PPE_TB_CFG_AGE_UDP | + MTK_PPE_TB_CFG_AGE_TCP_FIN | + FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS, + MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) | + FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE, + MTK_PPE_KEEPALIVE_DISABLE) | + FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) | + FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE, + MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) | + FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM, + MTK_PPE_ENTRIES_SHIFT); + ppe_w32(ppe, MTK_PPE_TB_CFG, val); + + ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK, + MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6); + + mtk_ppe_cache_enable(ppe, true); + + val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG | + MTK_PPE_FLOW_CFG_IP4_UDP_FRAG | + MTK_PPE_FLOW_CFG_IP6_3T_ROUTE | + MTK_PPE_FLOW_CFG_IP6_5T_ROUTE | + MTK_PPE_FLOW_CFG_IP6_6RD | + MTK_PPE_FLOW_CFG_IP4_NAT | + MTK_PPE_FLOW_CFG_IP4_NAPT | + MTK_PPE_FLOW_CFG_IP4_DSLITE | + MTK_PPE_FLOW_CFG_L2_BRIDGE | + MTK_PPE_FLOW_CFG_IP4_NAT_FRAG; + ppe_w32(ppe, MTK_PPE_FLOW_CFG, val); + + val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) | + FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3); + ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val); + + val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) | + FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1); + ppe_w32(ppe, MTK_PPE_BIND_AGE0, val); + + val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) | + FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7); + ppe_w32(ppe, MTK_PPE_BIND_AGE1, val); + + val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF; + ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val); + + val = MTK_PPE_BIND_LIMIT1_FULL | + FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1); + ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val); + + val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) | + FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1); + ppe_w32(ppe, MTK_PPE_BIND_RATE, val); + + /* enable PPE */ + val = MTK_PPE_GLO_CFG_EN | + MTK_PPE_GLO_CFG_IP4_L4_CS_DROP | + MTK_PPE_GLO_CFG_IP4_CS_DROP | + MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE; + ppe_w32(ppe, MTK_PPE_GLO_CFG, val); + + ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0); + + return 0; +} + +int mtk_ppe_stop(struct mtk_ppe *ppe) +{ + u32 val; + int i; + + for (i = 0; i < MTK_PPE_ENTRIES; i++) + ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE, + MTK_FOE_STATE_INVALID); + + mtk_ppe_cache_enable(ppe, false); + + /* disable offload engine */ + ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN); + ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0); + + /* disable aging */ + val = MTK_PPE_TB_CFG_AGE_NON_L4 | + MTK_PPE_TB_CFG_AGE_UNBIND | + MTK_PPE_TB_CFG_AGE_TCP | + MTK_PPE_TB_CFG_AGE_UDP | + MTK_PPE_TB_CFG_AGE_TCP_FIN; + ppe_clear(ppe, MTK_PPE_TB_CFG, 
val); + + return mtk_ppe_wait_busy(ppe); +} diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h new file mode 100644 index 000000000000..51bd5e75bbbd --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -0,0 +1,287 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#ifndef __MTK_PPE_H +#define __MTK_PPE_H + +#include <linux/kernel.h> +#include <linux/bitfield.h> + +#define MTK_ETH_PPE_BASE 0xc00 + +#define MTK_PPE_ENTRIES_SHIFT 3 +#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT) +#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1) + +#define MTK_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0) +#define MTK_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8) +#define MTK_FOE_IB1_UNBIND_PREBIND BIT(24) + +#define MTK_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0) +#define MTK_FOE_IB1_BIND_KEEPALIVE BIT(15) +#define MTK_FOE_IB1_BIND_VLAN_LAYER GENMASK(18, 16) +#define MTK_FOE_IB1_BIND_PPPOE BIT(19) +#define MTK_FOE_IB1_BIND_VLAN_TAG BIT(20) +#define MTK_FOE_IB1_BIND_PKT_SAMPLE BIT(21) +#define MTK_FOE_IB1_BIND_CACHE BIT(22) +#define MTK_FOE_IB1_BIND_TUNNEL_DECAP BIT(23) +#define MTK_FOE_IB1_BIND_TTL BIT(24) + +#define MTK_FOE_IB1_PACKET_TYPE GENMASK(27, 25) +#define MTK_FOE_IB1_STATE GENMASK(29, 28) +#define MTK_FOE_IB1_UDP BIT(30) +#define MTK_FOE_IB1_STATIC BIT(31) + +enum { + MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0, + MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1, + MTK_PPE_PKT_TYPE_BRIDGE = 2, + MTK_PPE_PKT_TYPE_IPV4_DSLITE = 3, + MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T = 4, + MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T = 5, + MTK_PPE_PKT_TYPE_IPV6_6RD = 7, +}; + +#define MTK_FOE_IB2_QID GENMASK(3, 0) +#define MTK_FOE_IB2_PSE_QOS BIT(4) +#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5) +#define MTK_FOE_IB2_MULTICAST BIT(8) + +#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12) +#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16) +#define MTK_FOE_IB2_WHNAT_NAT BIT(17) + +#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12) + +#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18) + +#define MTK_FOE_IB2_DSCP GENMASK(31, 24) + +#define MTK_FOE_VLAN2_WHNAT_BSS GENMASK(5, 0) +#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6) +#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14) + +enum { + MTK_FOE_STATE_INVALID, + MTK_FOE_STATE_UNBIND, + MTK_FOE_STATE_BIND, + MTK_FOE_STATE_FIN +}; + +struct mtk_foe_mac_info { + u16 vlan1; + u16 etype; + + u32 dest_mac_hi; + + u16 vlan2; + u16 dest_mac_lo; + + u32 src_mac_hi; + + u16 pppoe_id; + u16 src_mac_lo; +}; + +struct mtk_foe_bridge { + u32 dest_mac_hi; + + u16 src_mac_lo; + u16 dest_mac_lo; + + u32 src_mac_hi; + + u32 ib2; + + u32 _rsv[5]; + + u32 udf_tsid; + struct mtk_foe_mac_info l2; +}; + +struct mtk_ipv4_tuple { + u32 src_ip; + u32 dest_ip; + union { + struct { + u16 dest_port; + u16 src_port; + }; + struct { + u8 protocol; + u8 _pad[3]; /* fill with 0xa5a5a5 */ + }; + u32 ports; + }; +}; + +struct mtk_foe_ipv4 { + struct mtk_ipv4_tuple orig; + + u32 ib2; + + struct mtk_ipv4_tuple new; + + u16 timestamp; + u16 _rsv0[3]; + + u32 udf_tsid; + + struct mtk_foe_mac_info l2; +}; + +struct mtk_foe_ipv4_dslite { + struct mtk_ipv4_tuple ip4; + + u32 tunnel_src_ip[4]; + u32 tunnel_dest_ip[4]; + + u8 flow_label[3]; + u8 priority; + + u32 udf_tsid; + + u32 ib2; + + struct mtk_foe_mac_info l2; +}; + +struct mtk_foe_ipv6 { + u32 src_ip[4]; + u32 dest_ip[4]; + + union { + struct { + u8 protocol; + u8 _pad[3]; /* fill with 0xa5a5a5 */ + }; /* 3-tuple */ + struct { + u16 dest_port; + u16 src_port; + }; /* 5-tuple */ + u32 ports; + }; + + u32 _rsv[3]; + + u32 udf; + + u32 ib2; + 
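+ /* as in every other packet-type layout, the L2 rewrite info goes last */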
struct mtk_foe_mac_info l2; +}; + +struct mtk_foe_ipv6_6rd { + u32 src_ip[4]; + u32 dest_ip[4]; + u16 dest_port; + u16 src_port; + + u32 tunnel_src_ip; + u32 tunnel_dest_ip; + + u16 hdr_csum; + u8 dscp; + u8 ttl; + + u8 flag; + u8 pad; + u8 per_flow_6rd_id; + u8 pad2; + + u32 ib2; + struct mtk_foe_mac_info l2; +}; + +struct mtk_foe_entry { + u32 ib1; + + union { + struct mtk_foe_bridge bridge; + struct mtk_foe_ipv4 ipv4; + struct mtk_foe_ipv4_dslite dslite; + struct mtk_foe_ipv6 ipv6; + struct mtk_foe_ipv6_6rd ipv6_6rd; + u32 data[19]; + }; +}; + +enum { + MTK_PPE_CPU_REASON_TTL_EXCEEDED = 0x02, + MTK_PPE_CPU_REASON_OPTION_HEADER = 0x03, + MTK_PPE_CPU_REASON_NO_FLOW = 0x07, + MTK_PPE_CPU_REASON_IPV4_FRAG = 0x08, + MTK_PPE_CPU_REASON_IPV4_DSLITE_FRAG = 0x09, + MTK_PPE_CPU_REASON_IPV4_DSLITE_NO_TCP_UDP = 0x0a, + MTK_PPE_CPU_REASON_IPV6_6RD_NO_TCP_UDP = 0x0b, + MTK_PPE_CPU_REASON_TCP_FIN_SYN_RST = 0x0c, + MTK_PPE_CPU_REASON_UN_HIT = 0x0d, + MTK_PPE_CPU_REASON_HIT_UNBIND = 0x0e, + MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f, + MTK_PPE_CPU_REASON_HIT_BIND_TCP_FIN = 0x10, + MTK_PPE_CPU_REASON_HIT_TTL_1 = 0x11, + MTK_PPE_CPU_REASON_HIT_BIND_VLAN_VIOLATION = 0x12, + MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR = 0x13, + MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR = 0x14, + MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR = 0x15, + MTK_PPE_CPU_REASON_HIT_BIND_FORCE_CPU = 0x16, + MTK_PPE_CPU_REASON_TUNNEL_OPTION_HEADER = 0x17, + MTK_PPE_CPU_REASON_MULTICAST_TO_CPU = 0x18, + MTK_PPE_CPU_REASON_MULTICAST_TO_GMAC1_CPU = 0x19, + MTK_PPE_CPU_REASON_HIT_PRE_BIND = 0x1a, + MTK_PPE_CPU_REASON_PACKET_SAMPLING = 0x1b, + MTK_PPE_CPU_REASON_EXCEED_MTU = 0x1c, + MTK_PPE_CPU_REASON_PPE_BYPASS = 0x1e, + MTK_PPE_CPU_REASON_INVALID = 0x1f, +}; + +struct mtk_ppe { + struct device *dev; + void __iomem *base; + int version; + + struct mtk_foe_entry *foe_table; + dma_addr_t foe_phys; + + void *acct_table; +}; + +int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, + int version); +int mtk_ppe_start(struct mtk_ppe *ppe); +int mtk_ppe_stop(struct mtk_ppe *ppe); + +static inline void +mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash) +{ + ppe->foe_table[hash].ib1 = 0; + dma_wmb(); +} + +static inline int +mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash) +{ + u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1); + + if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) + return -1; + + return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1); +} + +int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto, + u8 pse_port, u8 *src_mac, u8 *dest_mac); +int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port); +int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig, + __be32 src_addr, __be16 src_port, + __be32 dest_addr, __be16 dest_port); +int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry, + __be32 *src_addr, __be16 src_port, + __be32 *dest_addr, __be16 dest_port); +int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port); +int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid); +int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, + u16 timestamp); +int mtk_ppe_debugfs_init(struct mtk_ppe *ppe); + +#endif diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c new file mode 100644 index 000000000000..8ae9efab6d02 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c @@ -0,0 +1,217 
@@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#include <linux/kernel.h> +#include <linux/debugfs.h> +#include "mtk_eth_soc.h" + +struct mtk_flow_addr_info +{ + void *src, *dest; + u16 *src_port, *dest_port; + bool ipv6; +}; + +static const char *mtk_foe_entry_state_str(int state) +{ + static const char * const state_str[] = { + [MTK_FOE_STATE_INVALID] = "INV", + [MTK_FOE_STATE_UNBIND] = "UNB", + [MTK_FOE_STATE_BIND] = "BND", + [MTK_FOE_STATE_FIN] = "FIN", + }; + + if (state >= ARRAY_SIZE(state_str) || !state_str[state]) + return "UNK"; + + return state_str[state]; +} + +static const char *mtk_foe_pkt_type_str(int type) +{ + static const char * const type_str[] = { + [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T", + [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T", + [MTK_PPE_PKT_TYPE_BRIDGE] = "L2", + [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE", + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T", + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T", + [MTK_PPE_PKT_TYPE_IPV6_6RD] = "6RD", + }; + + if (type >= ARRAY_SIZE(type_str) || !type_str[type]) + return "UNKNOWN"; + + return type_str[type]; +} + +static void +mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6) +{ + u32 n_addr[4]; + int i; + + if (!ipv6) { + seq_printf(m, "%pI4h", addr); + return; + } + + for (i = 0; i < ARRAY_SIZE(n_addr); i++) + n_addr[i] = htonl(addr[i]); + seq_printf(m, "%pI6", n_addr); +} + +static void +mtk_print_addr_info(struct seq_file *m, struct mtk_flow_addr_info *ai) +{ + mtk_print_addr(m, ai->src, ai->ipv6); + if (ai->src_port) + seq_printf(m, ":%d", *ai->src_port); + seq_printf(m, "->"); + mtk_print_addr(m, ai->dest, ai->ipv6); + if (ai->dest_port) + seq_printf(m, ":%d", *ai->dest_port); +} + +static int +mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind) +{ + struct mtk_ppe *ppe = m->private; + int i, count; + + for (i = 0, count = 0; i < MTK_PPE_ENTRIES; i++) { + struct mtk_foe_entry *entry = &ppe->foe_table[i]; + struct mtk_foe_mac_info *l2; + struct mtk_flow_addr_info ai = {}; + unsigned char h_source[ETH_ALEN]; + unsigned char h_dest[ETH_ALEN]; + int type, state; + u32 ib2; + + + state = FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1); + if (!state) + continue; + + if (bind && state != MTK_FOE_STATE_BIND) + continue; + + type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + seq_printf(m, "%05x %s %7s", i, + mtk_foe_entry_state_str(state), + mtk_foe_pkt_type_str(type)); + + switch (type) { + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + ai.src_port = &entry->ipv4.orig.src_port; + ai.dest_port = &entry->ipv4.orig.dest_port; + fallthrough; + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + ai.src = &entry->ipv4.orig.src_ip; + ai.dest = &entry->ipv4.orig.dest_ip; + break; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: + ai.src_port = &entry->ipv6.src_port; + ai.dest_port = &entry->ipv6.dest_port; + fallthrough; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: + case MTK_PPE_PKT_TYPE_IPV6_6RD: + ai.src = &entry->ipv6.src_ip; + ai.dest = &entry->ipv6.dest_ip; + ai.ipv6 = true; + break; + } + + seq_printf(m, " orig="); + mtk_print_addr_info(m, &ai); + + switch (type) { + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + ai.src_port = &entry->ipv4.new.src_port; + ai.dest_port = &entry->ipv4.new.dest_port; + fallthrough; + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + ai.src = &entry->ipv4.new.src_ip; + ai.dest = &entry->ipv4.new.dest_ip; + seq_printf(m, " new="); + mtk_print_addr_info(m, &ai); + break; + } + + if (type >= 
MTK_PPE_PKT_TYPE_IPV4_DSLITE) { + l2 = &entry->ipv6.l2; + ib2 = entry->ipv6.ib2; + } else { + l2 = &entry->ipv4.l2; + ib2 = entry->ipv4.ib2; + } + + *((__be32 *)h_source) = htonl(l2->src_mac_hi); + *((__be16 *)&h_source[4]) = htons(l2->src_mac_lo); + *((__be32 *)h_dest) = htonl(l2->dest_mac_hi); + *((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo); + + seq_printf(m, " eth=%pM->%pM etype=%04x" + " vlan=%d,%d ib1=%08x ib2=%08x\n", + h_source, h_dest, ntohs(l2->etype), + l2->vlan1, l2->vlan2, entry->ib1, ib2); + } + + return 0; +} + +static int +mtk_ppe_debugfs_foe_show_all(struct seq_file *m, void *private) +{ + return mtk_ppe_debugfs_foe_show(m, private, false); +} + +static int +mtk_ppe_debugfs_foe_show_bind(struct seq_file *m, void *private) +{ + return mtk_ppe_debugfs_foe_show(m, private, true); +} + +static int +mtk_ppe_debugfs_foe_open_all(struct inode *inode, struct file *file) +{ + return single_open(file, mtk_ppe_debugfs_foe_show_all, + inode->i_private); +} + +static int +mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file) +{ + return single_open(file, mtk_ppe_debugfs_foe_show_bind, + inode->i_private); +} + +int mtk_ppe_debugfs_init(struct mtk_ppe *ppe) +{ + static const struct file_operations fops_all = { + .open = mtk_ppe_debugfs_foe_open_all, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + }; + + static const struct file_operations fops_bind = { + .open = mtk_ppe_debugfs_foe_open_bind, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + }; + + struct dentry *root; + + root = debugfs_create_dir("mtk_ppe", NULL); + if (!root) + return -ENOMEM; + + debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all); + debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind); + + return 0; +} diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c new file mode 100644 index 000000000000..d0c46786571f --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> + */ + +#include <linux/if_ether.h> +#include <linux/rhashtable.h> +#include <linux/ip.h> +#include <net/flow_offload.h> +#include <net/pkt_cls.h> +#include <net/dsa.h> +#include "mtk_eth_soc.h" + +struct mtk_flow_data { + struct ethhdr eth; + + union { + struct { + __be32 src_addr; + __be32 dst_addr; + } v4; + }; + + __be16 src_port; + __be16 dst_port; + + struct { + u16 id; + __be16 proto; + u8 num; + } vlan; + struct { + u16 sid; + u8 num; + } pppoe; +}; + +struct mtk_flow_entry { + struct rhash_head node; + unsigned long cookie; + u16 hash; +}; + +static const struct rhashtable_params mtk_flow_ht_params = { + .head_offset = offsetof(struct mtk_flow_entry, node), + .key_offset = offsetof(struct mtk_flow_entry, cookie), + .key_len = sizeof(unsigned long), + .automatic_shrinking = true, +}; + +static u32 +mtk_eth_timestamp(struct mtk_eth *eth) +{ + return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; +} + +static int +mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data, + bool egress) +{ + return mtk_foe_entry_set_ipv4_tuple(foe, egress, + data->v4.src_addr, data->src_port, + data->v4.dst_addr, data->dst_port); +} + +static void +mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) +{ + void *dest = eth + act->mangle.offset; + const void *src = &act->mangle.val; + + if (act->mangle.offset > 8) + return; + + 
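+ /* pedit hands over 4-byte words plus a mask of the bits to keep: a mask of 0xffff preserves the first half-word, so the copy below is shifted by two bytes; any non-zero mask limits the copy to a half-word, while a zero mask rewrites the whole word */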
if (act->mangle.mask == 0xffff) { + src += 2; + dest += 2; + } + + memcpy(dest, src, act->mangle.mask ? 2 : 4); +} + + +static int +mtk_flow_mangle_ports(const struct flow_action_entry *act, + struct mtk_flow_data *data) +{ + u32 val = ntohl(act->mangle.val); + + switch (act->mangle.offset) { + case 0: + if (act->mangle.mask == ~htonl(0xffff)) + data->dst_port = cpu_to_be16(val); + else + data->src_port = cpu_to_be16(val >> 16); + break; + case 2: + data->dst_port = cpu_to_be16(val); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +mtk_flow_mangle_ipv4(const struct flow_action_entry *act, + struct mtk_flow_data *data) +{ + __be32 *dest; + + switch (act->mangle.offset) { + case offsetof(struct iphdr, saddr): + dest = &data->v4.src_addr; + break; + case offsetof(struct iphdr, daddr): + dest = &data->v4.dst_addr; + break; + default: + return -EINVAL; + } + + memcpy(dest, &act->mangle.val, sizeof(u32)); + + return 0; +} + +static int +mtk_flow_get_dsa_port(struct net_device **dev) +{ +#if IS_ENABLED(CONFIG_NET_DSA) + struct dsa_port *dp; + + dp = dsa_port_from_netdev(*dev); + if (IS_ERR(dp)) + return -ENODEV; + + if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK) + return -ENODEV; + + *dev = dp->cpu_dp->master; + + return dp->index; +#else + return -ENODEV; +#endif +} + +static int +mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, + struct net_device *dev) +{ + int pse_port, dsa_port; + + dsa_port = mtk_flow_get_dsa_port(&dev); + if (dsa_port >= 0) + mtk_foe_entry_set_dsa(foe, dsa_port); + + if (dev == eth->netdev[0]) + pse_port = 1; + else if (dev == eth->netdev[1]) + pse_port = 2; + else + return -EOPNOTSUPP; + + mtk_foe_entry_set_pse_port(foe, pse_port); + + return 0; +} + +static int +mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_action_entry *act; + struct mtk_flow_data data = {}; + struct mtk_foe_entry foe; + struct net_device *odev = NULL; + struct mtk_flow_entry *entry; + int offload_type = 0; + u16 addr_type = 0; + u32 timestamp; + u8 l4proto = 0; + int err = 0; + int hash; + int i; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) { + struct flow_match_meta match; + + flow_rule_match_meta(rule, &match); + } else { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; + } else { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + l4proto = match.key->ip_proto; + } else { + return -EOPNOTSUPP; + } + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_MANGLE: + if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH) + mtk_flow_offload_mangle_eth(act, &data.eth); + break; + case FLOW_ACTION_REDIRECT: + odev = act->dev; + break; + case FLOW_ACTION_CSUM: + break; + case FLOW_ACTION_VLAN_PUSH: + if (data.vlan.num == 1 || + act->vlan.proto != htons(ETH_P_8021Q)) + return -EOPNOTSUPP; + + data.vlan.id = act->vlan.vid; + data.vlan.proto = act->vlan.proto; + data.vlan.num++; + break; + case FLOW_ACTION_PPPOE_PUSH: + if (data.pppoe.num == 1) + return -EOPNOTSUPP; + + data.pppoe.sid = act->pppoe.sid; + data.pppoe.num++; + break; + default: + return -EOPNOTSUPP; + } + } + + switch (addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + 
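+ /* only IPv4 5-tuple (HNAPT) flows can be bound on this PPE version; other address types are rejected below */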
offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; + break; + default: + return -EOPNOTSUPP; + } + + if (!is_valid_ether_addr(data.eth.h_source) || + !is_valid_ether_addr(data.eth.h_dest)) + return -EINVAL; + + err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0, + data.eth.h_source, + data.eth.h_dest); + if (err) + return err; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports ports; + + flow_rule_match_ports(rule, &ports); + data.src_port = ports.key->src; + data.dst_port = ports.key->dst; + } else { + return -EOPNOTSUPP; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs addrs; + + flow_rule_match_ipv4_addrs(rule, &addrs); + + data.v4.src_addr = addrs.key->src; + data.v4.dst_addr = addrs.key->dst; + + mtk_flow_set_ipv4_addr(&foe, &data, false); + } + + flow_action_for_each(i, act, &rule->action) { + if (act->id != FLOW_ACTION_MANGLE) + continue; + + switch (act->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + err = mtk_flow_mangle_ports(act, &data); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + err = mtk_flow_mangle_ipv4(act, &data); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: + /* handled earlier */ + break; + default: + return -EOPNOTSUPP; + } + + if (err) + return err; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + err = mtk_flow_set_ipv4_addr(&foe, &data, true); + if (err) + return err; + } + + if (data.vlan.num == 1) { + if (data.vlan.proto != htons(ETH_P_8021Q)) + return -EOPNOTSUPP; + + mtk_foe_entry_set_vlan(&foe, data.vlan.id); + } + if (data.pppoe.num == 1) + mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid); + + err = mtk_flow_set_output_device(eth, &foe, odev); + if (err) + return err; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->cookie = f->cookie; + timestamp = mtk_eth_timestamp(eth); + hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp); + if (hash < 0) { + err = hash; + goto free; + } + + entry->hash = hash; + err = rhashtable_insert_fast(&eth->flow_table, &entry->node, + mtk_flow_ht_params); + if (err < 0) + goto clear_flow; + + return 0; +clear_flow: + mtk_foe_entry_clear(&eth->ppe, hash); +free: + kfree(entry); + return err; +} + +static int +mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f) +{ + struct mtk_flow_entry *entry; + + entry = rhashtable_lookup(&eth->flow_table, &f->cookie, + mtk_flow_ht_params); + if (!entry) + return -ENOENT; + + mtk_foe_entry_clear(&eth->ppe, entry->hash); + rhashtable_remove_fast(&eth->flow_table, &entry->node, + mtk_flow_ht_params); + kfree(entry); + + return 0; +} + +static int +mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) +{ + struct mtk_flow_entry *entry; + int timestamp; + u32 idle; + + entry = rhashtable_lookup(&eth->flow_table, &f->cookie, + mtk_flow_ht_params); + if (!entry) + return -ENOENT; + + timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash); + if (timestamp < 0) + return -ETIMEDOUT; + + idle = mtk_eth_timestamp(eth) - timestamp; + f->stats.lastused = jiffies - idle * HZ; + + return 0; +} + +static int +mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + struct flow_cls_offload *cls = type_data; + struct net_device *dev = cb_priv; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + if (!tc_can_offload(dev)) + return -EOPNOTSUPP; + + if (type != TC_SETUP_CLSFLOWER) + return -EOPNOTSUPP; + + switch (cls->command) { + case FLOW_CLS_REPLACE: + return 
mtk_flow_offload_replace(eth, cls); + case FLOW_CLS_DESTROY: + return mtk_flow_offload_destroy(eth, cls); + case FLOW_CLS_STATS: + return mtk_flow_offload_stats(eth, cls); + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int +mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + static LIST_HEAD(block_cb_list); + struct flow_block_cb *block_cb; + flow_setup_cb_t *cb; + + if (!eth->ppe.foe_table) + return -EOPNOTSUPP; + + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + cb = mtk_eth_setup_tc_block_cb; + f->driver_block_list = &block_cb_list; + + switch (f->command) { + case FLOW_BLOCK_BIND: + block_cb = flow_block_cb_lookup(f->block, cb, dev); + if (block_cb) { + flow_block_cb_incref(block_cb); + return 0; + } + block_cb = flow_block_cb_alloc(cb, dev, dev, NULL); + if (IS_ERR(block_cb)) + return PTR_ERR(block_cb); + + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, &block_cb_list); + return 0; + case FLOW_BLOCK_UNBIND: + block_cb = flow_block_cb_lookup(f->block, cb, dev); + if (!block_cb) + return -ENOENT; + + if (flow_block_cb_decref(block_cb)) { + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + } + return 0; + default: + return -EOPNOTSUPP; + } +} + +int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + if (type == TC_SETUP_FT) + return mtk_eth_setup_tc_block(dev, type_data); + + return -EOPNOTSUPP; +} + +int mtk_eth_offload_init(struct mtk_eth *eth) +{ + if (!eth->ppe.foe_table) + return 0; + + return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params); +} diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h new file mode 100644 index 000000000000..0c45ea0900f1 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#ifndef __MTK_PPE_REGS_H +#define __MTK_PPE_REGS_H + +#define MTK_PPE_GLO_CFG 0x200 +#define MTK_PPE_GLO_CFG_EN BIT(0) +#define MTK_PPE_GLO_CFG_TSID_EN BIT(1) +#define MTK_PPE_GLO_CFG_IP4_L4_CS_DROP BIT(2) +#define MTK_PPE_GLO_CFG_IP4_CS_DROP BIT(3) +#define MTK_PPE_GLO_CFG_TTL0_DROP BIT(4) +#define MTK_PPE_GLO_CFG_PPE_BSWAP BIT(5) +#define MTK_PPE_GLO_CFG_PSE_HASH_OFS BIT(6) +#define MTK_PPE_GLO_CFG_MCAST_TB_EN BIT(7) +#define MTK_PPE_GLO_CFG_FLOW_DROP_KA BIT(8) +#define MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE BIT(9) +#define MTK_PPE_GLO_CFG_UDP_LITE_EN BIT(10) +#define MTK_PPE_GLO_CFG_UDP_LEN_DROP BIT(11) +#define MTK_PPE_GLO_CFG_MCAST_ENTRIES GENMASK(13, 12) +#define MTK_PPE_GLO_CFG_BUSY BIT(31) + +#define MTK_PPE_FLOW_CFG 0x204 +#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6) +#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7) +#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8) +#define MTK_PPE_FLOW_CFG_IP6_5T_ROUTE BIT(9) +#define MTK_PPE_FLOW_CFG_IP6_6RD BIT(10) +#define MTK_PPE_FLOW_CFG_IP4_NAT BIT(12) +#define MTK_PPE_FLOW_CFG_IP4_NAPT BIT(13) +#define MTK_PPE_FLOW_CFG_IP4_DSLITE BIT(14) +#define MTK_PPE_FLOW_CFG_L2_BRIDGE BIT(15) +#define MTK_PPE_FLOW_CFG_IP_PROTO_BLACKLIST BIT(16) +#define MTK_PPE_FLOW_CFG_IP4_NAT_FRAG BIT(17) +#define MTK_PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL BIT(18) +#define MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY BIT(19) +#define MTK_PPE_FLOW_CFG_IP6_HASH_GRE_KEY BIT(20) + +#define MTK_PPE_IP_PROTO_CHK 0x208 +#define MTK_PPE_IP_PROTO_CHK_IPV4 GENMASK(15, 0) 
+#define MTK_PPE_IP_PROTO_CHK_IPV6 GENMASK(31, 16) + +#define MTK_PPE_TB_CFG 0x21c +#define MTK_PPE_TB_CFG_ENTRY_NUM GENMASK(2, 0) +#define MTK_PPE_TB_CFG_ENTRY_80B BIT(3) +#define MTK_PPE_TB_CFG_SEARCH_MISS GENMASK(5, 4) +#define MTK_PPE_TB_CFG_AGE_PREBIND BIT(6) +#define MTK_PPE_TB_CFG_AGE_NON_L4 BIT(7) +#define MTK_PPE_TB_CFG_AGE_UNBIND BIT(8) +#define MTK_PPE_TB_CFG_AGE_TCP BIT(9) +#define MTK_PPE_TB_CFG_AGE_UDP BIT(10) +#define MTK_PPE_TB_CFG_AGE_TCP_FIN BIT(11) +#define MTK_PPE_TB_CFG_KEEPALIVE GENMASK(13, 12) +#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14) +#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16) +#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18) + +enum { + MTK_PPE_SCAN_MODE_DISABLED, + MTK_PPE_SCAN_MODE_CHECK_AGE, + MTK_PPE_SCAN_MODE_KEEPALIVE_AGE, +}; + +enum { + MTK_PPE_KEEPALIVE_DISABLE, + MTK_PPE_KEEPALIVE_UNICAST_CPU, + MTK_PPE_KEEPALIVE_DUP_CPU = 3, +}; + +enum { + MTK_PPE_SEARCH_MISS_ACTION_DROP, + MTK_PPE_SEARCH_MISS_ACTION_FORWARD = 2, + MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD = 3, +}; + +#define MTK_PPE_TB_BASE 0x220 + +#define MTK_PPE_TB_USED 0x224 +#define MTK_PPE_TB_USED_NUM GENMASK(13, 0) + +#define MTK_PPE_BIND_RATE 0x228 +#define MTK_PPE_BIND_RATE_BIND GENMASK(15, 0) +#define MTK_PPE_BIND_RATE_PREBIND GENMASK(31, 16) + +#define MTK_PPE_BIND_LIMIT0 0x22c +#define MTK_PPE_BIND_LIMIT0_QUARTER GENMASK(13, 0) +#define MTK_PPE_BIND_LIMIT0_HALF GENMASK(29, 16) + +#define MTK_PPE_BIND_LIMIT1 0x230 +#define MTK_PPE_BIND_LIMIT1_FULL GENMASK(13, 0) +#define MTK_PPE_BIND_LIMIT1_NON_L4 GENMASK(23, 16) + +#define MTK_PPE_KEEPALIVE 0x234 +#define MTK_PPE_KEEPALIVE_TIME GENMASK(15, 0) +#define MTK_PPE_KEEPALIVE_TIME_TCP GENMASK(23, 16) +#define MTK_PPE_KEEPALIVE_TIME_UDP GENMASK(31, 24) + +#define MTK_PPE_UNBIND_AGE 0x238 +#define MTK_PPE_UNBIND_AGE_MIN_PACKETS GENMASK(31, 16) +#define MTK_PPE_UNBIND_AGE_DELTA GENMASK(7, 0) + +#define MTK_PPE_BIND_AGE0 0x23c +#define MTK_PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16) +#define MTK_PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0) + +#define MTK_PPE_BIND_AGE1 0x240 +#define MTK_PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16) +#define MTK_PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0) + +#define MTK_PPE_HASH_SEED 0x244 + +#define MTK_PPE_DEFAULT_CPU_PORT 0x248 +#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4)) + +#define MTK_PPE_MTU_DROP 0x308 + +#define MTK_PPE_VLAN_MTU0 0x30c +#define MTK_PPE_VLAN_MTU0_NONE GENMASK(13, 0) +#define MTK_PPE_VLAN_MTU0_1TAG GENMASK(29, 16) + +#define MTK_PPE_VLAN_MTU1 0x310 +#define MTK_PPE_VLAN_MTU1_2TAG GENMASK(13, 0) +#define MTK_PPE_VLAN_MTU1_3TAG GENMASK(29, 16) + +#define MTK_PPE_VPM_TPID 0x318 + +#define MTK_PPE_CACHE_CTL 0x320 +#define MTK_PPE_CACHE_CTL_EN BIT(0) +#define MTK_PPE_CACHE_CTL_LOCK_CLR BIT(4) +#define MTK_PPE_CACHE_CTL_REQ BIT(8) +#define MTK_PPE_CACHE_CTL_CLEAR BIT(9) +#define MTK_PPE_CACHE_CTL_CMD GENMASK(13, 12) + +#define MTK_PPE_MIB_CFG 0x334 +#define MTK_PPE_MIB_CFG_EN BIT(0) +#define MTK_PPE_MIB_CFG_RD_CLR BIT(1) + +#define MTK_PPE_MIB_TB_BASE 0x338 + +#define MTK_PPE_MIB_CACHE_CTL 0x350 +#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0) +#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2) + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index e8cecd50558d..9d79c5ec31e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -263,15 +263,15 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent) return 0; } -static void dump_buf(void *buf, int size, int 
data_only, int offset) +static void dump_buf(void *buf, int size, int data_only, int offset, int idx) { __be32 *p = buf; int i; for (i = 0; i < size; i += 16) { - pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), - be32_to_cpu(p[1]), be32_to_cpu(p[2]), - be32_to_cpu(p[3])); + pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset, + be32_to_cpu(p[0]), be32_to_cpu(p[1]), + be32_to_cpu(p[2]), be32_to_cpu(p[3])); p += 4; offset += 16; } @@ -802,39 +802,41 @@ static void dump_command(struct mlx5_core_dev *dev, int dump_len; int i; + mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx); data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); if (data_only) mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, - "dump command data %s(0x%x) %s\n", - mlx5_command_str(op), op, + "cmd[%d]: dump command data %s(0x%x) %s\n", + ent->idx, mlx5_command_str(op), op, input ? "INPUT" : "OUTPUT"); else - mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", - mlx5_command_str(op), op, + mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n", + ent->idx, mlx5_command_str(op), op, input ? "INPUT" : "OUTPUT"); if (data_only) { if (input) { - dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); + dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx); offset += sizeof(ent->lay->in); } else { - dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); + dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx); offset += sizeof(ent->lay->out); } } else { - dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); + dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx); offset += sizeof(*ent->lay); } for (i = 0; i < n && next; i++) { if (data_only) { dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); - dump_buf(next->buf, dump_len, 1, offset); + dump_buf(next->buf, dump_len, 1, offset, ent->idx); offset += MLX5_CMD_DATA_BLOCK_SIZE; } else { - mlx5_core_dbg(dev, "command block:\n"); - dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset); + mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx); + dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset, + ent->idx); offset += sizeof(struct mlx5_cmd_prot_block); } next = next->next; @@ -842,6 +844,8 @@ static void dump_command(struct mlx5_core_dev *dev, if (data_only) pr_debug("\n"); + + mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx); } static u16 msg_to_opcode(struct mlx5_cmd_msg *in) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index b051417ede67..4def64d0e669 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -58,9 +58,6 @@ static bool is_eth_supported(struct mlx5_core_dev *dev) if (!IS_ENABLED(CONFIG_MLX5_CORE_EN)) return false; - if (is_eth_rep_supported(dev)) - return false; - if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index d7d8a68ef23d..38c7c44fe883 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -137,18 +137,18 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, * unregistering devlink instance while holding devlink_mutext. * Hence, do not support reload. 
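dump_buf() above now tags every row of the command dump with the entry index, so interleaved dumps from concurrent commands can be told apart. A standalone sketch of the same row format - ntohl() stands in for be32_to_cpu(), and the buffer contents and index are made up:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl() as a stand-in for be32_to_cpu() */

static void dump_buf(const void *buf, int size, int offset, int idx)
{
	const uint32_t *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		printf("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
		       ntohl(p[0]), ntohl(p[1]), ntohl(p[2]), ntohl(p[3]));
		p += 4;
		offset += 16;
	}
}

int main(void)
{
	uint32_t lay[8] = { htonl(0x01), htonl(0x02), htonl(0x03), htonl(0x04),
			    htonl(0x05), htonl(0x06), htonl(0x07), htonl(0x08) };

	dump_buf(lay, sizeof(lay), 0, 5);	/* idx 5 is a made-up entry index */
	return 0;
}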
*/ - NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated\n"); + NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated"); return -EOPNOTSUPP; } if (mlx5_lag_is_active(dev)) { - NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n"); + NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode"); return -EOPNOTSUPP; } switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); return 0; case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET) @@ -170,13 +170,13 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a *actions_performed = BIT(action); switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: - return mlx5_load_one(dev, false); + return mlx5_load_one(dev); case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET) break; /* On fw_activate action, also driver is reloaded and reinit performed */ *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); - return mlx5_load_one(dev, false); + return mlx5_load_one(dev); default: /* Unsupported action should not get to this function */ WARN_ON(1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 2eb022ad7fd0..01a1d02dcf15 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -1100,7 +1100,7 @@ int mlx5_fw_tracer_reload(struct mlx5_fw_tracer *tracer) int err; if (IS_ERR_OR_NULL(tracer)) - return -EINVAL; + return 0; dev = tracer->dev; mlx5_fw_tracer_cleanup(tracer); @@ -1126,8 +1126,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void switch (eqe->sub_type) { case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE: - if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) - queue_work(tracer->work_queue, &tracer->ownership_change_work); + queue_work(tracer->work_queue, &tracer->ownership_change_work); break; case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE: if (likely(tracer->str_db.loaded)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 7435fe6829b6..9ea3f3befe74 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -92,14 +92,15 @@ struct page_pool; MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0) #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) -#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) +#define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8)) +#define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2) +#define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts))) /* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between * WQEs, This page will absorb write overflow by the hardware, when * receiving packets larger than MTU. These oversize packets are * dropped by the driver at a later stage. 
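The reworked MTT helpers above split the old octword conversion into an explicit align-to-8 step (MLX5_ALIGN_MTTS) followed by a divide-by-2 (MLX5_ALIGNED_MTTS_OCTW). A userspace sketch of the arithmetic, with ALIGN() open-coded and 8 pages per WQE assumed:

#include <stdio.h>

#define ALIGN(x, a)			(((x) + (a) - 1) & ~((a) - 1))
#define MLX5_ALIGN_MTTS(mtts)		ALIGN(mtts, 8)
#define MLX5_ALIGNED_MTTS_OCTW(mtts)	((mtts) / 2)
#define MLX5_MTT_OCTW(mtts)		MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts))

int main(void)
{
	int pages_per_wqe = 8;		/* assumed MLX5_MPWRQ_PAGES_PER_WQE */
	int mtts = pages_per_wqe + 1;	/* + 1 overflow page, as in MLX5E_REQUIRED_WQE_MTTS */

	printf("MTTs: %d -> aligned %d -> octwords %d\n",
	       mtts, MLX5_ALIGN_MTTS(mtts), MLX5_MTT_OCTW(mtts));
	return 0;
}

With 8 pages per WQE plus the one overflow page, MLX5E_REQUIRED_WQE_MTTS rounds 9 up to 16 MTTs, i.e. 8 octwords.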
*/ -#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8)) -#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS)) +#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1)) #define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS) #define MLX5E_MAX_RQ_NUM_MTTS \ ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ @@ -880,7 +881,6 @@ struct mlx5e_priv { #endif struct devlink_health_reporter *tx_reporter; struct devlink_health_reporter *rx_reporter; - struct devlink_port dl_port; struct mlx5e_xsk xsk; #if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE) struct mlx5e_hv_vhca_stats_agent stats_agent; @@ -1174,6 +1174,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv); void mlx5e_destroy_netdev(struct mlx5e_priv *priv); int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, const struct mlx5e_profile *new_profile, void *new_ppriv); +void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv); void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv); void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu); void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c index a69c62d72d16..765f3064689d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c @@ -2,37 +2,65 @@ /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ #include "en/devlink.h" +#include "eswitch.h" + +static void +mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid) +{ + u64 parent_id; + + parent_id = mlx5_query_nic_system_image_guid(dev); + ppid->id_len = sizeof(parent_id); + memcpy(ppid->id, &parent_id, sizeof(parent_id)); +} int mlx5e_devlink_port_register(struct mlx5e_priv *priv) { struct devlink *devlink = priv_to_devlink(priv->mdev); struct devlink_port_attrs attrs = {}; + struct netdev_phys_item_id ppid = {}; + struct devlink_port *dl_port; + unsigned int dl_port_index; if (mlx5_core_is_pf(priv->mdev)) { attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = PCI_FUNC(priv->mdev->pdev->devfn); + if (MLX5_ESWITCH_MANAGER(priv->mdev)) { + mlx5e_devlink_get_port_parent_id(priv->mdev, &ppid); + memcpy(attrs.switch_id.id, ppid.id, ppid.id_len); + attrs.switch_id.id_len = ppid.id_len; + } + dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev, + MLX5_VPORT_UPLINK); } else { attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL; + dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev, 0); } - devlink_port_attrs_set(&priv->dl_port, &attrs); + dl_port = mlx5e_devlink_get_dl_port(priv); + memset(dl_port, 0, sizeof(*dl_port)); + devlink_port_attrs_set(dl_port, &attrs); - return devlink_port_register(devlink, &priv->dl_port, 1); + return devlink_port_register(devlink, dl_port, dl_port_index); } void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv) { - devlink_port_type_eth_set(&priv->dl_port, priv->netdev); + struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); + + devlink_port_type_eth_set(dl_port, priv->netdev); } void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv) { - devlink_port_unregister(&priv->dl_port); + struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); + + devlink_port_unregister(dl_port); } struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev) { struct mlx5e_priv *priv = 
netdev_priv(dev); - return &priv->dl_port; + return mlx5e_devlink_get_dl_port(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h index 83123a801adc..10b50feb9883 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h @@ -12,4 +12,10 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv); void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv); struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev); +static inline struct devlink_port * +mlx5e_devlink_get_dl_port(struct mlx5e_priv *priv) +{ + return &priv->mdev->mlx5e_res.dl_port; +} + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index d57b6f06382f..bb5d108f75d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -174,7 +174,7 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix, sq->mdev = mdev; sq->ch_ix = c->ix; sq->txq_ix = txq_ix; - sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); sq->stats = &c->priv->port_ptp_stats.sq[tc]; @@ -475,7 +475,7 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, c->ix = 0; c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); c->num_tc = params->num_tc; c->stats = &priv->port_ptp_stats.ch; c->lag_port = lag_port; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index 065126370acd..11a44d30adc7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -169,6 +169,9 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD); struct mlx5e_priv *priv = cb_priv; + if (!priv->netdev || !netif_device_present(priv->netdev)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSFLOWER: return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags); @@ -321,6 +324,9 @@ mlx5e_rep_indr_offload(struct net_device *netdev, struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev); int err = 0; + if (!netif_device_present(indr_priv->rpriv->netdev)) + return -EOPNOTSUPP; + switch (flower->command) { case FLOW_CLS_REPLACE: err = mlx5e_configure_flower(netdev, priv, flower, flags); @@ -621,11 +627,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, int err; reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK); - if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG) - reg_c0 = 0; - reg_c1 = be32_to_cpu(cqe->ft_metadata); - - if (!reg_c0) + if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG) return true; /* If reg_c0 is not equal to the default flow tag then skb->mark @@ -633,6 +635,8 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, */ skb->mark = 0; + reg_c1 = be32_to_cpu(cqe->ft_metadata); + priv = netdev_priv(skb->dev); esw = priv->mdev->priv.eswitch; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index d80bbd17e5f8..34b3b316b688 100644 --- 
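Both rep offload entry points above now refuse work when the netdev has been detached (netif_device_present() is false), which is the state while the uplink profile is being swapped. Reduced to a standalone guard sketch, where the struct and flag stand in for net_device and its presence bit:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct netdev { bool present; };	/* stand-in for net_device */

static int setup_offload(const struct netdev *dev)
{
	if (!dev || !dev->present)
		return -EOPNOTSUPP;	/* detached: refuse new offload requests */
	return 0;
}

int main(void)
{
	struct netdev dev = { .present = false };

	printf("detached: %d\n", setup_offload(&dev));
	dev.present = true;
	printf("attached: %d\n", setup_offload(&dev));
	return 0;
}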
a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -4,6 +4,7 @@ #include "health.h" #include "params.h" #include "txrx.h" +#include "devlink.h" static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state) { @@ -615,9 +616,10 @@ static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = { void mlx5e_reporter_rx_create(struct mlx5e_priv *priv) { + struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); struct devlink_health_reporter *reporter; - reporter = devlink_port_health_reporter_create(&priv->dl_port, &mlx5_rx_reporter_ops, + reporter = devlink_port_health_reporter_create(dl_port, &mlx5_rx_reporter_ops, MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv); if (IS_ERR(reporter)) { netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n", @@ -633,4 +635,5 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv) return; devlink_port_health_reporter_destroy(priv->rx_reporter); + priv->rx_reporter = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index d7275c84313e..63ee3b9416de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -3,6 +3,7 @@ #include "health.h" #include "en/ptp.h" +#include "en/devlink.h" static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) { @@ -572,9 +573,10 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = { void mlx5e_reporter_tx_create(struct mlx5e_priv *priv) { + struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); struct devlink_health_reporter *reporter; - reporter = devlink_port_health_reporter_create(&priv->dl_port, &mlx5_tx_reporter_ops, + reporter = devlink_port_health_reporter_create(dl_port, &mlx5_tx_reporter_ops, MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv); if (IS_ERR(reporter)) { netdev_warn(priv->netdev, @@ -591,4 +593,5 @@ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv) return; devlink_port_health_reporter_destroy(priv->tx_reporter); + priv->tx_reporter = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index f3f6eb081948..df13e5094034 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -695,7 +695,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, zone_rule->nat = nat; - spec = kzalloc(sizeof(*spec), GFP_KERNEL); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; @@ -737,7 +737,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, zone_rule->attr = attr; - kfree(spec); + kvfree(spec); ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone); return 0; @@ -749,7 +749,7 @@ err_rule: err_mod_hdr: kfree(attr); err_attr: - kfree(spec); + kvfree(spec); return err; } @@ -1181,7 +1181,8 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec) mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG, &ctstate, &ctstate_mask); - if (ctstate_mask) + + if ((ctstate & ctstate_mask) == MLX5_CT_STATE_TRK_BIT) return -EOPNOTSUPP; ctstate_mask |= MLX5_CT_STATE_TRK_BIT; @@ -1539,6 +1540,14 @@ mlx5_tc_ct_free_pre_ct_tables(struct mlx5_ct_ft *ft) mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct); } +/* To avoid false lock dependency warning set the ct_entries_ht lock + * class different than the lock class of the ht being used when deleting + * last flow 
from a group and then deleting a group, we get into del_sw_flow_group() + * which call rhashtable_destroy on fg->ftes_hash which will take ht->mutex but + * it's different than the ht->mutex here. + */ +static struct lock_class_key ct_entries_ht_lock_key; + static struct mlx5_ct_ft * mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone, struct nf_flowtable *nf_ft) @@ -1573,6 +1582,8 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone, if (err) goto err_init; + lockdep_set_class(&ft->ct_entries_ht.mutex, &ct_entries_ht_lock_key); + err = rhashtable_insert_fast(&ct_priv->zone_ht, &ft->node, zone_params); if (err) @@ -1674,10 +1685,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft; u32 fte_id = 1; - post_ct_spec = kzalloc(sizeof(*post_ct_spec), GFP_KERNEL); + post_ct_spec = kvzalloc(sizeof(*post_ct_spec), GFP_KERNEL); ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL); if (!post_ct_spec || !ct_flow) { - kfree(post_ct_spec); + kvfree(post_ct_spec); kfree(ct_flow); return ERR_PTR(-ENOMEM); } @@ -1787,6 +1798,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, ct_flow->post_ct_attr->prio = 0; ct_flow->post_ct_attr->ft = ct_priv->post_ct; + /* Splits were handled before CT */ + if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) + ct_flow->post_ct_attr->esw_attr->split_count = 0; + ct_flow->post_ct_attr->inner_match_level = MLX5_MATCH_NONE; ct_flow->post_ct_attr->outer_match_level = MLX5_MATCH_NONE; ct_flow->post_ct_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP); @@ -1812,7 +1827,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, attr->ct_attr.ct_flow = ct_flow; dealloc_mod_hdr_actions(&pre_mod_acts); - kfree(post_ct_spec); + kvfree(post_ct_spec); return rule; @@ -1833,7 +1848,7 @@ err_alloc_pre: err_idr: mlx5_tc_ct_del_ft_cb(ct_priv, ft); err_ft: - kfree(post_ct_spec); + kvfree(post_ct_spec); kfree(ct_flow); netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err); return ERR_PTR(err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index f8075a604605..172e0474f2e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -685,14 +685,14 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, u16 vport_num; int err = 0; - if (flow_attr->ip_version == 4) { + if (flow_attr->tun_ip_version == 4) { /* Addresses are swapped for decap */ attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4; attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4; err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr); } #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) - else if (flow_attr->ip_version == 6) { + else if (flow_attr->tun_ip_version == 6) { /* Addresses are swapped for decap */ attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6; attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6; @@ -718,10 +718,10 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, esw_attr->rx_tun_attr->decap_vport = vport_num; out: - if (flow_attr->ip_version == 4) + if (flow_attr->tun_ip_version == 4) mlx5e_route_lookup_ipv4_put(&attr); #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) - else if (flow_attr->ip_version == 6) + else if (flow_attr->tun_ip_version == 6) mlx5e_route_lookup_ipv6_put(&attr); #endif return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index 67de2bf36861..89d5ca91566e 100644 
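The tc_tun.h hunk that follows repairs the CONFIG_IPV6-disabled stubs: both helpers become static inline functions returning -EOPNOTSUPP instead of one stub plus a dangling prototype. The pattern, as a self-contained sketch in which FEATURE_IPV6 and the function names are illustrative:

#include <errno.h>
#include <stdio.h>

/* #define FEATURE_IPV6 1 */

#ifdef FEATURE_IPV6
int create_header_ipv6(void) { return 0; }
int update_header_ipv6(void) { return 0; }
#else
/* compiled-out configuration: keep callers building, fail cleanly at runtime */
static inline int create_header_ipv6(void) { return -EOPNOTSUPP; }
static inline int update_header_ipv6(void) { return -EOPNOTSUPP; }
#endif

int main(void)
{
	printf("create: %d, update: %d\n",
	       create_header_ipv6(), update_header_ipv6());
	return 0;
}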
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h @@ -76,10 +76,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, static inline int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, struct net_device *mirred_dev, - struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; } -int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, - struct net_device *mirred_dev, - struct mlx5e_encap_entry *e) + struct mlx5e_encap_entry *e) +{ return -EOPNOTSUPP; } +static inline int +mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; } #endif int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 6a116335bb21..01d435e15ad3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -2,6 +2,7 @@ /* Copyright (c) 2021 Mellanox Technologies. */ #include <net/fib_notifier.h> +#include <net/nexthop.h> #include "tc_tun_encap.h" #include "en_tc.h" #include "tc_tun.h" @@ -89,6 +90,7 @@ int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, * required to establish routing. */ flow_flag_set(flow, TUN_RX); + flow->attr->tun_ip_version = ip_version; return 0; } @@ -1091,7 +1093,7 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv, if (err || !esw_attr->rx_tun_attr->decap_vport) goto out; - key.ip_version = attr->ip_version; + key.ip_version = attr->tun_ip_version; if (key.ip_version == 4) key.endpoint_ip.v4 = esw_attr->rx_tun_attr->dst_ip.v4; else diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c index e472ed0eacfb..7ed3f9f79f11 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c @@ -227,6 +227,10 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv, option_key = (struct geneve_opt *)&enc_opts.key->data[0]; option_mask = (struct geneve_opt *)&enc_opts.mask->data[0]; + if (option_mask->opt_class == 0 && option_mask->type == 0 && + !memchr_inv(option_mask->opt_data, 0, option_mask->length * 4)) + return 0; + if (option_key->length > max_tlv_option_data_len) { NL_SET_ERR_MSG_MOD(extack, "Matching on GENEVE options: unsupported option len"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index 37fc1d77ded7..41db93883fea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -84,7 +84,7 @@ static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *r if (err) goto err_free_frags; - rq->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); + rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key); mlx5e_rq_set_trap_handlers(rq, params); @@ -213,7 +213,7 @@ static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct ml return -ENOMEM; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn); + MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); MLX5_SET(tirc, tirc, inline_rqn, rqn); @@ 
-266,7 +266,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) t->tstamp = &priv->tstamp; t->pdev = mlx5_core_dma_dev(priv->mdev); t->netdev = priv->netdev; - t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); + t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); t->stats = &priv->trap_stats.ch; netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 381a9c8c9da9..34119ce92031 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -60,7 +60,7 @@ static int rx_err_add_rule(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec; int err = 0; - spec = kzalloc(sizeof(*spec), GFP_KERNEL); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; @@ -101,7 +101,7 @@ out: if (err) mlx5_modify_header_dealloc(mdev, modify_hdr); out_spec: - kfree(spec); + kvfree(spec); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index d06532d0baa4..f7c880edae37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -84,7 +84,7 @@ static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn); + MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); MLX5_SET(tirc, tirc, indirect_table, rqtn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index a6cf008057b5..8c166ee56d8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -38,15 +38,16 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in) { + struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; int err; err = mlx5_core_create_tir(mdev, in, &tir->tirn); if (err) return err; - mutex_lock(&mdev->mlx5e_res.td.list_lock); - list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); - mutex_unlock(&mdev->mlx5e_res.td.list_lock); + mutex_lock(&res->td.list_lock); + list_add(&tir->list, &res->td.tirs_list); + mutex_unlock(&res->td.list_lock); return 0; } @@ -54,10 +55,12 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in) void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir) { - mutex_lock(&mdev->mlx5e_res.td.list_lock); + struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; + + mutex_lock(&res->td.list_lock); mlx5_core_destroy_tir(mdev, tir->tirn); list_del(&tir->list); - mutex_unlock(&mdev->mlx5e_res.td.list_lock); + mutex_unlock(&res->td.list_lock); } void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc) @@ -99,7 +102,7 @@ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) { - struct mlx5e_resources *res = &mdev->mlx5e_res; + struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; int err; err = mlx5_core_alloc_pd(mdev, &res->pdn); @@ -126,8 +129,8 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) goto 
err_destroy_mkey; } - INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); - mutex_init(&mdev->mlx5e_res.td.list_lock); + INIT_LIST_HEAD(&res->td.tirs_list); + mutex_init(&res->td.list_lock); return 0; @@ -142,7 +145,7 @@ err_dealloc_pd: void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) { - struct mlx5e_resources *res = &mdev->mlx5e_res; + struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; mlx5_free_bfreg(mdev, &res->bfreg); mlx5_core_destroy_mkey(mdev, &res->mkey); @@ -180,8 +183,8 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); - mutex_lock(&mdev->mlx5e_res.td.list_lock); - list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { + mutex_lock(&mdev->mlx5e_res.hw_objs.td.list_lock); + list_for_each_entry(tir, &mdev->mlx5e_res.hw_objs.td.tirs_list, list) { tirn = tir->tirn; err = mlx5_core_modify_tir(mdev, tirn, in); if (err) @@ -192,7 +195,7 @@ out: kvfree(in); if (err) netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); - mutex_unlock(&mdev->mlx5e_res.td.list_lock); + mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index abdf721bb264..f5f2a8fd0046 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1887,6 +1887,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + int err; if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -EOPNOTSUPP; @@ -1896,7 +1897,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, return -EINVAL; } - mlx5e_modify_rx_cqe_compression_locked(priv, enable); + err = mlx5e_modify_rx_cqe_compression_locked(priv, enable); + if (err) + return err; + priv->channels.params.rx_cqe_compress_def = enable; return 0; @@ -2014,8 +2018,13 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable) */ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + struct mlx5e_params old_params; + + old_params = priv->channels.params; priv->channels.params = new_channels.params; err = mlx5e_num_channels_changed(priv); + if (err) + priv->channels.params = old_params; goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 16ce7756ac43..cf1d3c9c88af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -36,6 +36,7 @@ #include <linux/tcp.h> #include <linux/mlx5/fs.h> #include "en.h" +#include "en_rep.h" #include "lib/mpfs.h" static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, @@ -435,6 +436,9 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct mlx5e_priv *priv = netdev_priv(dev); + if (mlx5e_is_uplink_rep(priv)) + return 0; /* no vlan table for uplink rep */ + if (be16_to_cpu(proto) == ETH_P_8021Q) return mlx5e_vlan_rx_add_cvid(priv, vid); else if (be16_to_cpu(proto) == ETH_P_8021AD) @@ -447,6 +451,9 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct mlx5e_priv *priv = netdev_priv(dev); + if (mlx5e_is_uplink_rep(priv)) + return 0; /* no vlan table for uplink rep */ + if (be16_to_cpu(proto) == ETH_P_8021Q) { clear_bit(vid, priv->fs.vlan.active_cvlans); mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); diff --git 
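set_pflag_tx_port_ts() above now snapshots priv->channels.params before applying the new parameters and restores the snapshot when mlx5e_num_channels_changed() fails, so a rejected change cannot leave half-applied state. The save/apply/restore idiom as a standalone sketch, with the params struct and the failing check as stand-ins:

#include <stdio.h>

struct params { int num_channels; int flags; };

/* stand-in for mlx5e_num_channels_changed(); pretend more than 8 channels fails */
static int apply(const struct params *p)
{
	return p->num_channels > 8 ? -1 : 0;
}

static int set_params(struct params *cur, const struct params *next)
{
	struct params old = *cur;	/* snapshot for rollback */
	int err;

	*cur = *next;
	err = apply(cur);
	if (err)
		*cur = old;		/* restore on failure */
	return err;
}

int main(void)
{
	struct params cur = { 4, 0 }, bad = { 16, 1 };

	printf("err=%d, num_channels=%d\n", set_params(&cur, &bad), cur.num_channels);
	return 0;
}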
a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ec2fcb2a2977..d40fc2672530 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -302,7 +302,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); mlx5e_mkey_set_relaxed_ordering(mdev, mkc); MLX5_SET(mkc, mkc, qpn, 0xffffff); - MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn); + MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn); MLX5_SET64(mkc, mkc, len, npages << page_shift); MLX5_SET(mkc, mkc, translations_octword_size, MLX5_MTT_OCTW(npages)); @@ -334,9 +334,9 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq rq->wqe_overflow.addr); } -static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix) +static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix) { - return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT; + return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT; } static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) @@ -577,7 +577,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i); u32 byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; - u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i); + u64 dma_offset = mlx5e_get_mpwqe_offset(i); wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom); wqe->data[0].byte_count = cpu_to_be32(byte_count); @@ -1019,7 +1019,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->pdev = c->pdev; sq->mkey_be = c->mkey_be; sq->channel = c; - sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); sq->xsk_pool = xsk_pool; @@ -1090,7 +1090,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, int err; sq->channel = c; - sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl); @@ -1174,7 +1174,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->priv = c->priv; sq->ch_ix = c->ix; sq->txq_ix = txq_ix; - sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); @@ -1257,7 +1257,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, MLX5_SET(sqc, sqc, flush_in_error_en, 1); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); - MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index); + MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index); MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma); @@ -2032,7 +2032,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->cpu = cpu; c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); c->num_tc = params->num_tc; c->xdp = !!params->xdp_prog; c->stats = &priv->channel_stats[ix].ch; @@ -2217,7 +2217,7 @@ void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq,
log_wq_stride, mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs)); - MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn); + MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn); MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en); @@ -2248,7 +2248,7 @@ void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, void *wq = MLX5_ADDR_OF(sqc, sqc, wq); MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); - MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); + MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.hw_objs.pdn); param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev)); } @@ -2368,8 +2368,9 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params, { switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return order_base_2(MLX5E_UMR_WQEBBS) + - mlx5e_get_rq_log_wq_sz(rqp->rqc); + return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, + order_base_2(MLX5E_UMR_WQEBBS) + + mlx5e_get_rq_log_wq_sz(rqp->rqc)); default: /* MLX5_WQ_TYPE_CYCLIC */ return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; } @@ -2502,8 +2503,10 @@ void mlx5e_close_channels(struct mlx5e_channels *chs) { int i; - if (chs->port_ptp) + if (chs->port_ptp) { mlx5e_port_ptp_close(chs->port_ptp); + chs->port_ptp = NULL; + } for (i = 0; i < chs->num; i++) mlx5e_close_channel(chs->c[i]); @@ -3421,10 +3424,10 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn) { void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); - MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn); + MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); if (MLX5_GET(tisc, tisc, tls_en)) - MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn); + MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn); if (mlx5_lag_is_lacp_owner(mdev)) MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); @@ -3494,7 +3497,7 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc) { - MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); + MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.hw_objs.td.tdn); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, indirect_table, rqtn); MLX5_SET(tirc, tirc, tunneled_offload_en, @@ -3769,8 +3772,16 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { struct mlx5e_priv *priv = netdev_priv(dev); + bool tc_unbind = false; int err; + if (type == TC_SETUP_BLOCK && + ((struct flow_block_offload *)type_data)->command == FLOW_BLOCK_UNBIND) + tc_unbind = true; + + if (!netif_device_present(dev) && !tc_unbind) + return -ENODEV; + switch (type) { case TC_SETUP_BLOCK: { struct flow_block_offload *f = type_data; @@ -3815,6 +3826,15 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) s->tx_dropped += sq_stats->dropped; } } + if (priv->port_ptp_opened) { + for (i = 0; i < priv->max_opened_tc; i++) { + struct mlx5e_sq_stats *sq_stats = &priv->port_ptp_stats.sq[i]; + + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + s->tx_dropped += sq_stats->dropped; + } + } } void @@ -3823,6 +3843,9 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_pport_stats *pstats = &priv->stats.pport; + if (!netif_device_present(dev)) + return; + /* In switchdev mode, monitor counters 
doesn't monitor * rx/tx stats of 802_3. The update stats mechanism * should keep the 802_3 layout counters updated @@ -3834,10 +3857,17 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) } if (mlx5e_is_uplink_rep(priv)) { + struct mlx5e_vport_stats *vstats = &priv->stats.vport; + stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); + + /* vport multicast also counts packets that are dropped due to steering + * or rx out of buffer + */ + stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); } else { mlx5e_fold_sw_stats64(priv, stats); } @@ -3857,11 +3887,19 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors; } +static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv) +{ + if (mlx5e_is_uplink_rep(priv)) + return; /* no rx mode for uplink rep */ + + queue_work(priv->wq, &priv->set_rx_mode_work); +} + static void mlx5e_set_rx_mode(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); - queue_work(priv->wq, &priv->set_rx_mode_work); + mlx5e_nic_set_rx_mode(priv); } static int mlx5e_set_mac(struct net_device *netdev, void *addr) @@ -3876,7 +3914,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr) ether_addr_copy(netdev->dev_addr, saddr->sa_data); netif_addr_unlock_bh(netdev); - queue_work(priv->wq, &priv->set_rx_mode_work); + mlx5e_nic_set_rx_mode(priv); return 0; } @@ -4414,6 +4452,9 @@ static int mlx5e_set_vf_link_state(struct net_device *dev, int vf, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; + if (mlx5e_is_uplink_rep(priv)) + return -EOPNOTSUPP; + return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1, mlx5_ifla_link2vport(link_state)); } @@ -4425,6 +4466,9 @@ int mlx5e_get_vf_config(struct net_device *dev, struct mlx5_core_dev *mdev = priv->mdev; int err; + if (!netif_device_present(dev)) + return -EOPNOTSUPP; + err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi); if (err) return err; @@ -4441,6 +4485,32 @@ int mlx5e_get_vf_stats(struct net_device *dev, return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1, vf_stats); } + +static bool +mlx5e_has_offload_stats(const struct net_device *dev, int attr_id) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + if (!netif_device_present(dev)) + return false; + + if (!mlx5e_is_uplink_rep(priv)) + return false; + + return mlx5e_rep_has_offload_stats(dev, attr_id); +} + +static int +mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + if (!mlx5e_is_uplink_rep(priv)) + return -EOPNOTSUPP; + + return mlx5e_rep_get_offload_stats(attr_id, dev, sp); +} #endif static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type) @@ -4683,8 +4753,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) struct mlx5e_channel *c = priv->channels.c[i]; mlx5e_rq_replace_xdp_prog(&c->rq, prog); - if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) + if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) { + bpf_prog_inc(prog); mlx5e_rq_replace_xdp_prog(&c->xskrq, prog); + } } unlock: @@ -4797,6 +4869,8 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_get_vf_config = 
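In mlx5e_xdp_set() above, installing the same program on the XSK RQ now takes an extra reference with bpf_prog_inc() first, so each installed copy owns exactly one reference and the later puts balance out. The ownership rule as a standalone refcount sketch, where obj, obj_get() and obj_put() are stand-ins for bpf_prog and its refcounting helpers:

#include <stdio.h>
#include <stdlib.h>

struct obj { int refcnt; };	/* stand-in for bpf_prog */

static struct obj *obj_get(struct obj *o) { o->refcnt++; return o; }

static void obj_put(struct obj *o)
{
	if (--o->refcnt == 0) {
		printf("last reference dropped, freeing\n");
		free(o);
	}
}

int main(void)
{
	struct obj *prog = malloc(sizeof(*prog));
	struct obj *rq_prog, *xskrq_prog;

	if (!prog)
		return 1;
	prog->refcnt = 1;		/* setup reference */
	rq_prog = obj_get(prog);	/* regular RQ holds one reference */
	xskrq_prog = obj_get(prog);	/* XSK RQ takes its own reference too */

	obj_put(rq_prog);
	obj_put(xskrq_prog);
	obj_put(prog);			/* drop the setup reference: frees */
	return 0;
}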
mlx5e_get_vf_config, .ndo_set_vf_link_state = mlx5e_set_vf_link_state, .ndo_get_vf_stats = mlx5e_get_vf_stats, + .ndo_has_offload_stats = mlx5e_has_offload_stats, + .ndo_get_offload_stats = mlx5e_get_offload_stats, #endif .ndo_get_devlink_port = mlx5e_get_devlink_port, }; @@ -4958,6 +5032,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 priv->max_nch); params->num_tc = 1; + /* Set an initial non-zero value, so that mlx5e_select_queue won't + * divide by zero if called before first activating channels. + */ + priv->num_tc_x_num_ch = params->num_channels * params->num_tc; + /* SQ */ params->log_sq_size = is_kdump_kernel() ? MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : @@ -5253,10 +5332,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, if (err) mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); - err = mlx5e_devlink_port_register(priv); - if (err) - mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err); - mlx5e_health_create_reporters(priv); return 0; @@ -5265,7 +5340,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { mlx5e_health_destroy_reporters(priv); - mlx5e_devlink_port_unregister(priv); mlx5e_tls_cleanup(priv); mlx5e_ipsec_cleanup(priv); } @@ -5405,7 +5479,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) return; mlx5e_dcbnl_init_app(priv); - queue_work(priv->wq, &priv->set_rx_mode_work); + mlx5e_nic_set_rx_mode(priv); rtnl_lock(); if (netif_running(netdev)) @@ -5428,7 +5502,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) netif_device_detach(priv->netdev); rtnl_unlock(); - queue_work(priv->wq, &priv->set_rx_mode_work); + mlx5e_nic_set_rx_mode(priv); mlx5e_hv_vhca_stats_destroy(priv); if (mlx5e_monitor_counter_supported(priv)) @@ -5474,8 +5548,6 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, struct net_device *netdev, struct mlx5_core_dev *mdev) { - memset(priv, 0, sizeof(*priv)); - /* priv init */ priv->mdev = mdev; priv->netdev = netdev; @@ -5508,12 +5580,18 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv) { int i; + /* bail if change profile failed and also rollback failed */ + if (!priv->mdev) + return; + destroy_workqueue(priv->wq); free_cpumask_var(priv->scratchpad.cpumask); for (i = 0; i < priv->htb.max_qos_sqs; i++) kfree(priv->htb.qos_sq_stats[i]); kvfree(priv->htb.qos_sq_stats); + + memset(priv, 0, sizeof(*priv)); } struct net_device * @@ -5630,11 +5708,10 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv) } static int -mlx5e_netdev_attach_profile(struct mlx5e_priv *priv, +mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev, const struct mlx5e_profile *new_profile, void *new_ppriv) { - struct net_device *netdev = priv->netdev; - struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_priv *priv = netdev_priv(netdev); int err; err = mlx5e_priv_init(priv, netdev, mdev); @@ -5647,10 +5724,16 @@ mlx5e_netdev_attach_profile(struct mlx5e_priv *priv, priv->ppriv = new_ppriv; err = new_profile->init(priv->mdev, priv->netdev); if (err) - return err; + goto priv_cleanup; err = mlx5e_attach_netdev(priv); if (err) - new_profile->cleanup(priv); + goto profile_cleanup; + return err; + +profile_cleanup: + new_profile->cleanup(priv); +priv_cleanup: + mlx5e_priv_cleanup(priv); return err; } @@ -5659,13 +5742,14 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, { unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile); const struct mlx5e_profile *orig_profile = priv->profile; + struct net_device 
*netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; void *orig_ppriv = priv->ppriv; int err, rollback_err; /* sanity */ if (new_max_nch != priv->max_nch) { - netdev_warn(priv->netdev, - "%s: Replacing profile with different max channels\n", + netdev_warn(netdev, "%s: Replacing profile with different max channels\n", __func__); return -EINVAL; } @@ -5675,25 +5759,27 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, priv->profile->cleanup(priv); mlx5e_priv_cleanup(priv); - err = mlx5e_netdev_attach_profile(priv, new_profile, new_ppriv); + err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv); if (err) { /* roll back to original profile */ - netdev_warn(priv->netdev, "%s: new profile init failed, %d\n", - __func__, err); + netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err); goto rollback; } return 0; rollback: - rollback_err = mlx5e_netdev_attach_profile(priv, orig_profile, orig_ppriv); - if (rollback_err) { - netdev_err(priv->netdev, - "%s: failed to rollback to orig profile, %d\n", + rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv); + if (rollback_err) + netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n", __func__, rollback_err); - } return err; } +void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv) +{ + mlx5e_netdev_change_profile(priv, &mlx5e_nic_profile, NULL); +} + void mlx5e_destroy_netdev(struct mlx5e_priv *priv) { struct net_device *netdev = priv->netdev; @@ -5776,10 +5862,17 @@ static int mlx5e_probe(struct auxiliary_device *adev, priv->profile = profile; priv->ppriv = NULL; + + err = mlx5e_devlink_port_register(priv); + if (err) { + mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err); + goto err_destroy_netdev; + } + err = profile->init(mdev, netdev); if (err) { mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err); - goto err_destroy_netdev; + goto err_devlink_cleanup; } err = mlx5e_resume(adev); @@ -5797,12 +5890,15 @@ static int mlx5e_probe(struct auxiliary_device *adev, mlx5e_devlink_port_type_eth_set(priv); mlx5e_dcbnl_init_app(priv); + mlx5_uplink_netdev_set(mdev, netdev); return 0; err_resume: mlx5e_suspend(adev, state); err_profile_cleanup: profile->cleanup(priv); +err_devlink_cleanup: + mlx5e_devlink_port_unregister(priv); err_destroy_netdev: mlx5e_destroy_netdev(priv); return err; @@ -5817,6 +5913,7 @@ static void mlx5e_remove(struct auxiliary_device *adev) unregister_netdev(priv->netdev); mlx5e_suspend(adev, state); priv->profile->cleanup(priv); + mlx5e_devlink_port_unregister(priv); mlx5e_destroy_netdev(priv); } @@ -5842,18 +5939,18 @@ int mlx5e_init(void) mlx5e_ipsec_build_inverse_table(); mlx5e_build_ptys2ethtool_map(); - ret = mlx5e_rep_init(); + ret = auxiliary_driver_register(&mlx5e_driver); if (ret) return ret; - ret = auxiliary_driver_register(&mlx5e_driver); + ret = mlx5e_rep_init(); if (ret) - mlx5e_rep_cleanup(); + auxiliary_driver_unregister(&mlx5e_driver); return ret; } void mlx5e_cleanup(void) { - auxiliary_driver_unregister(&mlx5e_driver); mlx5e_rep_cleanup(); + auxiliary_driver_unregister(&mlx5e_driver); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index a132fff7a980..4cc902e0d71b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -44,6 +44,7 @@ #include "en_tc.h" #include "en/rep/tc.h" #include "en/rep/neigh.h" +#include "en/devlink.h" #include "fs_core.h" #include 
"lib/mlx5.h" #define CREATE_TRACE_POINTS @@ -69,16 +70,6 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev, fw_rev_sub(mdev), mdev->board_id); } -static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *drvinfo) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - - mlx5e_rep_get_drvinfo(dev, drvinfo); - strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev), - sizeof(drvinfo->bus_info)); -} - static const struct counter_desc sw_rep_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) }, @@ -285,46 +276,6 @@ static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev) return mlx5e_ethtool_get_rxfh_indir_size(priv); } -static void mlx5e_uplink_rep_get_pause_stats(struct net_device *netdev, - struct ethtool_pause_stats *stats) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - mlx5e_stats_pause_get(priv, stats); -} - -static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pauseparam) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - mlx5e_ethtool_get_pauseparam(priv, pauseparam); -} - -static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pauseparam) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - return mlx5e_ethtool_set_pauseparam(priv, pauseparam); -} - -static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *link_ksettings) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings); -} - -static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *link_ksettings) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings); -} - static const struct ethtool_ops mlx5e_rep_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | @@ -344,34 +295,6 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = { .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size, }; -static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES | - ETHTOOL_COALESCE_USE_ADAPTIVE, - .get_drvinfo = mlx5e_uplink_rep_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_strings = mlx5e_rep_get_strings, - .get_sset_count = mlx5e_rep_get_sset_count, - .get_ethtool_stats = mlx5e_rep_get_ethtool_stats, - .get_ringparam = mlx5e_rep_get_ringparam, - .set_ringparam = mlx5e_rep_set_ringparam, - .get_channels = mlx5e_rep_get_channels, - .set_channels = mlx5e_rep_set_channels, - .get_coalesce = mlx5e_rep_get_coalesce, - .set_coalesce = mlx5e_rep_set_coalesce, - .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings, - .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings, - .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size, - .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size, - .get_rxfh = mlx5e_get_rxfh, - .set_rxfh = mlx5e_set_rxfh, - .get_rxnfc = mlx5e_get_rxnfc, - .set_rxnfc = mlx5e_set_rxnfc, - .get_pause_stats = mlx5e_uplink_rep_get_pause_stats, - .get_pauseparam = mlx5e_uplink_rep_get_pauseparam, - .set_pauseparam = mlx5e_uplink_rep_set_pauseparam, -}; - static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) { @@ -522,7 +445,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) return (rep->vport == 
MLX5_VPORT_UPLINK); } -static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id) +bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id) { switch (attr_id) { case IFLA_OFFLOAD_XSTATS_CPU_HIT: @@ -542,8 +465,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev, return 0; } -static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev, - void *sp) +int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp) { switch (attr_id) { case IFLA_OFFLOAD_XSTATS_CPU_HIT: @@ -568,34 +491,6 @@ static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu) return mlx5e_change_mtu(netdev, new_mtu, NULL); } -static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu) -{ - return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx); -} - -static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr) -{ - struct sockaddr *saddr = addr; - - if (!is_valid_ether_addr(saddr->sa_data)) - return -EADDRNOTAVAIL; - - ether_addr_copy(netdev->dev_addr, saddr->sa_data); - return 0; -} - -static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, - __be16 vlan_proto) -{ - netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n"); - - if (vlan != 0) - return -EOPNOTSUPP; - - /* allow setting 0-vid for compatibility with libvirt */ - return 0; -} - static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -641,29 +536,10 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_change_carrier = mlx5e_rep_change_carrier, }; -static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { - .ndo_open = mlx5e_open, - .ndo_stop = mlx5e_close, - .ndo_start_xmit = mlx5e_xmit, - .ndo_set_mac_address = mlx5e_uplink_rep_set_mac, - .ndo_setup_tc = mlx5e_rep_setup_tc, - .ndo_get_devlink_port = mlx5e_rep_get_devlink_port, - .ndo_get_stats64 = mlx5e_get_stats, - .ndo_has_offload_stats = mlx5e_rep_has_offload_stats, - .ndo_get_offload_stats = mlx5e_rep_get_offload_stats, - .ndo_change_mtu = mlx5e_uplink_rep_change_mtu, - .ndo_features_check = mlx5e_features_check, - .ndo_set_vf_mac = mlx5e_set_vf_mac, - .ndo_set_vf_rate = mlx5e_set_vf_rate, - .ndo_get_vf_config = mlx5e_get_vf_config, - .ndo_get_vf_stats = mlx5e_get_vf_stats, - .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan, - .ndo_set_features = mlx5e_set_features, -}; - bool mlx5e_eswitch_uplink_rep(struct net_device *netdev) { - return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep; + return netdev->netdev_ops == &mlx5e_netdev_ops && + mlx5e_is_uplink_rep(netdev_priv(netdev)); } bool mlx5e_eswitch_vf_rep(struct net_device *netdev) @@ -713,26 +589,15 @@ static void mlx5e_build_rep_params(struct net_device *netdev) } static void mlx5e_build_rep_netdev(struct net_device *netdev, - struct mlx5_core_dev *mdev, - struct mlx5_eswitch_rep *rep) + struct mlx5_core_dev *mdev) { SET_NETDEV_DEV(netdev, mdev->device); - if (rep->vport == MLX5_VPORT_UPLINK) { - netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep; - /* we want a persistent mac for the uplink rep */ - mlx5_query_mac_address(mdev, netdev->dev_addr); - netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops; - mlx5e_dcbnl_build_rep_netdev(netdev); - } else { - netdev->netdev_ops = &mlx5e_netdev_ops_rep; - eth_hw_addr_random(netdev); - netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; - } + netdev->netdev_ops = &mlx5e_netdev_ops_rep; + 
eth_hw_addr_random(netdev); + netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; netdev->watchdog_timeo = 15 * HZ; - netdev->features |= NETIF_F_NETNS_LOCAL; - #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) netdev->hw_features |= NETIF_F_HW_TC; #endif @@ -744,12 +609,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev, netdev->hw_features |= NETIF_F_TSO6; netdev->hw_features |= NETIF_F_RXCSUM; - if (rep->vport == MLX5_VPORT_UPLINK) - netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; - else - netdev->features |= NETIF_F_VLAN_CHALLENGED; - netdev->features |= netdev->hw_features; + netdev->features |= NETIF_F_VLAN_CHALLENGED; + netdev->features |= NETIF_F_NETNS_LOCAL; } static int mlx5e_init_rep(struct mlx5_core_dev *mdev, @@ -1115,6 +977,14 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) mlx5e_dcbnl_initialize(priv); mlx5e_dcbnl_init_app(priv); mlx5e_rep_neigh_init(rpriv); + + netdev->wanted_features |= NETIF_F_HW_TC; + + rtnl_lock(); + if (netif_running(netdev)) + mlx5e_open(netdev); + netif_device_attach(netdev); + rtnl_unlock(); } static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) @@ -1122,6 +992,12 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_core_dev *mdev = priv->mdev; + rtnl_lock(); + if (netif_running(priv->netdev)) + mlx5e_close(priv->netdev); + netif_device_detach(priv->netdev); + rtnl_unlock(); + mlx5e_rep_neigh_cleanup(rpriv); mlx5e_dcbnl_delete_app(priv); mlx5_notifier_unregister(mdev, &priv->events_nb); @@ -1198,33 +1074,64 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = { .update_carrier = mlx5e_update_carrier, .rx_handlers = &mlx5e_rx_handlers_rep, .max_tc = MLX5E_MAX_NUM_TC, - .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), + /* XSK is needed so we can replace profile with NIC netdev */ + .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK), .stats_grps = mlx5e_ul_rep_stats_grps, .stats_grps_num = mlx5e_ul_rep_stats_grps_num, }; /* e-Switch vport representors */ static int -mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) +mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev)); + struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); + struct devlink_port *dl_port; + int err; + + rpriv->netdev = priv->netdev; + + err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile, + rpriv); + if (err) + return err; + + dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport); + if (dl_port) + devlink_port_type_eth_set(dl_port, rpriv->netdev); + + return 0; +} + +static void +mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv) +{ + struct net_device *netdev = rpriv->netdev; + struct devlink_port *dl_port; + struct mlx5_core_dev *dev; + struct mlx5e_priv *priv; + + priv = netdev_priv(netdev); + dev = priv->mdev; + + dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport); + if (dl_port) + devlink_port_type_clear(dl_port); + mlx5e_netdev_attach_nic_profile(priv); +} + +static int +mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) { + struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); const struct mlx5e_profile *profile; - struct mlx5e_rep_priv *rpriv; struct devlink_port *dl_port; struct net_device *netdev; struct mlx5e_priv *priv; unsigned int txqs, rxqs; int nch, err; - rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); - if (!rpriv) - return -ENOMEM; - - /* 
rpriv->rep to be looked up when profile->init() is called */ - rpriv->rep = rep; - - profile = (rep->vport == MLX5_VPORT_UPLINK) ? - &mlx5e_uplink_rep_profile : &mlx5e_rep_profile; - + profile = &mlx5e_rep_profile; nch = mlx5e_get_max_num_channels(dev); txqs = nch * profile->max_tc; rxqs = nch * profile->rq_groups; @@ -1233,21 +1140,11 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) mlx5_core_warn(dev, "Failed to create representor netdev for vport %d\n", rep->vport); - kfree(rpriv); return -EINVAL; } - mlx5e_build_rep_netdev(netdev, dev, rep); - + mlx5e_build_rep_netdev(netdev, dev); rpriv->netdev = netdev; - rep->rep_data[REP_ETH].priv = rpriv; - INIT_LIST_HEAD(&rpriv->vport_sqs_list); - - if (rep->vport == MLX5_VPORT_UPLINK) { - err = mlx5e_create_mdev_resources(dev); - if (err) - goto err_destroy_netdev; - } priv = netdev_priv(netdev); priv->profile = profile; @@ -1255,7 +1152,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) err = profile->init(dev, netdev); if (err) { netdev_warn(netdev, "rep profile init failed, %d\n", err); - goto err_destroy_mdev_resources; + goto err_destroy_netdev; } err = mlx5e_attach_netdev(netdev_priv(netdev)); @@ -1285,13 +1182,34 @@ err_detach_netdev: err_cleanup_profile: priv->profile->cleanup(priv); -err_destroy_mdev_resources: - if (rep->vport == MLX5_VPORT_UPLINK) - mlx5e_destroy_mdev_resources(dev); - err_destroy_netdev: mlx5e_destroy_netdev(netdev_priv(netdev)); - kfree(rpriv); + return err; +} + +static int +mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_rep_priv *rpriv; + int err; + + rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); + if (!rpriv) + return -ENOMEM; + + /* rpriv->rep to be looked up when profile->init() is called */ + rpriv->rep = rep; + rep->rep_data[REP_ETH].priv = rpriv; + INIT_LIST_HEAD(&rpriv->vport_sqs_list); + + if (rep->vport == MLX5_VPORT_UPLINK) + err = mlx5e_vport_uplink_rep_load(dev, rep); + else + err = mlx5e_vport_vf_rep_load(dev, rep); + + if (err) + kfree(rpriv); + return err; } @@ -1305,15 +1223,19 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) struct devlink_port *dl_port; void *ppriv = priv->ppriv; + if (rep->vport == MLX5_VPORT_UPLINK) { + mlx5e_vport_uplink_rep_unload(rpriv); + goto free_ppriv; + } + dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport); if (dl_port) devlink_port_type_clear(dl_port); unregister_netdev(netdev); mlx5e_detach_netdev(priv); priv->profile->cleanup(priv); - if (rep->vport == MLX5_VPORT_UPLINK) - mlx5e_destroy_mdev_resources(priv->mdev); mlx5e_destroy_netdev(priv); +free_ppriv: kfree(ppriv); /* mlx5e_rep_priv */ } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index d1696404cca9..931fa619cb01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -220,6 +220,10 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw, const struct net_device *lag_dev); int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup); +bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id); +int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp); + bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv); void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); @@ -240,6 +244,11 @@ static inline int mlx5e_add_sqs_fwd_rules(struct 
mlx5e_priv *priv) { return 0; } static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {} static inline int mlx5e_rep_init(void) { return 0; }; static inline void mlx5e_rep_cleanup(void) {}; +static inline bool mlx5e_rep_has_offload_stats(const struct net_device *dev, + int attr_id) { return false; } +static inline int mlx5e_rep_get_offload_stats(int attr_id, + const struct net_device *dev, + void *sp) { return -EOPNOTSUPP; } #endif static inline bool mlx5e_is_vport_rep(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1b6ad94ebb10..f90894eea9e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -52,6 +52,7 @@ #include "en/health.h" #include "en/params.h" #include "devlink.h" +#include "en/devlink.h" static struct sk_buff * mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, @@ -500,7 +501,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) struct mlx5e_icosq *sq = rq->icosq; struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_umr_wqe *umr_wqe; - u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); u16 pi; int err; int i; @@ -531,7 +531,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) umr_wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); - umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset); + umr_wqe->uctrl.xlt_offset = + cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix))); sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX, @@ -669,6 +670,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) get_cqe_opcode(cqe)); mlx5e_dump_error_cqe(&sq->cq, sq->sqn, (struct mlx5_err_cqe *)cqe); + mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) queue_work(cq->priv->wq, &sq->recover_work); break; @@ -1822,6 +1824,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe struct mlx5e_priv *priv = netdev_priv(rq->netdev); struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; + struct devlink_port *dl_port; struct sk_buff *skb; u32 cqe_bcnt; u16 trap_id; @@ -1844,7 +1847,8 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); skb_push(skb, ETH_HLEN); - mlx5_devlink_trap_report(rq->mdev, trap_id, skb, &priv->dl_port); + dl_port = mlx5e_devlink_get_dl_port(priv); + mlx5_devlink_trap_report(rq->mdev, trap_id, skb, dl_port); dev_kfree_skb_any(skb); free_wqe: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 0da69b98f38f..4bd882a1018c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -445,12 +445,16 @@ static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp) mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); } -static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc) +static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc) { - u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn; + u32 *indirection_rqt, rqn; struct mlx5e_priv *priv = hp->func_priv; int i, ix, sz = MLX5E_INDIR_RQT_SIZE; + indirection_rqt = kcalloc(sz, sizeof(*indirection_rqt), GFP_KERNEL); + if (!indirection_rqt) + return -ENOMEM; + mlx5e_build_default_indir_rqt(indirection_rqt, 
sz, hp->num_channels); @@ -462,6 +466,9 @@ static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc) rqn = hp->pair->rqn[ix]; MLX5_SET(rqtc, rqtc, rq_num[i], rqn); } + + kfree(indirection_rqt); + return 0; } static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp) @@ -482,12 +489,15 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp) MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); MLX5_SET(rqtc, rqtc, rqt_max_size, sz); - mlx5e_hairpin_fill_rqt_rqns(hp, rqtc); + err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc); + if (err) + goto out; err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn); if (!err) hp->indir_rqt.enabled = true; +out: kvfree(in); return err; } @@ -1077,19 +1087,23 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, if (flow_flag_test(flow, CT)) { mod_hdr_acts = &attr->parse_attr->mod_hdr_acts; - return mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv), + rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv), flow, spec, attr, mod_hdr_acts); + } else { + rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); } - rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); if (IS_ERR(rule)) return rule; if (attr->esw_attr->split_count) { flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr); if (IS_ERR(flow->rule[1])) { - mlx5_eswitch_del_offloaded_rule(esw, rule, attr); + if (flow_flag_test(flow, CT)) + mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr); + else + mlx5_eswitch_del_offloaded_rule(esw, rule, attr); return flow->rule[1]; } } @@ -1947,6 +1961,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, misc_parameters); void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters_3); + void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters_3); struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = rule->match.dissector; u16 addr_type = 0; @@ -1976,6 +1994,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_CT) | BIT(FLOW_DISSECTOR_KEY_ENC_IP) | BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | + BIT(FLOW_DISSECTOR_KEY_ICMP) | BIT(FLOW_DISSECTOR_KEY_MPLS))) { NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n", @@ -2295,6 +2314,58 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, if (match.mask->flags) *match_level = MLX5_MATCH_L4; } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) { + struct flow_match_icmp match; + + flow_rule_match_icmp(rule, &match); + switch (ip_proto) { + case IPPROTO_ICMP: + if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & + MLX5_FLEX_PROTO_ICMP)) + return -EOPNOTSUPP; + MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type, + match.mask->type); + MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type, + match.key->type); + MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code, + match.mask->code); + MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code, + match.key->code); + break; + case IPPROTO_ICMPV6: + if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & + MLX5_FLEX_PROTO_ICMPV6)) + return -EOPNOTSUPP; + MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type, + match.mask->type); + MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type, + match.key->type); + MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code, + match.mask->code); + MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code, + match.key->code); + break; + default: 
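/* the flower ICMP key came with some other ip_proto; the misc_parameters_3 flex parser fields only describe ICMP and ICMPv6 type/code */ +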
+ NL_SET_ERR_MSG_MOD(extack, + "Code and type matching only with ICMP and ICMPv6"); + netdev_err(priv->netdev, + "Code and type matching only with ICMP and ICMPv6\n"); + return -EINVAL; + } + if (match.mask->code || match.mask->type) { + *match_level = MLX5_MATCH_L4; + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3; + } + } + /* Currently supported only for MPLS over UDP */ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) && + !netif_is_bareudp(filter_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on MPLS is supported only for MPLS over UDP"); + netdev_err(priv->netdev, + "Matching on MPLS is supported only for MPLS over UDP\n"); + return -EOPNOTSUPP; + } return 0; } @@ -2899,6 +2970,37 @@ static int is_action_keys_supported(const struct flow_action_entry *act, return 0; } +static bool modify_tuple_supported(bool modify_tuple, bool ct_clear, + bool ct_flow, struct netlink_ext_ack *extack, + struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec) +{ + if (!modify_tuple || ct_clear) + return true; + + if (ct_flow) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload tuple modification with non-clear ct()"); + netdev_info(priv->netdev, + "can't offload tuple modification with non-clear ct()"); + return false; + } + + /* Add ct_state=-trk match so it will be offloaded for non ct flows + * (or after clear action), as otherwise, since the tuple is changed, + * we can't restore ct state + */ + if (mlx5_tc_ct_add_no_trk_match(spec)) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload tuple modification with ct matches and no ct(clear) action"); + netdev_info(priv->netdev, + "can't offload tuple modification with ct matches and no ct(clear) action"); + return false; + } + + return true; +} + static bool modify_header_match_supported(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct flow_action *flow_action, @@ -2937,18 +3039,9 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, return err; } - /* Add ct_state=-trk match so it will be offloaded for non ct flows - * (or after clear action), as otherwise, since the tuple is changed, - * we can't restore ct state - */ - if (!ct_clear && modify_tuple && - mlx5_tc_ct_add_no_trk_match(spec)) { - NL_SET_ERR_MSG_MOD(extack, - "can't offload tuple modify header with ct matches"); - netdev_info(priv->netdev, - "can't offload tuple modify header with ct matches"); + if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack, + priv, spec)) return false; - } ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); if (modify_ip_header && ip_proto != IPPROTO_TCP && @@ -2979,7 +3072,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv, actions = flow->attr->action; if (mlx5e_is_eswitch_flow(flow)) { - if (flow->attr->esw_attr->split_count && ct_flow) { + if (flow->attr->esw_attr->split_count && ct_flow && + !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) { /* All registers used by ct are cleared when using * split rules. 
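* Devices with the reg_c_preserve capability keep the registers intact across the split, which is why the condition above excludes them.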
*/ @@ -3779,6 +3873,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return err; flow_flag_set(flow, CT); + esw_attr->split_count = esw_attr->out_count; break; default: NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported"); @@ -3841,11 +3936,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - NL_SET_ERR_MSG_MOD(extack, - "Mirroring goto chain rules isn't supported"); - return -EOPNOTSUPP; - } attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; } @@ -4265,6 +4355,11 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow; int err = 0; + if (!mlx5_esw_hold(priv->mdev)) + return -EAGAIN; + + mlx5_esw_get(priv->mdev); + rcu_read_lock(); flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); if (flow) { @@ -4302,11 +4397,14 @@ rcu_unlock: if (err) goto err_free; + mlx5_esw_release(priv->mdev); return 0; err_free: mlx5e_flow_put(priv, flow); out: + mlx5_esw_put(priv->mdev); + mlx5_esw_release(priv->mdev); return err; } @@ -4346,6 +4444,7 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, trace_mlx5e_delete_flower(f); mlx5e_flow_put(priv, flow); + mlx5_esw_put(priv->mdev); return 0; errout: @@ -4445,7 +4544,8 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate, */ if (rate) { rate = (rate * BITS_PER_BYTE) + 500000; - rate_mbps = max_t(u64, do_div(rate, 1000000), 1); + do_div(rate, 1000000); + rate_mbps = max_t(u32, rate, 1); } err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); @@ -4480,6 +4580,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_POLICE: + if (act->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second"); + return -EOPNOTSUPP; + } err = apply_police_params(priv, act->police.rate_bytes_ps, extack); if (err) return err; @@ -4646,10 +4750,6 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr, MLX5_FLOW_NAMESPACE_KERNEL); - if (IS_ERR(tc->ct)) { - err = PTR_ERR(tc->ct); - goto err_ct; - } tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event; err = register_netdevice_notifier_dev_net(priv->netdev, @@ -4665,7 +4765,6 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) err_reg: mlx5_tc_ct_clean(tc->ct); -err_ct: mlx5_chains_destroy(tc->chains); err_chains: rhashtable_destroy(&tc->ht); @@ -4724,8 +4823,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) esw_chains(esw), &esw->offloads.mod_hdr, MLX5_FLOW_NAMESPACE_FDB); - if (IS_ERR(uplink_priv->ct_priv)) - goto err_ct; mapping = mapping_create(sizeof(struct tunnel_match_key), TUNNEL_INFO_BITS_MASK, true); @@ -4765,7 +4862,6 @@ err_enc_opts_mapping: mapping_destroy(uplink_priv->tunnel_mapping); err_tun_mapping: mlx5_tc_ct_clean(uplink_priv->ct_priv); -err_ct: netdev_warn(priv->netdev, "Failed to initialize tc (eswitch), err: %d", err); return err; @@ -4838,9 +4934,17 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { - unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD); + unsigned long flags = MLX5_TC_FLAG(INGRESS); struct mlx5e_priv *priv = cb_priv; + if (!priv->netdev || !netif_device_present(priv->netdev)) + return -EOPNOTSUPP; + + if (mlx5e_is_uplink_rep(priv)) + flags |= MLX5_TC_FLAG(ESW_OFFLOAD); + 
else + flags |= MLX5_TC_FLAG(NIC_OFFLOAD); + switch (type) { case TC_SETUP_CLSFLOWER: return mlx5e_setup_tc_cls_flower(priv, type_data, flags); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 89003ae7775a..25c091795bcd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -79,6 +79,7 @@ struct mlx5_flow_attr { u8 inner_match_level; u8 outer_match_level; u8 ip_version; + u8 tun_ip_version; u32 flags; union { struct mlx5_esw_flow_attr esw_attr[0]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index bdbffe484fce..d2efe2455955 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -576,7 +576,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq, pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS); wqe = MLX5E_TX_FETCH_WQE(sq, pi); - prefetchw(wqe->data); + net_prefetchw(wqe->data); *session = (struct mlx5e_tx_mpwqe) { .wqe = wqe, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index cb1e181f4c6a..7bfc84238b3d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -120,7 +120,7 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1 struct mlx5_vport *vport; vport = mlx5_eswitch_get_vport(esw, vport_num); - return vport->dl_port; + return IS_ERR(vport) ? ERR_CAST(vport) : vport->dl_port; } int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c index 6f6772bf61a2..3da7becc1069 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c @@ -248,7 +248,7 @@ err_mod_hdr_regc0: err_ethertype: kfree(rule); out: - kfree(rule_spec); + kvfree(rule_spec); return err; } @@ -328,7 +328,7 @@ static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw, e->recirc_cnt = 0; out: - kfree(in); + kvfree(in); return err; } @@ -347,7 +347,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw, spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { - kfree(in); + kvfree(in); return -ENOMEM; } @@ -371,8 +371,8 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw, } err_out: - kfree(spec); - kfree(in); + kvfree(spec); + kvfree(in); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h index cb9eafd1b4ee..21d56b49d14b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h @@ -30,13 +30,13 @@ mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr); #else /* indir API stubs */ -struct mlx5_esw_indir_table * +static inline struct mlx5_esw_indir_table * mlx5_esw_indir_table_init(void) { return NULL; } -void +static inline void mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir) { } @@ -57,7 +57,7 @@ mlx5_esw_indir_table_put(struct mlx5_eswitch *esw, { } -bool +static inline bool mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, u16 vport_num, diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index aba17835465b..6b260dacf853 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -435,6 +435,7 @@ static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw) esw->fdb_table.legacy.addr_grp = NULL; esw->fdb_table.legacy.allmulti_grp = NULL; esw->fdb_table.legacy.promisc_grp = NULL; + atomic64_set(&esw->user_count, 0); } static int esw_create_legacy_table(struct mlx5_eswitch *esw) @@ -442,6 +443,7 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw) int err; memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb)); + atomic64_set(&esw->user_count, 0); err = esw_create_legacy_vepa_table(esw); if (err) @@ -1141,6 +1143,8 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, struct mlx5_vport *vport; vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return PTR_ERR(vport); if (!vport->qos.enabled) return -EOPNOTSUPP; @@ -1279,6 +1283,8 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num, int ret; vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return PTR_ERR(vport); mutex_lock(&esw->state_lock); WARN_ON(vport->enabled); @@ -1326,6 +1332,8 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) struct mlx5_vport *vport; vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return; mutex_lock(&esw->state_lock); if (!vport->enabled) @@ -1714,7 +1722,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) if (!ESW_ALLOWED(esw)) return 0; - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); if (esw->mode == MLX5_ESWITCH_NONE) { ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); } else { @@ -1726,7 +1734,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) if (!ret) esw->esw_funcs.num_vfs = num_vfs; } - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); return ret; } @@ -1774,10 +1782,10 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) if (!ESW_ALLOWED(esw)) return; - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); mlx5_eswitch_disable_locked(esw, clear_vf); esw->esw_funcs.num_vfs = 0; - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); } int mlx5_eswitch_init(struct mlx5_core_dev *dev) @@ -1834,7 +1842,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ida_init(&esw->offloads.vport_metadata_ida); xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC); mutex_init(&esw->state_lock); - mutex_init(&esw->mode_lock); + init_rwsem(&esw->mode_lock); mlx5_esw_for_all_vports(esw, i, vport) { vport->vport = mlx5_eswitch_index_to_vport_num(esw, i); @@ -1870,7 +1878,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); esw_offloads_cleanup_reps(esw); - mutex_destroy(&esw->mode_lock); mutex_destroy(&esw->state_lock); WARN_ON(!xa_empty(&esw->offloads.vhca_map)); xa_destroy(&esw->offloads.vhca_map); @@ -2034,6 +2041,10 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, vport = 0; } mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto unlock; + } err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state); if (err) { @@ -2105,7 +2116,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u16 vport, u16 vlan, u8 qos) { u8 set_flags = 0; - int 
err; + int err = 0; if (!ESW_ALLOWED(esw)) return -EPERM; @@ -2114,9 +2125,18 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_LEGACY) { + if (!vlan) + goto unlock; /* compatibility with libvirt */ + + err = -EOPNOTSUPP; + goto unlock; + } + err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags); - mutex_unlock(&esw->state_lock); +unlock: + mutex_unlock(&esw->state_lock); return err; } @@ -2133,6 +2153,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, return PTR_ERR(evport); mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto unlock; + } pschk = evport->info.spoofchk; evport->info.spoofchk = spoofchk; if (pschk && !is_valid_ether_addr(evport->info.mac)) @@ -2143,8 +2167,9 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, err = esw_acl_ingress_lgcy_setup(esw, evport); if (err) evport->info.spoofchk = pschk; - mutex_unlock(&esw->state_lock); +unlock: + mutex_unlock(&esw->state_lock); return err; } @@ -2265,6 +2290,7 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, u16 vport, bool setting) { struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + int err = 0; if (!ESW_ALLOWED(esw)) return -EPERM; @@ -2272,12 +2298,17 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, return PTR_ERR(evport); mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto unlock; + } evport->info.trusted = setting; if (evport->enabled) esw_vport_change_handle_locked(evport); - mutex_unlock(&esw->state_lock); - return 0; +unlock: + mutex_unlock(&esw->state_lock); + return err; } static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) @@ -2552,3 +2583,94 @@ void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifie { blocking_notifier_chain_unregister(&esw->n_head, nb); } + +/** + * mlx5_esw_hold() - Try to take a read lock on esw mode lock. + * @mdev: mlx5 core device. + * + * Should be called by esw resources callers. + * + * Return: true on success or false. + */ +bool mlx5_esw_hold(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + + /* e.g. VF doesn't have eswitch so nothing to do */ + if (!ESW_ALLOWED(esw)) + return true; + + if (down_read_trylock(&esw->mode_lock) != 0) + return true; + + return false; +} + +/** + * mlx5_esw_release() - Release a read lock on esw mode lock. + * @mdev: mlx5 core device. + */ +void mlx5_esw_release(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + + if (ESW_ALLOWED(esw)) + up_read(&esw->mode_lock); +} + +/** + * mlx5_esw_get() - Increase esw user count. + * @mdev: mlx5 core device. + */ +void mlx5_esw_get(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + + if (ESW_ALLOWED(esw)) + atomic64_inc(&esw->user_count); +} + +/** + * mlx5_esw_put() - Decrease esw user count. + * @mdev: mlx5 core device. + */ +void mlx5_esw_put(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + + if (ESW_ALLOWED(esw)) + atomic64_dec_if_positive(&esw->user_count); +} + +/** + * mlx5_esw_try_lock() - Take a write lock on esw mode lock. + * @esw: eswitch device. + * + * Should be called by esw mode change routine. + * + * Return: + * * 0 - esw mode if successfully locked and refcount is 0. + * * -EBUSY - refcount is not 0. 
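+ * (a user still holds the eswitch via mlx5_esw_get(); e.g. mlx5e_configure_flower() takes such a reference per offloaded TC flow and mlx5e_delete_flower() drops it).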
+ * * -EINVAL - In the middle of switching mode or lock is already held. + */ +int mlx5_esw_try_lock(struct mlx5_eswitch *esw) +{ + if (down_write_trylock(&esw->mode_lock) == 0) + return -EINVAL; + + if (atomic64_read(&esw->user_count) > 0) { + up_write(&esw->mode_lock); + return -EBUSY; + } + + return esw->mode; +} + +/** + * mlx5_esw_unlock() - Release write lock on esw mode lock + * @esw: eswitch device. + */ +void mlx5_esw_unlock(struct mlx5_eswitch *esw) +{ + up_write(&esw->mode_lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index fdf5c8c05c1b..56d85cedb9bd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -271,7 +271,8 @@ struct mlx5_eswitch { /* Protects eswitch mode change that occurs via one or more * user commands, i.e. sriov state change, devlink commands. */ - struct mutex mode_lock; + struct rw_semaphore mode_lock; + atomic64_t user_count; struct { bool enabled; @@ -761,6 +762,14 @@ struct mlx5_esw_event_info { int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n); void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n); + +bool mlx5_esw_hold(struct mlx5_core_dev *dev); +void mlx5_esw_release(struct mlx5_core_dev *dev); +void mlx5_esw_get(struct mlx5_core_dev *dev); +void mlx5_esw_put(struct mlx5_core_dev *dev); +int mlx5_esw_try_lock(struct mlx5_eswitch *esw); +void mlx5_esw_unlock(struct mlx5_eswitch *esw); + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } @@ -781,6 +790,13 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) { return ERR_PTR(-EOPNOTSUPP); } + +static inline unsigned int +mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev, + u16 vport_num) +{ + return vport_num; +} #endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 94cb0217b4f3..d5de6bf622ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -40,7 +40,6 @@ #include "eswitch.h" #include "esw/indir_table.h" #include "esw/acl/ofld.h" -#include "esw/indir_table.h" #include "rdma.h" #include "en.h" #include "fs_core.h" @@ -551,7 +550,8 @@ esw_setup_dests(struct mlx5_flow_destination *dest, if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) && MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) && - mlx5_eswitch_vport_match_metadata_enabled(esw)) + mlx5_eswitch_vport_match_metadata_enabled(esw) && + MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE; if (attr->dest_ft) { @@ -1446,7 +1446,7 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) return ERR_PTR(-EOPNOTSUPP); - spec = kzalloc(sizeof(*spec), GFP_KERNEL); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return ERR_PTR(-ENOMEM); @@ -1469,7 +1469,7 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) dest.ft = esw->offloads.ft_offloads; flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); - kfree(spec); + kvfree(spec); if (IS_ERR(flow_rule)) esw_warn(esw->dev, @@ -1854,6 +1854,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) /* Holds 
true only as long as DMFS is the default */ mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns, MLX5_FLOW_STEERING_MODE_DMFS); + atomic64_set(&esw->user_count, 0); } static int esw_create_offloads_table(struct mlx5_eswitch *esw) @@ -2259,9 +2260,11 @@ int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num) if (esw->mode != MLX5_ESWITCH_OFFLOADS) return 0; - err = mlx5_esw_offloads_devlink_port_register(esw, vport_num); - if (err) - return err; + if (vport_num != MLX5_VPORT_UPLINK) { + err = mlx5_esw_offloads_devlink_port_register(esw, vport_num); + if (err) + return err; + } err = mlx5_esw_offloads_rep_load(esw, vport_num); if (err) @@ -2269,7 +2272,8 @@ int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num) return err; load_err: - mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); + if (vport_num != MLX5_VPORT_UPLINK) + mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); return err; } @@ -2279,7 +2283,9 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num) return; mlx5_esw_offloads_rep_unload(esw, vport_num); - mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); + + if (vport_num != MLX5_VPORT_UPLINK) + mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); } #define ESW_OFFLOADS_DEVCOM_PAIR (0) @@ -2554,6 +2560,9 @@ static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) struct mlx5_vport *vport; vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); + if (IS_ERR(vport)) + return PTR_ERR(vport); + return esw_vport_create_offloads_acl_tables(esw, vport); } @@ -2562,6 +2571,9 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) struct mlx5_vport *vport; vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); + if (IS_ERR(vport)) + return; + esw_vport_destroy_offloads_acl_tables(esw, vport); } @@ -2573,6 +2585,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); mutex_init(&esw->fdb_table.offloads.vports.lock); hash_init(esw->fdb_table.offloads.vports.table); + atomic64_set(&esw->user_count, 0); indir = mlx5_esw_indir_table_init(); if (IS_ERR(indir)) { @@ -2914,8 +2927,14 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL; - mutex_lock(&esw->mode_lock); - cur_mlx5_mode = esw->mode; + err = mlx5_esw_try_lock(esw); + if (err < 0) { + NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy"); + return err; + } + cur_mlx5_mode = err; + err = 0; + if (cur_mlx5_mode == mlx5_mode) goto unlock; @@ -2927,7 +2946,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, err = -EINVAL; unlock: - mutex_unlock(&esw->mode_lock); + mlx5_esw_unlock(esw); return err; } @@ -2940,14 +2959,14 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) if (IS_ERR(esw)) return PTR_ERR(esw); - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); if (err) goto unlock; err = esw_mode_to_devlink(esw->mode, mode); unlock: - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); return err; } @@ -2963,7 +2982,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, if (IS_ERR(esw)) return PTR_ERR(esw); - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); if (err) goto out; @@ -3002,7 +3021,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, } 
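/* every vport accepted the new inline mode; commit it before dropping the lock */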
esw->offloads.inline_mode = mlx5_mode; - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); return 0; revert_inline_mode: @@ -3012,7 +3031,7 @@ revert_inline_mode: vport, esw->offloads.inline_mode); out: - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); return err; } @@ -3025,14 +3044,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) if (IS_ERR(esw)) return PTR_ERR(esw); - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); if (err) goto unlock; err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); unlock: - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); return err; } @@ -3048,7 +3067,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, if (IS_ERR(esw)) return PTR_ERR(esw); - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); if (err) goto unlock; @@ -3094,7 +3113,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, } unlock: - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); return err; } @@ -3109,14 +3128,14 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, return PTR_ERR(esw); - mutex_lock(&esw->mode_lock); + down_write(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); if (err) goto unlock; *encap = esw->offloads.encap; unlock: - mutex_unlock(&esw->mode_lock); + up_write(&esw->mode_lock); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index 80da50e12915..bd66ab2af5b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -575,6 +575,7 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn, MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size)); MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn); MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev)); MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma); if (MLX5_CAP_GEN(mdev, cqe_version) == 1) MLX5_SET(qpc, qpc, user_index, 0xFFFFFF); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 66ad599bd488..f5517ea2f6be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2395,14 +2395,12 @@ static int init_root_tree(struct mlx5_flow_steering *steering, struct init_tree_node *init_node, struct fs_node *fs_parent_node) { - int i; - struct mlx5_flow_namespace *fs_ns; int err; + int i; - fs_get_obj(fs_ns, fs_parent_node); for (i = 0; i < init_node->ar_size; i++) { err = init_root_tree_recursive(steering, &init_node->children[i], - &fs_ns->node, + fs_parent_node, init_node, i); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index f9042e147c7f..d5d57630015f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -104,7 +104,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev) if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) { complete(&fw_reset->done); } else { - mlx5_load_one(dev, false); + mlx5_load_one(dev); devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0, BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | 
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE)); @@ -119,7 +119,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work) int err; mlx5_enter_error_state(dev, true); - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); err = mlx5_health_wait_pci_up(dev); if (err) mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n"); @@ -199,16 +199,11 @@ static void mlx5_fw_live_patch_event(struct work_struct *work) struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset, fw_live_patch_work); struct mlx5_core_dev *dev = fw_reset->dev; - struct mlx5_fw_tracer *tracer; mlx5_core_info(dev, "Live patch updated firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); - tracer = dev->tracer; - if (IS_ERR_OR_NULL(tracer)) - return; - - if (mlx5_fw_tracer_reload(tracer)) + if (mlx5_fw_tracer_reload(dev->tracer)) mlx5_core_err(dev, "Failed to reload FW tracer\n"); } @@ -342,7 +337,7 @@ static void mlx5_sync_reset_now_event(struct work_struct *work) } mlx5_enter_error_state(dev, true); - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); done: fw_reset->ret = err; mlx5_fw_reset_complete_reload(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 0c32c485eb58..a0a851640804 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -335,12 +335,12 @@ static int mlx5_health_try_recover(struct mlx5_core_dev *dev) return -EIO; } mlx5_core_err(dev, "starting health recovery flow\n"); - mlx5_recover_device(dev); - if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) || - mlx5_health_check_fatal_sensors(dev)) { + if (mlx5_recover_device(dev) || mlx5_health_check_fatal_sensors(dev)) { mlx5_core_err(dev, "health recovery failed\n"); return -EIO; } + + mlx5_core_info(dev, "health recovery succeeded\n"); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 1eeca45cfcdf..48303286c133 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -233,6 +233,7 @@ int mlx5i_create_underlay_qp(struct mlx5e_priv *priv) } qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(priv->mdev)); MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, @@ -694,6 +695,7 @@ static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev) static void mlx5_rdma_netdev_free(struct net_device *netdev) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; struct mlx5i_priv *ipriv = priv->ppriv; const struct mlx5e_profile *profile = priv->profile; @@ -702,13 +704,13 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev) if (!ipriv->sub_interface) { mlx5i_pkey_qpn_ht_cleanup(netdev); - mlx5e_destroy_mdev_resources(priv->mdev); + mlx5e_destroy_mdev_resources(mdev); } } static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev) { - return mdev->mlx5e_res.pdn != 0; + return mdev->mlx5e_res.hw_objs.pdn != 0; } static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 83a05371e2aa..127bb92da150 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -768,7 +768,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, spin_lock(&lag_lock); ldev = mlx5_lag_dev_get(dev); - if (ldev && __mlx5_lag_is_roce(ldev)) { + if (ldev && __mlx5_lag_is_active(ldev)) { num_ports = MLX5_MAX_PORTS; mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev; mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index b0e129d0f6d8..1e7f26b240de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -495,15 +495,15 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, return -EINVAL; field_select = MLX5_MTPPS_FS_ENABLE; + pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index); + if (pin < 0) + return -EBUSY; + if (on) { bool rt_mode = mlx5_real_time_mode(mdev); u32 nsec; s64 sec; - pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index); - if (pin < 0) - return -EBUSY; - pin_mode = MLX5_PIN_MODE_OUT; pattern = MLX5_OUT_PATTERN_PERIODIC; ts.tv_sec = rq->perout.period.sec; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c index 57eb91bcbca7..e995f8378df7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c @@ -46,7 +46,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY); - MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.pdn); + MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn); err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); if (!err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c index a68738c8f4bc..97324d9d4f2a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c @@ -55,10 +55,6 @@ void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev) int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count) { - if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { - mlx5_core_err(dev, "Cannot reserve GIDs when interfaces are up\n"); - return -EPERM; - } if (dev->roce.reserved_gids.start < count) { mlx5_core_warn(dev, "GID table exhausted attempting to reserve %d more GIDs\n", count); @@ -79,7 +75,6 @@ int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count) void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count) { - WARN(test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state), "Unreserving GIDs when interfaces are up"); WARN(count > dev->roce.reserved_gids.count, "Unreserving %u GIDs when only %u reserved", count, dev->roce.reserved_gids.count); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h index d046db7bb047..2f536c5d30b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h @@ -95,4 +95,13 @@ static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev) return devlink_net(priv_to_devlink(dev)); } +static inline void mlx5_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev) +{ + mdev->mlx5e_res.uplink_netdev = netdev; +} + +static inline struct net_device 
*mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev) +{ + return mdev->mlx5e_res.uplink_netdev; +} #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c568896cfb23..e3a417d17707 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1235,7 +1235,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev) mlx5_put_uars_page(dev, dev->priv.uar); } -int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) +int mlx5_init_one(struct mlx5_core_dev *dev) { int err = 0; @@ -1247,16 +1247,14 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) /* remove any previous indication of internal error */ dev->state = MLX5_DEVICE_STATE_UP; - err = mlx5_function_setup(dev, boot); + err = mlx5_function_setup(dev, true); if (err) goto err_function; - if (boot) { - err = mlx5_init_once(dev); - if (err) { - mlx5_core_err(dev, "sw objs init failed\n"); - goto function_teardown; - } + err = mlx5_init_once(dev); + if (err) { + mlx5_core_err(dev, "sw objs init failed\n"); + goto function_teardown; } err = mlx5_load(dev); @@ -1265,16 +1263,11 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); - if (boot) { - err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); - if (err) - goto err_devlink_reg; - - err = mlx5_register_device(dev); - } else { - err = mlx5_attach_device(dev); - } + err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); + if (err) + goto err_devlink_reg; + err = mlx5_register_device(dev); if (err) goto err_register; @@ -1282,16 +1275,14 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) return 0; err_register: - if (boot) - mlx5_devlink_unregister(priv_to_devlink(dev)); + mlx5_devlink_unregister(priv_to_devlink(dev)); err_devlink_reg: clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); mlx5_unload(dev); err_load: - if (boot) - mlx5_cleanup_once(dev); + mlx5_cleanup_once(dev); function_teardown: - mlx5_function_teardown(dev, boot); + mlx5_function_teardown(dev, true); err_function: dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; out: @@ -1299,33 +1290,84 @@ out: return err; } -void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) +void mlx5_uninit_one(struct mlx5_core_dev *dev) { mutex_lock(&dev->intf_state_mutex); - if (cleanup) { - mlx5_unregister_device(dev); - mlx5_devlink_unregister(priv_to_devlink(dev)); - } else { - mlx5_detach_device(dev); - } + mlx5_unregister_device(dev); + mlx5_devlink_unregister(priv_to_devlink(dev)); if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__); - if (cleanup) - mlx5_cleanup_once(dev); + mlx5_cleanup_once(dev); goto out; } clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); + mlx5_unload(dev); + mlx5_cleanup_once(dev); + mlx5_function_teardown(dev, true); +out: + mutex_unlock(&dev->intf_state_mutex); +} + +int mlx5_load_one(struct mlx5_core_dev *dev) +{ + int err = 0; + + mutex_lock(&dev->intf_state_mutex); + if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { + mlx5_core_warn(dev, "interface is up, NOP\n"); + goto out; + } + /* remove any previous indication of internal error */ + dev->state = MLX5_DEVICE_STATE_UP; + + err = mlx5_function_setup(dev, false); + if (err) + goto err_function; + + err = mlx5_load(dev); + if (err) + goto err_load; + + set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); + + err = mlx5_attach_device(dev); + if (err) + goto err_attach; + + 
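/* unlike mlx5_init_one(), a reload only re-attaches the auxiliary devices; the devlink and device registration done at probe time stay in place */ +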
mutex_unlock(&dev->intf_state_mutex); + return 0; +err_attach: + clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); mlx5_unload(dev); +err_load: + mlx5_function_teardown(dev, false); +err_function: + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; +out: + mutex_unlock(&dev->intf_state_mutex); + return err; +} - if (cleanup) - mlx5_cleanup_once(dev); +void mlx5_unload_one(struct mlx5_core_dev *dev) +{ + mutex_lock(&dev->intf_state_mutex); - mlx5_function_teardown(dev, cleanup); + mlx5_detach_device(dev); + + if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { + mlx5_core_warn(dev, "%s: interface is down, NOP\n", + __func__); + goto out; + } + + clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); + mlx5_unload(dev); + mlx5_function_teardown(dev, false); out: mutex_unlock(&dev->intf_state_mutex); } @@ -1397,7 +1439,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) mutex_destroy(&dev->intf_state_mutex); } -static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) +static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct mlx5_core_dev *dev; struct devlink *devlink; @@ -1433,11 +1475,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) goto pci_init_err; } - err = mlx5_load_one(dev, true); + err = mlx5_init_one(dev); if (err) { - mlx5_core_err(dev, "mlx5_load_one failed with error code %d\n", + mlx5_core_err(dev, "mlx5_init_one failed with error code %d\n", err); - goto err_load_one; + goto err_init_one; } err = mlx5_crdump_enable(dev); @@ -1449,7 +1491,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) devlink_reload_enable(devlink); return 0; -err_load_one: +err_init_one: mlx5_pci_close(dev); pci_init_err: mlx5_mdev_uninit(dev); @@ -1469,7 +1511,7 @@ static void remove_one(struct pci_dev *pdev) devlink_reload_disable(devlink); mlx5_crdump_disable(dev); mlx5_drain_health_wq(dev); - mlx5_unload_one(dev, true); + mlx5_uninit_one(dev); mlx5_pci_close(dev); mlx5_mdev_uninit(dev); mlx5_adev_idx_free(dev->priv.adev_idx); @@ -1485,7 +1527,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, mlx5_enter_error_state(dev, false); mlx5_error_sw_reset(dev); - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); mlx5_drain_health_wq(dev); mlx5_pci_disable_device(dev); @@ -1555,7 +1597,7 @@ static void mlx5_pci_resume(struct pci_dev *pdev) mlx5_core_info(dev, "%s was called\n", __func__); - err = mlx5_load_one(dev, false); + err = mlx5_load_one(dev); if (err) mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n", __func__, err); @@ -1627,7 +1669,7 @@ static void shutdown(struct pci_dev *pdev) mlx5_core_info(dev, "Shutdown was called\n"); err = mlx5_try_fast_unload(dev); if (err) - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); mlx5_pci_disable_device(dev); } @@ -1635,7 +1677,7 @@ static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); return 0; } @@ -1644,7 +1686,7 @@ static int mlx5_resume(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); - return mlx5_load_one(dev, false); + return mlx5_load_one(dev); } static const struct pci_device_id mlx5_core_pci_table[] = { @@ -1676,20 +1718,23 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table); void mlx5_disable_device(struct mlx5_core_dev *dev) { mlx5_error_sw_reset(dev); - mlx5_unload_one(dev, false); + mlx5_unload_one(dev); } -void mlx5_recover_device(struct mlx5_core_dev 
*dev) +int mlx5_recover_device(struct mlx5_core_dev *dev) { + int ret = -EIO; + mlx5_pci_disable_device(dev); if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED) - mlx5_pci_resume(dev->pdev); + ret = mlx5_load_one(dev); + return ret; } static struct pci_driver mlx5_core_driver = { .name = KBUILD_MODNAME, .id_table = mlx5_core_pci_table, - .probe = init_one, + .probe = probe_one, .remove = remove_one, .suspend = mlx5_suspend, .resume = mlx5_resume, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index efe403c7e354..37c8ec7d2217 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -134,7 +134,7 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev); u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev); int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev); -void mlx5_recover_device(struct mlx5_core_dev *dev); +int mlx5_recover_device(struct mlx5_core_dev *dev); int mlx5_sriov_init(struct mlx5_core_dev *dev); void mlx5_sriov_cleanup(struct mlx5_core_dev *dev); int mlx5_sriov_attach(struct mlx5_core_dev *dev); @@ -267,8 +267,10 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev) int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx); void mlx5_mdev_uninit(struct mlx5_core_dev *dev); -void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup); -int mlx5_load_one(struct mlx5_core_dev *dev, bool boot); +int mlx5_init_one(struct mlx5_core_dev *dev); +void mlx5_uninit_one(struct mlx5_core_dev *dev); +void mlx5_unload_one(struct mlx5_core_dev *dev); +int mlx5_load_one(struct mlx5_core_dev *dev); int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index b265f27b2166..90b524c59f3c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -181,15 +181,13 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table) u16 max_functions; u16 function_id; int err = 0; - bool ecpu; int i; max_functions = mlx5_sf_max_functions(dev); function_id = MLX5_CAP_GEN(dev, sf_base_id); - ecpu = mlx5_read_embedded_cpu(dev); /* Arm the vhca context as the vhca event notifier */ for (i = 0; i < max_functions; i++) { - err = mlx5_vhca_event_arm(dev, function_id, ecpu); + err = mlx5_vhca_event_arm(dev, function_id); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h index 4de02902aef1..149fd9e698cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h @@ -47,7 +47,7 @@ static inline void mlx5_sf_driver_unregister(void) static inline bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev) { - return 0; + return false; } #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index c4bf555c25ea..42c8ee03fe3e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -41,14 +41,15 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia goto remap_err; } - err = mlx5_load_one(mdev, true); + err = 
mlx5_init_one(mdev); if (err) { - mlx5_core_warn(mdev, "mlx5_load_one err=%d\n", err); - goto load_one_err; + mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err); + goto init_one_err; } + devlink_reload_enable(devlink); return 0; -load_one_err: +init_one_err: iounmap(mdev->iseg); remap_err: mlx5_mdev_uninit(mdev); @@ -63,7 +64,8 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev) struct devlink *devlink; devlink = priv_to_devlink(sf_dev->mdev); - mlx5_unload_one(sf_dev->mdev, true); + devlink_reload_disable(devlink); + mlx5_uninit_one(sf_dev->mdev); iounmap(sf_dev->mdev->iseg); mlx5_mdev_uninit(sf_dev->mdev); mlx5_devlink_free(devlink); @@ -73,7 +75,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev) { struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); - mlx5_unload_one(sf_dev->mdev, false); + mlx5_unload_one(sf_dev->mdev); } static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index c2ba41bb7a70..60a6328a9ca0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -492,7 +492,7 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi break; default: break; - }; + } return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c index 58b6be0b03d7..c9bddde04047 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c @@ -5,8 +5,9 @@ #include "priv.h" #include "sf.h" #include "mlx5_ifc_vhca_event.h" -#include "vhca_event.h" #include "ecpf.h" +#include "vhca_event.h" +#include "mlx5_core.h" struct mlx5_sf_hw { u32 usr_sfnum; @@ -18,7 +19,6 @@ struct mlx5_sf_hw_table { struct mlx5_core_dev *dev; struct mlx5_sf_hw *sfs; int max_local_functions; - u8 ecpu: 1; struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. 
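* (mlx5_sf_hw_table_sf_deferred_free(), for instance, queries the VHCA state under it before freeing the entry).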
*/ struct notifier_block vhca_nb; }; @@ -64,7 +64,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) } if (sw_id == -ENOSPC) { err = -ENOSPC; - goto err; + goto exist_err; } hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id); @@ -72,7 +72,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) if (err) goto err; - err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum); + err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, usr_sfnum); if (err) goto vhca_err; @@ -118,7 +118,7 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id) hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id); mutex_lock(&table->table_lock); - err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out)); + err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out)); if (err) goto err; state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); @@ -164,7 +164,6 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev) table->dev = dev; table->sfs = sfs; table->max_local_functions = max_functions; - table->ecpu = mlx5_read_embedded_cpu(dev); dev->priv.sf_hw_table = table; mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h index 1daf5a122ba3..4fc870140d71 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h @@ -20,7 +20,7 @@ struct mlx5_ifc_vhca_state_context_bits { u8 sw_function_id[0x20]; - u8 reserved_at_40[0x80]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_query_vhca_state_out_bits { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c index af2f2dd9db25..28b14b05086f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c @@ -19,52 +19,51 @@ struct mlx5_vhca_event_work { struct mlx5_vhca_state_event event; }; -int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, - bool ecpu, u32 *out, u32 outlen) +int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen) { u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {}; MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE); MLX5_SET(query_vhca_state_in, in, function_id, function_id); - MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu); + MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, 0); return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id, - bool ecpu, u32 *in, u32 inlen) + u32 *in, u32 inlen) { u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {}; MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE); MLX5_SET(modify_vhca_state_in, in, function_id, function_id); - MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu); + MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0); return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } -int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id) +int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id) { u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {}; u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {}; MLX5_SET(modify_vhca_state_in, in, opcode, 
MLX5_CMD_OP_MODIFY_VHCA_STATE); MLX5_SET(modify_vhca_state_in, in, function_id, function_id); - MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu); + MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0); MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1); MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id); return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out); } -int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu) +int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id) { u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {}; MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1); MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1); - return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in)); + return mlx5_cmd_modify_vhca_state(dev, function_id, in, sizeof(in)); } static void @@ -73,7 +72,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event * u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; int err; - err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out)); + err = mlx5_cmd_query_vhca_state(dev, event->function_id, out, sizeof(out)); if (err) return; @@ -82,7 +81,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event * event->new_vhca_state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); - mlx5_vhca_event_arm(dev, event->function_id, event->ecpu); + mlx5_vhca_event_arm(dev, event->function_id); blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event); } @@ -94,6 +93,7 @@ static void mlx5_vhca_state_work_handler(struct work_struct *_work) struct mlx5_core_dev *dev = notifier->dev; mlx5_vhca_event_notify(dev, &work->event); + kfree(work); } static int @@ -110,7 +110,6 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v INIT_WORK(&work->work, &mlx5_vhca_state_work_handler); work->notifier = notifier; work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id); - work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function); mlx5_events_work_enqueue(notifier->dev, &work->work); return NOTIFY_OK; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h index 1fe1ec6f4d4b..013cdfe90616 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h @@ -10,7 +10,6 @@ struct mlx5_vhca_state_event { u16 function_id; u16 sw_function_id; u8 new_vhca_state; - bool ecpu; }; static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev) @@ -25,10 +24,10 @@ void mlx5_vhca_event_start(struct mlx5_core_dev *dev); void mlx5_vhca_event_stop(struct mlx5_core_dev *dev); int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); -int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id); -int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu); +int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id); +int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id); int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, - bool ecpu, u32 *out, u32 outlen); + u32 
*out, u32 outlen); #else static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 83c4c877d558..8a6a56f9dc4e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -169,6 +169,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt)); MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ); MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt)); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev)); MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma); if (MLX5_CAP_GEN(mdev, cqe_version) == 1) MLX5_SET(qpc, qpc, user_index, 0xFFFFFF); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index 9ec079247c4b..c5f62d2a058f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -331,7 +331,7 @@ static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr, MLX5_SET(ste_sx_transmit, hw_ste_p, action_type, DR_STE_ACTION_TYPE_PUSH_VLAN); MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr); - /* Due to HW limitation we need to set this bit, otherwise reforamt + + /* Due to HW limitation we need to set this bit, otherwise reformat + * push vlan will not work. */ if (go_back) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index 4088d6e51508..616ebc38381a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -264,8 +264,8 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) { u64 index = - (MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) | - MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32) << 26); + ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) | + ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26); return index << 6; } @@ -437,21 +437,6 @@ static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action) dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_rx_decap_l3(u8 *hw_ste_p, - u8 *s_action, - u16 decap_actions, - u32 decap_index) -{ - MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id, - DR_STE_V1_ACTION_ID_MODIFY_LIST); - MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions, - decap_actions); - MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr, - decap_index); - - dr_ste_v1_set_reparse(hw_ste_p); -} - static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p, u8 *s_action, u16 num_of_actions, @@ -571,9 +556,6 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, bool allow_ctr = true; if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) { - dr_ste_v1_set_rx_decap_l3(last_ste, action, - attr->decap_actions, - attr->decap_index); dr_ste_v1_set_rewrite_actions(last_ste, action, attr->decap_actions, attr->decap_index); @@ -1532,6 +1514,7 @@ static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *val DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port); 
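/* The dr_ste_v1_get_miss_addr() hunk above widens each MLX5_GET() result to
 * u64 before shifting. Without the casts, the shift of miss_address_39_32 by
 * 26 is evaluated in 32-bit arithmetic and the top bits are truncated before
 * the widening assignment. A minimal standalone illustration of the hazard
 * (plain C with hypothetical field values; not driver code):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t miss_31_6 = 0x3FFFFFF;	/* 26-bit low part of the miss address */
	uint32_t miss_39_32 = 0xFF;	/* 8-bit high part */

	/* Broken: 0xFF << 26 needs 34 bits, but the expression is
	 * evaluated as a 32-bit value, so the upper bits are lost
	 * before the result is assigned to a 64-bit variable. */
	uint64_t bad = (miss_31_6 | miss_39_32 << 26);

	/* Fixed, as in the hunk above: widen first, then shift. */
	uint64_t good = ((uint64_t)miss_31_6 |
			 ((uint64_t)miss_39_32) << 26);

	printf("bad=0x%llx good=0x%llx\n",
	       (unsigned long long)(bad << 6),
	       (unsigned long long)(good << 6));
	return 0;
}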
DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn); + misc_mask->source_eswitch_owner_vhca_id = 0; } static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 52fdc34251ba..7e9a7cb31720 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1728,7 +1728,7 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor return err; event_id = mlxsw_reg_mfde_event_id_get(mfde_pl); - err = devlink_fmsg_u8_pair_put(fmsg, "id", event_id); + err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id); if (err) return err; switch (event_id) { @@ -1806,6 +1806,10 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val); if (err) return err; + val = mlxsw_reg_mfde_log_ip_get(mfde_pl); + err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val); + if (err) + return err; } else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) { val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl); err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 8af7d9d03475..80712dc803d0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -58,6 +58,25 @@ struct mlxsw_tx_info { bool is_emad; }; +struct mlxsw_rx_md_info { + u32 cookie_index; + u32 latency; + u32 tx_congestion; + union { + /* Valid when 'tx_port_valid' is set. */ + u16 tx_sys_port; + u16 tx_lag_id; + }; + u8 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */ + u8 tx_tc; + u8 latency_valid:1, + tx_congestion_valid:1, + tx_tc_valid:1, + tx_port_valid:1, + tx_port_is_lag:1, + unused:3; +}; + bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core, const struct mlxsw_tx_info *tx_info); int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, @@ -515,7 +534,7 @@ enum mlxsw_devlink_param_id { struct mlxsw_skb_cb { union { struct mlxsw_tx_info tx_info; - u32 cookie_index; /* Only used during receive */ + struct mlxsw_rx_md_info rx_md_info; }; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 4d699fe98cb6..78d9c0196f2b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -2007,3 +2007,134 @@ int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport, return 0; } EXPORT_SYMBOL(mlxsw_afa_block_append_l4port); + +/* Mirror Sampler Action + * --------------------- + * The SAMPLER_ACTION is used to mirror packets with a probability (sampling). + */ + +#define MLXSW_AFA_SAMPLER_CODE 0x13 +#define MLXSW_AFA_SAMPLER_SIZE 1 + +/* afa_sampler_mirror_agent + * Mirror (SPAN) agent. + */ +MLXSW_ITEM32(afa, sampler, mirror_agent, 0x04, 0, 3); + +#define MLXSW_AFA_SAMPLER_RATE_MAX (BIT(24) - 1) + +/* afa_sampler_mirror_probability_rate + * Mirroring probability. 
+ * Valid values are 1 to 2^24 - 1 + */ +MLXSW_ITEM32(afa, sampler, mirror_probability_rate, 0x08, 0, 24); + +static void mlxsw_afa_sampler_pack(char *payload, u8 mirror_agent, u32 rate) +{ + mlxsw_afa_sampler_mirror_agent_set(payload, mirror_agent); + mlxsw_afa_sampler_mirror_probability_rate_set(payload, rate); +} + +struct mlxsw_afa_sampler { + struct mlxsw_afa_resource resource; + int span_id; + u8 local_port; + bool ingress; +}; + +static void mlxsw_afa_sampler_destroy(struct mlxsw_afa_block *block, + struct mlxsw_afa_sampler *sampler) +{ + mlxsw_afa_resource_del(&sampler->resource); + block->afa->ops->sampler_del(block->afa->ops_priv, sampler->local_port, + sampler->span_id, sampler->ingress); + kfree(sampler); +} + +static void mlxsw_afa_sampler_destructor(struct mlxsw_afa_block *block, + struct mlxsw_afa_resource *resource) +{ + struct mlxsw_afa_sampler *sampler; + + sampler = container_of(resource, struct mlxsw_afa_sampler, resource); + mlxsw_afa_sampler_destroy(block, sampler); +} + +static struct mlxsw_afa_sampler * +mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u8 local_port, + struct psample_group *psample_group, u32 rate, + u32 trunc_size, bool truncate, bool ingress, + struct netlink_ext_ack *extack) +{ + struct mlxsw_afa_sampler *sampler; + int err; + + sampler = kzalloc(sizeof(*sampler), GFP_KERNEL); + if (!sampler) + return ERR_PTR(-ENOMEM); + + err = block->afa->ops->sampler_add(block->afa->ops_priv, local_port, + psample_group, rate, trunc_size, + truncate, ingress, &sampler->span_id, + extack); + if (err) + goto err_sampler_add; + + sampler->ingress = ingress; + sampler->local_port = local_port; + sampler->resource.destructor = mlxsw_afa_sampler_destructor; + mlxsw_afa_resource_add(block, &sampler->resource); + return sampler; + +err_sampler_add: + kfree(sampler); + return ERR_PTR(err); +} + +static int +mlxsw_afa_block_append_allocated_sampler(struct mlxsw_afa_block *block, + u8 mirror_agent, u32 rate) +{ + char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_SAMPLER_CODE, + MLXSW_AFA_SAMPLER_SIZE); + + if (IS_ERR(act)) + return PTR_ERR(act); + mlxsw_afa_sampler_pack(act, mirror_agent, rate); + return 0; +} + +int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port, + struct psample_group *psample_group, + u32 rate, u32 trunc_size, bool truncate, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct mlxsw_afa_sampler *sampler; + int err; + + if (rate > MLXSW_AFA_SAMPLER_RATE_MAX) { + NL_SET_ERR_MSG_MOD(extack, "Sampling rate is too high"); + return -EINVAL; + } + + sampler = mlxsw_afa_sampler_create(block, local_port, psample_group, + rate, trunc_size, truncate, ingress, + extack); + if (IS_ERR(sampler)) + return PTR_ERR(sampler); + + err = mlxsw_afa_block_append_allocated_sampler(block, sampler->span_id, + rate); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append sampler action"); + goto err_append_allocated_sampler; + } + + return 0; + +err_append_allocated_sampler: + mlxsw_afa_sampler_destroy(block, sampler); + return err; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_sampler); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index b652497b1002..b65bf98eb5ab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -30,6 +30,12 @@ struct mlxsw_afa_ops { u16 *p_policer_index, struct netlink_ext_ack *extack); void (*policer_del)(void *priv, u16 
policer_index); + int (*sampler_add)(void *priv, u8 local_port, + struct psample_group *psample_group, u32 rate, + u32 trunc_size, bool truncate, bool ingress, + int *p_span_id, struct netlink_ext_ack *extack); + void (*sampler_del)(void *priv, u8 local_port, int span_id, + bool ingress); bool dummy_first_set; }; @@ -92,5 +98,10 @@ int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block, u32 fa_index, u64 rate_bytes_ps, u32 burst, u16 *p_policer_index, struct netlink_ext_ack *extack); +int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port, + struct psample_group *psample_group, + u32 rate, u32 trunc_size, bool truncate, + bool ingress, + struct netlink_ext_ack *extack); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index d0052537e627..8e8456811384 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -540,6 +540,55 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, spin_unlock(&q->lock); } +static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb, + const char *cqe) +{ + struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb); + + if (mlxsw_pci_cqe2_tx_lag_get(cqe)) { + cb->rx_md_info.tx_port_is_lag = true; + cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe); + cb->rx_md_info.tx_lag_port_index = + mlxsw_pci_cqe2_tx_lag_subport_get(cqe); + } else { + cb->rx_md_info.tx_port_is_lag = false; + cb->rx_md_info.tx_sys_port = + mlxsw_pci_cqe2_tx_system_port_get(cqe); + } + + if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT && + cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID) + cb->rx_md_info.tx_port_valid = 1; + else + cb->rx_md_info.tx_port_valid = 0; +} + +static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe) +{ + struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb); + + cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe); + if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID) + cb->rx_md_info.tx_congestion_valid = 1; + else + cb->rx_md_info.tx_congestion_valid = 0; + cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT; + + cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe); + if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID) + cb->rx_md_info.latency_valid = 1; + else + cb->rx_md_info.latency_valid = 0; + + cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe); + if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID) + cb->rx_md_info.tx_tc_valid = 1; + else + cb->rx_md_info.tx_tc_valid = 0; + + mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe); +} + static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q, u16 consumer_counter_limit, @@ -581,11 +630,15 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe); - mlxsw_skb_cb(skb)->cookie_index = cookie_index; + mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index; } else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 && rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 && mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) { rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe); + mlxsw_pci_cqe_rdq_md_init(skb, cqe); + } else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE && + mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) { + mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe); } 
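/* mlxsw_pci_cqe_rdq_md_init() above converts the CQE's in-band "invalid"
 * sentinels (0xFFFF congestion, 0x1F traffic class, 0xFFFFFF latency) into
 * per-field validity bits, so consumers test a single flag instead of
 * comparing against magic values. A minimal standalone sketch of that
 * pattern; the accessor below is a hypothetical stand-in, not the driver's
 * actual helper:
 */
#include <stdint.h>

#define LATENCY_INVALID 0xFFFFFF	/* 24-bit all-ones sentinel */

struct rx_md {
	uint32_t latency;
	uint8_t latency_valid:1;
};

/* Hypothetical stand-in for mlxsw_pci_cqe2_mirror_latency_get(): extract
 * the raw 24-bit field, which may still hold the sentinel. */
static uint32_t cqe_latency_get(const uint8_t *cqe)
{
	return ((uint32_t)cqe[0] << 16 |
		(uint32_t)cqe[1] << 8 | cqe[2]) & 0xFFFFFF;
}

static void md_init(struct rx_md *md, const uint8_t *cqe)
{
	md->latency = cqe_latency_get(cqe);
	/* Record validity once at parse time; callers only test the bit. */
	md->latency_valid = (md->latency != LATENCY_INVALID);
}

int main(void)
{
	const uint8_t cqe[3] = { 0xFF, 0xFF, 0xFF };	/* sentinel value */
	struct rx_md md;

	md_init(&md, cqe);
	return md.latency_valid;	/* 0: sentinel rejected as invalid */
}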
byte_count = mlxsw_pci_cqe_byte_count_get(cqe); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index a2c1fbd3e0d1..7b531228d6c0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -173,6 +173,15 @@ MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16); */ MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14); +#define MLXSW_PCI_CQE2_MIRROR_CONG_INVALID 0xFFFF + +/* pci_cqe_mirror_cong_high + * Congestion level in units of 8KB of the egress traffic class of the original + * packet that does mirroring to the CPU. Value of 0xFFFF means that the + * congestion level is invalid. + */ +MLXSW_ITEM32(pci, cqe2, mirror_cong_high, 0x08, 16, 4); + /* pci_cqe_trap_id * Trap ID that captured the packet. */ @@ -208,6 +217,59 @@ MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5); MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6); mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12); +#define MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID 0x1F + +/* pci_cqe_mirror_tclass + * The egress traffic class of the original packet that does mirroring to the + * CPU. Value of 0x1F means that the traffic class is invalid. + */ +MLXSW_ITEM32(pci, cqe2, mirror_tclass, 0x10, 27, 5); + +/* pci_cqe_tx_lag + * The Tx port of a packet that is mirrored / sampled to the CPU is a LAG. + */ +MLXSW_ITEM32(pci, cqe2, tx_lag, 0x10, 24, 1); + +/* pci_cqe_tx_lag_subport + * The port index within the LAG of a packet that is mirrored / sampled to the + * CPU. Reserved when tx_lag is 0. + */ +MLXSW_ITEM32(pci, cqe2, tx_lag_subport, 0x10, 16, 8); + +#define MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT 0xFFFE +#define MLXSW_PCI_CQE2_TX_PORT_INVALID 0xFFFF + +/* pci_cqe_tx_lag_id + * The Tx LAG ID of the original packet that is mirrored / sampled to the CPU. + * Value of 0xFFFE means multi-port. Value of 0xFFFF means that the Tx LAG ID + * is invalid. Reserved when tx_lag is 0. + */ +MLXSW_ITEM32(pci, cqe2, tx_lag_id, 0x10, 0, 16); + +/* pci_cqe_tx_system_port + * The Tx port of the original packet that is mirrored / sampled to the CPU. + * Value of 0xFFFE means multi-port. Value of 0xFFFF means that the Tx port is + * invalid. Reserved when tx_lag is 1. + */ +MLXSW_ITEM32(pci, cqe2, tx_system_port, 0x10, 0, 16); + +/* pci_cqe_mirror_cong_low + * Congestion level in units of 8KB of the egress traffic class of the original + * packet that does mirroring to the CPU. Value of 0xFFFF means that the + * congestion level is invalid. + */ +MLXSW_ITEM32(pci, cqe2, mirror_cong_low, 0x14, 20, 12); + +#define MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT 13 /* Units of 8KB. */ + +static inline u16 mlxsw_pci_cqe2_mirror_cong_get(const char *cqe) +{ + u16 cong_high = mlxsw_pci_cqe2_mirror_cong_high_get(cqe); + u16 cong_low = mlxsw_pci_cqe2_mirror_cong_low_get(cqe); + + return cong_high << 12 | cong_low; +} + /* pci_cqe_user_def_val_orig_pkt_len * When trap_id is an ACL: User defined value from policy engine action. */ @@ -218,6 +280,15 @@ MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20); */ MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8); +#define MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID 0xFFFFFF + +/* pci_cqe_mirror_latency + * End-to-end latency of the original packet that does mirroring to the CPU. + * Value of 0xFFFFFF means that the latency is invalid. Units are according to + * MOGCR.mirror_latency_units. + */ +MLXSW_ITEM32(pci, cqe2, mirror_latency, 0x1C, 8, 24); + /* pci_cqe_owner * Ownership bit.
*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index c4adc7f740d3..900b4bf5bb5b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -842,6 +842,14 @@ MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8); +/* reg_spvid_egr_et_set + * When VLAN is pushed at ingress (for untagged packets or for + * QinQ push mode) then the EtherType is decided at the egress port. + * Reserved when Spectrum-1. + * Access: RW + */ +MLXSW_ITEM32(reg, spvid, egr_et_set, 0x04, 24, 1); + /* reg_spvid_et_vlan * EtherType used for when VLAN is pushed at ingress (for untagged * packets or for QinQ push mode). @@ -849,6 +857,7 @@ MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8); * 1: ether_type1 * 2: ether_type2 - Reserved when Spectrum-1, supported by Spectrum-2 * Ethertype IDs are configured by SVER. + * Reserved when egr_et_set = 1. * Access: RW */ MLXSW_ITEM32(reg, spvid, et_vlan, 0x04, 16, 2); @@ -2079,6 +2088,41 @@ static inline void mlxsw_reg_spvc_pack(char *payload, u8 local_port, bool et1, mlxsw_reg_spvc_et0_set(payload, et0); } +/* SPEVET - Switch Port Egress VLAN EtherType + * ------------------------------------------ + * The switch port egress VLAN EtherType configures which EtherType to push at + * egress for packets incoming through a local port for which 'SPVID.egr_et_set' + * is set. + */ +#define MLXSW_REG_SPEVET_ID 0x202A +#define MLXSW_REG_SPEVET_LEN 0x08 + +MLXSW_REG_DEFINE(spevet, MLXSW_REG_SPEVET_ID, MLXSW_REG_SPEVET_LEN); + +/* reg_spevet_local_port + * Egress Local port number. + * Not supported to CPU port. + * Access: Index + */ +MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8); + +/* reg_spevet_et_vlan + * Egress EtherType VLAN to push when SPVID.egr_et_set field set for the packet: + * 0: ether_type0 - (default) + * 1: ether_type1 + * 2: ether_type2 + * Access: RW + */ +MLXSW_ITEM32(reg, spevet, et_vlan, 0x04, 16, 2); + +static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port, + u8 et_vlan) +{ + MLXSW_REG_ZERO(spevet, payload); + mlxsw_reg_spevet_local_port_set(payload, local_port); + mlxsw_reg_spevet_et_vlan_set(payload, et_vlan); +} + /* CWTP - Congestion WRED ECN TClass Profile * ---------------------------------------- * Configures the profiles for queues of egress port and traffic class @@ -5637,7 +5681,7 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port) MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN); -/* reg_slot_index +/* reg_pmaos_slot_index * Slot index. * Access: Index */ @@ -8086,6 +8130,60 @@ mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif, mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key); } +/* RATRAD - Router Adjacency Table Activity Dump Register + * ------------------------------------------------------ + * The RATRAD register is used to dump and optionally clear activity bits of + * router adjacency table entries. + */ +#define MLXSW_REG_RATRAD_ID 0x8022 +#define MLXSW_REG_RATRAD_LEN 0x210 + +MLXSW_REG_DEFINE(ratrad, MLXSW_REG_RATRAD_ID, MLXSW_REG_RATRAD_LEN); + +enum { + /* Read activity */ + MLXSW_REG_RATRAD_OP_READ_ACTIVITY, + /* Read and clear activity */ + MLXSW_REG_RATRAD_OP_READ_CLEAR_ACTIVITY, +}; + +/* reg_ratrad_op + * Access: Operation + */ +MLXSW_ITEM32(reg, ratrad, op, 0x00, 30, 2); + +/* reg_ratrad_ecmp_size + * ecmp_size is the number of sequential entries from adjacency_index.
Valid + * ranges: + * Spectrum-1: 32-64, 512, 1024, 2048, 4096 + * Spectrum-2/3: 32-128, 256, 512, 1024, 2048, 4096 + * Access: Index + */ +MLXSW_ITEM32(reg, ratrad, ecmp_size, 0x00, 0, 13); + +/* reg_ratrad_adjacency_index + * Index into the adjacency table. + * Access: Index + */ +MLXSW_ITEM32(reg, ratrad, adjacency_index, 0x04, 0, 24); + +/* reg_ratrad_activity_vector + * Activity bit per adjacency index. + * Bits higher than ecmp_size are reserved. + * Access: RO + */ +MLXSW_ITEM_BIT_ARRAY(reg, ratrad, activity_vector, 0x10, 0x200, 1); + +static inline void mlxsw_reg_ratrad_pack(char *payload, u32 adjacency_index, + u16 ecmp_size) +{ + MLXSW_REG_ZERO(ratrad, payload); + mlxsw_reg_ratrad_op_set(payload, + MLXSW_REG_RATRAD_OP_READ_CLEAR_ACTIVITY); + mlxsw_reg_ratrad_ecmp_size_set(payload, ecmp_size); + mlxsw_reg_ratrad_adjacency_index_set(payload, adjacency_index); +} + /* RIGR-V2 - Router Interface Group Register Version 2 * --------------------------------------------------- * The RIGR_V2 register is used to add, remove and query egress interface list @@ -9925,15 +10023,28 @@ MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1); */ MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4); +#define MLXSW_REG_MPAR_RATE_MAX 3500000000UL + +/* reg_mpar_probability_rate + * Sampling rate. + * Valid values are: 1 to 3.5*10^9 + * Value of 1 means "sample all". Default is 1. + * Reserved when Spectrum-1. + * Access: RW + */ +MLXSW_ITEM32(reg, mpar, probability_rate, 0x08, 0, 32); + static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port, enum mlxsw_reg_mpar_i_e i_e, - bool enable, u8 pa_id) + bool enable, u8 pa_id, + u32 probability_rate) { MLXSW_REG_ZERO(mpar, payload); mlxsw_reg_mpar_local_port_set(payload, local_port); mlxsw_reg_mpar_enable_set(payload, enable); mlxsw_reg_mpar_i_e_set(payload, i_e); mlxsw_reg_mpar_pa_id_set(payload, pa_id); + mlxsw_reg_mpar_probability_rate_set(payload, probability_rate); } /* MGIR - Management General Information Register @@ -10577,6 +10688,8 @@ MLXSW_ITEM32(reg, mpagr, trigger, 0x00, 0, 4); */ MLXSW_ITEM32(reg, mpagr, pa_id, 0x04, 0, 4); +#define MLXSW_REG_MPAGR_RATE_MAX 3500000000UL + /* reg_mpagr_probability_rate * Sampling rate. * Valid values are: 1 to 3.5*10^9 @@ -10919,7 +11032,7 @@ MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN); * Which irisc triggered the event * Access: RO */ -MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 8, 4); +MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 24, 8); enum mlxsw_reg_mfde_event_id { MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO = 1, @@ -10930,7 +11043,7 @@ enum mlxsw_reg_mfde_event_id { /* reg_mfde_event_id * Access: RO */ -MLXSW_ITEM32(reg, mfde, event_id, 0x00, 0, 8); +MLXSW_ITEM32(reg, mfde, event_id, 0x00, 0, 16); enum mlxsw_reg_mfde_method { MLXSW_REG_MFDE_METHOD_QUERY, @@ -10979,6 +11092,13 @@ MLXSW_ITEM32(reg, mfde, log_address, 0x10, 0, 32); */ MLXSW_ITEM32(reg, mfde, log_id, 0x14, 0, 4); +/* reg_mfde_log_ip + * IP (instruction pointer) that triggered the timeout. + * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO + * Access: RO + */ +MLXSW_ITEM64(reg, mfde, log_ip, 0x18, 0, 64); + /* reg_mfde_pipes_mask * Bit per kvh pipe. 
* Access: RO @@ -11995,6 +12115,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(sfmr), MLXSW_REG(spvmlr), MLXSW_REG(spvc), + MLXSW_REG(spevet), MLXSW_REG(cwtp), MLXSW_REG(cwtpm), MLXSW_REG(pgcr), @@ -12047,6 +12168,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(rtar), MLXSW_REG(ratr), MLXSW_REG(rtdp), + MLXSW_REG(ratrad), MLXSW_REG(rdpm), MLXSW_REG(ricnt), MLXSW_REG(rrcr), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1650d9852b5b..efc7acb4842c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -23,6 +23,8 @@ #include <linux/netlink.h> #include <linux/jhash.h> #include <linux/log2.h> +#include <linux/refcount.h> +#include <linux/rhashtable.h> #include <net/switchdev.h> #include <net/pkt_cls.h> #include <net/netevent.h> @@ -45,7 +47,7 @@ #define MLXSW_SP1_FWREV_MAJOR 13 #define MLXSW_SP1_FWREV_MINOR 2008 -#define MLXSW_SP1_FWREV_SUBMINOR 2018 +#define MLXSW_SP1_FWREV_SUBMINOR 2406 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@ -62,7 +64,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { #define MLXSW_SP2_FWREV_MAJOR 29 #define MLXSW_SP2_FWREV_MINOR 2008 -#define MLXSW_SP2_FWREV_SUBMINOR 2018 +#define MLXSW_SP2_FWREV_SUBMINOR 2406 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { .major = MLXSW_SP2_FWREV_MAJOR, @@ -77,7 +79,7 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { #define MLXSW_SP3_FWREV_MAJOR 30 #define MLXSW_SP3_FWREV_MINOR 2008 -#define MLXSW_SP3_FWREV_SUBMINOR 2018 +#define MLXSW_SP3_FWREV_SUBMINOR 2406 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { .major = MLXSW_SP3_FWREV_MAJOR, @@ -400,6 +402,22 @@ int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type) return 0; } +int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 ethtype) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char spevet_pl[MLXSW_REG_SPEVET_LEN]; + u8 sver_type; + int err; + + err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type); + if (err) + return err; + + mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl); +} + static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, u16 ethtype) { @@ -2212,32 +2230,6 @@ void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); } -void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port) -{ - struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; - struct mlxsw_sp_port_sample *sample; - u32 size; - - if (unlikely(!mlxsw_sp_port)) { - dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", - local_port); - goto out; - } - - rcu_read_lock(); - sample = rcu_dereference(mlxsw_sp_port->sample); - if (!sample) - goto out_unlock; - size = sample->truncate ? 
sample->trunc_size : skb->len; - psample_sample_packet(sample->psample_group, skb, size, - mlxsw_sp_port->dev->ifindex, 0, sample->rate); -out_unlock: - rcu_read_unlock(); -out: - consume_skb(skb); -} - #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ _is_ctrl, SP_##_trap_group, DISCARD) @@ -2576,6 +2568,142 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { .get_stats = mlxsw_sp2_get_stats, }; +struct mlxsw_sp_sample_trigger_node { + struct mlxsw_sp_sample_trigger trigger; + struct mlxsw_sp_sample_params params; + struct rhash_head ht_node; + struct rcu_head rcu; + refcount_t refcount; +}; + +static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger), + .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node), + .key_len = sizeof(struct mlxsw_sp_sample_trigger), + .automatic_shrinking = true, +}; + +static void +mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key, + const struct mlxsw_sp_sample_trigger *trigger) +{ + memset(key, 0, sizeof(*key)); + key->type = trigger->type; + key->local_port = trigger->local_port; +} + +/* RCU read lock must be held */ +struct mlxsw_sp_sample_params * +mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger) +{ + struct mlxsw_sp_sample_trigger_node *trigger_node; + struct mlxsw_sp_sample_trigger key; + + mlxsw_sp_sample_trigger_key_init(&key, trigger); + trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key, + mlxsw_sp_sample_trigger_ht_params); + if (!trigger_node) + return NULL; + + return &trigger_node->params; +} + +static int +mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger, + const struct mlxsw_sp_sample_params *params) +{ + struct mlxsw_sp_sample_trigger_node *trigger_node; + int err; + + trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL); + if (!trigger_node) + return -ENOMEM; + + trigger_node->trigger = *trigger; + trigger_node->params = *params; + refcount_set(&trigger_node->refcount, 1); + + err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht, + &trigger_node->ht_node, + mlxsw_sp_sample_trigger_ht_params); + if (err) + goto err_rhashtable_insert; + + return 0; + +err_rhashtable_insert: + kfree(trigger_node); + return err; +} + +static void +mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_sample_trigger_node *trigger_node) +{ + rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht, + &trigger_node->ht_node, + mlxsw_sp_sample_trigger_ht_params); + kfree_rcu(trigger_node, rcu); +} + +int +mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger, + const struct mlxsw_sp_sample_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_sample_trigger_node *trigger_node; + struct mlxsw_sp_sample_trigger key; + + ASSERT_RTNL(); + + mlxsw_sp_sample_trigger_key_init(&key, trigger); + + trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, + &key, + mlxsw_sp_sample_trigger_ht_params); + if (!trigger_node) + return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key, + params); + + if (trigger_node->params.psample_group != params->psample_group || + trigger_node->params.truncate != params->truncate || + trigger_node->params.rate != params->rate || + trigger_node->params.trunc_size != 
params->trunc_size) { + NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger"); + return -EINVAL; + } + + refcount_inc(&trigger_node->refcount); + + return 0; +} + +void +mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger) +{ + struct mlxsw_sp_sample_trigger_node *trigger_node; + struct mlxsw_sp_sample_trigger key; + + ASSERT_RTNL(); + + mlxsw_sp_sample_trigger_key_init(&key, trigger); + + trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, + &key, + mlxsw_sp_sample_trigger_ht_params); + if (!trigger_node) + return; + + if (!refcount_dec_and_test(&trigger_node->refcount)) + return; + + mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node); +} + static int mlxsw_sp_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr); @@ -2730,6 +2858,13 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_port_module_info_init; } + err = rhashtable_init(&mlxsw_sp->sample_trigger_ht, + &mlxsw_sp_sample_trigger_ht_params); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n"); + goto err_sample_trigger_init; + } + err = mlxsw_sp_ports_create(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); @@ -2739,6 +2874,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, return 0; err_ports_create: + rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); +err_sample_trigger_init: mlxsw_sp_port_module_info_fini(mlxsw_sp); err_port_module_info_init: mlxsw_sp_dpipe_fini(mlxsw_sp); @@ -2788,6 +2925,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops; mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; @@ -2796,7 +2934,6 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; - mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr; mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops; mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; @@ -2804,6 +2941,8 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops; + mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops; + mlxsw_sp->router_ops = &mlxsw_sp1_router_ops; mlxsw_sp->listeners = mlxsw_sp1_listener; mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; @@ -2817,6 +2956,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; @@ -2825,7 +2965,6 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; - mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops; mlxsw_sp->port_type_speed_ops = 
&mlxsw_sp2_port_type_speed_ops; @@ -2833,6 +2972,8 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; + mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; + mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); @@ -2844,6 +2985,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; @@ -2852,7 +2994,6 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; - mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; @@ -2860,6 +3001,8 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; + mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; + mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); @@ -2870,6 +3013,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); mlxsw_sp_ports_remove(mlxsw_sp); + rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); mlxsw_sp_port_module_info_fini(mlxsw_sp); mlxsw_sp_dpipe_fini(mlxsw_sp); unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), @@ -4283,7 +4427,7 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, if (br_vlan_enabled(br_dev)) { br_vlan_get_proto(br_dev, &proto); if (proto == ETH_P_8021AD) { - NL_SET_ERR_MSG_MOD(extack, "Uppers are not supported on top of an 802.1ad bridge"); + NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge"); return -EOPNOTSUPP; } } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index d9d9e1f488f9..97d074d7b78d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -16,6 +16,7 @@ #include <linux/in6.h> #include <linux/notifier.h> #include <linux/net_namespace.h> +#include <linux/spinlock.h> #include <net/psample.h> #include <net/pkt_cls.h> #include <net/red.h> @@ -86,10 +87,15 @@ enum mlxsw_sp_rif_type { MLXSW_SP_RIF_TYPE_MAX, }; -struct mlxsw_sp_rif_ops; +struct mlxsw_sp_router_ops; -extern const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[]; -extern const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[]; +extern const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops; +extern const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops; + +struct mlxsw_sp_switchdev_ops; + +extern const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops; +extern const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops; enum mlxsw_sp_fid_type { MLXSW_SP_FID_TYPE_8021Q, @@ -133,6 +139,7 @@ struct mlxsw_sp_ptp_state; struct mlxsw_sp_ptp_ops; struct mlxsw_sp_span_ops; struct 
mlxsw_sp_qdisc_state; +struct mlxsw_sp_mall_entry; struct mlxsw_sp_port_mapping { u8 module; @@ -148,6 +155,7 @@ struct mlxsw_sp { const unsigned char *mac_mask; struct mlxsw_sp_upper *lags; struct mlxsw_sp_port_mapping **port_mapping; + struct rhashtable sample_trigger_ht; struct mlxsw_sp_sb *sb; struct mlxsw_sp_bridge *bridge; struct mlxsw_sp_router *router; @@ -164,6 +172,7 @@ struct mlxsw_sp { struct mlxsw_sp_counter_pool *counter_pool; struct mlxsw_sp_span *span; struct mlxsw_sp_trap *trap; + const struct mlxsw_sp_switchdev_ops *switchdev_ops; const struct mlxsw_sp_kvdl_ops *kvdl_ops; const struct mlxsw_afa_ops *afa_ops; const struct mlxsw_afk_ops *afk_ops; @@ -171,7 +180,6 @@ struct mlxsw_sp { const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops; const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops; const struct mlxsw_sp_nve_ops **nve_ops_arr; - const struct mlxsw_sp_rif_ops **rif_ops_arr; const struct mlxsw_sp_sb_vals *sb_vals; const struct mlxsw_sp_sb_ops *sb_ops; const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops; @@ -179,6 +187,8 @@ struct mlxsw_sp { const struct mlxsw_sp_span_ops *span_ops; const struct mlxsw_sp_policer_core_ops *policer_core_ops; const struct mlxsw_sp_trap_ops *trap_ops; + const struct mlxsw_sp_mall_ops *mall_ops; + const struct mlxsw_sp_router_ops *router_ops; const struct mlxsw_listener *listeners; size_t listeners_count; u32 lowest_shaper_bs; @@ -232,7 +242,18 @@ struct mlxsw_sp_port_pcpu_stats { u32 tx_dropped; }; -struct mlxsw_sp_port_sample { +enum mlxsw_sp_sample_trigger_type { + MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS, + MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS, + MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE, +}; + +struct mlxsw_sp_sample_trigger { + enum mlxsw_sp_sample_trigger_type type; + u8 local_port; /* Reserved when trigger type is not ingress / egress. 
*/ +}; + +struct mlxsw_sp_sample_params { struct psample_group *psample_group; u32 trunc_size; u32 rate; @@ -302,7 +323,6 @@ struct mlxsw_sp_port { struct mlxsw_sp_port_xstats xstats; struct delayed_work update_dw; } periodic_hw_stats; - struct mlxsw_sp_port_sample __rcu *sample; struct list_head vlans_list; struct mlxsw_sp_port_vlan *default_vlan; struct mlxsw_sp_qdisc_state *qdisc; @@ -531,6 +551,17 @@ void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_hdroom *hdroom); int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port, const struct mlxsw_sp_hdroom *hdroom); +struct mlxsw_sp_sample_params * +mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger); +int +mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger, + const struct mlxsw_sp_sample_params *params, + struct netlink_ext_ack *extack); +void +mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sample_trigger *trigger); extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals; extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals; @@ -568,8 +599,6 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, u8 local_port, void *priv); void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, u8 local_port); -void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port); int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed); int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, @@ -586,6 +615,8 @@ int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable); int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, bool learn_enable); int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type); +int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 ethtype); int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, u16 ethtype); struct mlxsw_sp_port_vlan * @@ -924,6 +955,12 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, u16 fid, struct netlink_ext_ack *extack); +int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct mlxsw_sp_flow_block *block, + struct psample_group *psample_group, u32 rate, + u32 trunc_size, bool truncate, + struct netlink_ext_ack *extack); struct mlxsw_sp_acl_rule; @@ -1033,6 +1070,19 @@ extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops; extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops; /* spectrum_matchall.c */ +struct mlxsw_sp_mall_ops { + int (*sample_add)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry, + struct netlink_ext_ack *extack); + void (*sample_del)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry); +}; + +extern const struct mlxsw_sp_mall_ops mlxsw_sp1_mall_ops; +extern const struct mlxsw_sp_mall_ops mlxsw_sp2_mall_ops; + enum mlxsw_sp_mall_action_type { MLXSW_SP_MALL_ACTION_TYPE_MIRROR, MLXSW_SP_MALL_ACTION_TYPE_SAMPLE, @@ -1048,6 +1098,11 @@ struct mlxsw_sp_mall_trap_entry { int span_id; }; +struct mlxsw_sp_mall_sample_entry { + struct mlxsw_sp_sample_params params; + int span_id; /* Relevant for 
Spectrum-2 onwards. */ +}; + struct mlxsw_sp_mall_entry { struct list_head list; unsigned long cookie; @@ -1057,7 +1112,7 @@ struct mlxsw_sp_mall_entry { union { struct mlxsw_sp_mall_mirror_entry mirror; struct mlxsw_sp_mall_trap_entry trap; - struct mlxsw_sp_port_sample sample; + struct mlxsw_sp_mall_sample_entry sample; }; struct rcu_head rcu; }; @@ -1068,7 +1123,8 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block, struct tc_cls_matchall_offload *f); int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block, - struct mlxsw_sp_port *mlxsw_sp_port); + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack); void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block, struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 8cfa03a75374..67cedfa76f78 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -688,6 +688,31 @@ int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack); } +int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct mlxsw_sp_flow_block *block, + struct psample_group *psample_group, u32 rate, + u32 trunc_size, bool truncate, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_flow_block_binding *binding; + struct mlxsw_sp_port *mlxsw_sp_port; + + if (!list_is_singular(&block->binding_list)) { + NL_SET_ERR_MSG_MOD(extack, "Only a single sampling source is allowed"); + return -EOPNOTSUPP; + } + binding = list_first_entry(&block->binding_list, + struct mlxsw_sp_flow_block_binding, list); + mlxsw_sp_port = binding->mlxsw_sp_port; + + return mlxsw_afa_block_append_sampler(rulei->act_block, + mlxsw_sp_port->local_port, + psample_group, rate, trunc_size, + truncate, binding->ingress, + extack); +} + struct mlxsw_sp_acl_rule * mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 90372d1c28d4..c72aa38424dc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -192,6 +192,22 @@ static void mlxsw_sp_act_policer_del(void *priv, u16 policer_index) policer_index); } +static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port, + struct psample_group *psample_group, + u32 rate, u32 trunc_size, bool truncate, + bool ingress, int *p_span_id, + struct netlink_ext_ack *extack) +{ + NL_SET_ERR_MSG_MOD(extack, "Sampling action is not supported on Spectrum-1"); + return -EOPNOTSUPP; +} + +static void mlxsw_sp1_act_sampler_del(void *priv, u8 local_port, int span_id, + bool ingress) +{ + WARN_ON_ONCE(1); +} + const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { .kvdl_set_add = mlxsw_sp1_act_kvdl_set_add, .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, @@ -204,8 +220,73 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { .mirror_del = mlxsw_sp_act_mirror_del, .policer_add = mlxsw_sp_act_policer_add, .policer_del = mlxsw_sp_act_policer_del, + .sampler_add = mlxsw_sp1_act_sampler_add, + .sampler_del = mlxsw_sp1_act_sampler_del, }; +static int 
mlxsw_sp2_act_sampler_add(void *priv, u8 local_port, + struct psample_group *psample_group, + u32 rate, u32 trunc_size, bool truncate, + bool ingress, int *p_span_id, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_span_agent_parms agent_parms = { + .session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING, + }; + struct mlxsw_sp_sample_trigger trigger = { + .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE, + }; + struct mlxsw_sp_sample_params params; + struct mlxsw_sp_port *mlxsw_sp_port; + struct mlxsw_sp *mlxsw_sp = priv; + int err; + + params.psample_group = psample_group; + params.trunc_size = trunc_size; + params.rate = rate; + params.truncate = truncate; + err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger, &params, + extack); + if (err) + return err; + + err = mlxsw_sp_span_agent_get(mlxsw_sp, p_span_id, &agent_parms); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to get SPAN agent"); + goto err_span_agent_get; + } + + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to get analyzed port"); + goto err_analyzed_port_get; + } + + return 0; + +err_analyzed_port_get: + mlxsw_sp_span_agent_put(mlxsw_sp, *p_span_id); +err_span_agent_get: + mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger); + return err; +} + +static void mlxsw_sp2_act_sampler_del(void *priv, u8 local_port, int span_id, + bool ingress) +{ + struct mlxsw_sp_sample_trigger trigger = { + .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE, + }; + struct mlxsw_sp_port *mlxsw_sp_port; + struct mlxsw_sp *mlxsw_sp = priv; + + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress); + mlxsw_sp_span_agent_put(mlxsw_sp, span_id); + mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger); +} + const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = { .kvdl_set_add = mlxsw_sp2_act_kvdl_set_add, .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, @@ -218,6 +299,8 @@ const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = { .mirror_del = mlxsw_sp_act_mirror_del, .policer_add = mlxsw_sp_act_policer_add, .policer_del = mlxsw_sp_act_policer_del, + .sampler_add = mlxsw_sp2_act_sampler_add, + .sampler_del = mlxsw_sp2_act_sampler_del, .dummy_first_set = true, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index ed81d4fa48ac..1a2fef2a5379 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -912,9 +912,8 @@ static u64 mlxsw_sp_dpipe_table_adj_size(struct mlxsw_sp *mlxsw_sp) u64 size = 0; mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) - if (mlxsw_sp_nexthop_offload(nh) && - !mlxsw_sp_nexthop_group_has_ipip(nh) && - !mlxsw_sp_nexthop_is_discard(nh)) + if (mlxsw_sp_nexthop_is_forward(nh) && + !mlxsw_sp_nexthop_group_has_ipip(nh)) size++; return size; } @@ -1105,9 +1104,8 @@ start_again: nh_skip = nh_count; nh_count = 0; mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) { - if (!mlxsw_sp_nexthop_offload(nh) || - mlxsw_sp_nexthop_group_has_ipip(nh) || - mlxsw_sp_nexthop_is_discard(nh)) + if (!mlxsw_sp_nexthop_is_forward(nh) || + mlxsw_sp_nexthop_group_has_ipip(nh)) continue; if (nh_count < nh_skip) @@ -1180,6 +1178,7 @@ out: static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable) { + char ratr_pl[MLXSW_REG_RATR_LEN]; struct mlxsw_sp *mlxsw_sp = priv; struct mlxsw_sp_nexthop *nh; u32 adj_hash_index = 0; @@ -1187,9 +1186,8 @@
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c index 0456cda33808..9e50c823a354 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c @@ -71,7 +71,7 @@ static int mlxsw_sp_flow_block_bind(struct mlxsw_sp *mlxsw_sp, return -EOPNOTSUPP; } - err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port); + err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port, extack); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 41855e58564b..be3791ca6069 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -24,6 +24,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, const struct flow_action_entry *act; int mirror_act_count = 0; int police_act_count = 0; + int sample_act_count = 0; int err, i; if (!flow_action_has_entries(flow_action)) @@ -190,6 +191,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return -EOPNOTSUPP; } + if (act->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second"); + return -EOPNOTSUPP; + } + /* The kernel might adjust the requested burst size so * that it is not exactly a power of two. 
Re-adjust it * here since the hardware only supports burst sizes @@ -204,6 +210,23 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return err; break; } + case FLOW_ACTION_SAMPLE: { + if (sample_act_count++) { + NL_SET_ERR_MSG_MOD(extack, "Multiple sample actions per rule are not supported"); + return -EOPNOTSUPP; + } + + err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei, + block, + act->sample.psample_group, + act->sample.rate, + act->sample.trunc_size, + act->sample.truncate, + extack); + if (err) + return err; + break; + } default: NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 6ccca39bae84..b8b08a6a1d10 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -127,14 +127,16 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr) static int mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_ipip_entry *ipip_entry) + struct mlxsw_sp_ipip_entry *ipip_entry, + bool force, char *ratr_pl) { u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); __be32 daddr4 = mlxsw_sp_ipip_netdev_daddr4(ipip_entry->ol_dev); - char ratr_pl[MLXSW_REG_RATR_LEN]; + enum mlxsw_reg_ratr_op op; - mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, - true, MLXSW_REG_RATR_TYPE_IPIP, + op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY : + MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY; + mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_IPIP, adj_index, rif_index); mlxsw_reg_ratr_ipip4_entry_pack(ratr_pl, be32_to_cpu(daddr4)); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index 87bef9880e5e..f0837b42d1d6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -40,7 +40,8 @@ struct mlxsw_sp_ipip_ops { enum mlxsw_sp_l3proto ul_proto; /* Underlay. 
*/ int (*nexthop_update)(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_ipip_entry *ipip_entry); + struct mlxsw_sp_ipip_entry *ipip_entry, + bool force, char *ratr_pl); bool (*can_offload)(const struct mlxsw_sp *mlxsw_sp, const struct net_device *ol_dev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c index f30599ad6019..ce58a795c6fc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c @@ -24,7 +24,8 @@ mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie static int mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_mall_entry *mall_entry) + struct mlxsw_sp_mall_entry *mall_entry, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_span_agent_parms agent_parms = {}; @@ -33,28 +34,35 @@ mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port, int err; if (!mall_entry->mirror.to_dev) { - netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n"); + NL_SET_ERR_MSG(extack, "Could not find requested device"); return -EINVAL; } agent_parms.to_dev = mall_entry->mirror.to_dev; err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id, &agent_parms); - if (err) + if (err) { + NL_SET_ERR_MSG(extack, "Failed to get SPAN agent"); return err; + } err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, mall_entry->ingress); - if (err) + if (err) { + NL_SET_ERR_MSG(extack, "Failed to get analyzed port"); goto err_analyzed_port_get; + } trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS : MLXSW_SP_SPAN_TRIGGER_EGRESS; parms.span_id = mall_entry->mirror.span_id; + parms.probability_rate = 1; err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port, &parms); - if (err) + if (err) { + NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent"); goto err_agent_bind; + } return 0; @@ -93,46 +101,64 @@ static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_mall_entry *mall_entry) + struct mlxsw_sp_mall_entry *mall_entry, + struct netlink_ext_ack *extack) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_sample_trigger trigger; int err; - if (rtnl_dereference(mlxsw_sp_port->sample)) { - netdev_err(mlxsw_sp_port->dev, "sample already active\n"); - return -EEXIST; - } - rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample); + if (mall_entry->ingress) + trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS; + else + trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS; + trigger.local_port = mlxsw_sp_port->local_port; + err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger, + &mall_entry->sample.params, + extack); + if (err) + return err; - err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true, - mall_entry->sample.rate); + err = mlxsw_sp->mall_ops->sample_add(mlxsw_sp, mlxsw_sp_port, + mall_entry, extack); if (err) goto err_port_sample_set; return 0; err_port_sample_set: - RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL); + mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger); return err; } static void -mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port) +mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry) { - if (!mlxsw_sp_port->sample) - return; + struct mlxsw_sp 
*mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_sample_trigger trigger; - mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1); - RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL); + if (mall_entry->ingress) + trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS; + else + trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS; + trigger.local_port = mlxsw_sp_port->local_port; + + mlxsw_sp->mall_ops->sample_del(mlxsw_sp, mlxsw_sp_port, mall_entry); + mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger); } static int mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_mall_entry *mall_entry) + struct mlxsw_sp_mall_entry *mall_entry, + struct netlink_ext_ack *extack) { switch (mall_entry->type) { case MLXSW_SP_MALL_ACTION_TYPE_MIRROR: - return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry); + return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry, + extack); case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE: - return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry); + return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry, + extack); default: WARN_ON(1); return -EINVAL; @@ -148,7 +174,7 @@ mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry); break; case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE: - mlxsw_sp_mall_port_sample_del(mlxsw_sp_port); + mlxsw_sp_mall_port_sample_del(mlxsw_sp_port, mall_entry); break; default: WARN_ON(1); @@ -238,27 +264,17 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp, mall_entry->mirror.to_dev = act->dev; } else if (act->id == FLOW_ACTION_SAMPLE && protocol == htons(ETH_P_ALL)) { - if (!mall_entry->ingress) { - NL_SET_ERR_MSG(f->common.extack, "Sample is not supported on egress"); - err = -EOPNOTSUPP; - goto errout; - } if (flower_prio_valid && mall_entry->priority >= flower_min_prio) { NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules"); err = -EOPNOTSUPP; goto errout; } - if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) { - NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported"); - err = -EOPNOTSUPP; - goto errout; - } mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE; - mall_entry->sample.psample_group = act->sample.psample_group; - mall_entry->sample.truncate = act->sample.truncate; - mall_entry->sample.trunc_size = act->sample.trunc_size; - mall_entry->sample.rate = act->sample.rate; + mall_entry->sample.params.psample_group = act->sample.psample_group; + mall_entry->sample.params.truncate = act->sample.truncate; + mall_entry->sample.params.trunc_size = act->sample.trunc_size; + mall_entry->sample.params.rate = act->sample.rate; } else { err = -EOPNOTSUPP; goto errout; @@ -266,7 +282,7 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp, list_for_each_entry(binding, &block->binding_list, list) { err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port, - mall_entry); + mall_entry, f->common.extack); if (err) goto rollback; } @@ -314,13 +330,15 @@ void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block, } int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block, - struct mlxsw_sp_port *mlxsw_sp_port) + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) { struct mlxsw_sp_mall_entry *mall_entry; int err; list_for_each_entry(mall_entry, &block->mall.list, list) { - err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry); + err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry, + extack); if (err) goto rollback; } @@ -355,3 +373,104 @@ int 
mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index, *p_max_prio = block->mall.max_prio; return 0; } + +static int mlxsw_sp1_mall_sample_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry, + struct netlink_ext_ack *extack) +{ + u32 rate = mall_entry->sample.params.rate; + + if (!mall_entry->ingress) { + NL_SET_ERR_MSG(extack, "Sampling is not supported on egress"); + return -EOPNOTSUPP; + } + + if (rate > MLXSW_REG_MPSC_RATE_MAX) { + NL_SET_ERR_MSG(extack, "Unsupported sampling rate"); + return -EOPNOTSUPP; + } + + return mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true, rate); +} + +static void mlxsw_sp1_mall_sample_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry) +{ + mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1); +} + +const struct mlxsw_sp_mall_ops mlxsw_sp1_mall_ops = { + .sample_add = mlxsw_sp1_mall_sample_add, + .sample_del = mlxsw_sp1_mall_sample_del, +}; + +static int mlxsw_sp2_mall_sample_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_span_trigger_parms trigger_parms = {}; + struct mlxsw_sp_span_agent_parms agent_parms = { + .to_dev = NULL, /* Mirror to CPU. */ + .session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING, + }; + u32 rate = mall_entry->sample.params.rate; + enum mlxsw_sp_span_trigger span_trigger; + int err; + + err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->sample.span_id, + &agent_parms); + if (err) { + NL_SET_ERR_MSG(extack, "Failed to get SPAN agent"); + return err; + } + + err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, + mall_entry->ingress); + if (err) { + NL_SET_ERR_MSG(extack, "Failed to get analyzed port"); + goto err_analyzed_port_get; + } + + span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS : + MLXSW_SP_SPAN_TRIGGER_EGRESS; + trigger_parms.span_id = mall_entry->sample.span_id; + trigger_parms.probability_rate = rate; + err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port, + &trigger_parms); + if (err) { + NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent"); + goto err_agent_bind; + } + + return 0; + +err_agent_bind: + mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress); +err_analyzed_port_get: + mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id); + return err; +} + +static void mlxsw_sp2_mall_sample_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mall_entry *mall_entry) +{ + struct mlxsw_sp_span_trigger_parms trigger_parms = {}; + enum mlxsw_sp_span_trigger span_trigger; + + span_trigger = mall_entry->ingress ? 
MLXSW_SP_SPAN_TRIGGER_INGRESS : + MLXSW_SP_SPAN_TRIGGER_EGRESS; + trigger_parms.span_id = mall_entry->sample.span_id; + mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port, + &trigger_parms); + mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress); + mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id); +} + +const struct mlxsw_sp_mall_ops mlxsw_sp2_mall_ops = { + .sample_add = mlxsw_sp2_mall_sample_add, + .sample_del = mlxsw_sp2_mall_sample_del, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h index 2796d3659979..d8104fc6c900 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h @@ -18,7 +18,6 @@ struct mlxsw_sp_nve_config { u32 ul_tb_id; enum mlxsw_sp_l3proto ul_proto; union mlxsw_sp_l3addr ul_sip; - u16 ethertype; }; struct mlxsw_sp_nve { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index 3e2bb22e9ca6..b84bb4b65098 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -113,7 +113,6 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, config->ul_proto = MLXSW_SP_L3_PROTO_IPV4; config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr; config->udp_dport = cfg->dst_port; - config->ethertype = params->ethertype; } static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, @@ -318,20 +317,14 @@ static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, } static int -mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp, u16 ethertype) +mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp) { char spvid_pl[MLXSW_REG_SPVID_LEN] = {}; - u8 sver_type; - int err; mlxsw_reg_spvid_tport_set(spvid_pl, true); mlxsw_reg_spvid_local_port_set(spvid_pl, MLXSW_REG_TUNNEL_PORT_NVE); - err = mlxsw_sp_ethtype_to_sver_type(ethertype, &sver_type); - if (err) - return err; - - mlxsw_reg_spvid_et_vlan_set(spvid_pl, sver_type); + mlxsw_reg_spvid_egr_et_set_set(spvid_pl, true); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); } @@ -367,7 +360,7 @@ mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp, if (err) goto err_spvtr_write; - err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, config->ethertype); + err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp); if (err) goto err_decap_ethertype_set; @@ -392,8 +385,6 @@ static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp) char spvtr_pl[MLXSW_REG_SPVTR_LEN]; char tngcr_pl[MLXSW_REG_TNGCR_LEN]; - /* Set default EtherType */ - mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, ETH_P_8021Q); mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE, MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index fd672c6c9133..baf17c0b2702 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -1341,6 +1341,7 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp, goto err_analyzed_port_get; trigger_parms.span_id = span_id; + trigger_parms.probability_rate = 1; err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, &trigger_parms); if (err) @@ -1404,7 +1405,9 @@ static 
int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mall_entry *mall_entry, struct mlxsw_sp_qevent_binding *qevent_binding) { - struct mlxsw_sp_span_agent_parms agent_parms = {}; + struct mlxsw_sp_span_agent_parms agent_parms = { + .session_id = MLXSW_SP_SPAN_SESSION_ID_BUFFER, + }; int err; err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index eda99d82766a..6ccaa194733b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -113,6 +113,10 @@ struct mlxsw_sp_rif_ops { void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac); }; +struct mlxsw_sp_router_ops { + int (*init)(struct mlxsw_sp *mlxsw_sp); +}; + static struct mlxsw_sp_rif * mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev); @@ -2662,6 +2666,10 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work) goto out; } + if (neigh_entry->connected && entry_connected && + !memcmp(neigh_entry->ha, ha, ETH_ALEN)) + goto out; + memcpy(neigh_entry->ha, ha, ETH_ALEN); mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected); mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected, @@ -2842,6 +2850,15 @@ enum mlxsw_sp_nexthop_type { MLXSW_SP_NEXTHOP_TYPE_IPIP, }; +enum mlxsw_sp_nexthop_action { + /* Nexthop forwards packets to an egress RIF */ + MLXSW_SP_NEXTHOP_ACTION_FORWARD, + /* Nexthop discards packets */ + MLXSW_SP_NEXTHOP_ACTION_DISCARD, + /* Nexthop traps packets */ + MLXSW_SP_NEXTHOP_ACTION_TRAP, +}; + struct mlxsw_sp_nexthop_key { struct fib_nh *fib_nh; }; @@ -2862,16 +2879,16 @@ struct mlxsw_sp_nexthop { int norm_nh_weight; int num_adj_entries; struct mlxsw_sp_rif *rif; - u8 should_offload:1, /* set indicates this neigh is connected and - * should be put to KVD linear area of this group. + u8 should_offload:1, /* set indicates this nexthop should be written + * to the adjacency table. */ - offloaded:1, /* set in case the neigh is actually put into - * KVD linear area of this group. + offloaded:1, /* set indicates this nexthop was written to the + * adjacency table. */ - update:1, /* set indicates that MAC of this neigh should be - * updated in HW + update:1; /* set indicates this nexthop should be updated in the + * adjacency table (e.g., its MAC changed). 
*/ - discard:1; /* nexthop is programmed to discard packets */ + enum mlxsw_sp_nexthop_action action; enum mlxsw_sp_nexthop_type type; union { struct mlxsw_sp_neigh_entry *neigh_entry; @@ -2894,7 +2911,9 @@ struct mlxsw_sp_nexthop_group_info { u16 count; int sum_norm_weight; u8 adj_index_valid:1, - gateway:1; /* routes using the group use a gateway */ + gateway:1, /* routes using the group use a gateway */ + is_resilient:1; + struct list_head list; /* member in nh_res_grp_list */ struct mlxsw_sp_nexthop nexthops[0]; #define nh_rif nexthops[0].rif }; @@ -2979,14 +2998,15 @@ struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router, return list_next_entry(nh, router_list_node); } -bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh) +bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh) { - return nh->offloaded; + return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD; } unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh) { - if (!nh->offloaded) + if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH || + !mlxsw_sp_nexthop_is_forward(nh)) return NULL; return nh->neigh_entry->ha; } @@ -3036,11 +3056,6 @@ bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh) return false; } -bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh) -{ - return nh->discard; -} - static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = { .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key), .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node), @@ -3403,20 +3418,38 @@ err_mass_update_vr: return err; } -static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_nexthop *nh) +static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, + u32 adj_index, + struct mlxsw_sp_nexthop *nh, + bool force, char *ratr_pl) { struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; - char ratr_pl[MLXSW_REG_RATR_LEN]; + enum mlxsw_reg_ratr_op op; + u16 rif_index; - mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, - true, MLXSW_REG_RATR_TYPE_ETHERNET, - adj_index, nh->rif->rif_index); - if (nh->discard) + rif_index = nh->rif ? nh->rif->rif_index : + mlxsw_sp->router->lb_rif_index; + op = force ? 
MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY : + MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY; + mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET, + adj_index, rif_index); + switch (nh->action) { + case MLXSW_SP_NEXTHOP_ACTION_FORWARD: + mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha); + break; + case MLXSW_SP_NEXTHOP_ACTION_DISCARD: mlxsw_reg_ratr_trap_action_set(ratr_pl, MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS); - else - mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha); + break; + case MLXSW_SP_NEXTHOP_ACTION_TRAP: + mlxsw_reg_ratr_trap_action_set(ratr_pl, + MLXSW_REG_RATR_TRAP_ACTION_TRAP); + mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0); + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } if (nh->counter_valid) mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true); else @@ -3425,15 +3458,17 @@ static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); } -int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_nexthop *nh) +int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_nexthop *nh, bool force, + char *ratr_pl) { int i; for (i = 0; i < nh->num_adj_entries; i++) { int err; - err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh); + err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i, + nh, force, ratr_pl); if (err) return err; } @@ -3443,17 +3478,20 @@ int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_nexthop *nh) + struct mlxsw_sp_nexthop *nh, + bool force, char *ratr_pl) { const struct mlxsw_sp_ipip_ops *ipip_ops; ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt]; - return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry); + return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry, + force, ratr_pl); } static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_nexthop *nh) + struct mlxsw_sp_nexthop *nh, bool force, + char *ratr_pl) { int i; @@ -3461,7 +3499,7 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, int err; err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i, - nh); + nh, force, ratr_pl); if (err) return err; } @@ -3469,11 +3507,29 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, return 0; } +static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_nexthop *nh, bool force, + char *ratr_pl) +{ + /* When action is discard or trap, the nexthop must be + * programmed as an Ethernet nexthop. 
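+ * (Editor's illustration, not part of the patch: such an entry is still + * packed with mlxsw_reg_ratr_pack(ratr_pl, op, true, + * MLXSW_REG_RATR_TYPE_ETHERNET, adj_index, rif_index); only the trap + * action and trap ID differ, as the switch statement in + * __mlxsw_sp_nexthop_eth_update() above shows.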
+ */ + if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH || + nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD || + nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP) + return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh, + force, ratr_pl); + else + return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh, + force, ratr_pl); +} + static int mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group_info *nhgi, bool reallocate) { + char ratr_pl[MLXSW_REG_RATR_LEN]; u32 adj_index = nhgi->adj_index; /* base */ struct mlxsw_sp_nexthop *nh; int i; @@ -3489,16 +3545,8 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp, if (nh->update || reallocate) { int err = 0; - switch (nh->type) { - case MLXSW_SP_NEXTHOP_TYPE_ETH: - err = mlxsw_sp_nexthop_update - (mlxsw_sp, adj_index, nh); - break; - case MLXSW_SP_NEXTHOP_TYPE_IPIP: - err = mlxsw_sp_nexthop_ipip_update - (mlxsw_sp, adj_index, nh); - break; - } + err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, + true, ratr_pl); if (err) return err; nh->update = 0; @@ -3524,34 +3572,69 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, return 0; } -static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size) +struct mlxsw_sp_adj_grp_size_range { + u16 start; /* Inclusive */ + u16 end; /* Inclusive */ +}; + +/* Ordered by range start value */ +static const struct mlxsw_sp_adj_grp_size_range +mlxsw_sp1_adj_grp_size_ranges[] = { + { .start = 1, .end = 64 }, + { .start = 512, .end = 512 }, + { .start = 1024, .end = 1024 }, + { .start = 2048, .end = 2048 }, + { .start = 4096, .end = 4096 }, +}; + +/* Ordered by range start value */ +static const struct mlxsw_sp_adj_grp_size_range +mlxsw_sp2_adj_grp_size_ranges[] = { + { .start = 1, .end = 128 }, + { .start = 256, .end = 256 }, + { .start = 512, .end = 512 }, + { .start = 1024, .end = 1024 }, + { .start = 2048, .end = 2048 }, + { .start = 4096, .end = 4096 }, +}; + +static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp, + u16 *p_adj_grp_size) { - /* Valid sizes for an adjacency group are: - * 1-64, 512, 1024, 2048 and 4096. 
- */ - if (*p_adj_grp_size <= 64) - return; - else if (*p_adj_grp_size <= 512) - *p_adj_grp_size = 512; - else if (*p_adj_grp_size <= 1024) - *p_adj_grp_size = 1024; - else if (*p_adj_grp_size <= 2048) - *p_adj_grp_size = 2048; - else - *p_adj_grp_size = 4096; + int i; + + for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) { + const struct mlxsw_sp_adj_grp_size_range *size_range; + + size_range = &mlxsw_sp->router->adj_grp_size_ranges[i]; + + if (*p_adj_grp_size >= size_range->start && + *p_adj_grp_size <= size_range->end) + return; + + if (*p_adj_grp_size <= size_range->end) { + *p_adj_grp_size = size_range->end; + return; + } + } } -static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size, +static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp, + u16 *p_adj_grp_size, unsigned int alloc_size) { - if (alloc_size >= 4096) - *p_adj_grp_size = 4096; - else if (alloc_size >= 2048) - *p_adj_grp_size = 2048; - else if (alloc_size >= 1024) - *p_adj_grp_size = 1024; - else if (alloc_size >= 512) - *p_adj_grp_size = 512; + int i; + + for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) { + const struct mlxsw_sp_adj_grp_size_range *size_range; + + size_range = &mlxsw_sp->router->adj_grp_size_ranges[i]; + + if (alloc_size >= size_range->end) { + *p_adj_grp_size = size_range->end; + return; + } + } } static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp, @@ -3563,7 +3646,7 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp, /* Round up the requested group size to the next size supported * by the device and make sure the request can be satisfied. */ - mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size); + mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size); err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, *p_adj_grp_size, &alloc_size); @@ -3573,7 +3656,7 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp, * entries than requested. Try to use as many of them as * possible. */ - mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size); + mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size); return 0; } @@ -3681,9 +3764,29 @@ mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp, } static void +mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_nexthop *nh, + u16 bucket_index) +{ + struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp; + bool offload = false, trap = false; + + if (nh->offloaded) { + if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP) + trap = true; + else + offload = true; + } + nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id, + bucket_index, offload, trap); +} + +static void mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { + int i; + /* Do not update the flags if the nexthop group is being destroyed * since: * 1. The nexthop object is being deleted, in which case the flags are @@ -3697,6 +3800,18 @@ mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp, nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id, nh_grp->nhgi->adj_index_valid, false); + + /* Update flags of individual nexthop buckets in case of a resilient + * nexthop group. 
+ */ + if (!nh_grp->nhgi->is_resilient) + return; + + for (i = 0; i < nh_grp->nhgi->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i]; + + mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i); + } } static void @@ -3750,6 +3865,10 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); goto set_trap; } + /* Flags of individual nexthop buckets might need to be + * updated. + */ + mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp); return 0; } mlxsw_sp_nexthop_group_normalize(nhgi); @@ -3832,10 +3951,15 @@ set_trap: static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, bool removing) { - if (!removing) + if (!removing) { + nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD; nh->should_offload = 1; - else + } else if (nh->nhgi->is_resilient) { + nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP; + nh->should_offload = 1; + } else { nh->should_offload = 0; + } nh->update = 1; } @@ -4250,6 +4374,85 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, } } +static void +mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_nexthop_group *nh_grp, + unsigned long *activity) +{ + char *ratrad_pl; + int i, err; + + ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL); + if (!ratrad_pl) + return; + + mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index, + nh_grp->nhgi->count); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl); + if (err) + goto out; + + for (i = 0; i < nh_grp->nhgi->count; i++) { + if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i)) + continue; + bitmap_set(activity, i, 1); + } + +out: + kfree(ratrad_pl); +} + +#define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */ + +static void +mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_nexthop_group *nh_grp) +{ + unsigned long *activity; + + activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL); + if (!activity) + return; + + mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity); + nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id, + nh_grp->nhgi->count, activity); + + bitmap_free(activity); +} + +static void +mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp) +{ + unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL; + + mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw, + msecs_to_jiffies(interval)); +} + +static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work) +{ + struct mlxsw_sp_nexthop_group_info *nhgi; + struct mlxsw_sp_router *router; + bool reschedule = false; + + router = container_of(work, struct mlxsw_sp_router, + nh_grp_activity_dw.work); + + mutex_lock(&router->lock); + + list_for_each_entry(nhgi, &router->nh_res_grp_list, list) { + mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp); + reschedule = true; + } + + mutex_unlock(&router->lock); + + if (!reschedule) + return; + mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp); +} + static int mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp, const struct nh_notifier_single_info *nh, @@ -4268,6 +4471,29 @@ mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp, } static int +mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp, + const struct nh_notifier_single_info *nh, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack); + if (err) + return 
err; + + /* Device only nexthops with an IPIP device are programmed as + * encapsulating adjacency entries. + */ + if (!nh->gw_family && !nh->is_reject && + !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) { + NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway"); + return -EINVAL; + } + + return 0; +} + +static int mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp, const struct nh_notifier_grp_info *nh_grp, struct netlink_ext_ack *extack) @@ -4284,21 +4510,83 @@ mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp, int err; nh = &nh_grp->nh_entries[i].nh; - err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, - extack); + err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh, + extack); if (err) return err; + } - /* Device only nexthops with an IPIP device are programmed as - * encapsulating adjacency entries. - */ - if (!nh->gw_family && !nh->is_reject && - !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) { - NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway"); - return -EINVAL; + return 0; +} + +static int +mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp, + const struct nh_notifier_res_table_info *nh_res_table, + struct netlink_ext_ack *extack) +{ + unsigned int alloc_size; + bool valid_size = false; + int err, i; + + if (nh_res_table->num_nh_buckets < 32) { + NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32"); + return -EINVAL; + } + + for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) { + const struct mlxsw_sp_adj_grp_size_range *size_range; + + size_range = &mlxsw_sp->router->adj_grp_size_ranges[i]; + + if (nh_res_table->num_nh_buckets >= size_range->start && + nh_res_table->num_nh_buckets <= size_range->end) { + valid_size = true; + break; } } + if (!valid_size) { + NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets"); + return -EINVAL; + } + + err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp, + MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + nh_res_table->num_nh_buckets, + &alloc_size); + if (err || nh_res_table->num_nh_buckets != alloc_size) { + NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition"); + return -EINVAL; + } + + return 0; +} + +static int +mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp, + const struct nh_notifier_res_table_info *nh_res_table, + struct netlink_ext_ack *extack) +{ + int err; + u16 i; + + err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp, + nh_res_table, + extack); + if (err) + return err; + + for (i = 0; i < nh_res_table->num_nh_buckets; i++) { + const struct nh_notifier_single_info *nh; + int err; + + nh = &nh_res_table->nhs[i]; + err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh, + extack); + if (err) + return err; + } + return 0; } @@ -4306,7 +4594,11 @@ static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp, unsigned long event, struct nh_notifier_info *info) { - if (event != NEXTHOP_EVENT_REPLACE) + struct nh_notifier_single_info *nh; + + if (event != NEXTHOP_EVENT_REPLACE && + event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE && + event != NEXTHOP_EVENT_BUCKET_REPLACE) return 0; switch (info->type) { @@ -4317,6 +4609,14 @@ static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp, info->nh_grp, info->extack); + case NH_NOTIFIER_INFO_TYPE_RES_TABLE: + return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp, + info->nh_res_table, + info->extack); + case 
NH_NOTIFIER_INFO_TYPE_RES_BUCKET: + nh = &info->nh_res_bucket->new_nh; + return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh, + info->extack); default: NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type"); return -EOPNOTSUPP; @@ -4334,6 +4634,7 @@ static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp, return info->nh->gw_family || info->nh->is_reject || mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL); case NH_NOTIFIER_INFO_TYPE_GRP: + case NH_NOTIFIER_INFO_TYPE_RES_TABLE: /* Already validated earlier. */ return true; default: @@ -4346,7 +4647,7 @@ static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp, { u16 lb_rif_index = mlxsw_sp->router->lb_rif_index; - nh->discard = 1; + nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD; nh->should_offload = 1; /* While nexthops that discard packets do not forward packets * via an egress RIF, they still need to be programmed using a @@ -4398,6 +4699,15 @@ mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp, if (nh_obj->is_reject) mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh); + /* In a resilient nexthop group, all the nexthops must be written to + * the adjacency table. Even if they do not have a valid neighbour or + * RIF. + */ + if (nh_grp->nhgi->is_resilient && !nh->should_offload) { + nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP; + nh->should_offload = 1; + } + return 0; err_type_init: @@ -4409,11 +4719,12 @@ err_type_init: static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { - if (nh->discard) + if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD) mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh); mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); list_del(&nh->router_list_node); mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); + nh->should_offload = 0; } static int @@ -4423,6 +4734,7 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_nexthop_group_info *nhgi; struct mlxsw_sp_nexthop *nh; + bool is_resilient = false; unsigned int nhs; int err, i; @@ -4433,6 +4745,10 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, case NH_NOTIFIER_INFO_TYPE_GRP: nhs = info->nh_grp->num_nh; break; + case NH_NOTIFIER_INFO_TYPE_RES_TABLE: + nhs = info->nh_res_table->num_nh_buckets; + is_resilient = true; + break; default: return -EINVAL; } @@ -4443,6 +4759,7 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, nh_grp->nhgi = nhgi; nhgi->nh_grp = nh_grp; nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info); + nhgi->is_resilient = is_resilient; nhgi->count = nhs; for (i = 0; i < nhgi->count; i++) { struct nh_notifier_single_info *nh_obj; @@ -4458,6 +4775,10 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, nh_obj = &info->nh_grp->nh_entries[i].nh; weight = info->nh_grp->nh_entries[i].weight; break; + case NH_NOTIFIER_INFO_TYPE_RES_TABLE: + nh_obj = &info->nh_res_table->nhs[i]; + weight = 1; + break; default: err = -EINVAL; goto err_nexthop_obj_init; @@ -4473,6 +4794,15 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, goto err_group_refresh; } + /* Add resilient nexthop groups to a list so that the activity of their + * nexthop buckets will be periodically queried and cleared. 
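+ * (Editor's illustration, not part of the patch: the delayed work defined + * above runs every MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL (1000 ms), + * queries the RATRAD register for each group on the list and reports the + * resulting bitmap through nexthop_res_grp_activity_update().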
+ */ + if (nhgi->is_resilient) { + if (list_empty(&mlxsw_sp->router->nh_res_grp_list)) + mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp); + list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list); + } + return 0; err_group_refresh: @@ -4491,8 +4821,15 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi; + struct mlxsw_sp_router *router = mlxsw_sp->router; int i; + if (nhgi->is_resilient) { + list_del(&nhgi->list); + if (list_empty(&mlxsw_sp->router->nh_res_grp_list)) + cancel_delayed_work(&router->nh_grp_activity_dw); + } + for (i = nhgi->count - 1; i >= 0; i--) { struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; @@ -4685,6 +5022,135 @@ static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp); } +static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp, + u32 adj_index, char *ratr_pl) +{ + MLXSW_REG_ZERO(ratr, ratr_pl); + mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ); + mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index); + mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16); + + return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); +} + +static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new) +{ + /* Clear the opcode and activity on both the old and new payload as + * they are irrelevant for the comparison. + */ + mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ); + mlxsw_reg_ratr_a_set(ratr_pl, 0); + mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ); + mlxsw_reg_ratr_a_set(ratr_pl_new, 0); + + /* If the contents of the adjacency entry are consistent with the + * replacement request, then replacement was successful. + */ + if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN)) + return 0; + + return -EINVAL; +} + +static int +mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh, + struct nh_notifier_info *info) +{ + u16 bucket_index = info->nh_res_bucket->bucket_index; + struct netlink_ext_ack *extack = info->extack; + bool force = info->nh_res_bucket->force; + char ratr_pl_new[MLXSW_REG_RATR_LEN]; + char ratr_pl[MLXSW_REG_RATR_LEN]; + u32 adj_index; + int err; + + /* No point in trying an atomic replacement if the idle timer interval + * is smaller than the interval in which we query and clear activity. + */ + force = info->nh_res_bucket->idle_timer_ms < + MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL; + + adj_index = nh->nhgi->adj_index + bucket_index; + err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket"); + return err; + } + + if (!force) { + err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index, + ratr_pl_new); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. 
State might be inconsistent"); + return err; + } + + err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement"); + return err; + } + } + + nh->update = 0; + nh->offloaded = 1; + mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index); + + return 0; +} + +static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp, + struct nh_notifier_info *info) +{ + u16 bucket_index = info->nh_res_bucket->bucket_index; + struct netlink_ext_ack *extack = info->extack; + struct mlxsw_sp_nexthop_group_info *nhgi; + struct nh_notifier_single_info *nh_obj; + struct mlxsw_sp_nexthop_group *nh_grp; + struct mlxsw_sp_nexthop *nh; + int err; + + nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id); + if (!nh_grp) { + NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found"); + return -EINVAL; + } + + nhgi = nh_grp->nhgi; + + if (bucket_index >= nhgi->count) { + NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range"); + return -EINVAL; + } + + nh = &nhgi->nexthops[bucket_index]; + mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh); + + nh_obj = &info->nh_res_bucket->new_nh; + err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement"); + goto err_nexthop_obj_init; + } + + err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info); + if (err) + goto err_nexthop_obj_bucket_adj_update; + + return 0; + +err_nexthop_obj_bucket_adj_update: + mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh); +err_nexthop_obj_init: + nh_obj = &info->nh_res_bucket->old_nh; + mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1); + /* The old adjacency entry was not overwritten */ + nh->update = 0; + nh->offloaded = 1; + return err; +} + static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -4699,8 +5165,6 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb, mutex_lock(&router->lock); - ASSERT_RTNL(); - switch (event) { case NEXTHOP_EVENT_REPLACE: err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info); @@ -4708,6 +5172,10 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb, case NEXTHOP_EVENT_DEL: mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info); break; + case NEXTHOP_EVENT_BUCKET_REPLACE: + err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp, + info); + break; default: break; } @@ -7667,7 +8135,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, int i, err; type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev); - ops = mlxsw_sp->rif_ops_arr[type]; + ops = mlxsw_sp->router->rif_ops_arr[type]; vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? 
: RT_TABLE_MAIN, extack); if (IS_ERR(vr)) @@ -8865,7 +9333,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = { .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure, }; -const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = { +static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = { [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops, [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops, [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops, @@ -9050,7 +9518,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = { .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure, }; -const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = { +static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = { [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops, [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops, [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops, @@ -9302,6 +9770,36 @@ static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index); } +static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp) +{ + size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges); + + mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr; + mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges; + mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count; + + return 0; +} + +const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = { + .init = mlxsw_sp1_router_init, +}; + +static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp) +{ + size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges); + + mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr; + mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges; + mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count; + + return 0; +} + +const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = { + .init = mlxsw_sp2_router_init, +}; + int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, struct netlink_ext_ack *extack) { @@ -9315,6 +9813,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, mlxsw_sp->router = router; router->mlxsw_sp = mlxsw_sp; + err = mlxsw_sp->router_ops->init(mlxsw_sp); + if (err) + goto err_router_ops_init; + err = mlxsw_sp_router_xm_init(mlxsw_sp); if (err) goto err_xm_init; @@ -9328,6 +9830,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_ll_op_ctx_init; + INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list); + INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw, + mlxsw_sp_nh_grp_activity_work); + INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list); err = __mlxsw_sp_router_init(mlxsw_sp); if (err) @@ -9451,10 +9957,12 @@ err_ipips_init: err_rifs_init: __mlxsw_sp_router_fini(mlxsw_sp); err_router_init: + cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw); mlxsw_sp_router_ll_op_ctx_fini(router); err_ll_op_ctx_init: mlxsw_sp_router_xm_fini(mlxsw_sp); err_xm_init: +err_router_ops_init: mutex_destroy(&mlxsw_sp->router->lock); kfree(mlxsw_sp->router); return err; @@ -9481,6 +9989,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_ipips_fini(mlxsw_sp); mlxsw_sp_rifs_fini(mlxsw_sp); __mlxsw_sp_router_fini(mlxsw_sp); + cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw); mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router); mlxsw_sp_router_xm_fini(mlxsw_sp); mutex_destroy(&mlxsw_sp->router->lock); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 2875ee8ec537..be7708a375e1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -78,6 +78,10 @@ struct mlxsw_sp_router { struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx; u16 lb_rif_index; struct mlxsw_sp_router_xm *xm; + const struct mlxsw_sp_adj_grp_size_range *adj_grp_size_ranges; + size_t adj_grp_size_ranges_count; + struct delayed_work nh_grp_activity_dw; + struct list_head nh_res_grp_list; }; struct mlxsw_sp_fib_entry_priv { @@ -195,20 +199,20 @@ mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_ipip_entry *except); struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router, struct mlxsw_sp_nexthop *nh); -bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh); +bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh); unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh); int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, u32 *p_adj_size, u32 *p_adj_hash_index); struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh); bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh); -bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh); #define mlxsw_sp_nexthop_for_each(nh, router) \ for (nh = mlxsw_sp_nexthop_next(router, NULL); nh; \ nh = mlxsw_sp_nexthop_next(router, nh)) int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh, u64 *p_counter); -int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_nexthop *nh); +int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_nexthop *nh, bool force, + char *ratr_pl); void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh); void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 1892cea05ee7..3398cc01e5ec 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -186,6 +186,7 @@ mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry, /* Create a new port analyzer entry for local_port. 
*/ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH); + mlxsw_reg_mpat_session_id_set(mpat_pl, sparms.session_id); mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); @@ -203,6 +204,7 @@ mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry, int pa_id = span_entry->id; mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type); + mlxsw_reg_mpat_session_id_set(mpat_pl, span_entry->parms.session_id); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); } @@ -938,7 +940,8 @@ mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp, if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev && curr->parms.policer_enable == sparms->policer_enable && - curr->parms.policer_id == sparms->policer_id) + curr->parms.policer_id == sparms->policer_id && + curr->parms.session_id == sparms->session_id) return curr; } return NULL; @@ -1085,6 +1088,7 @@ int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id, sparms.policer_id = parms->policer_id; sparms.policer_enable = parms->policer_enable; + sparms.session_id = parms->session_id; span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms); if (!span_entry) return -ENOBUFS; @@ -1227,8 +1231,12 @@ __mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span, return -EINVAL; } + if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAR_RATE_MAX) + return -EINVAL; + mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable, - trigger_entry->parms.span_id); + trigger_entry->parms.span_id, + trigger_entry->parms.probability_rate); return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); } @@ -1362,8 +1370,11 @@ mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry * return -EINVAL; } + if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAGR_RATE_MAX) + return -EINVAL; + mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id, - 1); + trigger_entry->parms.probability_rate); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl); } @@ -1561,7 +1572,9 @@ int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp, trigger, mlxsw_sp_port); if (trigger_entry) { - if (trigger_entry->parms.span_id != parms->span_id) + if (trigger_entry->parms.span_id != parms->span_id || + trigger_entry->parms.probability_rate != + parms->probability_rate) return -EINVAL; refcount_inc(&trigger_entry->ref_count); goto out; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index aa1cd409c0e2..efaefd1ae863 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -13,6 +13,19 @@ struct mlxsw_sp; struct mlxsw_sp_port; +/* SPAN session identifiers that correspond to MLXSW_TRAP_ID_MIRROR_SESSION<i> + * trap identifiers. The session identifier is an attribute of the SPAN agent, + * which determines the trap identifier of packets that are mirrored to the + * CPU. Packets that are trapped to the CPU for the same logical reason (e.g., + * buffer drops) should use the same session identifier. + */ +enum mlxsw_sp_span_session_id { + MLXSW_SP_SPAN_SESSION_ID_BUFFER, + MLXSW_SP_SPAN_SESSION_ID_SAMPLING, + + __MLXSW_SP_SPAN_SESSION_ID_MAX = 8, +}; + struct mlxsw_sp_span_parms { struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. 
*/ unsigned int ttl; @@ -23,6 +36,7 @@ struct mlxsw_sp_span_parms { u16 vid; u16 policer_id; bool policer_enable; + enum mlxsw_sp_span_session_id session_id; }; enum mlxsw_sp_span_trigger { @@ -35,12 +49,14 @@ enum mlxsw_sp_span_trigger { struct mlxsw_sp_span_trigger_parms { int span_id; + u32 probability_rate; }; struct mlxsw_sp_span_agent_parms { const struct net_device *to_dev; u16 policer_id; bool policer_enable; + enum mlxsw_sp_span_session_id session_id; }; struct mlxsw_sp_span_entry_ops; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 23b7e8d6386b..c1f05c17557d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -98,6 +98,10 @@ struct mlxsw_sp_bridge_ops { const struct mlxsw_sp_fid *fid); }; +struct mlxsw_sp_switchdev_ops { + void (*init)(struct mlxsw_sp *mlxsw_sp); +}; + static int mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_bridge_port *bridge_port, @@ -2296,7 +2300,7 @@ mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, vid, ETH_P_8021AD, extack); } -static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = { +static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = { .port_join = mlxsw_sp_bridge_8021ad_port_join, .port_leave = mlxsw_sp_bridge_8021ad_port_leave, .vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join, @@ -2305,6 +2309,53 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = { .fid_vid = mlxsw_sp_bridge_8021q_fid_vid, }; +static int +mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) +{ + int err; + + /* The EtherType of decapsulated packets is determined at the egress + * port to allow 802.1d and 802.1ad bridges with VXLAN devices to + * co-exist. 
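+ * (Editor's illustration, not part of the patch: the join path below sets + * the egress EtherType with mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, + * ETH_P_8021AD); the error and leave paths restore ETH_P_8021Q.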
+ */ + err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD); + if (err) + return err; + + err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port, + mlxsw_sp_port, extack); + if (err) + goto err_bridge_8021ad_port_join; + + return 0; + +err_bridge_8021ad_port_join: + mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q); + return err; +} + +static void +mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port, + mlxsw_sp_port); + mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q); +} + +static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = { + .port_join = mlxsw_sp2_bridge_8021ad_port_join, + .port_leave = mlxsw_sp2_bridge_8021ad_port_leave, + .vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join, + .fid_get = mlxsw_sp_bridge_8021q_fid_get, + .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup, + .fid_vid = mlxsw_sp_bridge_8021q_fid_vid, +}; + int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *brport_dev, struct net_device *br_dev, @@ -3535,6 +3586,24 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier); } +static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops; +} + +const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = { + .init = mlxsw_sp1_switchdev_init, +}; + +static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops; +} + +const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = { + .init = mlxsw_sp2_switchdev_init, +}; + int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_bridge *bridge; @@ -3549,7 +3618,8 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops; bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops; - bridge->bridge_8021ad_ops = &mlxsw_sp_bridge_8021ad_ops; + + mlxsw_sp->switchdev_ops->init(mlxsw_sp); return mlxsw_sp_fdb_init(mlxsw_sp); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 4ef12e3e021a..26d01adbedad 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -49,8 +49,14 @@ enum { #define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT enum { + /* Packet was mirrored from ingress. */ + MLXSW_SP_MIRROR_REASON_INGRESS = 1, + /* Packet was mirrored from policy engine. */ + MLXSW_SP_MIRROR_REASON_POLICY_ENGINE = 2, /* Packet was early dropped. */ MLXSW_SP_MIRROR_REASON_INGRESS_WRED = 9, + /* Packet was mirrored from egress. 
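mlxsw_sp2_bridge_8021ad_port_join() above is a two-step setup with unwind: set the egress EtherType to 802.1ad first, and if the generic join then fails, restore 802.1Q before propagating the error; leave undoes the steps in reverse order. A compact sketch of that unwind shape, with hypothetical set_ethtype()/join_bridge() primitives standing in for the driver calls.

#define ETHTYPE_8021Q  0x8100
#define ETHTYPE_8021AD 0x88a8

/* Hypothetical primitives; each returns 0 or a negative error code. */
static int set_ethtype(int port, int ethtype) { (void)port; (void)ethtype; return 0; }
static int join_bridge(int port) { (void)port; return 0; }
static void leave_bridge(int port) { (void)port; }

static int port_8021ad_join(int port)
{
	int err;

	err = set_ethtype(port, ETHTYPE_8021AD);
	if (err)
		return err;

	err = join_bridge(port);
	if (err)
		goto err_join;		/* unwind the first step */

	return 0;

err_join:
	set_ethtype(port, ETHTYPE_8021Q);
	return err;
}

static void port_8021ad_leave(int port)
{
	/* Reverse order of the join. */
	leave_bridge(port);
	set_ethtype(port, ETHTYPE_8021Q);
}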
*/ + MLXSW_SP_MIRROR_REASON_EGRESS = 14, }; static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, @@ -106,7 +112,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port, static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port, void *trap_ctx) { - u32 cookie_index = mlxsw_skb_cb(skb)->cookie_index; + u32 cookie_index = mlxsw_skb_cb(skb)->rx_md_info.cookie_index; const struct flow_action_cookie *fa_cookie; struct devlink_port *in_devlink_port; struct mlxsw_sp_port *mlxsw_sp_port; @@ -202,21 +208,175 @@ static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u8 local_port, mlxsw_sp_ptp_receive(mlxsw_sp, skb, local_port); } +static struct mlxsw_sp_port * +mlxsw_sp_sample_tx_port_get(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_rx_md_info *rx_md_info) +{ + u8 local_port; + + if (!rx_md_info->tx_port_valid) + return NULL; + + if (rx_md_info->tx_port_is_lag) + local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core, + rx_md_info->tx_lag_id, + rx_md_info->tx_lag_port_index); + else + local_port = rx_md_info->tx_sys_port; + + if (local_port >= mlxsw_core_max_ports(mlxsw_sp->core)) + return NULL; + + return mlxsw_sp->ports[local_port]; +} + +/* The latency units are determined according to MOGCR.mirror_latency_units. It + * defaults to 64 nanoseconds. + */ +#define MLXSW_SP_MIRROR_LATENCY_SHIFT 6 + +static void mlxsw_sp_psample_md_init(struct mlxsw_sp *mlxsw_sp, + struct psample_metadata *md, + struct sk_buff *skb, int in_ifindex, + bool truncate, u32 trunc_size) +{ + struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info; + struct mlxsw_sp_port *mlxsw_sp_port; + + md->trunc_size = truncate ? trunc_size : skb->len; + md->in_ifindex = in_ifindex; + mlxsw_sp_port = mlxsw_sp_sample_tx_port_get(mlxsw_sp, rx_md_info); + md->out_ifindex = mlxsw_sp_port && mlxsw_sp_port->dev ? + mlxsw_sp_port->dev->ifindex : 0; + md->out_tc_valid = rx_md_info->tx_tc_valid; + md->out_tc = rx_md_info->tx_tc; + md->out_tc_occ_valid = rx_md_info->tx_congestion_valid; + md->out_tc_occ = rx_md_info->tx_congestion; + md->latency_valid = rx_md_info->latency_valid; + md->latency = rx_md_info->latency; + md->latency <<= MLXSW_SP_MIRROR_LATENCY_SHIFT; +} + static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port, void *trap_ctx) { struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); + struct mlxsw_sp_sample_trigger trigger; + struct mlxsw_sp_sample_params *params; + struct mlxsw_sp_port *mlxsw_sp_port; + struct psample_metadata md = {}; + int err; + + err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx); + if (err) + return; + + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) + goto out; + + trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS; + trigger.local_port = local_port; + params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger); + if (!params) + goto out; + + /* The psample module expects skb->data to point to the start of the + * Ethernet header. 
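mlxsw_sp_psample_md_init() below is mostly unit and identifier translation: the egress port comes back from the Rx metadata either as a system port or as a (LAG id, LAG port index) pair, and the hardware reports latency in MOGCR units of 64 ns, hence the shift by 6 before handing the value to psample. A standalone sketch of the conversion; struct sample_md and md_init() are stand-ins for the real psample_metadata plumbing.

#include <stdbool.h>
#include <stdint.h>

#define MIRROR_LATENCY_SHIFT 6	/* one hardware unit == 64 ns */

struct sample_md {
	uint32_t trunc_size;	/* bytes handed to the sampler */
	uint64_t latency;	/* nanoseconds */
	bool latency_valid;
};

static void md_init(struct sample_md *md, uint32_t pkt_len, bool truncate,
		    uint32_t trunc_size, bool latency_valid, uint64_t hw_latency)
{
	/* Report the truncated length only if truncation was requested. */
	md->trunc_size = truncate ? trunc_size : pkt_len;
	md->latency_valid = latency_valid;
	/* 64 ns units -> ns; the value is widened to 64 bits before shifting. */
	md->latency = hw_latency << MIRROR_LATENCY_SHIFT;
}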
+ */ + skb_push(skb, ETH_HLEN); + mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb, + mlxsw_sp_port->dev->ifindex, params->truncate, + params->trunc_size); + psample_sample_packet(params->psample_group, skb, params->rate, &md); +out: + consume_skb(skb); +} + +static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u8 local_port, + void *trap_ctx) +{ + struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info; + struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); + struct mlxsw_sp_port *mlxsw_sp_port, *mlxsw_sp_port_tx; + struct mlxsw_sp_sample_trigger trigger; + struct mlxsw_sp_sample_params *params; + struct psample_metadata md = {}; + int err; + + /* Locally generated packets are not reported from the policy engine + * trigger, so do not report them from the egress trigger as well. + */ + if (local_port == MLXSW_PORT_CPU_PORT) + goto out; + + err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx); + if (err) + return; + + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) + goto out; + + /* Packet was sampled from Tx, so we need to retrieve the sample + * parameters based on the Tx port and not the Rx port. + */ + mlxsw_sp_port_tx = mlxsw_sp_sample_tx_port_get(mlxsw_sp, rx_md_info); + if (!mlxsw_sp_port_tx) + goto out; + + trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS; + trigger.local_port = mlxsw_sp_port_tx->local_port; + params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger); + if (!params) + goto out; + + /* The psample module expects skb->data to point to the start of the + * Ethernet header. + */ + skb_push(skb, ETH_HLEN); + mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb, + mlxsw_sp_port->dev->ifindex, params->truncate, + params->trunc_size); + psample_sample_packet(params->psample_group, skb, params->rate, &md); +out: + consume_skb(skb); +} + +static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u8 local_port, + void *trap_ctx) +{ + struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); + struct mlxsw_sp_sample_trigger trigger = { + .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE, + }; + struct mlxsw_sp_sample_params *params; + struct mlxsw_sp_port *mlxsw_sp_port; + struct psample_metadata md = {}; int err; err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx); if (err) return; - /* The sample handler expects skb->data to point to the start of the + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) + goto out; + + params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger); + if (!params) + goto out; + + /* The psample module expects skb->data to point to the start of the * Ethernet header. 
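All three sample listeners share one shape: map the packet to a sample trigger (ingress, egress, or policy engine, plus a port where relevant), look up the user-configured parameters for that trigger, and only then push the Ethernet header back and call psample_sample_packet(). A small sketch of the trigger-to-params lookup; a linear table stands in for whatever storage the driver really uses.

#include <stddef.h>

enum trigger_type { TRIG_INGRESS, TRIG_EGRESS, TRIG_POLICY_ENGINE };

struct trigger {
	enum trigger_type type;
	unsigned int local_port;	/* unused for TRIG_POLICY_ENGINE */
};

struct params {
	struct trigger key;
	unsigned int rate;
	unsigned int trunc_size;
	int in_use;
};

#define N_TRIGGERS 8
static struct params registry[N_TRIGGERS];

static const struct params *params_lookup(const struct trigger *t)
{
	for (size_t i = 0; i < N_TRIGGERS; i++) {
		const struct params *p = &registry[i];

		if (!p->in_use || p->key.type != t->type)
			continue;
		/* Policy-engine samples are not tied to a port. */
		if (t->type != TRIG_POLICY_ENGINE &&
		    p->key.local_port != t->local_port)
			continue;
		return p;
	}
	return NULL;	/* no sampler configured: consume the packet */
}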
*/ skb_push(skb, ETH_HLEN); - mlxsw_sp_sample_receive(mlxsw_sp, skb, local_port); + mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb, + mlxsw_sp_port->dev->ifindex, params->truncate, + params->trunc_size); + psample_sample_packet(params->psample_group, skb, params->rate, &md); +out: + consume_skb(skb); } #define MLXSW_SP_TRAP_DROP(_id, _group_id) \ @@ -464,11 +624,6 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = { .priority = 2, }, { - .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0), - .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE, - .priority = 0, - }, - { .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_TRAP, 18), .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING, .priority = 4, @@ -993,14 +1148,6 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = { }, }, { - .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE, - MIRROR), - .listeners_arr = { - MLXSW_RXL(mlxsw_sp_rx_sample_listener, PKT_SAMPLE, - MIRROR_TO_CPU, false, SP_PKT_SAMPLE, DISCARD), - }, - }, - { .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_TRAP, ACL_TRAP, TRAP), .listeners_arr = { MLXSW_SP_RXL_NO_MARK(ACL0, FLOW_LOGGING, TRAP_TO_CPU, @@ -1709,10 +1856,23 @@ int mlxsw_sp_trap_group_policer_hw_id_get(struct mlxsw_sp *mlxsw_sp, u16 id, static const struct mlxsw_sp_trap_group_item mlxsw_sp1_trap_group_items_arr[] = { + { + .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0), + .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE, + .priority = 0, + }, }; static const struct mlxsw_sp_trap_item mlxsw_sp1_trap_items_arr[] = { + { + .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE, + MIRROR), + .listeners_arr = { + MLXSW_RXL(mlxsw_sp_rx_sample_listener, PKT_SAMPLE, + MIRROR_TO_CPU, false, SP_PKT_SAMPLE, DISCARD), + }, + }, }; static int @@ -1749,6 +1909,12 @@ mlxsw_sp2_trap_group_items_arr[] = { .priority = 0, .fixed_policer = true, }, + { + .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0), + .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE, + .priority = 0, + .fixed_policer = true, + }, }; static const struct mlxsw_sp_trap_item @@ -1760,6 +1926,21 @@ mlxsw_sp2_trap_items_arr[] = { }, .is_source = true, }, + { + .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE, + MIRROR), + .listeners_arr = { + MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_listener, 1, + SP_PKT_SAMPLE, + MLXSW_SP_MIRROR_REASON_INGRESS), + MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_tx_listener, 1, + SP_PKT_SAMPLE, + MLXSW_SP_MIRROR_REASON_EGRESS), + MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_acl_listener, 1, + SP_PKT_SAMPLE, + MLXSW_SP_MIRROR_REASON_POLICY_ENGINE), + }, + }, }; static int diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index 2c0dcd7acf3f..a66a179236fd 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -222,7 +222,6 @@ static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv) unsigned long timeout = jiffies + msecs_to_jiffies(2000); u16 phstat1; u16 estat; - int ret = 0; phstat1 = encx24j600_read_phy(priv, PHSTAT1); while ((phstat1 & ANDONE) == 0) { @@ -258,7 +257,7 @@ static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv) encx24j600_write_reg(priv, MACLCON, 0x370f); } - return ret; + return 0; } /* Access the PHY to determine link status */ diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index 05cb040c2677..2d3157e4d081 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ 
-11,7 +11,7 @@ config NET_VENDOR_MICROSEMI if NET_VENDOR_MICROSEMI -# Users should depend on NET_SWITCHDEV, HAS_IOMEM +# Users should depend on NET_SWITCHDEV, HAS_IOMEM, BRIDGE config MSCC_OCELOT_SWITCH_LIB select NET_DEVLINK select REGMAP_MMIO @@ -24,6 +24,7 @@ config MSCC_OCELOT_SWITCH_LIB config MSCC_OCELOT_SWITCH tristate "Ocelot switch driver" + depends on BRIDGE || BRIDGE=n depends on NET_SWITCHDEV depends on HAS_IOMEM depends on OF_NET diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 46e5c9136bac..1a36b416fd9b 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -687,7 +687,7 @@ static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh) int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) { struct skb_shared_hwtstamps *shhwtstamps; - u64 tod_in_ns, full_ts_in_ns, cpuq; + u64 tod_in_ns, full_ts_in_ns; u64 timestamp, src_port, len; u32 xfh[OCELOT_TAG_LEN / 4]; struct net_device *dev; @@ -704,7 +704,6 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) ocelot_xfh_get_src_port(xfh, &src_port); ocelot_xfh_get_len(xfh, &len); ocelot_xfh_get_rew_val(xfh, ×tamp); - ocelot_xfh_get_cpuq(xfh, &cpuq); if (WARN_ON(src_port >= ocelot->num_phys_ports)) return -EINVAL; @@ -767,17 +766,11 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) /* Everything we see on an interface that is in the HW bridge * has already been forwarded. */ - if (ocelot->bridge_mask & BIT(src_port)) + if (ocelot->ports[src_port]->bridge) skb->offload_fwd_mark = 1; skb->protocol = eth_type_trans(skb, dev); -#if IS_ENABLED(CONFIG_BRIDGE_MRP) - if (skb->protocol == cpu_to_be16(ETH_P_MRP) && - cpuq & BIT(OCELOT_MRP_CPUQ)) - skb->offload_fwd_mark = 0; -#endif - *nskb = skb; return 0; @@ -1190,6 +1183,26 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond, return mask; } +static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, + struct net_device *bridge) +{ + u32 mask = 0; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port) + continue; + + if (ocelot_port->stp_state == BR_STATE_FORWARDING && + ocelot_port->bridge == bridge) + mask |= BIT(port); + } + + return mask; +} + static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot) { u32 mask = 0; @@ -1239,10 +1252,12 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot) */ mask = GENMASK(ocelot->num_phys_ports - 1, 0); mask &= ~cpu_fwd_mask; - } else if (ocelot->bridge_fwd_mask & BIT(port)) { + } else if (ocelot_port->bridge) { + struct net_device *bridge = ocelot_port->bridge; struct net_device *bond = ocelot_port->bond; - mask = ocelot->bridge_fwd_mask & ~BIT(port); + mask = ocelot_get_bridge_fwd_mask(ocelot, bridge); + mask &= ~BIT(port); if (bond) { mask &= ~ocelot_get_bond_mask(ocelot, bond, false); @@ -1263,29 +1278,16 @@ EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask); void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - u32 port_cfg; + u32 learn_ena = 0; - if (!(BIT(port) & ocelot->bridge_mask)) - return; - - port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port); + ocelot_port->stp_state = state; - switch (state) { - case BR_STATE_FORWARDING: - ocelot->bridge_fwd_mask |= BIT(port); - fallthrough; - case BR_STATE_LEARNING: - if (ocelot_port->learn_ena) - port_cfg |= 
ANA_PORT_PORT_CFG_LEARN_ENA; - break; + if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) && + ocelot_port->learn_ena) + learn_ena = ANA_PORT_PORT_CFG_LEARN_ENA; - default: - port_cfg &= ~ANA_PORT_PORT_CFG_LEARN_ENA; - ocelot->bridge_fwd_mask &= ~BIT(port); - break; - } - - ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, port); + ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA, + ANA_PORT_PORT_CFG, port); ocelot_apply_bridge_fwd_mask(ocelot); } @@ -1512,43 +1514,28 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_port_mdb_del); -int ocelot_port_bridge_join(struct ocelot *ocelot, int port, - struct net_device *bridge) +void ocelot_port_bridge_join(struct ocelot *ocelot, int port, + struct net_device *bridge) { - if (!ocelot->bridge_mask) { - ocelot->hw_bridge_dev = bridge; - } else { - if (ocelot->hw_bridge_dev != bridge) - /* This is adding the port to a second bridge, this is - * unsupported */ - return -ENODEV; - } + struct ocelot_port *ocelot_port = ocelot->ports[port]; - ocelot->bridge_mask |= BIT(port); + ocelot_port->bridge = bridge; - return 0; + ocelot_apply_bridge_fwd_mask(ocelot); } EXPORT_SYMBOL(ocelot_port_bridge_join); -int ocelot_port_bridge_leave(struct ocelot *ocelot, int port, - struct net_device *bridge) +void ocelot_port_bridge_leave(struct ocelot *ocelot, int port, + struct net_device *bridge) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; struct ocelot_vlan pvid = {0}, native_vlan = {0}; - int ret; - - ocelot->bridge_mask &= ~BIT(port); - - if (!ocelot->bridge_mask) - ocelot->hw_bridge_dev = NULL; - ret = ocelot_port_vlan_filtering(ocelot, port, false); - if (ret) - return ret; + ocelot_port->bridge = NULL; ocelot_port_set_pvid(ocelot, port, pvid); ocelot_port_set_native_vlan(ocelot, port, native_vlan); - - return 0; + ocelot_apply_bridge_fwd_mask(ocelot); } EXPORT_SYMBOL(ocelot_port_bridge_leave); @@ -2051,6 +2038,9 @@ int ocelot_init(struct ocelot *ocelot) ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); } + + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_BLACKHOLE); + /* Allow broadcast and unknown L2 multicast to the CPU. */ ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index a41b458b1b3e..8b843d3c9189 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -220,6 +220,11 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, "Last action must be GOTO"); return -EOPNOTSUPP; } + if (a->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } filter->action.police_ena = true; rate = a->police.rate_bytes_ps; filter->action.pol.rate = div_u64(rate, 1000) * 8; diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c index 683da320bfd8..08b481a93460 100644 --- a/drivers/net/ethernet/mscc/ocelot_mrp.c +++ b/drivers/net/ethernet/mscc/ocelot_mrp.c @@ -1,9 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0 OR MIT) /* Microsemi Ocelot Switch driver * - * This contains glue logic between the switchdev driver operations and the - * mscc_ocelot_switch_lib. 
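With the single hw_bridge_dev/bridge_mask pair gone, each ocelot port now records its own bridge pointer and STP state, and the forwarding mask is recomputed on demand: a port forwards to every other port that is on the same bridge and in BR_STATE_FORWARDING. A standalone sketch of ocelot_get_bridge_fwd_mask()'s logic; struct port here is a simplified stand-in for struct ocelot_port.

#include <stddef.h>
#include <stdint.h>

enum stp_state { STP_BLOCKING, STP_LEARNING, STP_FORWARDING };

struct port {
	const void *bridge;	/* NULL when not bridged */
	enum stp_state stp_state;
};

static uint32_t bridge_fwd_mask(const struct port *ports, size_t n_ports,
				const void *bridge)
{
	uint32_t mask = 0;

	for (size_t i = 0; i < n_ports; i++)
		if (ports[i].bridge == bridge &&
		    ports[i].stp_state == STP_FORWARDING)
			mask |= UINT32_C(1) << i;

	return mask;
}

/* The caller then clears its own bit, just as the driver does:
 * mask = bridge_fwd_mask(ports, n, me->bridge) & ~BIT(my_port);
 */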
- * * Copyright (c) 2017, 2019 Microsemi Corporation * Copyright 2020-2021 NXP Semiconductors */ @@ -15,13 +12,34 @@ #include "ocelot.h" #include "ocelot_vcap.h" -static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int port) +static const u8 mrp_test_dmac[] = { 0x01, 0x15, 0x4e, 0x00, 0x00, 0x01 }; +static const u8 mrp_control_dmac[] = { 0x01, 0x15, 0x4e, 0x00, 0x00, 0x02 }; + +static int ocelot_mrp_find_partner_port(struct ocelot *ocelot, + struct ocelot_port *p) +{ + int i; + + for (i = 0; i < ocelot->num_phys_ports; ++i) { + struct ocelot_port *ocelot_port = ocelot->ports[i]; + + if (!ocelot_port || p == ocelot_port) + continue; + + if (ocelot_port->mrp_ring_id == p->mrp_ring_id) + return i; + } + + return -1; +} + +static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int id) { struct ocelot_vcap_block *block_vcap_is2; struct ocelot_vcap_filter *filter; block_vcap_is2 = &ocelot->block[VCAP_IS2]; - filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, port, + filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, id, false); if (!filter) return 0; @@ -29,6 +47,87 @@ static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int port) return ocelot_vcap_filter_del(ocelot, filter); } +static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port, + int dst_port) +{ + const u8 mrp_test_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + struct ocelot_vcap_filter *filter; + int err; + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + filter->prio = 1; + filter->id.cookie = src_port; + filter->id.tc_offload = false; + filter->block_id = VCAP_IS2; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + filter->ingress_port_mask = BIT(src_port); + ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac); + ether_addr_copy(filter->key.etype.dmac.mask, mrp_test_mask); + filter->action.mask_mode = OCELOT_MASK_MODE_REDIRECT; + filter->action.port_mask = BIT(dst_port); + + err = ocelot_vcap_filter_add(ocelot, filter, NULL); + if (err) + kfree(filter); + + return err; +} + +static int ocelot_mrp_copy_add_vcap(struct ocelot *ocelot, int port, + int prio, unsigned long cookie) +{ + const u8 mrp_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + struct ocelot_vcap_filter *filter; + int err; + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + filter->prio = prio; + filter->id.cookie = cookie; + filter->id.tc_offload = false; + filter->block_id = VCAP_IS2; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + filter->ingress_port_mask = BIT(port); + /* Here is possible to use control or test dmac because the mask + * doesn't cover the LSB + */ + ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac); + ether_addr_copy(filter->key.etype.dmac.mask, mrp_mask); + filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; + filter->action.port_mask = 0x0; + filter->action.cpu_copy_ena = true; + filter->action.cpu_qu_num = OCELOT_MRP_CPUQ; + + err = ocelot_vcap_filter_add(ocelot, filter, NULL); + if (err) + kfree(filter); + + return err; +} + +static void ocelot_mrp_save_mac(struct ocelot *ocelot, + struct ocelot_port *port) +{ + ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac, + port->pvid_vlan.vid, ENTRYTYPE_LOCKED); + ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac, + port->pvid_vlan.vid, ENTRYTYPE_LOCKED); +} + +static void ocelot_mrp_del_mac(struct ocelot *ocelot, + struct ocelot_port *port) +{ + 
ocelot_mact_forget(ocelot, mrp_test_dmac, port->pvid_vlan.vid); + ocelot_mact_forget(ocelot, mrp_control_dmac, port->pvid_vlan.vid); +} + int ocelot_mrp_add(struct ocelot *ocelot, int port, const struct switchdev_obj_mrp *mrp) { @@ -45,18 +144,7 @@ int ocelot_mrp_add(struct ocelot *ocelot, int port, if (mrp->p_port != dev && mrp->s_port != dev) return 0; - if (ocelot->mrp_ring_id != 0 && - ocelot->mrp_s_port && - ocelot->mrp_p_port) - return -EINVAL; - - if (mrp->p_port == dev) - ocelot->mrp_p_port = dev; - - if (mrp->s_port == dev) - ocelot->mrp_s_port = dev; - - ocelot->mrp_ring_id = mrp->ring_id; + ocelot_port->mrp_ring_id = mrp->ring_id; return 0; } @@ -66,33 +154,14 @@ int ocelot_mrp_del(struct ocelot *ocelot, int port, const struct switchdev_obj_mrp *mrp) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - struct ocelot_port_private *priv; - struct net_device *dev; if (!ocelot_port) return -EOPNOTSUPP; - priv = container_of(ocelot_port, struct ocelot_port_private, port); - dev = priv->dev; - - if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev) + if (ocelot_port->mrp_ring_id != mrp->ring_id) return 0; - if (ocelot->mrp_ring_id == 0 && - !ocelot->mrp_s_port && - !ocelot->mrp_p_port) - return -EINVAL; - - if (ocelot_mrp_del_vcap(ocelot, priv->chip_port)) - return -EINVAL; - - if (ocelot->mrp_p_port == dev) - ocelot->mrp_p_port = NULL; - - if (ocelot->mrp_s_port == dev) - ocelot->mrp_s_port = NULL; - - ocelot->mrp_ring_id = 0; + ocelot_port->mrp_ring_id = 0; return 0; } @@ -102,49 +171,39 @@ int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port, const struct switchdev_obj_ring_role_mrp *mrp) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - struct ocelot_vcap_filter *filter; - struct ocelot_port_private *priv; - struct net_device *dev; + int dst_port; int err; if (!ocelot_port) return -EOPNOTSUPP; - priv = container_of(ocelot_port, struct ocelot_port_private, port); - dev = priv->dev; - - if (ocelot->mrp_ring_id != mrp->ring_id) - return -EINVAL; - - if (!mrp->sw_backup) + if (mrp->ring_role != BR_MRP_RING_ROLE_MRC && !mrp->sw_backup) return -EOPNOTSUPP; - if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev) + if (ocelot_port->mrp_ring_id != mrp->ring_id) return 0; - filter = kzalloc(sizeof(*filter), GFP_ATOMIC); - if (!filter) - return -ENOMEM; + ocelot_mrp_save_mac(ocelot, ocelot_port); - filter->key_type = OCELOT_VCAP_KEY_ETYPE; - filter->prio = 1; - filter->id.cookie = priv->chip_port; - filter->id.tc_offload = false; - filter->block_id = VCAP_IS2; - filter->type = OCELOT_VCAP_FILTER_OFFLOAD; - filter->ingress_port_mask = BIT(priv->chip_port); - *(__be16 *)filter->key.etype.etype.value = htons(ETH_P_MRP); - *(__be16 *)filter->key.etype.etype.mask = htons(0xffff); - filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; - filter->action.port_mask = 0x0; - filter->action.cpu_copy_ena = true; - filter->action.cpu_qu_num = OCELOT_MRP_CPUQ; + if (mrp->ring_role != BR_MRP_RING_ROLE_MRC) + return ocelot_mrp_copy_add_vcap(ocelot, port, 1, port); - err = ocelot_vcap_filter_add(ocelot, filter, NULL); + dst_port = ocelot_mrp_find_partner_port(ocelot, ocelot_port); + if (dst_port == -1) + return -EINVAL; + + err = ocelot_mrp_redirect_add_vcap(ocelot, port, dst_port); if (err) - kfree(filter); + return err; - return err; + err = ocelot_mrp_copy_add_vcap(ocelot, port, 2, + port + ocelot->num_phys_ports); + if (err) { + ocelot_mrp_del_vcap(ocelot, port); + return err; + } + + return 0; } EXPORT_SYMBOL(ocelot_mrp_add_ring_role); @@ -152,24 +211,32 @@ int 
ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port, const struct switchdev_obj_ring_role_mrp *mrp) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - struct ocelot_port_private *priv; - struct net_device *dev; + int i; if (!ocelot_port) return -EOPNOTSUPP; - priv = container_of(ocelot_port, struct ocelot_port_private, port); - dev = priv->dev; - - if (ocelot->mrp_ring_id != mrp->ring_id) - return -EINVAL; - - if (!mrp->sw_backup) + if (mrp->ring_role != BR_MRP_RING_ROLE_MRC && !mrp->sw_backup) return -EOPNOTSUPP; - if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev) + if (ocelot_port->mrp_ring_id != mrp->ring_id) return 0; - return ocelot_mrp_del_vcap(ocelot, priv->chip_port); + ocelot_mrp_del_vcap(ocelot, port); + ocelot_mrp_del_vcap(ocelot, port + ocelot->num_phys_ports); + + for (i = 0; i < ocelot->num_phys_ports; ++i) { + ocelot_port = ocelot->ports[i]; + + if (!ocelot_port) + continue; + + if (ocelot_port->mrp_ring_id != 0) + goto out; + } + + ocelot_mrp_del_mac(ocelot, ocelot->ports[port]); +out: + return 0; } EXPORT_SYMBOL(ocelot_mrp_del_ring_role); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 12cb6867a2d0..36f32a4d9b0f 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -251,6 +251,12 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv, return -EEXIST; } + if (action->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } + pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8; pol.burst = action->police.burst; @@ -1111,77 +1117,213 @@ static int ocelot_port_obj_del(struct net_device *dev, return ret; } -static int ocelot_netdevice_bridge_join(struct ocelot *ocelot, int port, - struct net_device *bridge) +static void ocelot_inherit_brport_flags(struct ocelot *ocelot, int port, + struct net_device *brport_dev) +{ + struct switchdev_brport_flags flags = {0}; + int flag; + + flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; + + for_each_set_bit(flag, &flags.mask, 32) + if (br_port_flag_is_set(brport_dev, BIT(flag))) + flags.val |= BIT(flag); + + ocelot_port_bridge_flags(ocelot, port, flags); +} + +static void ocelot_clear_brport_flags(struct ocelot *ocelot, int port) { struct switchdev_brport_flags flags; - int err; flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; - flags.val = flags.mask; + flags.val = flags.mask & ~BR_LEARNING; + + ocelot_port_bridge_flags(ocelot, port, flags); +} + +static int ocelot_switchdev_sync(struct ocelot *ocelot, int port, + struct net_device *brport_dev, + struct net_device *bridge_dev, + struct netlink_ext_ack *extack) +{ + clock_t ageing_time; + u8 stp_state; + int err; - err = ocelot_port_bridge_join(ocelot, port, bridge); + ocelot_inherit_brport_flags(ocelot, port, brport_dev); + + stp_state = br_port_get_stp_state(brport_dev); + ocelot_bridge_stp_state_set(ocelot, port, stp_state); + + err = ocelot_port_vlan_filtering(ocelot, port, + br_vlan_enabled(bridge_dev)); if (err) return err; - ocelot_port_bridge_flags(ocelot, port, flags); + ageing_time = br_get_ageing_time(bridge_dev); + ocelot_port_attr_ageing_set(ocelot, port, ageing_time); + + err = br_mdb_replay(bridge_dev, brport_dev, + &ocelot_switchdev_blocking_nb, extack); + if (err && err != -EOPNOTSUPP) + return err; + + err = br_fdb_replay(bridge_dev, brport_dev, &ocelot_switchdev_nb); + if (err) + return err; + + err = 
br_vlan_replay(bridge_dev, brport_dev, + &ocelot_switchdev_blocking_nb, extack); + if (err && err != -EOPNOTSUPP) + return err; + + return 0; +} + +static int ocelot_switchdev_unsync(struct ocelot *ocelot, int port) +{ + int err; + + err = ocelot_port_vlan_filtering(ocelot, port, false); + if (err) + return err; + + ocelot_clear_brport_flags(ocelot, port); + + ocelot_bridge_stp_state_set(ocelot, port, BR_STATE_FORWARDING); + + return 0; +} + +static int ocelot_netdevice_bridge_join(struct net_device *dev, + struct net_device *brport_dev, + struct net_device *bridge, + struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + int err; + + ocelot_port_bridge_join(ocelot, port, bridge); + + err = ocelot_switchdev_sync(ocelot, port, brport_dev, bridge, extack); + if (err) + goto err_switchdev_sync; return 0; + +err_switchdev_sync: + ocelot_port_bridge_leave(ocelot, port, bridge); + return err; } -static int ocelot_netdevice_bridge_leave(struct ocelot *ocelot, int port, +static int ocelot_netdevice_bridge_leave(struct net_device *dev, + struct net_device *brport_dev, struct net_device *bridge) { - struct switchdev_brport_flags flags; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; int err; - flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; - flags.val = flags.mask & ~BR_LEARNING; + err = ocelot_switchdev_unsync(ocelot, port); + if (err) + return err; - err = ocelot_port_bridge_leave(ocelot, port, bridge); + ocelot_port_bridge_leave(ocelot, port, bridge); - ocelot_port_bridge_flags(ocelot, port, flags); + return 0; +} +static int ocelot_netdevice_lag_join(struct net_device *dev, + struct net_device *bond, + struct netdev_lag_upper_info *info, + struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + struct net_device *bridge_dev; + int port = priv->chip_port; + int err; + + err = ocelot_port_lag_join(ocelot, port, bond, info); + if (err == -EOPNOTSUPP) { + NL_SET_ERR_MSG_MOD(extack, "Offloading not supported"); + return 0; + } + + bridge_dev = netdev_master_upper_dev_get(bond); + if (!bridge_dev || !netif_is_bridge_master(bridge_dev)) + return 0; + + err = ocelot_netdevice_bridge_join(dev, bond, bridge_dev, extack); + if (err) + goto err_bridge_join; + + return 0; + +err_bridge_join: + ocelot_port_lag_leave(ocelot, port, bond); return err; } -static int ocelot_netdevice_changeupper(struct net_device *dev, - struct netdev_notifier_changeupper_info *info) +static int ocelot_netdevice_lag_leave(struct net_device *dev, + struct net_device *bond) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; + struct net_device *bridge_dev; int port = priv->chip_port; + + ocelot_port_lag_leave(ocelot, port, bond); + + bridge_dev = netdev_master_upper_dev_get(bond); + if (!bridge_dev || !netif_is_bridge_master(bridge_dev)) + return 0; + + return ocelot_netdevice_bridge_leave(dev, bond, bridge_dev); +} + +static int ocelot_netdevice_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct netlink_ext_ack *extack; int err = 0; + extack = 
netdev_notifier_info_to_extack(&info->info); + if (netif_is_bridge_master(info->upper_dev)) { - if (info->linking) { - err = ocelot_netdevice_bridge_join(ocelot, port, - info->upper_dev); - } else { - err = ocelot_netdevice_bridge_leave(ocelot, port, + if (info->linking) + err = ocelot_netdevice_bridge_join(dev, dev, + info->upper_dev, + extack); + else + err = ocelot_netdevice_bridge_leave(dev, dev, info->upper_dev); - } } if (netif_is_lag_master(info->upper_dev)) { - if (info->linking) { - err = ocelot_port_lag_join(ocelot, port, - info->upper_dev, - info->upper_info); - if (err == -EOPNOTSUPP) { - NL_SET_ERR_MSG_MOD(info->info.extack, - "Offloading not supported"); - err = 0; - } - } else { - ocelot_port_lag_leave(ocelot, port, - info->upper_dev); - } + if (info->linking) + err = ocelot_netdevice_lag_join(dev, info->upper_dev, + info->upper_info, extack); + else + ocelot_netdevice_lag_leave(dev, info->upper_dev); } return notifier_from_errno(err); } +/* Treat CHANGEUPPER events on an offloaded LAG as individual CHANGEUPPER + * events for the lower physical ports of the LAG. + * If the LAG upper isn't offloaded, ignore its CHANGEUPPER events. + * In case the LAG joined a bridge, notify that we are offloading it and can do + * forwarding in hardware towards it. + */ static int ocelot_netdevice_lag_changeupper(struct net_device *dev, struct netdev_notifier_changeupper_info *info) @@ -1191,6 +1333,12 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev, int err = NOTIFY_DONE; netdev_for_each_lower_dev(dev, lower, iter) { + struct ocelot_port_private *priv = netdev_priv(lower); + struct ocelot_port *ocelot_port = &priv->port; + + if (ocelot_port->bond != dev) + return NOTIFY_OK; + err = ocelot_netdevice_changeupper(lower, info); if (err) return notifier_from_errno(err); diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 37a232911395..7945393a0655 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -761,6 +761,7 @@ static void is1_entry_set(struct ocelot *ocelot, int ix, vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_ETYPE, etype.value, etype.mask); } + break; } default: break; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 8f2f091bce89..9cfcd5500462 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -6657,7 +6657,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu) /** * s2io_set_link - Set the LInk status - * @work: work struct containing a pointer to device private structue + * @work: work struct containing a pointer to device private structure * Description: Sets the link status for the adapter */ diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index bdbf0726145e..605a1617b195 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -419,8 +419,8 @@ nfp_abm_port_get_stats_strings(struct nfp_app *app, struct nfp_port *port, return data; alink = repr->app_priv; for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) { - data = nfp_pr_et(data, "q%u_no_wait", i); - data = nfp_pr_et(data, "q%u_delayed", i); + ethtool_sprintf(&data, "q%u_no_wait", i); + ethtool_sprintf(&data, "q%u_delayed", i); } return data; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 5defd31d481c..aa06fcb38f8b 100644 --- 
a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -327,8 +327,14 @@ int nfp_compile_flow_metadata(struct nfp_app *app, goto err_free_ctx_entry; } + /* Do not allocate a mask-id for pre_tun_rules. These flows are used to + * configure the pre_tun table and are never actually sent to the + * firmware as an add-flow message. Allocating one here would cause the + * mask-id allocation on the firmware to get out of sync. + */ new_mask_id = 0; - if (!nfp_check_mask_add(app, nfp_flow->mask_data, + if (!nfp_flow->pre_tun_rule.dev && + !nfp_check_mask_add(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, &nfp_flow->meta.flags, &new_mask_id)) { NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id"); @@ -359,7 +365,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app, goto err_remove_mask; } - if (!nfp_check_mask_remove(app, nfp_flow->mask_data, + if (!nfp_flow->pre_tun_rule.dev && + !nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, NULL, &new_mask_id)) { NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id"); @@ -374,8 +381,10 @@ int nfp_compile_flow_metadata(struct nfp_app *app, return 0; err_remove_mask: - nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, - NULL, &new_mask_id); + if (!nfp_flow->pre_tun_rule.dev) + nfp_check_mask_remove(app, nfp_flow->mask_data, + nfp_flow->meta.mask_len, + NULL, &new_mask_id); err_remove_rhash: WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table, &ctx_entry->ht_node, @@ -406,9 +415,10 @@ int nfp_modify_flow_metadata(struct nfp_app *app, __nfp_modify_flow_metadata(priv, nfp_flow); - nfp_check_mask_remove(app, nfp_flow->mask_data, - nfp_flow->meta.mask_len, &nfp_flow->meta.flags, - &new_mask_id); + if (!nfp_flow->pre_tun_rule.dev) + nfp_check_mask_remove(app, nfp_flow->mask_data, + nfp_flow->meta.mask_len, &nfp_flow->meta.flags, + &new_mask_id); /* Update flow payload with mask ids. */ nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 1c59aff2163c..d72225d64a75 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1142,6 +1142,12 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, return -EOPNOTSUPP; } + if (!(key_layer & NFP_FLOWER_LAYER_IPV4) && + !(key_layer & NFP_FLOWER_LAYER_IPV6)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present"); + return -EOPNOTSUPP; + } + /* Skip fields known to exist. */ mask += sizeof(struct nfp_flower_meta_tci); ext += sizeof(struct nfp_flower_meta_tci); @@ -1152,6 +1158,13 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, mask += sizeof(struct nfp_flower_in_port); ext += sizeof(struct nfp_flower_in_port); + /* Ensure destination MAC address matches pre_tun_dev. */ + mac = (struct nfp_flower_mac_mpls *)ext; + if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC"); + return -EOPNOTSUPP; + } + /* Ensure destination MAC address is fully matched.
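The validation added here insists that a pre-tunnel rule matches an IPv4/IPv6 eth_type and that the destination MAC both equals the output device's address and, as the hunk just below completes, is matched under a full ff:ff:ff:ff:ff:ff mask. A self-contained sketch of those two MAC checks, using 6 for ETH_ALEN; validate_pre_tun_dmac() is an illustrative helper, not the driver's function.

#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

static int is_broadcast_mac(const uint8_t *m)
{
	static const uint8_t bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return !memcmp(m, bcast, ETH_ALEN);
}

/* Returns 0 when the rule is acceptable, -1 otherwise. */
static int validate_pre_tun_dmac(const uint8_t *match_dmac,
				 const uint8_t *match_dmac_mask,
				 const uint8_t *out_dev_mac)
{
	/* The matched value must be exactly the egress device's MAC... */
	if (memcmp(match_dmac, out_dev_mac, ETH_ALEN))
		return -1;
	/* ...and every bit of it must be matched (full mask). */
	if (!is_broadcast_mac(match_dmac_mask))
		return -1;
	return 0;
}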
*/ mac = (struct nfp_flower_mac_mpls *)mask; if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { @@ -1159,6 +1172,11 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, return -EOPNOTSUPP; } + if (mac->mpls_lse) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported"); + return -EOPNOTSUPP; + } + mask += sizeof(struct nfp_flower_mac_mpls); ext += sizeof(struct nfp_flower_mac_mpls); if (key_layer & NFP_FLOWER_LAYER_IPV4 || diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c index d4ce8f9ef3cc..88bea6ad59bc 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c @@ -104,6 +104,11 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, return -EOPNOTSUPP; } + if (action->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not support packets per second"); + return -EOPNOTSUPP; + } + rate = action->police.rate_bytes_ps; burst = action->police.burst; netdev_port_id = nfp_repr_get_port_id(netdev); diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 7248d248f604..d19c02e99114 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -16,8 +16,9 @@ #define NFP_FL_MAX_ROUTES 32 #define NFP_TUN_PRE_TUN_RULE_LIMIT 32 -#define NFP_TUN_PRE_TUN_RULE_DEL 0x1 -#define NFP_TUN_PRE_TUN_IDX_BIT 0x8 +#define NFP_TUN_PRE_TUN_RULE_DEL BIT(0) +#define NFP_TUN_PRE_TUN_IDX_BIT BIT(3) +#define NFP_TUN_PRE_TUN_IPV6_BIT BIT(7) /** * struct nfp_tun_pre_run_rule - rule matched before decap @@ -1268,6 +1269,7 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, { struct nfp_flower_priv *app_priv = app->priv; struct nfp_tun_offloaded_mac *mac_entry; + struct nfp_flower_meta_tci *key_meta; struct nfp_tun_pre_tun_rule payload; struct net_device *internal_dev; int err; @@ -1290,6 +1292,15 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, if (!mac_entry) return -ENOENT; + /* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being + * set/clear for port_idx. + */ + key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data; + if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6) + mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT; + else + mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT; + payload.port_idx = cpu_to_be16(mac_entry->index); /* Copy mac id and vlan to flow - dev may not exist at delete time. */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 9c9ae33d84ce..1b482446536d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -429,17 +429,6 @@ static int nfp_net_set_ringparam(struct net_device *netdev, return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); } -__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...) 
-{ - va_list args; - - va_start(args, fmt); - vsnprintf(data, ETH_GSTRING_LEN, fmt, args); - va_end(args); - - return data + ETH_GSTRING_LEN; -} - static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); @@ -454,29 +443,29 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) int i; for (i = 0; i < nn->max_r_vecs; i++) { - data = nfp_pr_et(data, "rvec_%u_rx_pkts", i); - data = nfp_pr_et(data, "rvec_%u_tx_pkts", i); - data = nfp_pr_et(data, "rvec_%u_tx_busy", i); + ethtool_sprintf(&data, "rvec_%u_rx_pkts", i); + ethtool_sprintf(&data, "rvec_%u_tx_pkts", i); + ethtool_sprintf(&data, "rvec_%u_tx_busy", i); } - data = nfp_pr_et(data, "hw_rx_csum_ok"); - data = nfp_pr_et(data, "hw_rx_csum_inner_ok"); - data = nfp_pr_et(data, "hw_rx_csum_complete"); - data = nfp_pr_et(data, "hw_rx_csum_err"); - data = nfp_pr_et(data, "rx_replace_buf_alloc_fail"); - data = nfp_pr_et(data, "rx_tls_decrypted_packets"); - data = nfp_pr_et(data, "hw_tx_csum"); - data = nfp_pr_et(data, "hw_tx_inner_csum"); - data = nfp_pr_et(data, "tx_gather"); - data = nfp_pr_et(data, "tx_lso"); - data = nfp_pr_et(data, "tx_tls_encrypted_packets"); - data = nfp_pr_et(data, "tx_tls_ooo"); - data = nfp_pr_et(data, "tx_tls_drop_no_sync_data"); - - data = nfp_pr_et(data, "hw_tls_no_space"); - data = nfp_pr_et(data, "rx_tls_resync_req_ok"); - data = nfp_pr_et(data, "rx_tls_resync_req_ign"); - data = nfp_pr_et(data, "rx_tls_resync_sent"); + ethtool_sprintf(&data, "hw_rx_csum_ok"); + ethtool_sprintf(&data, "hw_rx_csum_inner_ok"); + ethtool_sprintf(&data, "hw_rx_csum_complete"); + ethtool_sprintf(&data, "hw_rx_csum_err"); + ethtool_sprintf(&data, "rx_replace_buf_alloc_fail"); + ethtool_sprintf(&data, "rx_tls_decrypted_packets"); + ethtool_sprintf(&data, "hw_tx_csum"); + ethtool_sprintf(&data, "hw_tx_inner_csum"); + ethtool_sprintf(&data, "tx_gather"); + ethtool_sprintf(&data, "tx_lso"); + ethtool_sprintf(&data, "tx_tls_encrypted_packets"); + ethtool_sprintf(&data, "tx_tls_ooo"); + ethtool_sprintf(&data, "tx_tls_drop_no_sync_data"); + + ethtool_sprintf(&data, "hw_tls_no_space"); + ethtool_sprintf(&data, "rx_tls_resync_req_ok"); + ethtool_sprintf(&data, "rx_tls_resync_req_ign"); + ethtool_sprintf(&data, "rx_tls_resync_sent"); return data; } @@ -550,19 +539,19 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr) swap_off = repr * NN_ET_SWITCH_STATS_LEN; for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++) - data = nfp_pr_et(data, nfp_net_et_stats[i + swap_off].name); + ethtool_sprintf(&data, nfp_net_et_stats[i + swap_off].name); for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++) - data = nfp_pr_et(data, nfp_net_et_stats[i - swap_off].name); + ethtool_sprintf(&data, nfp_net_et_stats[i - swap_off].name); for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++) - data = nfp_pr_et(data, nfp_net_et_stats[i].name); + ethtool_sprintf(&data, nfp_net_et_stats[i].name); for (i = 0; i < num_vecs; i++) { - data = nfp_pr_et(data, "rxq_%u_pkts", i); - data = nfp_pr_et(data, "rxq_%u_bytes", i); - data = nfp_pr_et(data, "txq_%u_pkts", i); - data = nfp_pr_et(data, "txq_%u_bytes", i); + ethtool_sprintf(&data, "rxq_%u_pkts", i); + ethtool_sprintf(&data, "rxq_%u_bytes", i); + ethtool_sprintf(&data, "txq_%u_pkts", i); + ethtool_sprintf(&data, "txq_%u_bytes", i); } return data; @@ -610,15 +599,15 @@ static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data) memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN); data += 
ETH_GSTRING_LEN; } else { - data = nfp_pr_et(data, "dev_unknown_stat%u", id); + ethtool_sprintf(&data, "dev_unknown_stat%u", id); } } for (i = 0; i < nn->max_r_vecs; i++) { - data = nfp_pr_et(data, "rxq_%u_pkts", i); - data = nfp_pr_et(data, "rxq_%u_bytes", i); - data = nfp_pr_et(data, "txq_%u_pkts", i); - data = nfp_pr_et(data, "txq_%u_bytes", i); + ethtool_sprintf(&data, "rxq_%u_pkts", i); + ethtool_sprintf(&data, "rxq_%u_bytes", i); + ethtool_sprintf(&data, "txq_%u_pkts", i); + ethtool_sprintf(&data, "txq_%u_bytes", i); } return data; @@ -666,7 +655,7 @@ static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data) return data; for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++) - data = nfp_pr_et(data, "mac.%s", nfp_mac_et_stats[i].name); + ethtool_sprintf(&data, "mac.%s", nfp_mac_et_stats[i].name); return data; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index d7fd203bb180..ae4da189d955 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -92,8 +92,6 @@ struct nfp_port { extern const struct ethtool_ops nfp_port_ethtool_ops; -__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...); - int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 55cef5b16aa5..a6823c4d355d 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -612,15 +612,6 @@ void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter, void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter, struct pch_gbe_rx_ring *rx_ring); void pch_gbe_update_stats(struct pch_gbe_adapter *adapter); -u32 pch_ch_control_read(struct pci_dev *pdev); -void pch_ch_control_write(struct pci_dev *pdev, u32 val); -u32 pch_ch_event_read(struct pci_dev *pdev); -void pch_ch_event_write(struct pci_dev *pdev, u32 val); -u32 pch_src_uuid_lo_read(struct pci_dev *pdev); -u32 pch_src_uuid_hi_read(struct pci_dev *pdev); -u64 pch_rx_snap_read(struct pci_dev *pdev); -u64 pch_tx_snap_read(struct pci_dev *pdev); -int pch_set_station_address(u8 *addr, struct pci_dev *pdev); /* pch_gbe_param.c */ void pch_gbe_check_options(struct pch_gbe_adapter *adapter); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 140cee7c459d..334af49e5add 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/net_tstamp.h> #include <linux/ptp_classify.h> +#include <linux/ptp_pch.h> #include <linux/gpio.h> #define DRV_VERSION "1.01" diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index b0d8499d373b..e4a5416adc80 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -184,6 +184,10 @@ static int ionic_sriov_configure(struct pci_dev *pdev, int num_vfs) struct device *dev = ionic->dev; int ret = 0; + if (ionic->lif && + test_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state)) + return -EBUSY; + if (num_vfs > 0) { ret = pci_enable_sriov(pdev, num_vfs); if (ret) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 
fb2b5bf179d7..0532f7cf086d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -14,18 +14,20 @@ static void ionic_watchdog_cb(struct timer_list *t) { struct ionic *ionic = from_timer(ionic, t, watchdog_timer); + struct ionic_lif *lif = ionic->lif; int hb; mod_timer(&ionic->watchdog_timer, round_jiffies(jiffies + ionic->watchdog_period)); - if (!ionic->lif) + if (!lif) return; hb = ionic_heartbeat_check(ionic); - if (hb >= 0) - ionic_link_status_check_request(ionic->lif, CAN_NOT_SLEEP); + if (hb >= 0 && + !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + ionic_link_status_check_request(lif, CAN_NOT_SLEEP); } void ionic_init_devinfo(struct ionic *ionic) @@ -585,9 +587,9 @@ void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa) void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, void *cb_arg) { - struct device *dev = q->lif->ionic->dev; struct ionic_desc_info *desc_info; struct ionic_lif *lif = q->lif; + struct device *dev = q->dev; desc_info = &q->info[q->head_idx]; desc_info->cb = cb; @@ -629,7 +631,7 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info, /* stop index must be for a descriptor that is not yet completed */ if (unlikely(!ionic_q_is_posted(q, stop_index))) - dev_err(q->lif->ionic->dev, + dev_err(q->dev, "ionic stop is not posted %s stop %u tail %u head %u\n", q->name, stop_index, q->tail_idx, q->head_idx); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index 690768ff0143..ca7e55455165 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -170,11 +170,20 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q, struct ionic_desc_info *desc_info, struct ionic_cq_info *cq_info, void *cb_arg); -struct ionic_page_info { +#define IONIC_PAGE_SIZE PAGE_SIZE +#define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 2) +#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\ + __GFP_COMP | __GFP_MEMALLOC) + +struct ionic_buf_info { struct page *page; dma_addr_t dma_addr; + u32 page_offset; + u32 len; }; +#define IONIC_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1) + struct ionic_desc_info { union { void *desc; @@ -187,8 +196,9 @@ struct ionic_desc_info { struct ionic_txq_sg_desc *txq_sg_desc; struct ionic_rxq_sg_desc *rxq_sgl_desc; }; - unsigned int npages; - struct ionic_page_info pages[IONIC_RX_MAX_SG_ELEMS + 1]; + unsigned int bytes; + unsigned int nbufs; + struct ionic_buf_info bufs[IONIC_MAX_FRAGS]; ionic_desc_cb cb; void *cb_arg; }; @@ -199,10 +209,12 @@ struct ionic_queue { struct device *dev; struct ionic_lif *lif; struct ionic_desc_info *info; + u64 dbval; u16 head_idx; u16 tail_idx; unsigned int index; unsigned int num_descs; + unsigned int max_sg_elems; u64 dbell_count; u64 stop; u64 wake; @@ -211,7 +223,6 @@ struct ionic_queue { unsigned int type; unsigned int hw_index; unsigned int hw_type; - u64 dbval; union { void *base; struct ionic_txq_desc *txq; @@ -229,7 +240,7 @@ struct ionic_queue { unsigned int sg_desc_size; unsigned int pid; char name[IONIC_QUEUE_NAME_MAX_SZ]; -}; +} ____cacheline_aligned_in_smp; #define IONIC_INTR_INDEX_NOT_ASSIGNED -1 #define IONIC_INTR_NAME_MAX_SZ 32 @@ -256,7 +267,7 @@ struct ionic_cq { u64 compl_count; void *base; dma_addr_t base_pa; -}; +} ____cacheline_aligned_in_smp; struct ionic; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 
0832bedcb3b4..b1e78b452fad 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -29,11 +29,9 @@ static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf) static void ionic_get_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *buf) { - struct ionic_lif *lif; + struct ionic_lif *lif = netdev_priv(netdev); u32 i; - lif = netdev_priv(netdev); - memset(buf, 0, stats->n_stats * sizeof(*buf)); for (i = 0; i < ionic_num_stats_grps; i++) ionic_stats_groups[i].get_values(lif, &buf); @@ -209,6 +207,14 @@ static int ionic_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseER_Full); break; + case IONIC_XCVR_PID_SFP_10GBASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + break; + case IONIC_XCVR_PID_SFP_1000BASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + break; case IONIC_XCVR_PID_UNKNOWN: /* This means there's no module plugged in */ break; @@ -264,12 +270,10 @@ static int ionic_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *ks) { struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev = &lif->ionic->idev; struct ionic *ionic = lif->ionic; - struct ionic_dev *idev; int err = 0; - idev = &lif->ionic->idev; - /* set autoneg */ if (ks->base.autoneg != idev->port_info->config.an_enable) { mutex_lock(&ionic->dev_cmd_lock); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 31ccfcdc2b0a..88210142395d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -320,7 +320,7 @@ struct ionic_lif_identify_comp { /** * enum ionic_lif_capability - LIF capabilities * @IONIC_LIF_CAP_ETH: LIF supports Ethernet - * @IONIC_LIF_CAP_RDMA: LIF support RDMA + * @IONIC_LIF_CAP_RDMA: LIF supports RDMA */ enum ionic_lif_capability { IONIC_LIF_CAP_ETH = BIT(0), @@ -404,7 +404,7 @@ union ionic_lif_config { * @max_ucast_filters: Number of perfect unicast addresses supported * @max_mcast_filters: Number of perfect multicast addresses supported * @min_frame_size: Minimum size of frames to be sent - * @max_frame_size: Maximim size of frames to be sent + * @max_frame_size: Maximum size of frames to be sent * @config: LIF config struct with features, mtu, mac, q counts * * @rdma: RDMA identify structure @@ -692,7 +692,7 @@ enum ionic_txq_desc_opcode { * checksums are also updated. * * IONIC_TXQ_DESC_OPCODE_TSO: - * Device preforms TCP segmentation offload + * Device performs TCP segmentation offload * (TSO). @hdr_len is the number of bytes * to the end of TCP header (the offset to * the TCP payload). 
@mss is the desired @@ -982,13 +982,13 @@ struct ionic_rxq_comp { }; enum ionic_pkt_type { - IONIC_PKT_TYPE_NON_IP = 0x000, - IONIC_PKT_TYPE_IPV4 = 0x001, - IONIC_PKT_TYPE_IPV4_TCP = 0x003, - IONIC_PKT_TYPE_IPV4_UDP = 0x005, - IONIC_PKT_TYPE_IPV6 = 0x008, - IONIC_PKT_TYPE_IPV6_TCP = 0x018, - IONIC_PKT_TYPE_IPV6_UDP = 0x028, + IONIC_PKT_TYPE_NON_IP = 0x00, + IONIC_PKT_TYPE_IPV4 = 0x01, + IONIC_PKT_TYPE_IPV4_TCP = 0x03, + IONIC_PKT_TYPE_IPV4_UDP = 0x05, + IONIC_PKT_TYPE_IPV6 = 0x08, + IONIC_PKT_TYPE_IPV6_TCP = 0x18, + IONIC_PKT_TYPE_IPV6_UDP = 0x28, /* below types are only used if encap offloads are enabled on lif */ IONIC_PKT_TYPE_ENCAP_NON_IP = 0x40, IONIC_PKT_TYPE_ENCAP_IPV4 = 0x41, @@ -1111,6 +1111,8 @@ enum ionic_xcvr_pid { IONIC_XCVR_PID_QSFP_100G_CWDM4 = 69, IONIC_XCVR_PID_QSFP_100G_PSM4 = 70, IONIC_XCVR_PID_SFP_25GBASE_ACC = 71, + IONIC_XCVR_PID_SFP_10GBASE_T = 72, + IONIC_XCVR_PID_SFP_1000BASE_T = 73, }; /** @@ -1331,7 +1333,7 @@ enum ionic_stats_ctl_cmd { * @IONIC_PORT_ATTR_STATE: Port state attribute * @IONIC_PORT_ATTR_SPEED: Port speed attribute * @IONIC_PORT_ATTR_MTU: Port MTU attribute - * @IONIC_PORT_ATTR_AUTONEG: Port autonegotation attribute + * @IONIC_PORT_ATTR_AUTONEG: Port autonegotiation attribute * @IONIC_PORT_ATTR_FEC: Port FEC attribute * @IONIC_PORT_ATTR_PAUSE: Port pause attribute * @IONIC_PORT_ATTR_LOOPBACK: Port loopback attribute @@ -1951,8 +1953,8 @@ enum ionic_qos_sched_type { * @pfc_cos: Priority-Flow Control class of service * @dwrr_weight: QoS class scheduling weight * @strict_rlmt: Rate limit for strict priority scheduling - * @rw_dot1q_pcp: Rewrite dot1q pcp to this value (valid iff F_RW_DOT1Q_PCP) - * @rw_ip_dscp: Rewrite ip dscp to this value (valid iff F_RW_IP_DSCP) + * @rw_dot1q_pcp: Rewrite dot1q pcp to value (valid iff F_RW_DOT1Q_PCP) + * @rw_ip_dscp: Rewrite ip dscp to value (valid iff F_RW_IP_DSCP) * @dot1q_pcp: Dot1q pcp value * @ndscp: Number of valid dscp values in the ip_dscp field * @ip_dscp: IP dscp values diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 11140915c2da..889d234e2ffa 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -120,23 +120,34 @@ static void ionic_link_status_check(struct ionic_lif *lif) if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) return; + /* Don't put carrier back up if we're in a broken state */ + if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) { + clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); + return; + } + link_status = le16_to_cpu(lif->info->status.link_status); link_up = link_status == IONIC_PORT_OPER_STATUS_UP; if (link_up) { - if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { + int err = 0; + + if (netdev->flags & IFF_UP && netif_running(netdev)) { mutex_lock(&lif->queue_lock); - ionic_start_queues(lif); + err = ionic_start_queues(lif); + if (err) { + netdev_err(lif->netdev, + "Failed to start queues: %d\n", err); + set_bit(IONIC_LIF_F_BROKEN, lif->state); + netif_carrier_off(lif->netdev); + } mutex_unlock(&lif->queue_lock); } - if (!netif_carrier_ok(netdev)) { - u32 link_speed; - + if (!err && !netif_carrier_ok(netdev)) { ionic_port_identify(lif->ionic); - link_speed = le32_to_cpu(lif->info->status.link_speed); netdev_info(netdev, "Link up - %d Gbps\n", - link_speed / 1000); + le32_to_cpu(lif->info->status.link_speed) / 1000); netif_carrier_on(netdev); } } else { @@ -145,7 +156,7 @@ static void ionic_link_status_check(struct ionic_lif 
*lif) netif_carrier_off(netdev); } - if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { + if (netdev->flags & IFF_UP && netif_running(netdev)) { mutex_lock(&lif->queue_lock); ionic_stop_queues(lif); mutex_unlock(&lif->queue_lock); @@ -382,6 +393,8 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) static void ionic_qcqs_free(struct ionic_lif *lif) { struct device *dev = lif->ionic->dev; + struct ionic_qcq *adminqcq; + unsigned long irqflags; if (lif->notifyqcq) { ionic_qcq_free(lif, lif->notifyqcq); @@ -390,9 +403,14 @@ static void ionic_qcqs_free(struct ionic_lif *lif) } if (lif->adminqcq) { - ionic_qcq_free(lif, lif->adminqcq); - devm_kfree(dev, lif->adminqcq); + spin_lock_irqsave(&lif->adminq_lock, irqflags); + adminqcq = READ_ONCE(lif->adminqcq); lif->adminqcq = NULL; + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + if (adminqcq) { + ionic_qcq_free(lif, adminqcq); + devm_kfree(dev, adminqcq); + } } if (lif->rxqcqs) { @@ -495,6 +513,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, goto err_out; } + new->q.dev = dev; new->flags = flags; new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info), @@ -506,6 +525,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, } new->q.type = type; + new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems; err = ionic_q_init(lif, idev, &new->q, index, name, num_descs, desc_size, sg_desc_size, pid); @@ -716,10 +736,8 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) unsigned int intr_index; int err; - if (qcq->flags & IONIC_QCQ_F_INTR) - intr_index = qcq->intr.index; - else - intr_index = lif->rxqcqs[q->index]->intr.index; + intr_index = qcq->intr.index; + ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index); dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid); @@ -837,7 +855,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq, switch (le16_to_cpu(comp->event.ecode)) { case IONIC_EVENT_LINK_CHANGE: - ionic_link_status_check_request(lif, false); + ionic_link_status_check_request(lif, CAN_NOT_SLEEP); break; case IONIC_EVENT_RESET: work = kzalloc(sizeof(*work), GFP_ATOMIC); @@ -875,6 +893,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget) struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr; struct ionic_lif *lif = napi_to_cq(napi)->lif; struct ionic_dev *idev = &lif->ionic->idev; + unsigned long irqflags; unsigned int flags = 0; int n_work = 0; int a_work = 0; @@ -884,14 +903,16 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget) n_work = ionic_cq_service(&lif->notifyqcq->cq, budget, ionic_notifyq_service, NULL, NULL); + spin_lock_irqsave(&lif->adminq_lock, irqflags); if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED) a_work = ionic_cq_service(&lif->adminqcq->cq, budget, ionic_adminq_service, NULL, NULL); + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); work_done = max(n_work, a_work); if (work_done < budget && napi_complete_done(napi, work_done)) { flags |= IONIC_INTR_CRED_UNMASK; - lif->adminqcq->cq.bound_intr->rearm_count++; + intr->rearm_count++; } if (work_done || flags) { @@ -1441,7 +1462,7 @@ static int ionic_start_queues_reconfig(struct ionic_lif *lif) */ err = ionic_txrx_init(lif); mutex_unlock(&lif->queue_lock); - ionic_link_status_check_request(lif, true); + ionic_link_status_check_request(lif, CAN_SLEEP); netif_device_attach(lif->netdev); return err; @@ -1480,7 +1501,8 @@ static void ionic_tx_timeout_work(struct work_struct *ws) { struct 
ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work); - netdev_info(lif->netdev, "Tx Timeout recovery\n"); + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return; /* if we were stopped before this scheduled job was launched, * don't bother the queues as they are already stopped. @@ -1496,6 +1518,7 @@ static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct ionic_lif *lif = netdev_priv(netdev); + netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue); schedule_work(&lif->tx_timeout_work); } @@ -1837,6 +1860,12 @@ static int ionic_start_queues(struct ionic_lif *lif) { int err; + if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) + return -EIO; + + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + if (test_and_set_bit(IONIC_LIF_F_UP, lif->state)) return 0; @@ -1855,13 +1884,17 @@ static int ionic_open(struct net_device *netdev) struct ionic_lif *lif = netdev_priv(netdev); int err; + /* If recovering from a broken state, clear the bit and we'll try again */ + if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) + netdev_info(netdev, "clearing broken state\n"); + err = ionic_txrx_alloc(lif); if (err) return err; err = ionic_txrx_init(lif); if (err) - goto err_out; + goto err_txrx_free; err = netif_set_real_num_tx_queues(netdev, lif->nxqs); if (err) @@ -1882,7 +1915,7 @@ static int ionic_open(struct net_device *netdev) err_txrx_deinit: ionic_txrx_deinit(lif); -err_out: +err_txrx_free: ionic_txrx_free(lif); return err; } @@ -2202,6 +2235,9 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) swap(a->cq_base, b->cq_base); swap(a->cq_base_pa, b->cq_base_pa); swap(a->cq_size, b->cq_size); + + ionic_debugfs_del_qcq(a); + ionic_debugfs_add_qcq(a->q.lif, a); } int ionic_reconfigure_queues(struct ionic_lif *lif, @@ -2351,7 +2387,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, swap(lif->nxqs, qparam->nxqs); err_out_reinit_unlock: - /* re-init the queues, but don't loose an error code */ + /* re-init the queues, but don't lose an error code */ if (err) ionic_start_queues_reconfig(lif); else @@ -2450,7 +2486,6 @@ int ionic_lif_alloc(struct ionic *ionic) lif->index = 0; lif->ntxq_descs = IONIC_DEF_TXRX_DESC; lif->nrxq_descs = IONIC_DEF_TXRX_DESC; - lif->tx_budget = IONIC_TX_BUDGET_DEFAULT; /* Convert the default coalesce value to actual hw resolution */ lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; @@ -2601,7 +2636,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif) } clear_bit(IONIC_LIF_F_FW_RESET, lif->state); - ionic_link_status_check_request(lif, true); + ionic_link_status_check_request(lif, CAN_SLEEP); netif_device_attach(lif->netdev); dev_info(ionic->dev, "FW Up: LIFs restarted\n"); @@ -2972,7 +3007,7 @@ int ionic_lif_register(struct ionic_lif *lif) return err; } - ionic_link_status_check_request(lif, true); + ionic_link_status_check_request(lif, CAN_SLEEP); lif->registered = true; ionic_lif_set_netdev_info(lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 563dba384a53..be5cc89b2bd9 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -139,6 +139,7 @@ enum ionic_lif_state_flags { IONIC_LIF_F_LINK_CHECK_REQUESTED, IONIC_LIF_F_FW_RESET, IONIC_LIF_F_SPLIT_INTR, + IONIC_LIF_F_BROKEN, IONIC_LIF_F_TX_DIM_INTR, IONIC_LIF_F_RX_DIM_INTR, @@ -159,16 +160,11 @@ struct ionic_qtype_info { #define IONIC_LIF_NAME_MAX_SZ 32 struct ionic_lif { - char 
name[IONIC_LIF_NAME_MAX_SZ]; - struct list_head list; struct net_device *netdev; DECLARE_BITMAP(state, IONIC_LIF_F_STATE_SIZE); struct ionic *ionic; - bool registered; unsigned int index; unsigned int hw_index; - unsigned int kern_pid; - u64 __iomem *kern_dbpage; struct mutex queue_lock; /* lock for queue structures */ spinlock_t adminq_lock; /* lock for AdminQ operations */ struct ionic_qcq *adminqcq; @@ -177,20 +173,25 @@ struct ionic_lif { struct ionic_tx_stats *txqstats; struct ionic_qcq **rxqcqs; struct ionic_rx_stats *rxqstats; + struct ionic_deferred deferred; + struct work_struct tx_timeout_work; u64 last_eid; + unsigned int kern_pid; + u64 __iomem *kern_dbpage; unsigned int neqs; unsigned int nxqs; unsigned int ntxq_descs; unsigned int nrxq_descs; u32 rx_copybreak; - u32 tx_budget; unsigned int rx_mode; u64 hw_features; + bool registered; bool mc_overflow; - unsigned int nmcast; bool uc_overflow; u16 lif_type; + unsigned int nmcast; unsigned int nucast; + char name[IONIC_LIF_NAME_MAX_SZ]; union ionic_lif_identity *identity; struct ionic_lif_info *info; @@ -205,16 +206,14 @@ struct ionic_lif { u32 rss_ind_tbl_sz; struct ionic_rx_filters rx_filters; - struct ionic_deferred deferred; - unsigned long *dbid_inuse; - unsigned int dbid_count; - struct dentry *dentry; u32 rx_coalesce_usecs; /* what the user asked for */ u32 rx_coalesce_hw; /* what the hw is using */ u32 tx_coalesce_usecs; /* what the user asked for */ u32 tx_coalesce_hw; /* what the hw is using */ + unsigned long *dbid_inuse; + unsigned int dbid_count; - struct work_struct tx_timeout_work; + struct dentry *dentry; }; struct ionic_queue_params { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index fbc57de6683e..c4b2906a2ae6 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -187,10 +187,17 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode) static void ionic_adminq_flush(struct ionic_lif *lif) { - struct ionic_queue *q = &lif->adminqcq->q; struct ionic_desc_info *desc_info; + unsigned long irqflags; + struct ionic_queue *q; + + spin_lock_irqsave(&lif->adminq_lock, irqflags); + if (!lif->adminqcq) { + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); + return; + } - spin_lock(&lif->adminq_lock); + q = &lif->adminqcq->q; while (q->tail_idx != q->head_idx) { desc_info = &q->info[q->tail_idx]; @@ -199,7 +206,7 @@ static void ionic_adminq_flush(struct ionic_lif *lif) desc_info->cb_arg = NULL; q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); } - spin_unlock(&lif->adminq_lock); + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); } static int ionic_adminq_check_err(struct ionic_lif *lif, @@ -234,17 +241,15 @@ static void ionic_adminq_cb(struct ionic_queue *q, { struct ionic_admin_ctx *ctx = cb_arg; struct ionic_admin_comp *comp; - struct device *dev; if (!ctx) return; comp = cq_info->cq_desc; - dev = &q->lif->netdev->dev; memcpy(&ctx->comp, comp, sizeof(*comp)); - dev_dbg(dev, "comp admin queue command:\n"); + dev_dbg(q->dev, "comp admin queue command:\n"); dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1, &ctx->comp, sizeof(ctx->comp), true); @@ -254,15 +259,18 @@ static void ionic_adminq_cb(struct ionic_queue *q, static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) { struct ionic_desc_info *desc_info; + unsigned long irqflags; struct ionic_queue *q; int err = 0; - if (!lif->adminqcq) + spin_lock_irqsave(&lif->adminq_lock, irqflags); + 
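/* the adminqcq can be freed during a FW reset, so only test it while holding adminq_lock */ 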
if (!lif->adminqcq) { + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); return -EIO; + } q = &lif->adminqcq->q; - spin_lock(&lif->adminq_lock); if (!ionic_q_has_space(q, 1)) { err = -ENOSPC; goto err_out; @@ -282,7 +290,7 @@ static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) ionic_q_post(q, true, ionic_adminq_cb, ctx); err_out: - spin_unlock(&lif->adminq_lock); + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); return err; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c index 6ae75b771a15..308b4ac6c57b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c @@ -246,98 +246,73 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif) return total; } +static void ionic_sw_stats_get_tx_strings(struct ionic_lif *lif, u8 **buf, + int q_num) +{ + int i; + + for (i = 0; i < IONIC_NUM_TX_STATS; i++) + ethtool_sprintf(buf, "tx_%d_%s", q_num, + ionic_tx_stats_desc[i].name); + + if (!test_bit(IONIC_LIF_F_UP, lif->state) || + !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) + return; + + for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) + ethtool_sprintf(buf, "txq_%d_%s", q_num, + ionic_txq_stats_desc[i].name); + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) + ethtool_sprintf(buf, "txq_%d_cq_%s", q_num, + ionic_dbg_cq_stats_desc[i].name); + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) + ethtool_sprintf(buf, "txq_%d_intr_%s", q_num, + ionic_dbg_intr_stats_desc[i].name); + for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) + ethtool_sprintf(buf, "txq_%d_sg_cntr_%d", q_num, i); +} + +static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf, + int q_num) +{ + int i; + + for (i = 0; i < IONIC_NUM_RX_STATS; i++) + ethtool_sprintf(buf, "rx_%d_%s", q_num, + ionic_rx_stats_desc[i].name); + + if (!test_bit(IONIC_LIF_F_UP, lif->state) || + !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) + return; + + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) + ethtool_sprintf(buf, "rxq_%d_cq_%s", q_num, + ionic_dbg_cq_stats_desc[i].name); + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) + ethtool_sprintf(buf, "rxq_%d_intr_%s", q_num, + ionic_dbg_intr_stats_desc[i].name); + for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) + ethtool_sprintf(buf, "rxq_%d_napi_%s", q_num, + ionic_dbg_napi_stats_desc[i].name); + for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) + ethtool_sprintf(buf, "rxq_%d_napi_work_done_%d", q_num, i); +} + static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf) { int i, q_num; - for (i = 0; i < IONIC_NUM_LIF_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, ionic_lif_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } + for (i = 0; i < IONIC_NUM_LIF_STATS; i++) + ethtool_sprintf(buf, ionic_lif_stats_desc[i].name); - for (i = 0; i < IONIC_NUM_PORT_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - ionic_port_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } + for (i = 0; i < IONIC_NUM_PORT_STATS; i++) + ethtool_sprintf(buf, ionic_port_stats_desc[i].name); - for (q_num = 0; q_num < MAX_Q(lif); q_num++) { - for (i = 0; i < IONIC_NUM_TX_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, "tx_%d_%s", - q_num, ionic_tx_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } + for (q_num = 0; q_num < MAX_Q(lif); q_num++) + ionic_sw_stats_get_tx_strings(lif, buf, q_num); - if (test_bit(IONIC_LIF_F_UP, lif->state) && - test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) { - for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) { - snprintf(*buf, 
ETH_GSTRING_LEN, - "txq_%d_%s", - q_num, - ionic_txq_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "txq_%d_cq_%s", - q_num, - ionic_dbg_cq_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "txq_%d_intr_%s", - q_num, - ionic_dbg_intr_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "txq_%d_sg_cntr_%d", - q_num, i); - *buf += ETH_GSTRING_LEN; - } - } - } - for (q_num = 0; q_num < MAX_Q(lif); q_num++) { - for (i = 0; i < IONIC_NUM_RX_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "rx_%d_%s", - q_num, ionic_rx_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - - if (test_bit(IONIC_LIF_F_UP, lif->state) && - test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) { - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "rxq_%d_cq_%s", - q_num, - ionic_dbg_cq_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "rxq_%d_intr_%s", - q_num, - ionic_dbg_intr_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "rxq_%d_napi_%s", - q_num, - ionic_dbg_napi_stats_desc[i].name); - *buf += ETH_GSTRING_LEN; - } - for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { - snprintf(*buf, ETH_GSTRING_LEN, - "rxq_%d_napi_work_done_%d", - q_num, i); - *buf += ETH_GSTRING_LEN; - } - } - } + for (q_num = 0; q_num < MAX_Q(lif); q_num++) + ionic_sw_stats_get_rx_strings(lif, buf, q_num); } static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 162a1ff1e9d2..5985f7c504a9 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -10,12 +10,6 @@ #include "ionic_lif.h" #include "ionic_txrx.h" -static void ionic_rx_clean(struct ionic_queue *q, - struct ionic_desc_info *desc_info, - struct ionic_cq_info *cq_info, - void *cb_arg); - -static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); @@ -40,72 +34,149 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q) return netdev_get_tx_queue(q->lif->netdev, q->index); } -static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q, - unsigned int len, bool frags) +static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info) +{ + buf_info->page = NULL; + buf_info->page_offset = 0; + buf_info->dma_addr = 0; +} + +static int ionic_rx_page_alloc(struct ionic_queue *q, + struct ionic_buf_info *buf_info) { - struct ionic_lif *lif = q->lif; + struct net_device *netdev = q->lif->netdev; struct ionic_rx_stats *stats; - struct net_device *netdev; - struct sk_buff *skb; + struct device *dev; - netdev = lif->netdev; - stats = &q->lif->rxqstats[q->index]; + dev = q->dev; + stats = q_to_rx_stats(q); - if (frags) - skb = napi_get_frags(&q_to_qcq(q)->napi); - else - skb = netdev_alloc_skb_ip_align(netdev, len); + if (unlikely(!buf_info)) { + net_err_ratelimited("%s: %s invalid buf_info in alloc\n", + netdev->name, q->name); + return -EINVAL; + } - if (unlikely(!skb)) { - net_warn_ratelimited("%s: SKB alloc failed on 
%s!\n", - netdev->name, q->name); + buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0); + if (unlikely(!buf_info->page)) { + net_err_ratelimited("%s: %s page alloc failed\n", + netdev->name, q->name); stats->alloc_err++; - return NULL; + return -ENOMEM; } + buf_info->page_offset = 0; - return skb; + buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset, + IONIC_PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) { + __free_pages(buf_info->page, 0); + ionic_rx_buf_reset(buf_info); + net_err_ratelimited("%s: %s dma map failed\n", + netdev->name, q->name); + stats->dma_map_err++; + return -EIO; + } + + return 0; +} + +static void ionic_rx_page_free(struct ionic_queue *q, + struct ionic_buf_info *buf_info) +{ + struct net_device *netdev = q->lif->netdev; + struct device *dev = q->dev; + + if (unlikely(!buf_info)) { + net_err_ratelimited("%s: %s invalid buf_info in free\n", + netdev->name, q->name); + return; + } + + if (!buf_info->page) + return; + + dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); + __free_pages(buf_info->page, 0); + ionic_rx_buf_reset(buf_info); +} + +static bool ionic_rx_buf_recycle(struct ionic_queue *q, + struct ionic_buf_info *buf_info, u32 used) +{ + u32 size; + + /* don't re-use pages allocated in low-mem condition */ + if (page_is_pfmemalloc(buf_info->page)) + return false; + + /* don't re-use buffers from non-local numa nodes */ + if (page_to_nid(buf_info->page) != numa_mem_id()) + return false; + + size = ALIGN(used, IONIC_PAGE_SPLIT_SZ); + buf_info->page_offset += size; + if (buf_info->page_offset >= IONIC_PAGE_SIZE) + return false; + + get_page(buf_info->page); + + return true; } static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, struct ionic_desc_info *desc_info, - struct ionic_cq_info *cq_info) + struct ionic_rxq_comp *comp) { - struct ionic_rxq_comp *comp = cq_info->cq_desc; - struct device *dev = q->lif->ionic->dev; - struct ionic_page_info *page_info; + struct net_device *netdev = q->lif->netdev; + struct ionic_buf_info *buf_info; + struct ionic_rx_stats *stats; + struct device *dev = q->dev; struct sk_buff *skb; unsigned int i; u16 frag_len; u16 len; - page_info = &desc_info->pages[0]; + stats = q_to_rx_stats(q); + + buf_info = &desc_info->bufs[0]; len = le16_to_cpu(comp->len); - prefetch(page_address(page_info->page) + NET_IP_ALIGN); + prefetch(buf_info->page); - skb = ionic_rx_skb_alloc(q, len, true); - if (unlikely(!skb)) + skb = napi_get_frags(&q_to_qcq(q)->napi); + if (unlikely(!skb)) { + net_warn_ratelimited("%s: SKB alloc failed on %s!\n", + netdev->name, q->name); + stats->alloc_err++; return NULL; + } i = comp->num_sg_elems + 1; do { - if (unlikely(!page_info->page)) { - struct napi_struct *napi = &q_to_qcq(q)->napi; - - napi->skb = NULL; + if (unlikely(!buf_info->page)) { dev_kfree_skb(skb); return NULL; } - frag_len = min(len, (u16)PAGE_SIZE); + frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset); len -= frag_len; - dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr), - PAGE_SIZE, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(dev, + buf_info->dma_addr + buf_info->page_offset, + frag_len, DMA_FROM_DEVICE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - page_info->page, 0, frag_len, PAGE_SIZE); - page_info->page = NULL; - page_info++; + buf_info->page, buf_info->page_offset, frag_len, + IONIC_PAGE_SIZE); + + if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) { + dma_unmap_page(dev, buf_info->dma_addr, + IONIC_PAGE_SIZE, 
DMA_FROM_DEVICE); + ionic_rx_buf_reset(buf_info); + } + + buf_info++; + i--; } while (i > 0); @@ -114,30 +185,37 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q, struct ionic_desc_info *desc_info, - struct ionic_cq_info *cq_info) + struct ionic_rxq_comp *comp) { - struct ionic_rxq_comp *comp = cq_info->cq_desc; - struct device *dev = q->lif->ionic->dev; - struct ionic_page_info *page_info; + struct net_device *netdev = q->lif->netdev; + struct ionic_buf_info *buf_info; + struct ionic_rx_stats *stats; + struct device *dev = q->dev; struct sk_buff *skb; u16 len; - page_info = &desc_info->pages[0]; + stats = q_to_rx_stats(q); + + buf_info = &desc_info->bufs[0]; len = le16_to_cpu(comp->len); - skb = ionic_rx_skb_alloc(q, len, false); - if (unlikely(!skb)) + skb = napi_alloc_skb(&q_to_qcq(q)->napi, len); + if (unlikely(!skb)) { + net_warn_ratelimited("%s: SKB alloc failed on %s!\n", + netdev->name, q->name); + stats->alloc_err++; return NULL; + } - if (unlikely(!page_info->page)) { + if (unlikely(!buf_info->page)) { dev_kfree_skb(skb); return NULL; } - dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr), + dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset, len, DMA_FROM_DEVICE); - skb_copy_to_linear_data(skb, page_address(page_info->page), len); - dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr), + skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len); + dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset, len, DMA_FROM_DEVICE); skb_put(skb, len); @@ -151,14 +229,13 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_cq_info *cq_info, void *cb_arg) { - struct ionic_rxq_comp *comp = cq_info->cq_desc; + struct ionic_rxq_comp *comp = cq_info->rxcq; + struct net_device *netdev = q->lif->netdev; struct ionic_qcq *qcq = q_to_qcq(q); struct ionic_rx_stats *stats; - struct net_device *netdev; struct sk_buff *skb; stats = q_to_rx_stats(q); - netdev = q->lif->netdev; if (comp->status) { stats->dropped++; @@ -169,9 +246,9 @@ static void ionic_rx_clean(struct ionic_queue *q, stats->bytes += le16_to_cpu(comp->len); if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak) - skb = ionic_rx_copybreak(q, desc_info, cq_info); + skb = ionic_rx_copybreak(q, desc_info, comp); else - skb = ionic_rx_frags(q, desc_info, cq_info); + skb = ionic_rx_frags(q, desc_info, comp); if (unlikely(!skb)) { stats->dropped++; @@ -227,7 +304,7 @@ static void ionic_rx_clean(struct ionic_queue *q, static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) { - struct ionic_rxq_comp *comp = cq_info->cq_desc; + struct ionic_rxq_comp *comp = cq_info->rxcq; struct ionic_queue *q = cq->bound_q; struct ionic_desc_info *desc_info; @@ -253,138 +330,75 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) return true; } -static int ionic_rx_page_alloc(struct ionic_queue *q, - struct ionic_page_info *page_info) -{ - struct ionic_lif *lif = q->lif; - struct ionic_rx_stats *stats; - struct net_device *netdev; - struct device *dev; - - netdev = lif->netdev; - dev = lif->ionic->dev; - stats = q_to_rx_stats(q); - - if (unlikely(!page_info)) { - net_err_ratelimited("%s: %s invalid page_info in alloc\n", - netdev->name, q->name); - return -EINVAL; - } - - page_info->page = dev_alloc_page(); - if (unlikely(!page_info->page)) { - net_err_ratelimited("%s: %s page alloc failed\n", - netdev->name, q->name); - 
stats->alloc_err++; - return -ENOMEM; - } - - page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) { - put_page(page_info->page); - page_info->dma_addr = 0; - page_info->page = NULL; - net_err_ratelimited("%s: %s dma map failed\n", - netdev->name, q->name); - stats->dma_map_err++; - return -EIO; - } - - return 0; -} - -static void ionic_rx_page_free(struct ionic_queue *q, - struct ionic_page_info *page_info) -{ - struct ionic_lif *lif = q->lif; - struct net_device *netdev; - struct device *dev; - - netdev = lif->netdev; - dev = lif->ionic->dev; - - if (unlikely(!page_info)) { - net_err_ratelimited("%s: %s invalid page_info in free\n", - netdev->name, q->name); - return; - } - - if (unlikely(!page_info->page)) { - net_err_ratelimited("%s: %s invalid page in free\n", - netdev->name, q->name); - return; - } - - dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); - - put_page(page_info->page); - page_info->dma_addr = 0; - page_info->page = NULL; -} - void ionic_rx_fill(struct ionic_queue *q) { struct net_device *netdev = q->lif->netdev; struct ionic_desc_info *desc_info; - struct ionic_page_info *page_info; struct ionic_rxq_sg_desc *sg_desc; struct ionic_rxq_sg_elem *sg_elem; + struct ionic_buf_info *buf_info; struct ionic_rxq_desc *desc; unsigned int remain_len; - unsigned int seg_len; + unsigned int frag_len; unsigned int nfrags; unsigned int i, j; unsigned int len; len = netdev->mtu + ETH_HLEN + VLAN_HLEN; - nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE; for (i = ionic_q_space_avail(q); i; i--) { + nfrags = 0; remain_len = len; desc_info = &q->info[q->head_idx]; desc = desc_info->desc; - sg_desc = desc_info->sg_desc; - page_info = &desc_info->pages[0]; - - if (page_info->page) { /* recycle the buffer */ - ionic_rxq_post(q, false, ionic_rx_clean, NULL); - continue; - } + buf_info = &desc_info->bufs[0]; - /* fill main descriptor - pages[0] */ - desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG : - IONIC_RXQ_DESC_OPCODE_SIMPLE; - desc_info->npages = nfrags; - if (unlikely(ionic_rx_page_alloc(q, page_info))) { - desc->addr = 0; - desc->len = 0; - return; + if (!buf_info->page) { /* alloc a new buffer? */ + if (unlikely(ionic_rx_page_alloc(q, buf_info))) { + desc->addr = 0; + desc->len = 0; + return; + } } - desc->addr = cpu_to_le64(page_info->dma_addr); - seg_len = min_t(unsigned int, PAGE_SIZE, len); - desc->len = cpu_to_le16(seg_len); - remain_len -= seg_len; - page_info++; - /* fill sg descriptors - pages[1..n] */ - for (j = 0; j < nfrags - 1; j++) { - if (page_info->page) /* recycle the sg buffer */ - continue; + /* fill main descriptor - buf[0] */ + desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset); + frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset); + desc->len = cpu_to_le16(frag_len); + remain_len -= frag_len; + buf_info++; + nfrags++; + /* fill sg descriptors - buf[1..n] */ + sg_desc = desc_info->sg_desc; + for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) { sg_elem = &sg_desc->elems[j]; - if (unlikely(ionic_rx_page_alloc(q, page_info))) { - sg_elem->addr = 0; - sg_elem->len = 0; - return; + if (!buf_info->page) { /* alloc a new sg buffer? 
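(a buf_info that still holds a page was recycled by ionic_rx_buf_recycle() and is reused as-is) 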
*/ + if (unlikely(ionic_rx_page_alloc(q, buf_info))) { + sg_elem->addr = 0; + sg_elem->len = 0; + return; + } } - sg_elem->addr = cpu_to_le64(page_info->dma_addr); - seg_len = min_t(unsigned int, PAGE_SIZE, remain_len); - sg_elem->len = cpu_to_le16(seg_len); - remain_len -= seg_len; - page_info++; + + sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset); + frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset); + sg_elem->len = cpu_to_le16(frag_len); + remain_len -= frag_len; + buf_info++; + nfrags++; + } + + /* clear end sg element as a sentinel */ + if (j < q->max_sg_elems) { + sg_elem = &sg_desc->elems[j]; + memset(sg_elem, 0, sizeof(*sg_elem)); } + desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG : + IONIC_RXQ_DESC_OPCODE_SIMPLE; + desc_info->nbufs = nfrags; + ionic_rxq_post(q, false, ionic_rx_clean, NULL); } @@ -395,21 +409,24 @@ void ionic_rx_fill(struct ionic_queue *q) void ionic_rx_empty(struct ionic_queue *q) { struct ionic_desc_info *desc_info; - struct ionic_page_info *page_info; + struct ionic_buf_info *buf_info; unsigned int i, j; for (i = 0; i < q->num_descs; i++) { desc_info = &q->info[i]; for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) { - page_info = &desc_info->pages[j]; - if (page_info->page) - ionic_rx_page_free(q, page_info); + buf_info = &desc_info->bufs[j]; + if (buf_info->page) + ionic_rx_page_free(q, buf_info); } - desc_info->npages = 0; + desc_info->nbufs = 0; desc_info->cb = NULL; desc_info->cb_arg = NULL; } + + q->head_idx = 0; + q->tail_idx = 0; } static void ionic_dim_update(struct ionic_qcq *qcq) @@ -525,7 +542,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) idev = &lif->ionic->idev; txcq = &lif->txqcqs[qi]->cq; - tx_work_done = ionic_cq_service(txcq, lif->tx_budget, + tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, ionic_tx_service, NULL, NULL); rx_work_done = ionic_cq_service(rxcq, budget, @@ -558,7 +575,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len) { struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct device *dev = q->lif->ionic->dev; + struct device *dev = q->dev; dma_addr_t dma_addr; dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE); @@ -576,7 +593,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, size_t offset, size_t len) { struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct device *dev = q->lif->ionic->dev; + struct device *dev = q->dev; dma_addr_t dma_addr; dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE); @@ -588,42 +605,72 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, return dma_addr; } +static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb, + struct ionic_desc_info *desc_info) +{ + struct ionic_buf_info *buf_info = desc_info->bufs; + struct device *dev = q->dev; + dma_addr_t dma_addr; + unsigned int nfrags; + skb_frag_t *frag; + int frag_idx; + + dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); + if (dma_mapping_error(dev, dma_addr)) + return -EIO; + buf_info->dma_addr = dma_addr; + buf_info->len = skb_headlen(skb); + buf_info++; + + frag = skb_shinfo(skb)->frags; + nfrags = skb_shinfo(skb)->nr_frags; + for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) { + dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag)); + if (dma_mapping_error(dev, dma_addr)) + goto dma_fail; + buf_info->dma_addr = dma_addr; + buf_info->len = skb_frag_size(frag); + buf_info++; + } + + desc_info->nbufs = 1 + nfrags; + + return 0; + +dma_fail: + 
/* unwind the frag mappings and the head mapping */ + while (frag_idx > 0) { + frag_idx--; + buf_info--; + dma_unmap_page(dev, buf_info->dma_addr, + buf_info->len, DMA_TO_DEVICE); + } + dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE); + return -EIO; +} + static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info, struct ionic_cq_info *cq_info, void *cb_arg) { - struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc; - struct ionic_txq_sg_elem *elem = sg_desc->elems; + struct ionic_buf_info *buf_info = desc_info->bufs; struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct ionic_txq_desc *desc = desc_info->desc; - struct device *dev = q->lif->ionic->dev; - u8 opcode, flags, nsge; + struct device *dev = q->dev; u16 queue_index; unsigned int i; - u64 addr; - - decode_txq_desc_cmd(le64_to_cpu(desc->cmd), - &opcode, &flags, &nsge, &addr); - - /* use unmap_single only if either this is not TSO, - * or this is first descriptor of a TSO - */ - if (opcode != IONIC_TXQ_DESC_OPCODE_TSO || - flags & IONIC_TXQ_DESC_FLAG_TSO_SOT) - dma_unmap_single(dev, (dma_addr_t)addr, - le16_to_cpu(desc->len), DMA_TO_DEVICE); - else - dma_unmap_page(dev, (dma_addr_t)addr, - le16_to_cpu(desc->len), DMA_TO_DEVICE); - for (i = 0; i < nsge; i++, elem++) - dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr), - le16_to_cpu(elem->len), DMA_TO_DEVICE); + if (desc_info->nbufs) { + dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr, + buf_info->len, DMA_TO_DEVICE); + buf_info++; + for (i = 1; i < desc_info->nbufs; i++, buf_info++) + dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr, + buf_info->len, DMA_TO_DEVICE); + } if (cb_arg) { struct sk_buff *skb = cb_arg; - u32 len = skb->len; queue_index = skb_get_queue_mapping(skb); if (unlikely(__netif_subqueue_stopped(q->lif->netdev, @@ -631,17 +678,21 @@ static void ionic_tx_clean(struct ionic_queue *q, netif_wake_subqueue(q->lif->netdev, queue_index); q->wake++; } - dev_kfree_skb_any(skb); + + desc_info->bytes = skb->len; stats->clean++; - netdev_tx_completed_queue(q_to_ndq(q), 1, len); + + dev_consume_skb_any(skb); } } static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) { - struct ionic_txq_comp *comp = cq_info->cq_desc; + struct ionic_txq_comp *comp = cq_info->txcq; struct ionic_queue *q = cq->bound_q; struct ionic_desc_info *desc_info; + int bytes = 0; + int pkts = 0; u16 index; if (!color_match(comp->color, cq->done_color)) @@ -652,13 +703,21 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) */ do { desc_info = &q->info[q->tail_idx]; + desc_info->bytes = 0; index = q->tail_idx; q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg); + if (desc_info->cb_arg) { + pkts++; + bytes += desc_info->bytes; + } desc_info->cb = NULL; desc_info->cb_arg = NULL; } while (index != le16_to_cpu(comp->comp_index)); + if (pkts && bytes) + netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes); + return true; } @@ -677,15 +736,25 @@ void ionic_tx_flush(struct ionic_cq *cq) void ionic_tx_empty(struct ionic_queue *q) { struct ionic_desc_info *desc_info; + int bytes = 0; + int pkts = 0; /* walk the not completed tx entries, if any */ while (q->head_idx != q->tail_idx) { desc_info = &q->info[q->tail_idx]; + desc_info->bytes = 0; q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg); + if (desc_info->cb_arg) { + pkts++; + bytes += desc_info->bytes; + } desc_info->cb = NULL; 
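/* drop the callback hooks so a stale completion cannot run twice */ 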
desc_info->cb_arg = NULL; } + + if (pkts && bytes) + netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes); } static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb) @@ -756,50 +825,33 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc desc->hdr_len = cpu_to_le16(hdrlen); desc->mss = cpu_to_le16(mss); - if (done) { + if (start) { skb_tx_timestamp(skb); netdev_tx_sent_queue(q_to_ndq(q), skb->len); - ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb); + ionic_txq_post(q, false, ionic_tx_clean, skb); } else { - ionic_txq_post(q, false, ionic_tx_clean, NULL); + ionic_txq_post(q, done, NULL, NULL); } } -static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q, - struct ionic_txq_sg_elem **elem) -{ - struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc; - struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc; - - *elem = sg_desc->elems; - return desc; -} - static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb) { struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct ionic_desc_info *rewind_desc_info; - struct device *dev = q->lif->ionic->dev; + struct ionic_desc_info *desc_info; + struct ionic_buf_info *buf_info; struct ionic_txq_sg_elem *elem; struct ionic_txq_desc *desc; - unsigned int frag_left = 0; - unsigned int offset = 0; - u16 abort = q->head_idx; - unsigned int len_left; + unsigned int chunk_len; + unsigned int frag_rem; + unsigned int tso_rem; + unsigned int seg_rem; dma_addr_t desc_addr; + dma_addr_t frag_addr; unsigned int hdrlen; - unsigned int nfrags; - unsigned int seglen; - u64 total_bytes = 0; - u64 total_pkts = 0; - u16 rewind = abort; - unsigned int left; unsigned int len; unsigned int mss; - skb_frag_t *frag; bool start, done; bool outer_csum; - dma_addr_t addr; bool has_vlan; u16 desc_len; u8 desc_nsge; @@ -807,9 +859,14 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb) bool encap; int err; + desc_info = &q->info[q->head_idx]; + buf_info = desc_info->bufs; + + if (unlikely(ionic_tx_map_skb(q, skb, desc_info))) + return -EIO; + + len = skb->len; mss = skb_shinfo(skb)->gso_size; - nfrags = skb_shinfo(skb)->nr_frags; - len_left = skb->len - skb_headlen(skb); outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) || (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM); has_vlan = !!skb_vlan_tag_present(skb); @@ -834,125 +891,75 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb) else hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); - seglen = hdrlen + mss; - left = skb_headlen(skb); + tso_rem = len; + seg_rem = min(tso_rem, hdrlen + mss); - desc = ionic_tx_tso_next(q, &elem); - start = true; + frag_addr = 0; + frag_rem = 0; - /* Chop skb->data up into desc segments */ + start = true; - while (left > 0) { - len = min(seglen, left); - frag_left = seglen - len; - desc_addr = ionic_tx_map_single(q, skb->data + offset, len); - if (dma_mapping_error(dev, desc_addr)) - goto err_out_abort; - desc_len = len; + while (tso_rem > 0) { + desc = NULL; + elem = NULL; + desc_addr = 0; + desc_len = 0; desc_nsge = 0; - left -= len; - offset += len; - if (nfrags > 0 && frag_left > 0) - continue; - done = (nfrags == 0 && left == 0); - ionic_tx_tso_post(q, desc, skb, - desc_addr, desc_nsge, desc_len, - hdrlen, mss, - outer_csum, - vlan_tci, has_vlan, - start, done); - total_pkts++; - total_bytes += start ? 
len : len + hdrlen; - desc = ionic_tx_tso_next(q, &elem); - start = false; - seglen = mss; - } - - /* Chop skb frags into desc segments */ - - for (frag = skb_shinfo(skb)->frags; len_left; frag++) { - offset = 0; - left = skb_frag_size(frag); - len_left -= left; - nfrags--; - stats->frags++; - - while (left > 0) { - if (frag_left > 0) { - len = min(frag_left, left); - frag_left -= len; - addr = ionic_tx_map_frag(q, frag, offset, len); - if (dma_mapping_error(dev, addr)) - goto err_out_abort; - elem->addr = cpu_to_le64(addr); - elem->len = cpu_to_le16(len); + /* use fragments until we have enough to post a single descriptor */ + while (seg_rem > 0) { + /* if the fragment is exhausted then move to the next one */ + if (frag_rem == 0) { + /* grab the next fragment */ + frag_addr = buf_info->dma_addr; + frag_rem = buf_info->len; + buf_info++; + } + chunk_len = min(frag_rem, seg_rem); + if (!desc) { + /* fill main descriptor */ + desc = desc_info->txq_desc; + elem = desc_info->txq_sg_desc->elems; + desc_addr = frag_addr; + desc_len = chunk_len; + } else { + /* fill sg descriptor */ + elem->addr = cpu_to_le64(frag_addr); + elem->len = cpu_to_le16(chunk_len); elem++; desc_nsge++; - left -= len; - offset += len; - if (nfrags > 0 && frag_left > 0) - continue; - done = (nfrags == 0 && left == 0); - ionic_tx_tso_post(q, desc, skb, desc_addr, - desc_nsge, desc_len, - hdrlen, mss, outer_csum, - vlan_tci, has_vlan, - start, done); - total_pkts++; - total_bytes += start ? len : len + hdrlen; - desc = ionic_tx_tso_next(q, &elem); - start = false; - } else { - len = min(mss, left); - frag_left = mss - len; - desc_addr = ionic_tx_map_frag(q, frag, - offset, len); - if (dma_mapping_error(dev, desc_addr)) - goto err_out_abort; - desc_len = len; - desc_nsge = 0; - left -= len; - offset += len; - if (nfrags > 0 && frag_left > 0) - continue; - done = (nfrags == 0 && left == 0); - ionic_tx_tso_post(q, desc, skb, desc_addr, - desc_nsge, desc_len, - hdrlen, mss, outer_csum, - vlan_tci, has_vlan, - start, done); - total_pkts++; - total_bytes += start ? 
len : len + hdrlen; - desc = ionic_tx_tso_next(q, &elem); - start = false; } + frag_addr += chunk_len; + frag_rem -= chunk_len; + tso_rem -= chunk_len; + seg_rem -= chunk_len; } + seg_rem = min(tso_rem, mss); + done = (tso_rem == 0); + /* post descriptor */ + ionic_tx_tso_post(q, desc, skb, + desc_addr, desc_nsge, desc_len, + hdrlen, mss, outer_csum, vlan_tci, has_vlan, + start, done); + start = false; + /* Buffer information is stored with the first tso descriptor */ + desc_info = &q->info[q->head_idx]; + desc_info->nbufs = 0; } - stats->pkts += total_pkts; - stats->bytes += total_bytes; + stats->pkts += DIV_ROUND_UP(len - hdrlen, mss); + stats->bytes += len; stats->tso++; - stats->tso_bytes += total_bytes; + stats->tso_bytes = len; return 0; - -err_out_abort: - while (rewind != q->head_idx) { - rewind_desc_info = &q->info[rewind]; - ionic_tx_clean(q, rewind_desc_info, NULL, NULL); - rewind = (rewind + 1) & (q->num_descs - 1); - } - q->head_idx = abort; - - return -ENOMEM; } -static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb) +static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb, + struct ionic_desc_info *desc_info) { - struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc; + struct ionic_txq_desc *desc = desc_info->txq_desc; + struct ionic_buf_info *buf_info = desc_info->bufs; struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct device *dev = q->lif->ionic->dev; - dma_addr_t dma_addr; bool has_vlan; u8 flags = 0; bool encap; @@ -961,23 +968,22 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb) has_vlan = !!skb_vlan_tag_present(skb); encap = skb->encapsulation; - dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); - if (dma_mapping_error(dev, dma_addr)) - return -ENOMEM; - flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL, - flags, skb_shinfo(skb)->nr_frags, dma_addr); + flags, skb_shinfo(skb)->nr_frags, + buf_info->dma_addr); desc->cmd = cpu_to_le64(cmd); - desc->len = cpu_to_le16(skb_headlen(skb)); - desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb)); - desc->csum_offset = cpu_to_le16(skb->csum_offset); + desc->len = cpu_to_le16(buf_info->len); if (has_vlan) { desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); stats->vlan_inserted++; + } else { + desc->vlan_tci = 0; } + desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb)); + desc->csum_offset = cpu_to_le16(skb->csum_offset); if (skb_csum_is_sctp(skb)) stats->crc32_csum++; @@ -987,12 +993,12 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb) return 0; } -static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb) +static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb, + struct ionic_desc_info *desc_info) { - struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc; + struct ionic_txq_desc *desc = desc_info->txq_desc; + struct ionic_buf_info *buf_info = desc_info->bufs; struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct device *dev = q->lif->ionic->dev; - dma_addr_t dma_addr; bool has_vlan; u8 flags = 0; bool encap; @@ -1001,67 +1007,66 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb) has_vlan = !!skb_vlan_tag_present(skb); encap = skb->encapsulation; - dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); - if (dma_mapping_error(dev, dma_addr)) - return -ENOMEM; - flags |= has_vlan ? 
IONIC_TXQ_DESC_FLAG_VLAN : 0; flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE, - flags, skb_shinfo(skb)->nr_frags, dma_addr); + flags, skb_shinfo(skb)->nr_frags, + buf_info->dma_addr); desc->cmd = cpu_to_le64(cmd); - desc->len = cpu_to_le16(skb_headlen(skb)); + desc->len = cpu_to_le16(buf_info->len); if (has_vlan) { desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); stats->vlan_inserted++; + } else { + desc->vlan_tci = 0; } + desc->csum_start = 0; + desc->csum_offset = 0; stats->csum_none++; return 0; } -static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb) +static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb, + struct ionic_desc_info *desc_info) { - struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc; - unsigned int len_left = skb->len - skb_headlen(skb); + struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc; + struct ionic_buf_info *buf_info = &desc_info->bufs[1]; struct ionic_txq_sg_elem *elem = sg_desc->elems; struct ionic_tx_stats *stats = q_to_tx_stats(q); - struct device *dev = q->lif->ionic->dev; - dma_addr_t dma_addr; - skb_frag_t *frag; - u16 len; + unsigned int i; - for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) { - len = skb_frag_size(frag); - elem->len = cpu_to_le16(len); - dma_addr = ionic_tx_map_frag(q, frag, 0, len); - if (dma_mapping_error(dev, dma_addr)) - return -ENOMEM; - elem->addr = cpu_to_le64(dma_addr); - len_left -= len; - stats->frags++; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) { + elem->addr = cpu_to_le64(buf_info->dma_addr); + elem->len = cpu_to_le16(buf_info->len); } + stats->frags += skb_shinfo(skb)->nr_frags; + return 0; } static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) { + struct ionic_desc_info *desc_info = &q->info[q->head_idx]; struct ionic_tx_stats *stats = q_to_tx_stats(q); int err; + if (unlikely(ionic_tx_map_skb(q, skb, desc_info))) + return -EIO; + /* set up the initial descriptor */ if (skb->ip_summed == CHECKSUM_PARTIAL) - err = ionic_tx_calc_csum(q, skb); + err = ionic_tx_calc_csum(q, skb, desc_info); else - err = ionic_tx_calc_no_csum(q, skb); + err = ionic_tx_calc_no_csum(q, skb, desc_info); if (err) return err; /* add frags */ - err = ionic_tx_skb_frags(q, skb); + err = ionic_tx_skb_frags(q, skb, desc_info); if (err) return err; @@ -1077,17 +1082,19 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) { - int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems; struct ionic_tx_stats *stats = q_to_tx_stats(q); + int ndescs; int err; - /* If TSO, need roundup(skb->len/mss) descs */ + /* Each desc is mss long max, so a descriptor for each gso_seg */ if (skb_is_gso(skb)) - return (skb->len / skb_shinfo(skb)->gso_size) + 1; + ndescs = skb_shinfo(skb)->gso_segs; + else + ndescs = 1; /* If non-TSO, just need 1 desc and nr_frags sg elems */ - if (skb_shinfo(skb)->nr_frags <= sg_elems) - return 1; + if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems) + return ndescs; /* Too many frags, so linearize */ err = skb_linearize(skb); @@ -1096,8 +1103,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) stats->linearize++; - /* Need 1 desc and zero sg elems */ - return 1; + return ndescs; } static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 
07824bf9d68d..dfaf10edfabf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -396,6 +396,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, tpa_param->tpa_ipv6_en_flg = 1; tpa_param->tpa_pkt_split_flg = 1; tpa_param->tpa_gro_consistent_flg = 1; + break; default: break; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index b8dc5c4591ef..ed2b6fe5a78d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4734,6 +4734,7 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn) */ link.speed = (hwfn->cdev->num_hwfns > 1) ? 100000 : 40000; + break; default: /* In auto mode pass PF link image to VF */ break; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 7760a3394e93..7ecb3dfe30bd 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1425,6 +1425,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { vfree(fw_dump->tmpl_hdr); + fw_dump->tmpl_hdr = NULL; if (qlcnic_83xx_md_check_extended_dump_capability(adapter)) extended = !qlcnic_83xx_extend_md_capab(adapter); @@ -1443,6 +1444,8 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) struct qlcnic_83xx_dump_template_hdr *hdr; hdr = fw_dump->tmpl_hdr; + if (!hdr) + return; hdr->drv_cap_mask = 0x1f; fw_dump->cap_mask = 0x1f; dev_info(&pdev->dev, diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 3d00b3232308..0be5ac7ab261 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -56,20 +56,22 @@ static void __rmnet_map_ingress_handler(struct sk_buff *skb, struct rmnet_port *port) { + struct rmnet_map_header *map_header = (void *)skb->data; struct rmnet_endpoint *ep; u16 len, pad; u8 mux_id; - if (RMNET_MAP_GET_CD_BIT(skb)) { + if (map_header->flags & MAP_CMD_FLAG) { + /* Packet contains a MAP command (not data) */ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS) return rmnet_map_command(skb, port); goto free_skb; } - mux_id = RMNET_MAP_GET_MUX_ID(skb); - pad = RMNET_MAP_GET_PAD(skb); - len = RMNET_MAP_GET_LENGTH(skb) - pad; + mux_id = map_header->mux_id; + pad = map_header->flags & MAP_PAD_LEN_MASK; + len = ntohs(map_header->pkt_len) - pad; if (mux_id >= RMNET_MAX_LOGICAL_EP) goto free_skb; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h index 576501db2a0b..2aea153f4247 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h @@ -32,18 +32,6 @@ enum rmnet_map_commands { RMNET_MAP_COMMAND_ENUM_LENGTH }; -#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \ - (Y)->data)->mux_id) -#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \ - (Y)->data)->cd_bit) -#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \ - (Y)->data)->pad_len) -#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \ - ((Y)->data + \ - sizeof(struct rmnet_map_header))) -#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \ - (Y)->data)->pkt_len)) - #define RMNET_MAP_COMMAND_REQUEST 0 #define RMNET_MAP_COMMAND_ACK 1 #define 
RMNET_MAP_COMMAND_UNSUPPORTED 2 diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index beaee4962128..add0f5ade2e6 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -12,12 +12,13 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb, struct rmnet_port *port, int enable) { + struct rmnet_map_header *map_header = (void *)skb->data; struct rmnet_endpoint *ep; struct net_device *vnd; u8 mux_id; int r; - mux_id = RMNET_MAP_GET_MUX_ID(skb); + mux_id = map_header->mux_id; if (mux_id >= RMNET_MAX_LOGICAL_EP) { kfree_skb(skb); @@ -49,6 +50,7 @@ static void rmnet_map_send_ack(struct sk_buff *skb, unsigned char type, struct rmnet_port *port) { + struct rmnet_map_header *map_header = (void *)skb->data; struct rmnet_map_control_command *cmd; struct net_device *dev = skb->dev; @@ -58,7 +60,8 @@ static void rmnet_map_send_ack(struct sk_buff *skb, skb->protocol = htons(ETH_P_MAP); - cmd = RMNET_MAP_GET_CMD_START(skb); + /* Command data immediately follows the MAP header */ + cmd = (struct rmnet_map_control_command *)(map_header + 1); cmd->cmd_type = type & 0x03; netif_tx_lock(dev); @@ -71,11 +74,13 @@ static void rmnet_map_send_ack(struct sk_buff *skb, */ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port) { + struct rmnet_map_header *map_header = (void *)skb->data; struct rmnet_map_control_command *cmd; unsigned char command_name; unsigned char rc = 0; - cmd = RMNET_MAP_GET_CMD_START(skb); + /* Command data immediately follows the MAP header */ + cmd = (struct rmnet_map_control_command *)(map_header + 1); command_name = cmd->command_name; switch (command_name) { diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index 21d38167f961..0ac2ff828320 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -197,22 +197,16 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr, struct rmnet_map_ul_csum_header *ul_header, struct sk_buff *skb) { - struct iphdr *ip4h = (struct iphdr *)iphdr; - __be16 *hdr = (__be16 *)ul_header, offset; + struct iphdr *ip4h = iphdr; + u16 val; - offset = htons((__force u16)(skb_transport_header(skb) - - (unsigned char *)iphdr)); - ul_header->csum_start_offset = offset; - ul_header->csum_insert_offset = skb->csum_offset; - ul_header->csum_enabled = 1; + val = MAP_CSUM_UL_ENABLED_FLAG; if (ip4h->protocol == IPPROTO_UDP) - ul_header->udp_ind = 1; - else - ul_header->udp_ind = 0; + val |= MAP_CSUM_UL_UDP_FLAG; + val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK; - /* Changing remaining fields to network order */ - hdr++; - *hdr = htons((__force u16)*hdr); + ul_header->csum_start_offset = htons(skb_network_header_len(skb)); + ul_header->csum_info = htons(val); skb->ip_summed = CHECKSUM_NONE; @@ -239,23 +233,16 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr, struct rmnet_map_ul_csum_header *ul_header, struct sk_buff *skb) { - struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr; - __be16 *hdr = (__be16 *)ul_header, offset; - - offset = htons((__force u16)(skb_transport_header(skb) - - (unsigned char *)ip6hdr)); - ul_header->csum_start_offset = offset; - ul_header->csum_insert_offset = skb->csum_offset; - ul_header->csum_enabled = 1; + struct ipv6hdr *ip6h = ip6hdr; + u16 val; + val = MAP_CSUM_UL_ENABLED_FLAG; if (ip6h->nexthdr == IPPROTO_UDP) - ul_header->udp_ind = 1; - else - 
ul_header->udp_ind = 0; + val |= MAP_CSUM_UL_UDP_FLAG; + val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK; - /* Changing remaining fields to network order */ - hdr++; - *hdr = htons((__force u16)*hdr); + ul_header->csum_start_offset = htons(skb_network_header_len(skb)); + ul_header->csum_info = htons(val); skb->ip_summed = CHECKSUM_NONE; @@ -284,6 +271,7 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, return map_header; } + BUILD_BUG_ON(MAP_PAD_LEN_MASK < 3); padding = ALIGN(map_datalen, 4) - map_datalen; if (padding == 0) @@ -297,7 +285,8 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, done: map_header->pkt_len = htons(map_datalen + padding); - map_header->pad_len = padding & 0x3F; + /* This is a data packet, so the CMD bit is 0 */ + map_header->flags = padding & MAP_PAD_LEN_MASK; return map_header; } @@ -319,7 +308,7 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, return NULL; maph = (struct rmnet_map_header *)skb->data; - packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header); + packet_len = ntohs(maph->pkt_len) + sizeof(*maph); if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) packet_len += sizeof(struct rmnet_map_dl_csum_trailer); @@ -328,7 +317,7 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, return NULL; /* Some hardware can send us empty frames. Catch them */ - if (ntohs(maph->pkt_len) == 0) + if (!maph->pkt_len) return NULL; skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC); @@ -361,7 +350,7 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len) csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len); - if (!csum_trailer->valid) { + if (!(csum_trailer->flags & MAP_CSUM_DL_VALID_FLAG)) { priv->stats.csum_valid_unset++; return -EINVAL; } @@ -421,10 +410,7 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, } sw_csum: - ul_header->csum_start_offset = 0; - ul_header->csum_insert_offset = 0; - ul_header->csum_enabled = 0; - ul_header->udp_ind = 0; + memset(ul_header, 0, sizeof(*ul_header)); priv->stats.csum_sw++; } diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 7aad0ba53372..7a8bb7e833f3 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1586,12 +1586,10 @@ DECLARE_RTL_COND(rtl_counters_cond) static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) { - dma_addr_t paddr = tp->counters_phys_addr; - u32 cmd; + u32 cmd = lower_32_bits(tp->counters_phys_addr); - RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); + RTL_W32(tp, CounterAddrHigh, upper_32_bits(tp->counters_phys_addr)); rtl_pci_commit(tp); - cmd = (u64)paddr & DMA_BIT_MASK(32); RTL_W32(tp, CounterAddrLow, cmd); RTL_W32(tp, CounterAddrLow, cmd | counter_cmd); @@ -1903,6 +1901,15 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) return ret; } +static void rtl8169_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *data) +{ + data->rx_max_pending = NUM_RX_DESC; + data->rx_pending = NUM_RX_DESC; + data->tx_max_pending = NUM_TX_DESC; + data->tx_pending = NUM_TX_DESC; +} + static const struct ethtool_ops rtl8169_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, @@ -1923,6 +1930,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .set_eee = rtl8169_set_eee, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = 
phy_ethtool_set_link_ksettings, + .get_ringparam = rtl8169_get_ringparam, }; static void rtl_enable_eee(struct rtl8169_private *tp) @@ -4646,6 +4654,9 @@ static void rtl8169_down(struct rtl8169_private *tp) rtl8169_update_counters(tp); + pci_clear_master(tp->pci_dev); + rtl_pci_commit(tp); + rtl8169_cleanup(tp, true); rtl_prepare_power_down(tp); @@ -4653,6 +4664,7 @@ static void rtl8169_down(struct rtl8169_private *tp) static void rtl8169_up(struct rtl8169_private *tp) { + pci_set_master(tp->pci_dev); phy_resume(tp->phydev); rtl8169_init_phy(tp); napi_enable(&tp->napi); @@ -5307,8 +5319,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl_hw_reset(tp); - pci_set_master(pdev); - rc = rtl_alloc_irq(tp); if (rc < 0) { dev_err(&pdev->dev, "Can't allocate interrupt\n"); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f029c7c03804..ebedb1a11132 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -560,7 +560,7 @@ static struct sh_eth_cpu_data r7s72100_data = { EESR_TDE, .fdr_value = 0x0000070f, - .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5, + .trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE, .no_psr = 1, .apr = 1, @@ -701,7 +701,7 @@ static struct sh_eth_cpu_data rcar_gen2_data = { EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .fdr_value = 0x00000f0f, - .trscer_err_mask = DESC_I_RINT8, + .trscer_err_mask = TRSCER_RMAFCE, .apr = 1, .mpr = 1, @@ -782,7 +782,7 @@ static struct sh_eth_cpu_data r7s9210_data = { .fdr_value = 0x0000070f, - .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5, + .trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE, .apr = 1, .mpr = 1, @@ -1094,7 +1094,7 @@ static struct sh_eth_cpu_data sh771x_data = { EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, - .trscer_err_mask = DESC_I_RINT8, + .trscer_err_mask = TRSCER_RMAFCE, .tsu = 1, .dual_port = 1, @@ -1749,7 +1749,7 @@ static void sh_eth_emac_interrupt(struct net_device *ndev) link_stat = sh_eth_read(ndev, PSR); if (mdp->ether_link_active_low) link_stat = ~link_stat; - if (!(link_stat & PHY_ST_LINK)) { + if (!(link_stat & PSR_LMON)) { sh_eth_rcv_snd_disable(ndev); } else { /* Link Up */ diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index c1b3751b12c4..a5c07c6ff44a 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -171,7 +171,7 @@ enum GECMR_BIT { }; /* EDMR */ -enum DMAC_M_BIT { +enum EDMR_BIT { EDMR_NBST = 0x80, EDMR_EL = 0x40, /* Litte endian */ EDMR_DL1 = 0x20, EDMR_DL0 = 0x10, @@ -180,13 +180,13 @@ enum DMAC_M_BIT { }; /* EDTRR */ -enum DMAC_T_BIT { +enum EDTRR_BIT { EDTRR_TRNS_GETHER = 0x03, EDTRR_TRNS_ETHER = 0x01, }; /* EDRRR */ -enum EDRRR_R_BIT { +enum EDRRR_BIT { EDRRR_R = 0x01, }; @@ -208,7 +208,7 @@ enum PIR_BIT { }; /* PSR */ -enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, }; +enum PSR_BIT { PSR_LMON = 0x01, }; /* EESR */ enum EESR_BIT { @@ -288,27 +288,6 @@ enum EESIPR_BIT { EESIPR_CERFIP = 0x00000001, }; -/* Receive descriptor 0 bits */ -enum RD_STS_BIT { - RD_RACT = 0x80000000, RD_RDLE = 0x40000000, - RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000, - RD_RFE = 0x08000000, RD_RFS10 = 0x00000200, - RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080, - RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020, - RD_RFS5 = 0x00000010, RD_RFS4 = 0x00000008, - RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002, - RD_RFS1 = 0x00000001, -}; -#define RDF1ST RD_RFP1 -#define RDFEND RD_RFP0 -#define RD_RFP 
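
The rtl8169_do_counters() hunk above swaps open-coded shifts for upper_32_bits()/lower_32_bits() when programming a 64-bit DMA address into two 32-bit registers; a standalone model of the split (address value illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
    static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

    int main(void)
    {
        uint64_t paddr = 0x0000000123456780ULL;  /* counters DMA address */

        printf("high=0x%08x low=0x%08x\n",
               (unsigned)upper_32(paddr), (unsigned)lower_32(paddr));
        return 0;  /* prints high=0x00000001 low=0x23456780 */
    }
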
(RD_RFP1|RD_RFP0) - -/* Receive descriptor 1 bits */ -enum RD_LEN_BIT { - RD_RFL = 0x0000ffff, /* receive frame length */ - RD_RBL = 0xffff0000, /* receive buffer length */ -}; - /* FCFTR */ enum FCFTR_BIT { FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000, @@ -318,28 +297,13 @@ enum FCFTR_BIT { #define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0) #define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0) -/* Transmit descriptor 0 bits */ -enum TD_STS_BIT { - TD_TACT = 0x80000000, TD_TDLE = 0x40000000, - TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000, - TD_TFE = 0x08000000, TD_TWBI = 0x04000000, -}; -#define TDF1ST TD_TFP1 -#define TDFEND TD_TFP0 -#define TD_TFP (TD_TFP1|TD_TFP0) - -/* Transmit descriptor 1 bits */ -enum TD_LEN_BIT { - TD_TBL = 0xffff0000, /* transmit buffer length */ -}; - /* RMCR */ enum RMCR_BIT { RMCR_RNC = 0x00000001, }; /* ECMR */ -enum FELIC_MODE_BIT { +enum ECMR_BIT { ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000, ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000, ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000, @@ -350,7 +314,7 @@ enum FELIC_MODE_BIT { }; /* ECSR */ -enum ECSR_STATUS_BIT { +enum ECSR_BIT { ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10, ECSR_LCHNG = 0x04, ECSR_MPD = 0x02, ECSR_ICD = 0x01, @@ -360,7 +324,7 @@ enum ECSR_STATUS_BIT { ECSR_ICD | ECSIPR_MPDIP) /* ECSIPR */ -enum ECSIPR_STATUS_MASK_BIT { +enum ECSIPR_BIT { ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10, ECSIPR_LCHNGIP = 0x04, ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01, @@ -380,14 +344,20 @@ enum MPR_BIT { }; /* TRSCER */ -enum DESC_I_BIT { - DESC_I_TINT4 = 0x0800, DESC_I_TINT3 = 0x0400, DESC_I_TINT2 = 0x0200, - DESC_I_TINT1 = 0x0100, DESC_I_RINT8 = 0x0080, DESC_I_RINT5 = 0x0010, - DESC_I_RINT4 = 0x0008, DESC_I_RINT3 = 0x0004, DESC_I_RINT2 = 0x0002, - DESC_I_RINT1 = 0x0001, +enum TRSCER_BIT { + TRSCER_CNDCE = 0x00000800, + TRSCER_DLCCE = 0x00000400, + TRSCER_CDCE = 0x00000200, + TRSCER_TROCE = 0x00000100, + TRSCER_RMAFCE = 0x00000080, + TRSCER_RRFCE = 0x00000010, + TRSCER_RTLFCE = 0x00000008, + TRSCER_RTSFCE = 0x00000004, + TRSCER_PRECE = 0x00000002, + TRSCER_CERFCE = 0x00000001, }; -#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2) +#define DEFAULT_TRSCER_ERR_MASK (TRSCER_RMAFCE | TRSCER_RRFCE | TRSCER_CDCE) /* RPADIR */ enum RPADIR_BIT { @@ -445,6 +415,24 @@ struct sh_eth_txdesc { u32 pad0; /* padding data */ } __aligned(2) __packed; +/* Transmit descriptor 0 bits */ +enum TD_STS_BIT { + TD_TACT = 0x80000000, + TD_TDLE = 0x40000000, + TD_TFP1 = 0x20000000, + TD_TFP0 = 0x10000000, + TD_TFE = 0x08000000, + TD_TWBI = 0x04000000, +}; +#define TDF1ST TD_TFP1 +#define TDFEND TD_TFP0 +#define TD_TFP (TD_TFP1 | TD_TFP0) + +/* Transmit descriptor 1 bits */ +enum TD_LEN_BIT { + TD_TBL = 0xffff0000, /* transmit buffer length */ +}; + /* The sh ether Rx buffer descriptors. * This structure should be 20 bytes. 
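
The TRSCER rename above is behavior-neutral: the new per-function names keep the old DESC_I_* bit values, which a quick standalone check against the constants visible in the hunks confirms:

    #include <assert.h>

    #define DESC_I_RINT8  0x0080      /* old name */
    #define DESC_I_RINT5  0x0010
    #define TRSCER_RMAFCE 0x00000080  /* new name, same bit */
    #define TRSCER_RRFCE  0x00000010

    int main(void)
    {
        assert(TRSCER_RMAFCE == DESC_I_RINT8);
        assert(TRSCER_RRFCE == DESC_I_RINT5);
        return 0;
    }
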
*/ @@ -455,6 +443,34 @@ struct sh_eth_rxdesc { u32 pad0; /* padding data */ } __aligned(2) __packed; +/* Receive descriptor 0 bits */ +enum RD_STS_BIT { + RD_RACT = 0x80000000, + RD_RDLE = 0x40000000, + RD_RFP1 = 0x20000000, + RD_RFP0 = 0x10000000, + RD_RFE = 0x08000000, + RD_RFS10 = 0x00000200, + RD_RFS9 = 0x00000100, + RD_RFS8 = 0x00000080, + RD_RFS7 = 0x00000040, + RD_RFS6 = 0x00000020, + RD_RFS5 = 0x00000010, + RD_RFS4 = 0x00000008, + RD_RFS3 = 0x00000004, + RD_RFS2 = 0x00000002, + RD_RFS1 = 0x00000001, +}; +#define RDF1ST RD_RFP1 +#define RDFEND RD_RFP0 +#define RD_RFP (RD_RFP1 | RD_RFP0) + +/* Receive descriptor 1 bits */ +enum RD_LEN_BIT { + RD_RFL = 0x0000ffff, /* receive frame length */ + RD_RBL = 0xffff0000, /* receive buffer length */ +}; + /* This structure is used by each CPU dependency handling. */ struct sh_eth_cpu_data { /* mandatory functions */ diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h index a529ff395ead..a381cf9ec4f3 100644 --- a/drivers/net/ethernet/sfc/falcon/net_driver.h +++ b/drivers/net/ethernet/sfc/falcon/net_driver.h @@ -637,7 +637,7 @@ union ef4_multicast_hash { * struct ef4_nic - an Efx NIC * @name: Device name (net device name or bus id before net device registered) * @pci_dev: The PCI device - * @node: List node for maintaning primary/secondary function lists + * @node: List node for maintaining primary/secondary function lists * @primary: &struct ef4_nic instance for the primary function of this * controller. May be the same structure, and may be %NULL if no * primary function is bound. Serialised by rtnl_lock. diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index b9449cf36e31..dfc85cc68173 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1715,14 +1715,17 @@ static int netsec_netdev_init(struct net_device *ndev) goto err1; /* set phy power down */ - data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) | - BMCR_PDOWN; - netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); + data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR); + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, + data | BMCR_PDOWN); ret = netsec_reset_hardware(priv, true); if (ret) goto err2; + /* Restore phy power state */ + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); + spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock); spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 6f271c46368d..d065b11b7b10 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -182,6 +182,12 @@ struct stmmac_extra_stats { /* TSO */ unsigned long tx_tso_frames; unsigned long tx_tso_nfrags; + /* EST */ + unsigned long mtl_est_cgce; + unsigned long mtl_est_hlbs; + unsigned long mtl_est_hlbf; + unsigned long mtl_est_btre; + unsigned long mtl_est_btrlm; }; /* Safety Feature statistics exposed by ethtool */ @@ -309,6 +315,13 @@ enum dma_irq_status { #define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) #define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) +/* FPE defines */ +#define FPE_EVENT_UNKNOWN 0 +#define FPE_EVENT_TRSP BIT(0) +#define FPE_EVENT_TVER BIT(1) +#define FPE_EVENT_RRSP BIT(2) +#define FPE_EVENT_RVER BIT(3) + #define CORE_IRQ_MTL_RX_OVERFLOW BIT(8) /* Physical Coding Sublayer */ @@ -382,6 +395,8 @@ struct dma_features { 
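
As a worked example of the RD_RFL/RD_RBL split reinstated above (frame length in the low half-word, buffer length in the high one), a standalone decode of an Rx descriptor word:

    #include <stdint.h>
    #include <stdio.h>

    #define RD_RFL 0x0000ffffu  /* receive frame length */
    #define RD_RBL 0xffff0000u  /* receive buffer length */

    int main(void)
    {
        uint32_t rd1 = (1536u << 16) | 342u;  /* buffer 1536, frame 342 */

        printf("frame=%u buffer=%u\n",
               (unsigned)(rd1 & RD_RFL), (unsigned)((rd1 & RD_RBL) >> 16));
        return 0;
    }
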
unsigned int estsel; unsigned int fpesel; unsigned int tbssel; + /* Numbers of Auxiliary Snapshot Inputs */ + unsigned int aux_snapshot_n; }; /* RX Buffer size must be multiple of 4/8/16 bytes */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index 223f69da7e95..c1a361305a5a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -90,6 +90,32 @@ imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat) return ret; } +static int imx_dwmac_clks_config(void *priv, bool enabled) +{ + struct imx_priv_data *dwmac = priv; + int ret = 0; + + if (enabled) { + ret = clk_prepare_enable(dwmac->clk_mem); + if (ret) { + dev_err(dwmac->dev, "mem clock enable failed\n"); + return ret; + } + + ret = clk_prepare_enable(dwmac->clk_tx); + if (ret) { + dev_err(dwmac->dev, "tx clock enable failed\n"); + clk_disable_unprepare(dwmac->clk_mem); + return ret; + } + } else { + clk_disable_unprepare(dwmac->clk_tx); + clk_disable_unprepare(dwmac->clk_mem); + } + + return ret; +} + static int imx_dwmac_init(struct platform_device *pdev, void *priv) { struct plat_stmmacenet_data *plat_dat; @@ -98,39 +124,18 @@ static int imx_dwmac_init(struct platform_device *pdev, void *priv) plat_dat = dwmac->plat_dat; - ret = clk_prepare_enable(dwmac->clk_mem); - if (ret) { - dev_err(&pdev->dev, "mem clock enable failed\n"); - return ret; - } - - ret = clk_prepare_enable(dwmac->clk_tx); - if (ret) { - dev_err(&pdev->dev, "tx clock enable failed\n"); - goto clk_tx_en_failed; - } - if (dwmac->ops->set_intf_mode) { ret = dwmac->ops->set_intf_mode(plat_dat); if (ret) - goto intf_mode_failed; + return ret; } return 0; - -intf_mode_failed: - clk_disable_unprepare(dwmac->clk_tx); -clk_tx_en_failed: - clk_disable_unprepare(dwmac->clk_mem); - return ret; } static void imx_dwmac_exit(struct platform_device *pdev, void *priv) { - struct imx_priv_data *dwmac = priv; - - clk_disable_unprepare(dwmac->clk_tx); - clk_disable_unprepare(dwmac->clk_mem); + /* nothing to do now */ } static void imx_dwmac_fix_speed(void *priv, unsigned int speed) @@ -249,10 +254,15 @@ static int imx_dwmac_probe(struct platform_device *pdev) plat_dat->addr64 = dwmac->ops->addr_width; plat_dat->init = imx_dwmac_init; plat_dat->exit = imx_dwmac_exit; + plat_dat->clks_config = imx_dwmac_clks_config; plat_dat->fix_mac_speed = imx_dwmac_fix_speed; plat_dat->bsp_priv = dwmac; dwmac->plat_dat = plat_dat; + ret = imx_dwmac_clks_config(dwmac, true); + if (ret) + goto err_clks_config; + ret = imx_dwmac_init(pdev, dwmac); if (ret) goto err_dwmac_init; @@ -263,9 +273,11 @@ static int imx_dwmac_probe(struct platform_device *pdev) return 0; -err_dwmac_init: err_drv_probe: imx_dwmac_exit(pdev, plat_dat->bsp_priv); +err_dwmac_init: + imx_dwmac_clks_config(dwmac, false); +err_clks_config: err_parse_dt: err_match_data: stmmac_remove_config_dt(pdev, plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 0b64f7710d17..992294d25706 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -8,9 +8,23 @@ #include "dwmac-intel.h" #include "dwmac4.h" #include "stmmac.h" +#include "stmmac_ptp.h" + +#define INTEL_MGBE_ADHOC_ADDR 0x15 +#define INTEL_MGBE_XPCS_ADDR 0x16 + +/* Selection for PTP Clock Freq belongs to PSE & PCH GbE */ +#define PSE_PTP_CLK_FREQ_MASK (GMAC_GPO0 | GMAC_GPO3) +#define PSE_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0) 
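
imx_dwmac_clks_config() above follows the usual enable-in-order, unwind-in-reverse clock idiom; a hedged kernel-style sketch of just that pattern (helper name illustrative, not the driver's API):

    #include <linux/clk.h>

    static int clks_on(struct clk *mem, struct clk *tx)
    {
        int ret = clk_prepare_enable(mem);

        if (ret)
            return ret;

        ret = clk_prepare_enable(tx);
        if (ret)
            clk_disable_unprepare(mem);  /* undo the step that succeeded */

        return ret;
    }
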
+#define PSE_PTP_CLK_FREQ_200MHZ (GMAC_GPO0 | GMAC_GPO3) +#define PSE_PTP_CLK_FREQ_256MHZ (0) +#define PCH_PTP_CLK_FREQ_MASK (GMAC_GPO0) +#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0) +#define PCH_PTP_CLK_FREQ_200MHZ (0) struct intel_priv_data { int mdio_adhoc_addr; /* mdio address for serdes & etc */ + bool is_pse; }; /* This struct is used to associate PCI Function of MAC controller on a board, @@ -201,6 +215,134 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data) } } +/* Program PTP Clock Frequency for different variant of + * Intel mGBE that has slightly different GPO mapping + */ +static void intel_mgbe_ptp_clk_freq_config(void *npriv) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)npriv; + struct intel_priv_data *intel_priv; + u32 gpio_value; + + intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv; + + gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS); + + if (intel_priv->is_pse) { + /* For PSE GbE, use 200MHz */ + gpio_value &= ~PSE_PTP_CLK_FREQ_MASK; + gpio_value |= PSE_PTP_CLK_FREQ_200MHZ; + } else { + /* For PCH GbE, use 200MHz */ + gpio_value &= ~PCH_PTP_CLK_FREQ_MASK; + gpio_value |= PCH_PTP_CLK_FREQ_200MHZ; + } + + writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS); +} + +static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr, + u64 *art_time) +{ + u64 ns; + + ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0); + + *art_time = ns; +} + +static int intel_crosststamp(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct intel_priv_data *intel_priv; + + struct stmmac_priv *priv = (struct stmmac_priv *)ctx; + void __iomem *ptpaddr = priv->ptpaddr; + void __iomem *ioaddr = priv->hw->pcsr; + unsigned long flags; + u64 art_time = 0; + u64 ptp_time = 0; + u32 num_snapshot; + u32 gpio_value; + u32 acr_value; + int ret; + u32 v; + int i; + + if (!boot_cpu_has(X86_FEATURE_ART)) + return -EOPNOTSUPP; + + intel_priv = priv->plat->bsp_priv; + + /* Enable Internal snapshot trigger */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + switch (priv->plat->int_snapshot_num) { + case AUX_SNAPSHOT0: + acr_value |= PTP_ACR_ATSEN0; + break; + case AUX_SNAPSHOT1: + acr_value |= PTP_ACR_ATSEN1; + break; + case AUX_SNAPSHOT2: + acr_value |= PTP_ACR_ATSEN2; + break; + case AUX_SNAPSHOT3: + acr_value |= PTP_ACR_ATSEN3; + break; + default: + return -EINVAL; + } + writel(acr_value, ptpaddr + PTP_ACR); + + /* Clear FIFO */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value |= PTP_ACR_ATSFC; + writel(acr_value, ptpaddr + PTP_ACR); + + /* Trigger Internal snapshot signal + * Create a rising edge by just toggle the GPO1 to low + * and back to high. 
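
get_arttime() above concatenates four 16-bit MDIO reads MSW-first into one 64-bit ART timestamp; a standalone model, assuming GMAC4_ART_TIME_SHIFT is the 16-bit shift the four PMC_ART_VALUE* reads imply:

    #include <stdint.h>
    #include <stdio.h>

    #define GMAC4_ART_TIME_SHIFT 16

    int main(void)
    {
        uint16_t v[4] = { 0x0001, 0x2345, 0x6789, 0xabcd };  /* VALUE3..VALUE0 */
        uint64_t ns = 0;

        for (int i = 0; i < 4; i++)
            ns = (ns << GMAC4_ART_TIME_SHIFT) | v[i];

        printf("art=0x%016llx\n", (unsigned long long)ns);
        return 0;  /* prints art=0x000123456789abcd */
    }
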
+ */ + gpio_value = readl(ioaddr + GMAC_GPIO_STATUS); + gpio_value &= ~GMAC_GPO1; + writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); + gpio_value |= GMAC_GPO1; + writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); + + /* Poll for time sync operation done */ + ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v, + (v & GMAC_INT_TSIE), 100, 10000); + + if (ret == -ETIMEDOUT) { + pr_err("%s: Wait for time sync operation timeout\n", __func__); + return ret; + } + + num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) & + GMAC_TIMESTAMP_ATSNS_MASK) >> + GMAC_TIMESTAMP_ATSNS_SHIFT; + + /* Repeat until the timestamps are from the FIFO last segment */ + for (i = 0; i < num_snapshot; i++) { + spin_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_ptptime(priv, ptpaddr, &ptp_time); + *device = ns_to_ktime(ptp_time); + spin_unlock_irqrestore(&priv->ptp_lock, flags); + get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time); + *system = convert_art_to_tsc(art_time); + } + + return 0; +} + static void common_default_data(struct plat_stmmacenet_data *plat) { plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ @@ -319,6 +461,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, return ret; } + plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config; + /* Set default value for multicast hash bins */ plat->multicast_filter_bins = HASH_TABLE_SIZE; @@ -333,6 +477,21 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, /* Use the last Rx queue */ plat->vlan_fail_q = plat->rx_queues_to_use - 1; + /* Intel mgbe SGMII interface uses pcs-xcps */ + if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) { + plat->mdio_bus_data->has_xpcs = true; + plat->mdio_bus_data->xpcs_an_inband = true; + } + + /* Ensure mdio bus scan skips intel serdes and pcs-xpcs */ + plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR; + plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR; + + plat->int_snapshot_num = AUX_SNAPSHOT1; + + plat->has_crossts = true; + plat->crosststamp = intel_crosststamp; + return 0; } @@ -378,8 +537,12 @@ static struct stmmac_pci_info ehl_rgmii1g_info = { static int ehl_pse0_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { + struct intel_priv_data *intel_priv = plat->bsp_priv; + + intel_priv->is_pse = true; plat->bus_id = 2; plat->addr64 = 32; + return ehl_common_data(pdev, plat); } @@ -410,8 +573,12 @@ static struct stmmac_pci_info ehl_pse0_sgmii1g_info = { static int ehl_pse1_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { + struct intel_priv_data *intel_priv = plat->bsp_priv; + + intel_priv->is_pse = true; plat->bus_id = 3; plat->addr64 = 32; + return ehl_common_data(pdev, plat); } @@ -664,7 +831,7 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, pci_set_master(pdev); plat->bsp_priv = intel_priv; - intel_priv->mdio_adhoc_addr = 0x15; + intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR; ret = info->setup(pdev, plat); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index bf3250e0e59c..749585fe6fc9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -352,6 +352,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) plat_dat->bsp_priv = gmac; plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; plat_dat->multicast_filter_bins = 0; + plat_dat->tx_fifo_size = 8192; + plat_dat->rx_fifo_size = 8192; err = stmmac_dvr_probe(&pdev->dev, plat_dat, 
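
The phy_mask lines above keep the MDIO bus scan away from the Intel ad-hoc and XPCS addresses; on an mii_bus, a set bit means "do not probe this address". A standalone check of the resulting mask value:

    #include <stdint.h>
    #include <stdio.h>

    #define INTEL_MGBE_ADHOC_ADDR 0x15
    #define INTEL_MGBE_XPCS_ADDR  0x16

    int main(void)
    {
        uint32_t mask = (1u << INTEL_MGBE_ADHOC_ADDR) |
                        (1u << INTEL_MGBE_XPCS_ADDR);

        printf("phy_mask=0x%08x\n", (unsigned)mask);
        return 0;  /* prints phy_mask=0x00600000 */
    }
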
&stmmac_res); if (err) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 6b75cf2603ff..e62efd166ec8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -1214,6 +1214,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) plat_dat->init = sun8i_dwmac_init; plat_dat->exit = sun8i_dwmac_exit; plat_dat->setup = sun8i_dwmac_setup; + plat_dat->tx_fifo_size = 4096; + plat_dat->rx_fifo_size = 16384; ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 2bac49b49f73..90383abafa66 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -255,7 +255,7 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr, } static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt, - u32 number_chan) + u32 queue) { writel(riwt, ioaddr + DMA_RX_WATCHDOG); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 82df91c130f7..462ca7ed095a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -42,6 +42,7 @@ #define GMAC_HW_FEATURE3 0x00000128 #define GMAC_MDIO_ADDR 0x00000200 #define GMAC_MDIO_DATA 0x00000204 +#define GMAC_GPIO_STATUS 0x0000020C #define GMAC_ARP_ADDR 0x00000210 #define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) #define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) @@ -49,6 +50,7 @@ #define GMAC_L4_ADDR(reg) (0x904 + (reg) * 0x30) #define GMAC_L3_ADDR0(reg) (0x910 + (reg) * 0x30) #define GMAC_L3_ADDR1(reg) (0x914 + (reg) * 0x30) +#define GMAC_TIMESTAMP_STATUS 0x00000b20 /* RX Queues Routing */ #define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0) @@ -143,6 +145,7 @@ #define GMAC_INT_PCS_PHYIS BIT(3) #define GMAC_INT_PMT_EN BIT(4) #define GMAC_INT_LPI_EN BIT(5) +#define GMAC_INT_TSIE BIT(12) #define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ GMAC_INT_PCS_ANE) @@ -259,6 +262,7 @@ enum power_event { #define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) /* MAC HW features2 bitmap */ +#define GMAC_HW_FEAT_AUXSNAPNUM GENMASK(30, 28) #define GMAC_HW_FEAT_PPSOUTNUM GENMASK(26, 24) #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) #define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) @@ -278,6 +282,12 @@ enum power_event { #define GMAC_HW_FEAT_DVLAN BIT(5) #define GMAC_HW_FEAT_NRVF GENMASK(2, 0) +/* GMAC GPIO Status reg */ +#define GMAC_GPO0 BIT(16) +#define GMAC_GPO1 BIT(17) +#define GMAC_GPO2 BIT(18) +#define GMAC_GPO3 BIT(19) + /* MAC HW ADDR regs */ #define GMAC_HI_DCS GENMASK(18, 16) #define GMAC_HI_DCS_SHIFT 16 @@ -298,6 +308,11 @@ enum power_event { #define GMAC_L4DP0_SHIFT 16 #define GMAC_L4SP0 GENMASK(15, 0) +/* MAC Timestamp Status */ +#define GMAC_TIMESTAMP_AUXTSTRIG BIT(2) +#define GMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25) +#define GMAC_TIMESTAMP_ATSNS_SHIFT 25 + /* MTL registers */ #define MTL_OPERATION_MODE 0x00000c00 #define MTL_FRPE BIT(15) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 29f765a246a0..95864f014ffa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -53,6 +53,10 @@ static void dwmac4_core_init(struct mac_device_info *hw, if (hw->pcs) value |= GMAC_PCS_IRQ_DEFAULT; + /* Enable FPE interrupt */ + if 
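
GMAC_HW_FEAT_AUXSNAPNUM above is GENMASK(30, 28) of HW_FEATURE2; the field decode that dwmac4_get_hw_feature() performs for the auxiliary snapshot count, as a standalone model:

    #include <stdint.h>
    #include <stdio.h>

    #define GMAC_HW_FEAT_AUXSNAPNUM 0x70000000u  /* GENMASK(30, 28) */

    int main(void)
    {
        uint32_t hw_cap = 0x20000000u;  /* say the core reports two inputs */

        printf("aux_snapshot_n=%u\n",
               (unsigned)((hw_cap & GMAC_HW_FEAT_AUXSNAPNUM) >> 28));
        return 0;
    }
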
((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26) + value |= GMAC_INT_FPE_EN; + writel(value, ioaddr + GMAC_INT_EN); } @@ -1245,6 +1249,8 @@ const struct stmmac_ops dwmac410_ops = { .config_l4_filter = dwmac4_config_l4_filter, .est_configure = dwmac5_est_configure, .fpe_configure = dwmac5_fpe_configure, + .fpe_send_mpacket = dwmac5_fpe_send_mpacket, + .fpe_irq_status = dwmac5_fpe_irq_status, .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, @@ -1294,6 +1300,8 @@ const struct stmmac_ops dwmac510_ops = { .config_l4_filter = dwmac4_config_l4_filter, .est_configure = dwmac5_est_configure, .fpe_configure = dwmac5_fpe_configure, + .fpe_send_mpacket = dwmac5_fpe_send_mpacket, + .fpe_irq_status = dwmac5_fpe_irq_status, .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 62aa0e95beb7..8954b85eb850 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -210,12 +210,9 @@ static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) _dwmac4_dump_dma_regs(ioaddr, i, reg_space); } -static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan) +static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue) { - u32 chan; - - for (chan = 0; chan < number_chan; chan++) - writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan)); + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue)); } static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, @@ -415,6 +412,8 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, /* IEEE 1588-2002 */ dma_cap->time_stamp = 0; + /* Number of Auxiliary Snapshot Inputs */ + dma_cap->aux_snapshot_n = (hw_cap & GMAC_HW_FEAT_AUXSNAPNUM) >> 28; /* MAC HW feature3 */ hw_cap = readl(ioaddr + GMAC_HW_FEATURE3); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index 8f7ac24545ef..5b010ebfede9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -595,9 +595,95 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, ctrl &= ~EEST; writel(ctrl, ioaddr + MTL_EST_CONTROL); + + /* Configure EST interrupt */ + if (cfg->enable) + ctrl = (IECGCE | IEHS | IEHF | IEBE | IECC); + else + ctrl = 0; + + writel(ctrl, ioaddr + MTL_EST_INT_EN); + return 0; } +void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt) +{ + u32 status, value, feqn, hbfq, hbfs, btrl; + u32 txqcnt_mask = (1 << txqcnt) - 1; + + status = readl(ioaddr + MTL_EST_STATUS); + + value = (CGCE | HLBS | HLBF | BTRE | SWLC); + + /* Return if there is no error */ + if (!(status & value)) + return; + + if (status & CGCE) { + /* Clear Interrupt */ + writel(CGCE, ioaddr + MTL_EST_STATUS); + + x->mtl_est_cgce++; + } + + if (status & HLBS) { + value = readl(ioaddr + MTL_EST_SCH_ERR); + value &= txqcnt_mask; + + x->mtl_est_hlbs++; + + /* Clear Interrupt */ + writel(value, ioaddr + MTL_EST_SCH_ERR); + + /* Collecting info to shows all the queues that has HLBS + * issue. 
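
In dwmac5_est_irq_status() above, MTL_EST_SCH_ERR carries one bit per Tx queue, so the raw value is clipped to the configured queue count before reporting; a standalone model of that decode:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t txqcnt = 4;
        uint32_t txqcnt_mask = (1u << txqcnt) - 1;  /* 0x0f for four queues */
        uint32_t sch_err = 0x16;                    /* raw register value */

        printf("HLB(sched) queues: 0x%x\n", (unsigned)(sch_err & txqcnt_mask));
        return 0;  /* prints 0x6: queues 1 and 2 flagged */
    }
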
The only way to clear this is to clear the + * statistic + */ + if (net_ratelimit()) + netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value); + } + + if (status & HLBF) { + value = readl(ioaddr + MTL_EST_FRM_SZ_ERR); + feqn = value & txqcnt_mask; + + value = readl(ioaddr + MTL_EST_FRM_SZ_CAP); + hbfq = (value & SZ_CAP_HBFQ_MASK(txqcnt)) >> SZ_CAP_HBFQ_SHIFT; + hbfs = value & SZ_CAP_HBFS_MASK; + + x->mtl_est_hlbf++; + + /* Clear Interrupt */ + writel(feqn, ioaddr + MTL_EST_FRM_SZ_ERR); + + if (net_ratelimit()) + netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n", + hbfq, hbfs); + } + + if (status & BTRE) { + if ((status & BTRL) == BTRL_MAX) + x->mtl_est_btrlm++; + else + x->mtl_est_btre++; + + btrl = (status & BTRL) >> BTRL_SHIFT; + + if (net_ratelimit()) + netdev_info(dev, "EST: BTR Error Loop Count %u\n", + btrl); + + writel(BTRE, ioaddr + MTL_EST_STATUS); + } + + if (status & SWLC) { + writel(SWLC, ioaddr + MTL_EST_STATUS); + netdev_info(dev, "EST: SWOL has been switched\n"); + } +} + void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, bool enable) { @@ -621,3 +707,52 @@ void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, value |= EFPE; writel(value, ioaddr + MAC_FPE_CTRL_STS); } + +int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) +{ + u32 value; + int status; + + status = FPE_EVENT_UNKNOWN; + + value = readl(ioaddr + MAC_FPE_CTRL_STS); + + if (value & TRSP) { + status |= FPE_EVENT_TRSP; + netdev_info(dev, "FPE: Respond mPacket is transmitted\n"); + } + + if (value & TVER) { + status |= FPE_EVENT_TVER; + netdev_info(dev, "FPE: Verify mPacket is transmitted\n"); + } + + if (value & RRSP) { + status |= FPE_EVENT_RRSP; + netdev_info(dev, "FPE: Respond mPacket is received\n"); + } + + if (value & RVER) { + status |= FPE_EVENT_RVER; + netdev_info(dev, "FPE: Verify mPacket is received\n"); + } + + return status; +} + +void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, enum stmmac_mpacket_type type) +{ + u32 value; + + value = readl(ioaddr + MAC_FPE_CTRL_STS); + + if (type == MPACKET_VERIFY) { + value &= ~SRSP; + value |= SVER; + } else { + value &= ~SVER; + value |= SRSP; + } + + writel(value, ioaddr + MAC_FPE_CTRL_STS); +} diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h index 56b0762c1276..ff555d8b0cdf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h @@ -12,6 +12,12 @@ #define TMOUTEN BIT(0) #define MAC_FPE_CTRL_STS 0x00000234 +#define TRSP BIT(19) +#define TVER BIT(18) +#define RRSP BIT(17) +#define RVER BIT(16) +#define SRSP BIT(2) +#define SVER BIT(1) #define EFPE BIT(0) #define MAC_PPS_CONTROL 0x00000b70 @@ -38,6 +44,36 @@ #define PTOV_SHIFT 24 #define SSWL BIT(1) #define EEST BIT(0) + +#define MTL_EST_STATUS 0x00000c58 +#define BTRL GENMASK(11, 8) +#define BTRL_SHIFT 8 +#define BTRL_MAX (0xF << BTRL_SHIFT) +#define SWOL BIT(7) +#define SWOL_SHIFT 7 +#define CGCE BIT(4) +#define HLBS BIT(3) +#define HLBF BIT(2) +#define BTRE BIT(1) +#define SWLC BIT(0) + +#define MTL_EST_SCH_ERR 0x00000c60 +#define MTL_EST_FRM_SZ_ERR 0x00000c64 +#define MTL_EST_FRM_SZ_CAP 0x00000c68 +#define SZ_CAP_HBFS_MASK GENMASK(14, 0) +#define SZ_CAP_HBFQ_SHIFT 16 +#define SZ_CAP_HBFQ_MASK(_val) ({ typeof(_val) (val) = (_val); \ + ((val) > 4 ? GENMASK(18, 16) : \ + (val) > 2 ? 
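
The BTRE branch above separates a saturated loop counter (BTRL == BTRL_MAX) from an ordinary BTR error; a standalone decode of the bits-11:8 loop count using the constants defined below:

    #include <stdint.h>
    #include <stdio.h>

    #define BTRL       0x00000f00u  /* GENMASK(11, 8) */
    #define BTRL_SHIFT 8
    #define BTRL_MAX   (0xfu << BTRL_SHIFT)

    int main(void)
    {
        uint32_t status = 0x0302u;  /* BTRE set, loop count 3 */

        if ((status & BTRL) == BTRL_MAX)
            printf("BTR loop counter saturated\n");
        else
            printf("BTR error loop count %u\n",
                   (unsigned)((status & BTRL) >> BTRL_SHIFT));
        return 0;
    }
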
GENMASK(17, 16) : \ + BIT(16)); }) + +#define MTL_EST_INT_EN 0x00000c70 +#define IECGCE CGCE +#define IEHS HLBS +#define IEHF HLBF +#define IEBE BTRE +#define IECC SWLC + #define MTL_EST_GCL_CONTROL 0x00000c80 #define BTR_LOW 0x0 #define BTR_HIGH 0x1 @@ -98,6 +134,8 @@ #define GMAC_RXQCTRL_VFFQ_SHIFT 17 #define GMAC_RXQCTRL_VFFQE BIT(16) +#define GMAC_INT_FPE_EN BIT(17) + int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp); int dwmac5_safety_feat_irq_status(struct net_device *ndev, void __iomem *ioaddr, unsigned int asp, @@ -111,7 +149,12 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, u32 sub_second_inc, u32 systime_flags); int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, unsigned int ptp_rate); +void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt); void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, bool enable); +void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, + enum stmmac_mpacket_type type); +int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev); #endif /* __DWMAC5_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 77308c5c5d29..f2cab5b76732 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -441,12 +441,9 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr, dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3; } -static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan) +static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue) { - u32 i; - - for (i = 0; i < nchan; i++) - writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(i)); + writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(queue)); } static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 979ac9fca23c..45edac5f60db 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -206,7 +206,7 @@ struct stmmac_dma_ops { void (*get_hw_feature)(void __iomem *ioaddr, struct dma_features *dma_cap); /* Program the HW RX Watchdog */ - void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan); + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue); void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); @@ -393,8 +393,13 @@ struct stmmac_ops { void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr); int (*est_configure)(void __iomem *ioaddr, struct stmmac_est *cfg, unsigned int ptp_rate); + void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt); void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, bool enable); + void (*fpe_send_mpacket)(void __iomem *ioaddr, + enum stmmac_mpacket_type type); + int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev); }; #define stmmac_core_init(__priv, __args...) \ @@ -491,8 +496,14 @@ struct stmmac_ops { stmmac_do_void_callback(__priv, mac, set_arp_offload, __args) #define stmmac_est_configure(__priv, __args...) 
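
SZ_CAP_HBFQ_MASK() above widens the HBFQ queue field with the Tx queue count (one bit up to two queues, two bits up to four, three bits beyond); a standalone model, without the original's statement-expression guard against double evaluation:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK32(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define SZ_CAP_HBFQ_SHIFT 16
    #define SZ_CAP_HBFQ_MASK(v) ((v) > 4 ? GENMASK32(18, 16) : \
                                 (v) > 2 ? GENMASK32(17, 16) : \
                                 GENMASK32(16, 16))

    int main(void)
    {
        uint32_t cap = (5u << SZ_CAP_HBFQ_SHIFT) | 0x40u;  /* queue 5, size 64 */

        printf("hbfq=%u\n",
               (unsigned)((cap & SZ_CAP_HBFQ_MASK(8)) >> SZ_CAP_HBFQ_SHIFT));
        return 0;  /* prints hbfq=5 */
    }
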
\ stmmac_do_callback(__priv, mac, est_configure, __args) +#define stmmac_est_irq_status(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, est_irq_status, __args) #define stmmac_fpe_configure(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, fpe_configure, __args) +#define stmmac_fpe_send_mpacket(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args) +#define stmmac_fpe_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, fpe_irq_status, __args) /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { @@ -504,6 +515,7 @@ struct stmmac_hwtimestamp { int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, int add_sub, int gmac4); void (*get_systime) (void __iomem *ioaddr, u64 *systime); + void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time); }; #define stmmac_config_hw_tstamping(__priv, __args...) \ @@ -518,6 +530,8 @@ struct stmmac_hwtimestamp { stmmac_do_callback(__priv, ptp, adjust_systime, __args) #define stmmac_get_systime(__priv, __args...) \ stmmac_do_void_callback(__priv, ptp, get_systime, __args) +#define stmmac_get_ptptime(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, get_ptptime, __args) /* Helpers to manage the descriptors for chain and ring modes */ struct stmmac_mode_ops { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index e553b9a1f785..4faad331a4ca 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -147,9 +147,9 @@ struct stmmac_flow_entry { struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ - u32 tx_coal_frames; - u32 tx_coal_timer; - u32 rx_coal_frames; + u32 tx_coal_frames[MTL_MAX_TX_QUEUES]; + u32 tx_coal_timer[MTL_MAX_TX_QUEUES]; + u32 rx_coal_frames[MTL_MAX_TX_QUEUES]; int tx_coalesce; int hwts_tx_en; @@ -160,7 +160,7 @@ struct stmmac_priv { unsigned int dma_buf_sz; unsigned int rx_copybreak; - u32 rx_riwt; + u32 rx_riwt[MTL_MAX_TX_QUEUES]; int hwts_rx_en; void __iomem *ioaddr; @@ -234,6 +234,12 @@ struct stmmac_priv { struct workqueue_struct *wq; struct work_struct service_task; + /* Workqueue for handling FPE hand-shaking */ + unsigned long fpe_task_state; + struct workqueue_struct *fpe_wq; + struct work_struct fpe_task; + char wq_name[IFNAMSIZ + 4]; + /* TC Handling */ unsigned int tc_entries_max; unsigned int tc_off_max; @@ -272,6 +278,8 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv); bool stmmac_eee_init(struct stmmac_priv *priv); int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt); int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size); +int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled); +void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable); #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS) void stmmac_selftest_run(struct net_device *dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index c5642985ef95..61b11639ee0c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -158,6 +158,12 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { /* TSO */ STMMAC_STAT(tx_tso_frames), STMMAC_STAT(tx_tso_nfrags), + /* EST */ + STMMAC_STAT(mtl_est_cgce), + STMMAC_STAT(mtl_est_hlbs), + STMMAC_STAT(mtl_est_hlbf), + STMMAC_STAT(mtl_est_btre), + STMMAC_STAT(mtl_est_btrlm), }; #define STMMAC_STATS_LEN 
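
The stmmac_do_callback()-style wrappers above dispatch through the ops table only when the core actually provides the hook; a hedged standalone model of the idiom (struct and names illustrative, not the hwif API):

    #include <stddef.h>
    #include <stdio.h>

    struct mac_ops {
        int (*fpe_irq_status)(void *ioaddr);
    };

    static int call_fpe_irq_status(const struct mac_ops *ops, void *ioaddr)
    {
        if (!ops || !ops->fpe_irq_status)
            return -22;  /* -EINVAL: hook absent on this core */
        return ops->fpe_irq_status(ioaddr);
    }

    static int dummy_status(void *ioaddr) { (void)ioaddr; return 0; }

    int main(void)
    {
        struct mac_ops ops = { .fpe_irq_status = dummy_status };

        printf("%d %d\n", call_fpe_irq_status(&ops, NULL),
               call_fpe_irq_status(NULL, NULL));  /* 0 -22 */
        return 0;
    }
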
ARRAY_SIZE(stmmac_gstrings_stats) @@ -756,28 +762,75 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) return (riwt * 256) / (clk / 1000000); } -static int stmmac_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) +static int __stmmac_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + int queue) { struct stmmac_priv *priv = netdev_priv(dev); + u32 max_cnt; + u32 rx_cnt; + u32 tx_cnt; - ec->tx_coalesce_usecs = priv->tx_coal_timer; - ec->tx_max_coalesced_frames = priv->tx_coal_frames; + rx_cnt = priv->plat->rx_queues_to_use; + tx_cnt = priv->plat->tx_queues_to_use; + max_cnt = max(rx_cnt, tx_cnt); - if (priv->use_riwt) { - ec->rx_max_coalesced_frames = priv->rx_coal_frames; - ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv); + if (queue < 0) + queue = 0; + else if (queue >= max_cnt) + return -EINVAL; + + if (queue < tx_cnt) { + ec->tx_coalesce_usecs = priv->tx_coal_timer[queue]; + ec->tx_max_coalesced_frames = priv->tx_coal_frames[queue]; + } else { + ec->tx_coalesce_usecs = 0; + ec->tx_max_coalesced_frames = 0; + } + + if (priv->use_riwt && queue < rx_cnt) { + ec->rx_max_coalesced_frames = priv->rx_coal_frames[queue]; + ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt[queue], + priv); + } else { + ec->rx_max_coalesced_frames = 0; + ec->rx_coalesce_usecs = 0; } return 0; } -static int stmmac_set_coalesce(struct net_device *dev, +static int stmmac_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { + return __stmmac_get_coalesce(dev, ec, -1); +} + +static int stmmac_get_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __stmmac_get_coalesce(dev, ec, queue); +} + +static int __stmmac_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + int queue) +{ struct stmmac_priv *priv = netdev_priv(dev); - u32 rx_cnt = priv->plat->rx_queues_to_use; + bool all_queues = false; unsigned int rx_riwt; + u32 max_cnt; + u32 rx_cnt; + u32 tx_cnt; + + rx_cnt = priv->plat->rx_queues_to_use; + tx_cnt = priv->plat->tx_queues_to_use; + max_cnt = max(rx_cnt, tx_cnt); + + if (queue < 0) + all_queues = true; + else if (queue >= max_cnt) + return -EINVAL; if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) { rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); @@ -785,8 +838,23 @@ static int stmmac_set_coalesce(struct net_device *dev, if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) return -EINVAL; - priv->rx_riwt = rx_riwt; - stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); + if (all_queues) { + int i; + + for (i = 0; i < rx_cnt; i++) { + priv->rx_riwt[i] = rx_riwt; + stmmac_rx_watchdog(priv, priv->ioaddr, + rx_riwt, i); + priv->rx_coal_frames[i] = + ec->rx_max_coalesced_frames; + } + } else if (queue < rx_cnt) { + priv->rx_riwt[queue] = rx_riwt; + stmmac_rx_watchdog(priv, priv->ioaddr, + rx_riwt, queue); + priv->rx_coal_frames[queue] = + ec->rx_max_coalesced_frames; + } } if ((ec->tx_coalesce_usecs == 0) && @@ -797,13 +865,37 @@ static int stmmac_set_coalesce(struct net_device *dev, (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES)) return -EINVAL; - /* Only copy relevant parameters, ignore all others. 
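
stmmac_riwt2usec() above converts Rx watchdog units of 256 CSR-clock cycles into microseconds; a standalone model with an illustrative 250 MHz clock:

    #include <stdio.h>

    static unsigned int riwt2usec(unsigned int riwt, unsigned long clk_hz)
    {
        return (riwt * 256) / (clk_hz / 1000000);  /* 256 cycles per unit */
    }

    int main(void)
    {
        printf("riwt=100 -> %u us\n", riwt2usec(100, 250000000UL));
        return 0;  /* prints riwt=100 -> 102 us */
    }

With a recent ethtool the per-queue values added here should be reachable via "ethtool --per-queue <dev> queue_mask <mask> --show-coalesce".
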
*/ - priv->tx_coal_frames = ec->tx_max_coalesced_frames; - priv->tx_coal_timer = ec->tx_coalesce_usecs; - priv->rx_coal_frames = ec->rx_max_coalesced_frames; + if (all_queues) { + int i; + + for (i = 0; i < tx_cnt; i++) { + priv->tx_coal_frames[i] = + ec->tx_max_coalesced_frames; + priv->tx_coal_timer[i] = + ec->tx_coalesce_usecs; + } + } else if (queue < tx_cnt) { + priv->tx_coal_frames[queue] = + ec->tx_max_coalesced_frames; + priv->tx_coal_timer[queue] = + ec->tx_coalesce_usecs; + } + return 0; } +static int stmmac_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + return __stmmac_set_coalesce(dev, ec, -1); +} + +static int stmmac_set_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __stmmac_set_coalesce(dev, ec, queue); +} + static int stmmac_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *rxnfc, u32 *rule_locs) { @@ -1001,6 +1093,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = { .get_ts_info = stmmac_get_ts_info, .get_coalesce = stmmac_get_coalesce, .set_coalesce = stmmac_set_coalesce, + .get_per_queue_coalesce = stmmac_get_per_queue_coalesce, + .set_per_queue_coalesce = stmmac_set_per_queue_coalesce, .get_channels = stmmac_get_channels, .set_channels = stmmac_set_channels, .get_tunable = stmmac_get_tunable, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index d291612eeafb..113c51bcc0b5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -153,6 +153,16 @@ static void get_systime(void __iomem *ioaddr, u64 *systime) *systime = ns; } +static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) +{ + u64 ns; + + ns = readl(ptpaddr + PTP_ATNR); + ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC; + + *ptp_time = ns; +} + const struct stmmac_hwtimestamp stmmac_ptp = { .config_hw_tstamping = config_hw_tstamping, .init_systime = init_systime, @@ -160,4 +170,5 @@ const struct stmmac_hwtimestamp stmmac_ptp = { .config_addend = config_addend, .adjust_systime = adjust_systime, .get_systime = get_systime, + .get_ptptime = get_ptptime, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 208cae344ffa..170296820af0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -28,6 +28,7 @@ #include <linux/if_vlan.h> #include <linux/dma-mapping.h> #include <linux/slab.h> +#include <linux/pm_runtime.h> #include <linux/prefetch.h> #include <linux/pinctrl/consumer.h> #ifdef CONFIG_DEBUG_FS @@ -113,6 +114,38 @@ static void stmmac_exit_fs(struct net_device *dev); #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) +int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) +{ + int ret = 0; + + if (enabled) { + ret = clk_prepare_enable(priv->plat->stmmac_clk); + if (ret) + return ret; + ret = clk_prepare_enable(priv->plat->pclk); + if (ret) { + clk_disable_unprepare(priv->plat->stmmac_clk); + return ret; + } + if (priv->plat->clks_config) { + ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); + if (ret) { + clk_disable_unprepare(priv->plat->stmmac_clk); + clk_disable_unprepare(priv->plat->pclk); + return ret; + } + } + } else { + clk_disable_unprepare(priv->plat->stmmac_clk); + clk_disable_unprepare(priv->plat->pclk); + if (priv->plat->clks_config) + priv->plat->clks_config(priv->plat->bsp_priv, enabled); + } 
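
get_ptptime() above merges the auxiliary-snapshot nanoseconds (PTP_ATNR) and seconds (PTP_ATSR) registers into one 64-bit timestamp; the combine, standalone with illustrative register values:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        uint32_t atnr = 500000000u;  /* nanoseconds register */
        uint32_t atsr = 7u;          /* seconds register */
        uint64_t ptp_time = atsr * NSEC_PER_SEC + atnr;

        printf("ptp_time=%llu ns\n", (unsigned long long)ptp_time);
        return 0;  /* prints ptp_time=7500000000 ns */
    }
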
+ + return ret; +} +EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); + /** * stmmac_verify_args - verify the driver parameters. * Description: it checks the driver parameters and set a default in case of @@ -433,6 +466,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, { struct skb_shared_hwtstamps shhwtstamp; bool found = false; + s64 adjust = 0; u64 ns = 0; if (!priv->hwts_tx_en) @@ -451,6 +485,13 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, } if (found) { + /* Correct the clk domain crossing(CDC) error */ + if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { + adjust += -(2 * (NSEC_PER_SEC / + priv->plat->clk_ptp_rate)); + ns += adjust; + } + memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp.hwtstamp = ns_to_ktime(ns); @@ -474,6 +515,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, { struct skb_shared_hwtstamps *shhwtstamp = NULL; struct dma_desc *desc = p; + u64 adjust = 0; u64 ns = 0; if (!priv->hwts_rx_en) @@ -485,6 +527,13 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, /* Check if timestamp is available */ if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); + + /* Correct the clk domain crossing(CDC) error */ + if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { + adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate); + ns -= adjust; + } + netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); @@ -922,6 +971,21 @@ static void stmmac_mac_an_restart(struct phylink_config *config) /* Not Supported */ } +static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) +{ + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + + if (is_up && *hs_enable) { + stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY); + } else { + *lo_state = FPE_EVENT_UNKNOWN; + *lp_state = FPE_EVENT_UNKNOWN; + } +} + static void stmmac_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { @@ -932,6 +996,8 @@ static void stmmac_mac_link_down(struct phylink_config *config, priv->tx_lpi_enabled = false; stmmac_eee_init(priv); stmmac_set_eee_pls(priv, priv->hw, false); + + stmmac_fpe_link_state_handle(priv, false); } static void stmmac_mac_link_up(struct phylink_config *config, @@ -1030,6 +1096,8 @@ static void stmmac_mac_link_up(struct phylink_config *config, priv->tx_lpi_enabled = priv->eee_enabled; stmmac_set_eee_pls(priv, priv->hw, true); } + + stmmac_fpe_link_state_handle(priv, true); } static const struct phylink_mac_ops stmmac_phylink_mac_ops = { @@ -1117,6 +1185,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) priv->phylink_config.dev = &priv->dev->dev; priv->phylink_config.type = PHYLINK_NETDEV; priv->phylink_config.pcs_poll = true; + priv->phylink_config.ovr_an_inband = + priv->plat->mdio_bus_data->xpcs_an_inband; if (!fwnode) fwnode = dev_fwnode(priv->device); @@ -2183,7 +2253,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) /* We still have pending packets, let's call for a new scheduling */ if (tx_q->dirty_tx != tx_q->cur_tx) - hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer), + hrtimer_start(&tx_q->txtimer, + 
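
The clock-domain-crossing (CDC) hunks above shift both Tx and Rx hardware timestamps by two PTP clock periods; with an illustrative 250 MHz clk_ptp_rate that works out to 8 ns:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        uint64_t clk_ptp_rate = 250000000ULL;                 /* assumed rate */
        uint64_t adjust = 2 * (NSEC_PER_SEC / clk_ptp_rate);  /* 2 periods = 8 ns */
        uint64_t raw_ns = 1000000ULL;                         /* hw timestamp */

        printf("corrected=%llu ns\n", (unsigned long long)(raw_ns - adjust));
        return 0;  /* prints corrected=999992 ns */
    }
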
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), HRTIMER_MODE_REL); __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); @@ -2468,7 +2539,8 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; - hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer), + hrtimer_start(&tx_q->txtimer, + STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), HRTIMER_MODE_REL); } @@ -2509,18 +2581,21 @@ static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) static void stmmac_init_coalesce(struct stmmac_priv *priv) { u32 tx_channel_count = priv->plat->tx_queues_to_use; + u32 rx_channel_count = priv->plat->rx_queues_to_use; u32 chan; - priv->tx_coal_frames = STMMAC_TX_FRAMES; - priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; - priv->rx_coal_frames = STMMAC_RX_FRAMES; - for (chan = 0; chan < tx_channel_count; chan++) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; + priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; + hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); tx_q->txtimer.function = stmmac_tx_timer; } + + for (chan = 0; chan < rx_channel_count; chan++) + priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; } static void stmmac_set_rings_length(struct stmmac_priv *priv) @@ -2737,6 +2812,26 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) } } +static int stmmac_fpe_start_wq(struct stmmac_priv *priv) +{ + char *name; + + clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); + + name = priv->wq_name; + sprintf(name, "%s-fpe", priv->dev->name); + + priv->fpe_wq = create_singlethread_workqueue(name); + if (!priv->fpe_wq) { + netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); + + return -ENOMEM; + } + netdev_info(priv->dev, "FPE workqueue start"); + + return 0; +} + /** * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. @@ -2825,10 +2920,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) priv->tx_lpi_timer = eee_timer * 1000; if (priv->use_riwt) { - if (!priv->rx_riwt) - priv->rx_riwt = DEF_DMA_RIWT; + u32 queue; + + for (queue = 0; queue < rx_cnt; queue++) { + if (!priv->rx_riwt[queue]) + priv->rx_riwt[queue] = DEF_DMA_RIWT; - ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); + stmmac_rx_watchdog(priv, priv->ioaddr, + priv->rx_riwt[queue], queue); + } } if (priv->hw->pcs) @@ -2868,6 +2968,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) /* Start the ball rolling... 
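
stmmac_fpe_start_wq() above gives the handshake its own single-threaded workqueue so the retry loop may sleep outside interrupt context; a hedged kernel-style sketch of the pattern (names and the fixed queue name are illustrative):

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *fpe_wq;
    static struct work_struct fpe_task;

    static void fpe_task_fn(struct work_struct *work)
    {
        /* handshake retries; msleep() is fine in process context */
    }

    static int fpe_start(void)
    {
        fpe_wq = create_singlethread_workqueue("eth0-fpe");
        if (!fpe_wq)
            return -ENOMEM;

        INIT_WORK(&fpe_task, fpe_task_fn);
        return 0;
    }
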
*/ stmmac_start_all_dma(priv); + if (priv->dma_cap.fpesel) { + stmmac_fpe_start_wq(priv); + + if (priv->plat->fpe_cfg->enable) + stmmac_fpe_handshake(priv, true); + } + return 0; } @@ -2894,15 +3001,21 @@ static int stmmac_open(struct net_device *dev) u32 chan; int ret; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI && - priv->hw->xpcs == NULL) { + priv->hw->xpcs_args.an_mode != DW_AN_C73) { ret = stmmac_init_phy(dev); if (ret) { netdev_err(priv->dev, "%s: Cannot attach to PHY (error: %d)\n", __func__, ret); - return ret; + goto init_phy_error; } } @@ -3018,9 +3131,21 @@ init_error: free_dma_desc_resources(priv); dma_desc_error: phylink_disconnect_phy(priv->phylink); +init_phy_error: + pm_runtime_put(priv->device); return ret; } +static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) +{ + set_bit(__FPE_REMOVING, &priv->fpe_task_state); + + if (priv->fpe_wq) + destroy_workqueue(priv->fpe_wq); + + netdev_info(priv->dev, "FPE workqueue stop"); +} + /** * stmmac_release - close entry point of the driver * @dev : device pointer. @@ -3068,6 +3193,11 @@ static int stmmac_release(struct net_device *dev) stmmac_release_ptp(priv); + pm_runtime_put(priv->device); + + if (priv->dma_cap.fpesel) + stmmac_fpe_stop_wq(priv); + return 0; } @@ -3317,11 +3447,12 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) set_ic = true; - else if (!priv->tx_coal_frames) + else if (!priv->tx_coal_frames[queue]) set_ic = false; - else if (tx_packets > priv->tx_coal_frames) + else if (tx_packets > priv->tx_coal_frames[queue]) set_ic = true; - else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) + else if ((tx_q->tx_count_frames % + priv->tx_coal_frames[queue]) < tx_packets) set_ic = true; else set_ic = false; @@ -3546,11 +3677,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) set_ic = true; - else if (!priv->tx_coal_frames) + else if (!priv->tx_coal_frames[queue]) set_ic = false; - else if (tx_packets > priv->tx_coal_frames) + else if (tx_packets > priv->tx_coal_frames[queue]) set_ic = true; - else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) + else if ((tx_q->tx_count_frames % + priv->tx_coal_frames[queue]) < tx_packets) set_ic = true; else set_ic = false; @@ -3749,11 +3881,11 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) stmmac_refill_desc3(priv, rx_q, p); rx_q->rx_count_frames++; - rx_q->rx_count_frames += priv->rx_coal_frames; - if (rx_q->rx_count_frames > priv->rx_coal_frames) + rx_q->rx_count_frames += priv->rx_coal_frames[queue]; + if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) rx_q->rx_count_frames = 0; - use_rx_wd = !priv->rx_coal_frames; + use_rx_wd = !priv->rx_coal_frames[queue]; use_rx_wd |= rx_q->rx_count_frames > 0; if (!priv->use_riwt) use_rx_wd = false; @@ -4207,6 +4339,48 @@ static int stmmac_set_features(struct net_device *netdev, return 0; } +static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) +{ + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + + if (status == FPE_EVENT_UNKNOWN || !*hs_enable) 
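
stmmac_open() above uses the classic runtime-PM idiom: pm_runtime_get_sync() raises the usage count even when it fails, so the error path must drop it with pm_runtime_put_noidle(). A hedged kernel-style sketch (helper name illustrative):

    #include <linux/pm_runtime.h>

    static int open_with_rpm(struct device *dev)
    {
        int ret = pm_runtime_get_sync(dev);

        if (ret < 0) {
            pm_runtime_put_noidle(dev);  /* balance the failed get */
            return ret;
        }

        /* ... bring the interface up; the matching pm_runtime_put()
         * happens on the release path, as in stmmac_release() above ...
         */
        return 0;
    }
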
+ return; + + /* If LP has sent verify mPacket, LP is FPE capable */ + if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { + if (*lp_state < FPE_STATE_CAPABLE) + *lp_state = FPE_STATE_CAPABLE; + + /* If user has requested FPE enable, quickly response */ + if (*hs_enable) + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + MPACKET_RESPONSE); + } + + /* If Local has sent verify mPacket, Local is FPE capable */ + if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) { + if (*lo_state < FPE_STATE_CAPABLE) + *lo_state = FPE_STATE_CAPABLE; + } + + /* If LP has sent response mPacket, LP is entering FPE ON */ + if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) + *lp_state = FPE_STATE_ENTERING_ON; + + /* If Local has sent response mPacket, Local is entering FPE ON */ + if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) + *lo_state = FPE_STATE_ENTERING_ON; + + if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && + !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && + priv->fpe_wq) { + queue_work(priv->fpe_wq, &priv->fpe_task); + } +} + /** * stmmac_interrupt - main ISR * @irq: interrupt number. @@ -4241,6 +4415,17 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) if (stmmac_safety_feat_interrupt(priv)) return IRQ_HANDLED; + if (priv->dma_cap.estsel) + stmmac_est_irq_status(priv, priv->ioaddr, priv->dev, + &priv->xstats, tx_cnt); + + if (priv->dma_cap.fpesel) { + int status = stmmac_fpe_irq_status(priv, priv->ioaddr, + priv->dev); + + stmmac_fpe_event_status(priv, status); + } + /* To handle GMAC own interrupts */ if ((priv->plat->has_gmac) || xmac) { int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); @@ -4704,6 +4889,12 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid bool is_double = false; int ret; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + if (be16_to_cpu(proto) == ETH_P_8021AD) is_double = true; @@ -4737,10 +4928,15 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi if (priv->hw->num_vlan) { ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); if (ret) - return ret; + goto del_vlan_error; } - return stmmac_vlan_update(priv, is_double); + ret = stmmac_vlan_update(priv, is_double); + +del_vlan_error: + pm_runtime_put(priv->device); + + return ret; } static const struct net_device_ops stmmac_netdev_ops = { @@ -4977,6 +5173,68 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) return ret; } +#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" +static void stmmac_fpe_lp_task(struct work_struct *work) +{ + struct stmmac_priv *priv = container_of(work, struct stmmac_priv, + fpe_task); + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + bool *enable = &fpe_cfg->enable; + int retries = 20; + + while (retries-- > 0) { + /* Bail out immediately if FPE handshake is OFF */ + if (*lo_state == FPE_STATE_OFF || !*hs_enable) + break; + + if (*lo_state == FPE_STATE_ENTERING_ON && + *lp_state == FPE_STATE_ENTERING_ON) { + stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + *enable); + + netdev_info(priv->dev, "configured FPE\n"); + + *lo_state = FPE_STATE_ON; + *lp_state = FPE_STATE_ON; + netdev_info(priv->dev, "!!! 
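
A compact standalone model of the verify/response handshake handled above: a verify mPacket marks a side as capable, a response marks it as entering-on, and the work task switches FPE on once both sides are entering-on (event ordering illustrative):

    #include <stdio.h>

    enum state { OFF, CAPABLE, ENTERING_ON, ON };

    static const char *name[] = { "OFF", "CAPABLE", "ENTERING_ON", "ON" };

    int main(void)
    {
        enum state lo = OFF, lp = OFF;

        lp = CAPABLE;      /* FPE_EVENT_RVER: link partner sent verify */
        lo = CAPABLE;      /* FPE_EVENT_TVER: we sent verify */
        lp = ENTERING_ON;  /* FPE_EVENT_RRSP: partner responded */
        lo = ENTERING_ON;  /* FPE_EVENT_TRSP: we responded */

        if (lo == ENTERING_ON && lp == ENTERING_ON)
            printf("lo=%s lp=%s -> configure FPE ON\n", name[lo], name[lp]);
        return 0;
    }
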
BOTH FPE stations ON\n"); + break; + } + + if ((*lo_state == FPE_STATE_CAPABLE || + *lo_state == FPE_STATE_ENTERING_ON) && + *lp_state != FPE_STATE_ON) { + netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT, + *lo_state, *lp_state); + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + MPACKET_VERIFY); + } + /* Sleep then retry */ + msleep(500); + } + + clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); +} + +void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) +{ + if (priv->plat->fpe_cfg->hs_enable != enable) { + if (enable) { + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + MPACKET_VERIFY); + } else { + priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; + priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; + } + + priv->plat->fpe_cfg->hs_enable = enable; + } +} + /** * stmmac_dvr_probe * @device: device pointer @@ -5034,6 +5292,9 @@ int stmmac_dvr_probe(struct device *device, INIT_WORK(&priv->service_task, stmmac_service_task); + /* Initialize the Link Partner FPE work item */ + INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); + /* Override with kernel parameters if supplied XXX CRS XXX * this needs to have multiple instances */ @@ -5179,6 +5440,10 @@ int stmmac_dvr_probe(struct device *device, stmmac_check_pcs_mode(priv); + pm_runtime_get_noresume(device); + pm_runtime_set_active(device); + pm_runtime_enable(device); + if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) { /* MDIO bus Registration */ @@ -5216,6 +5481,11 @@ int stmmac_dvr_probe(struct device *device, stmmac_init_fs(ndev); #endif + /* Let pm_runtime_put() disable the clocks. * If CONFIG_PM is not enabled, the clocks will stay powered. */ + pm_runtime_put(device); + return ret; error_serdes_powerup: @@ -5230,6 +5500,7 @@ error_mdio_register: stmmac_napi_del(ndev); error_hw_init: destroy_workqueue(priv->wq); + stmmac_bus_clks_config(priv, false); return ret; } @@ -5265,8 +5536,8 @@ int stmmac_dvr_remove(struct device *dev) phylink_destroy(priv->phylink); if (priv->plat->stmmac_rst) reset_control_assert(priv->plat->stmmac_rst); - clk_disable_unprepare(priv->plat->pclk); - clk_disable_unprepare(priv->plat->stmmac_clk); + pm_runtime_put(dev); + pm_runtime_disable(dev); if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); @@ -5289,6 +5560,7 @@ int stmmac_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); u32 chan; + int ret; if (!ndev || !netif_running(ndev)) return 0; @@ -5332,11 +5604,22 @@ int stmmac_suspend(struct device *dev) pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ clk_disable_unprepare(priv->plat->clk_ptp_ref); - clk_disable_unprepare(priv->plat->pclk); - clk_disable_unprepare(priv->plat->stmmac_clk); + ret = pm_runtime_force_suspend(dev); + if (ret) + return ret; } + mutex_unlock(&priv->lock); + if (priv->dma_cap.fpesel) { + /* Disable FPE */ + stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, false); + + stmmac_fpe_handshake(priv, false); + } + priv->speed = SPEED_UNKNOWN; return 0; } @@ -5399,8 +5682,9 @@ int stmmac_resume(struct device *dev) } else { pinctrl_pm_select_default_state(priv->device); /* enable the clk previously disabled */ - clk_prepare_enable(priv->plat->stmmac_clk); - clk_prepare_enable(priv->plat->pclk); + ret = pm_runtime_force_resume(dev); + if (ret) + return ret; if (priv->plat->clk_ptp_ref) clk_prepare_enable(priv->plat->clk_ptp_ref); /* reset the phy so 
that it's ready */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index d64116e0543e..b750074f8f9c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -15,6 +15,7 @@ #include <linux/iopoll.h> #include <linux/mii.h> #include <linux/of_mdio.h> +#include <linux/pm_runtime.h> #include <linux/phy.h> #include <linux/property.h> #include <linux/slab.h> @@ -87,21 +88,29 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) u32 tmp, addr, value = MII_XGMAC_BUSY; int ret; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } if (phyreg & MII_ADDR_C45) { phyreg &= ~MII_ADDR_C45; ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); if (ret) - return ret; + goto err_disable_clks; } else { ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); if (ret) - return ret; + goto err_disable_clks; value |= MII_XGMAC_SADDR; } @@ -112,8 +121,10 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } /* Set the MII address register to read */ writel(addr, priv->ioaddr + mii_address); @@ -121,11 +132,18 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } /* Read the data from the MII data register */ - return readl(priv->ioaddr + mii_data) & GENMASK(15, 0); + ret = (int)readl(priv->ioaddr + mii_data) & GENMASK(15, 0); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; } static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, @@ -138,21 +156,29 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, u32 addr, tmp, value = MII_XGMAC_BUSY; int ret; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } if (phyreg & MII_ADDR_C45) { phyreg &= ~MII_ADDR_C45; ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); if (ret) - return ret; + goto err_disable_clks; } else { ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); if (ret) - return ret; + goto err_disable_clks; value |= MII_XGMAC_SADDR; } @@ -164,16 +190,23 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = 
-EBUSY; + goto err_disable_clks; + } /* Set the MII address register to write */ writel(addr, priv->ioaddr + mii_address); writel(value, priv->ioaddr + mii_data); /* Wait until any existing MII operation is complete */ - return readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000); + ret = readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; } /** @@ -196,6 +229,12 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) int data = 0; u32 v; + data = pm_runtime_get_sync(priv->device); + if (data < 0) { + pm_runtime_put_noidle(priv->device); + return data; + } + value |= (phyaddr << priv->hw->mii.addr_shift) & priv->hw->mii.addr_mask; value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; @@ -216,19 +255,26 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) } if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000)) - return -EBUSY; + 100, 10000)) { + data = -EBUSY; + goto err_disable_clks; + } writel(data, priv->ioaddr + mii_data); writel(value, priv->ioaddr + mii_address); if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000)) - return -EBUSY; + 100, 10000)) { + data = -EBUSY; + goto err_disable_clks; + } /* Read the data from the MII data register */ data = (int)readl(priv->ioaddr + mii_data) & MII_DATA_MASK; +err_disable_clks: + pm_runtime_put(priv->device); + return data; } @@ -247,10 +293,16 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; unsigned int mii_data = priv->hw->mii.data; + int ret, data = phydata; u32 value = MII_BUSY; - int data = phydata; u32 v; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + value |= (phyaddr << priv->hw->mii.addr_shift) & priv->hw->mii.addr_mask; value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; @@ -275,16 +327,23 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000)) - return -EBUSY; + 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } /* Set the MII address register to write */ writel(data, priv->ioaddr + mii_data); writel(value, priv->ioaddr + mii_address); /* Wait until any existing MII operation is complete */ - return readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000); + ret = readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; } /** diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 6dc9f10414e4..5a1e018884e6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -704,7 +704,6 @@ int stmmac_pltfr_remove(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(stmmac_pltfr_remove); -#ifdef CONFIG_PM_SLEEP /** * stmmac_pltfr_suspend * @dev: device pointer @@ -712,7 +711,7 @@ EXPORT_SYMBOL_GPL(stmmac_pltfr_remove); * call the main suspend function and then, if required, on some platform, it * can call an exit helper. 
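
The MDIO conversions above all reduce to one shape: take a runtime-PM reference before touching the hardware, route every early exit through a single unwind label, and drop the reference on the way out, so the get/put calls stay balanced even on the -EBUSY paths. A minimal sketch of that pattern, with all driver names hypothetical:

#include <linux/iopoll.h>
#include <linux/pm_runtime.h>

struct example_priv {
        struct device *dev;
        void __iomem *base;
};

static int example_reg_read(struct example_priv *priv, unsigned int reg)
{
        u32 busy;
        int ret;

        /* Resume the device; bus clocks may be gated while it is idle. */
        ret = pm_runtime_get_sync(priv->dev);
        if (ret < 0) {
                /* get_sync() raises the usage count even on failure. */
                pm_runtime_put_noidle(priv->dev);
                return ret;
        }

        /* Wait for any pending operation to finish (hypothetical busy bit). */
        if (readl_poll_timeout(priv->base + reg, busy, !(busy & BIT(0)),
                               100, 10000)) {
                ret = -EBUSY;
                goto out_put;
        }

        ret = readl(priv->base + reg) & GENMASK(15, 0);

out_put:
        /* Balance the reference so the device may autosuspend again. */
        pm_runtime_put(priv->dev);
        return ret;
}
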
*/ -static int stmmac_pltfr_suspend(struct device *dev) +static int __maybe_unused stmmac_pltfr_suspend(struct device *dev) { int ret; struct net_device *ndev = dev_get_drvdata(dev); @@ -733,7 +732,7 @@ static int stmmac_pltfr_suspend(struct device *dev) * the main resume function, on some platforms, it can call own init helper * if required. */ -static int stmmac_pltfr_resume(struct device *dev) +static int __maybe_unused stmmac_pltfr_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); @@ -744,10 +743,29 @@ static int stmmac_pltfr_resume(struct device *dev) return stmmac_resume(dev); } -#endif /* CONFIG_PM_SLEEP */ -SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend, - stmmac_pltfr_resume); +static int __maybe_unused stmmac_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + stmmac_bus_clks_config(priv, false); + + return 0; +} + +static int __maybe_unused stmmac_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + return stmmac_bus_clks_config(priv, true); +} + +const struct dev_pm_ops stmmac_pltfr_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume) + SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL) +}; EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops); MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 0989e2bb6ee3..b164ae22e35f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -9,6 +9,7 @@ *******************************************************************************/ #include "stmmac.h" #include "stmmac_ptp.h" +#include "dwmac4.h" /** * stmmac_adjust_freq @@ -165,6 +166,36 @@ static int stmmac_enable(struct ptp_clock_info *ptp, return ret; } +/** + * stmmac_get_syncdevicetime + * @device: current device time + * @system: system counter value read synchronously with device time + * @ctx: context provided by timekeeping code + * Description: Read device and system clock simultaneously and return the + * corrected clock values in ns. 
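
The helper documented here feeds get_device_system_crosststamp(), which is what backs the PTP_SYS_OFFSET_PRECISE ioctl on the PTP character device. User space can exercise the new callback roughly like this (a sketch: the PHC index is hypothetical and error handling is trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
        struct ptp_sys_offset_precise off = { 0 };
        int fd = open("/dev/ptp0", O_RDWR);     /* PHC index is hypothetical */

        if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off)) {
                perror("PTP_SYS_OFFSET_PRECISE");
                return 1;
        }

        /* Device and system time captured from one synchronous snapshot */
        printf("device %lld.%09u system %lld.%09u\n",
               (long long)off.device.sec, off.device.nsec,
               (long long)off.sys_realtime.sec, off.sys_realtime.nsec);
        close(fd);
        return 0;
}
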
+ **/ +static int stmmac_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)ctx; + + if (priv->plat->crosststamp) + return priv->plat->crosststamp(device, system, ctx); + else + return -EOPNOTSUPP; +} + +static int stmmac_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *xtstamp) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + + return get_device_system_crosststamp(stmmac_get_syncdevicetime, + priv, NULL, xtstamp); +} + /* structure describing a PTP hardware clock */ static struct ptp_clock_info stmmac_ptp_clock_ops = { .owner = THIS_MODULE, @@ -180,6 +211,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = { .gettime64 = stmmac_get_time, .settime64 = stmmac_set_time, .enable = stmmac_enable, + .getcrosststamp = stmmac_getcrosststamp, }; /** @@ -192,6 +224,9 @@ void stmmac_ptp_register(struct stmmac_priv *priv) { int i; + if (priv->plat->ptp_clk_freq_config) + priv->plat->ptp_clk_freq_config(priv); + for (i = 0; i < priv->dma_cap.pps_out_num; i++) { if (i >= STMMAC_PPS_MAX) break; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 7abb1d47e7da..f88727ce4d30 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -23,6 +23,9 @@ #define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */ #define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */ #define PTP_TAR 0x18 /* Timestamp Addend Reg */ +#define PTP_ACR 0x40 /* Auxiliary Control Reg */ +#define PTP_ATNR 0x48 /* Auxiliary Timestamp - Nanoseconds Reg */ +#define PTP_ATSR 0x4c /* Auxiliary Timestamp - Seconds Reg */ #define PTP_STNSUR_ADDSUB_SHIFT 31 #define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ @@ -64,4 +67,24 @@ #define PTP_SSIR_SSINC_MASK 0xff #define GMAC4_PTP_SSIR_SSINC_SHIFT 16 +/* Auxiliary Control defines */ +#define PTP_ACR_ATSFC BIT(0) /* Auxiliary Snapshot FIFO Clear */ +#define PTP_ACR_ATSEN0 BIT(4) /* Auxiliary Snapshot 0 Enable */ +#define PTP_ACR_ATSEN1 BIT(5) /* Auxiliary Snapshot 1 Enable */ +#define PTP_ACR_ATSEN2 BIT(6) /* Auxiliary Snapshot 2 Enable */ +#define PTP_ACR_ATSEN3 BIT(7) /* Auxiliary Snapshot 3 Enable */ +#define PTP_ACR_MASK GENMASK(7, 4) /* Aux Snapshot Mask */ +#define PMC_ART_VALUE0 0x01 /* PMC_ART[15:0] timer value */ +#define PMC_ART_VALUE1 0x02 /* PMC_ART[31:16] timer value */ +#define PMC_ART_VALUE2 0x03 /* PMC_ART[47:32] timer value */ +#define PMC_ART_VALUE3 0x04 /* PMC_ART[63:48] timer value */ +#define GMAC4_ART_TIME_SHIFT 16 /* ART TIME 16-bits shift */ + +enum aux_snapshot { + AUX_SNAPSHOT0 = 0x10, + AUX_SNAPSHOT1 = 0x20, + AUX_SNAPSHOT2 = 0x40, + AUX_SNAPSHOT3 = 0x80, +}; + #endif /* __STMMAC_PTP_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 44bb133c3000..1d84ee359808 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -297,6 +297,17 @@ static int tc_init(struct stmmac_priv *priv) dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n", priv->tc_entries_max, priv->tc_off_max); + + if (!priv->plat->fpe_cfg) { + priv->plat->fpe_cfg = devm_kzalloc(priv->device, + sizeof(*priv->plat->fpe_cfg), + GFP_KERNEL); + if (!priv->plat->fpe_cfg) + return -ENOMEM; + } else { + memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg)); 
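
The allocate-or-reset logic just added to tc_init() is a common devm idiom: the first call allocates device-managed storage, freed automatically when the driver detaches, and later re-initializations simply clear the existing buffer instead of reallocating. A generic sketch of the idiom, with all names hypothetical:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_state {
        u32 flags;      /* placeholder contents */
};

static int example_init_state(struct device *dev,
                              struct example_state **pstate)
{
        if (!*pstate) {
                /* First init: tie the lifetime to the device. */
                *pstate = devm_kzalloc(dev, sizeof(**pstate), GFP_KERNEL);
                if (!*pstate)
                        return -ENOMEM;
        } else {
                /* Re-init: keep the allocation, drop stale contents. */
                memset(*pstate, 0, sizeof(**pstate));
        }

        return 0;
}
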
+ } + + return 0; } @@ -598,6 +609,87 @@ static int tc_del_flow(struct stmmac_priv *priv, return ret; } +#define VLAN_PRIO_FULL_MASK (0x07) + +static int tc_add_vlan_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + int tc = tc_classid_to_hwtc(priv->dev, cls->classid); + struct flow_match_vlan match; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) + return -EINVAL; + + if (tc < 0) { + netdev_err(priv->dev, "Invalid traffic class\n"); + return -EINVAL; + } + + flow_rule_match_vlan(rule, &match); + + if (match.mask->vlan_priority) { + u32 prio; + + if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { + netdev_err(priv->dev, "Only full mask is supported for VLAN priority\n"); + return -EINVAL; + } + + prio = BIT(match.key->vlan_priority); + stmmac_rx_queue_prio(priv, priv->hw, prio, tc); + } + + return 0; +} + +static int tc_del_vlan_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + int tc = tc_classid_to_hwtc(priv->dev, cls->classid); + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) + return -EINVAL; + + if (tc < 0) { + netdev_err(priv->dev, "Invalid traffic class\n"); + return -EINVAL; + } + + stmmac_rx_queue_prio(priv, priv->hw, 0, tc); + + return 0; +} + +static int tc_add_flow_cls(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + int ret; + + ret = tc_add_flow(priv, cls); + if (!ret) + return ret; + + return tc_add_vlan_flow(priv, cls); +} + +static int tc_del_flow_cls(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + int ret; + + ret = tc_del_flow(priv, cls); + if (!ret) + return ret; + + return tc_del_vlan_flow(priv, cls); +} + static int tc_setup_cls(struct stmmac_priv *priv, struct flow_cls_offload *cls) { @@ -609,10 +701,10 @@ static int tc_setup_cls(struct stmmac_priv *priv, switch (cls->command) { case FLOW_CLS_REPLACE: - ret = tc_add_flow(priv, cls); + ret = tc_add_flow_cls(priv, cls); break; case FLOW_CLS_DESTROY: - ret = tc_del_flow(priv, cls); + ret = tc_del_flow_cls(priv, cls); break; default: return -EOPNOTSUPP; @@ -748,13 +840,10 @@ static int tc_setup_taprio(struct stmmac_priv *priv, if (fpe && !priv->dma_cap.fpesel) return -EOPNOTSUPP; - ret = stmmac_fpe_configure(priv, priv->ioaddr, - priv->plat->tx_queues_to_use, - priv->plat->rx_queues_to_use, fpe); - if (ret && fpe) { - netdev_err(priv->dev, "failed to enable Frame Preemption\n"); - return ret; - } + /* Actual FPE register configuration will be done after the FPE handshake + * succeeds. 
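
The handshake deferred to here is the verify/response mPacket exchange implemented by stmmac_fpe_event_status() and stmmac_fpe_lp_task() earlier in this patch. Reduced to the one transition that finally programs the hardware (a sketch, not the driver code; the enum mirrors stmmac_fpe_state, the helper name is hypothetical):

static bool example_fpe_ready_to_enable(enum stmmac_fpe_state *lo,
                                        enum stmmac_fpe_state *lp)
{
        /* Each side advances OFF -> CAPABLE on a verify mPacket and
         * CAPABLE -> ENTERING_ON on a response mPacket; only when both
         * the local and link-partner states reach ENTERING_ON does the
         * worker call stmmac_fpe_configure(). */
        if (*lo != FPE_STATE_ENTERING_ON || *lp != FPE_STATE_ENTERING_ON)
                return false;

        *lo = FPE_STATE_ON;
        *lp = FPE_STATE_ON;
        return true;
}
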
+ */ + priv->plat->fpe_cfg->enable = fpe; ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, priv->plat->clk_ptp_rate); @@ -764,12 +853,29 @@ netdev_info(priv->dev, "configured EST\n"); + + if (fpe) { + stmmac_fpe_handshake(priv, true); + netdev_info(priv->dev, "start FPE handshake\n"); + } + return 0; disable: priv->plat->est->enable = false; stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, priv->plat->clk_ptp_rate); + + priv->plat->fpe_cfg->enable = false; + stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + false); + netdev_info(priv->dev, "disabled FPE\n"); + + stmmac_fpe_handshake(priv, false); + netdev_info(priv->dev, "stop FPE handshake\n"); + return ret; } diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 9ff894ba8d3e..54f45d8c79a7 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -1599,6 +1599,7 @@ static inline int cas_mdio_link_not_up(struct cas *cp) cas_phy_write(cp, MII_BMCR, val); break; } + break; default: break; } diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 58f142ee78a3..9790656cf970 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -1674,8 +1674,8 @@ static void gem_init_phy(struct gem *gp) if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { int i; - /* Those delay sucks, the HW seem to love them though, I'll - * serisouly consider breaking some locks here to be able + /* Those delays suck, the HW seems to love them though, I'll + * seriously consider breaking some locks here to be able * to schedule instead */ for (i = 0; i < 3; i++) { diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 3a8775e0ca55..5d677db0aee5 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1880,7 +1880,7 @@ static int axienet_probe(struct platform_device *pdev) if (IS_ERR(lp->regs)) { dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n"); ret = PTR_ERR(lp->regs); - goto free_netdev; + goto cleanup_clk; } lp->regs_start = ethres->start; @@ -1958,18 +1958,18 @@ static int axienet_probe(struct platform_device *pdev) break; default: ret = -EINVAL; - goto free_netdev; + goto cleanup_clk; } } else { ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); if (ret) - goto free_netdev; + goto cleanup_clk; } if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); ret = -EINVAL; - goto free_netdev; + goto cleanup_clk; } /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ @@ -1982,7 +1982,7 @@ static int axienet_probe(struct platform_device *pdev) dev_err(&pdev->dev, "unable to get DMA resource\n"); of_node_put(np); - goto free_netdev; + goto cleanup_clk; } lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); @@ -2002,12 +2002,12 @@ static int axienet_probe(struct platform_device *pdev) if (IS_ERR(lp->dma_regs)) { dev_err(&pdev->dev, "could not map DMA regs\n"); ret = PTR_ERR(lp->dma_regs); - goto free_netdev; + goto cleanup_clk; } if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) { dev_err(&pdev->dev, "could not determine irqs\n"); ret = -ENOMEM; - goto free_netdev; + goto cleanup_clk; } /* Autodetect the need 
for 64-bit DMA pointers. @@ -2037,7 +2037,7 @@ static int axienet_probe(struct platform_device *pdev) ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); - goto free_netdev; + goto cleanup_clk; } /* Check for Ethernet core IRQ (optional) */ @@ -2068,12 +2068,12 @@ static int axienet_probe(struct platform_device *pdev) if (!lp->phy_node) { dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n"); ret = -EINVAL; - goto free_netdev; + goto cleanup_mdio; } lp->pcs_phy = of_mdio_find_device(lp->phy_node); if (!lp->pcs_phy) { ret = -EPROBE_DEFER; - goto free_netdev; + goto cleanup_mdio; } lp->phylink_config.pcs_poll = true; } @@ -2087,17 +2087,30 @@ static int axienet_probe(struct platform_device *pdev) if (IS_ERR(lp->phylink)) { ret = PTR_ERR(lp->phylink); dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); - goto free_netdev; + goto cleanup_mdio; } ret = register_netdev(lp->ndev); if (ret) { dev_err(lp->dev, "register_netdev() error (%i)\n", ret); - goto free_netdev; + goto cleanup_phylink; } return 0; +cleanup_phylink: + phylink_destroy(lp->phylink); + +cleanup_mdio: + if (lp->pcs_phy) + put_device(&lp->pcs_phy->dev); + if (lp->mii_bus) + axienet_mdio_teardown(lp); + of_node_put(lp->phy_node); + +cleanup_clk: + clk_disable_unprepare(lp->clk); + free_netdev: free_netdev(ndev); diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig index f722079dfb6a..846bf41c2717 100644 --- a/drivers/net/fddi/Kconfig +++ b/drivers/net/fddi/Kconfig @@ -38,22 +38,6 @@ config DEFXX To compile this driver as a module, choose M here: the module will be called defxx. If unsure, say N. -config DEFXX_MMIO - bool - prompt "Use MMIO instead of PIO" if PCI || EISA - depends on DEFXX - default n if PCI || EISA - default y - help - This instructs the driver to use EISA or PCI memory-mapped I/O - (MMIO) as appropriate instead of programmed I/O ports (PIO). - Enabling this gives an improvement in processing time in parts - of the driver, but it may cause problems with EISA (DEFEA) - adapters. TURBOchannel does not have the concept of I/O ports, - so MMIO is always used for these (DEFTA) adapters. - - If unsure, say N. - config SKFP tristate "SysKonnect FDDI PCI support" depends on FDDI && PCI diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 077c68498f04..6d1e3f49a3d3 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -21,7 +21,7 @@ * LVS Lawrence V. Stefani <lstefani@yahoo.com> * * Maintainers: - * macro Maciej W. Rozycki <macro@linux-mips.org> + * macro Maciej W. Rozycki <macro@orcam.me.uk> * * Credits: * I'd like to thank Patricia Cross for helping me get started with @@ -197,6 +197,7 @@ * 23 Oct 2006 macro Big-endian host support. * 14 Dec 2006 macro TURBOchannel support. * 01 Jul 2014 macro Fixes for DMA on 64-bit hosts. + * 10 Mar 2021 macro Dynamic MMIO vs port I/O. */ /* Include files */ @@ -225,8 +226,8 @@ /* Version information string should be updated prior to each new release! 
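
The axienet hunks above re-point every failure after the clock is enabled to a new cleanup chain (cleanup_phylink -> cleanup_mdio -> cleanup_clk -> free_netdev), restoring the rule that a probe error path releases resources in the reverse order of acquisition. A generic sketch of that shape, with the driver names hypothetical:

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {
        struct clk *clk;
};

static int example_probe(struct platform_device *pdev)
{
        struct example_priv *priv;
        int irq, ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk))
                return PTR_ERR(priv->clk);

        ret = clk_prepare_enable(priv->clk);
        if (ret)
                goto err_put_clk;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
                goto err_disable_clk;
        }

        platform_set_drvdata(pdev, priv);
        return 0;

err_disable_clk:
        /* Undo everything acquired so far, newest first. */
        clk_disable_unprepare(priv->clk);
err_put_clk:
        clk_put(priv->clk);
        return ret;
}
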
*/ #define DRV_NAME "defxx" -#define DRV_VERSION "v1.11" -#define DRV_RELDATE "2014/07/01" +#define DRV_VERSION "v1.12" +#define DRV_RELDATE "2021/03/10" static const char version[] = DRV_NAME ": " DRV_VERSION " " DRV_RELDATE @@ -253,10 +254,10 @@ static const char version[] = #define DFX_BUS_TC(dev) 0 #endif -#ifdef CONFIG_DEFXX_MMIO -#define DFX_MMIO 1 +#if defined(CONFIG_EISA) || defined(CONFIG_PCI) +#define dfx_use_mmio bp->mmio #else -#define DFX_MMIO 0 +#define dfx_use_mmio true #endif /* Define module-wide (static) routines */ @@ -374,8 +375,6 @@ static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data) static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data) { struct device __maybe_unused *bdev = bp->bus_dev; - int dfx_bus_tc = DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; if (dfx_use_mmio) dfx_writel(bp, offset, data); @@ -398,8 +397,6 @@ static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data) static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) { struct device __maybe_unused *bdev = bp->bus_dev; - int dfx_bus_tc = DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; if (dfx_use_mmio) dfx_readl(bp, offset, data); @@ -421,7 +418,7 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) * None * * Arguments: - * bdev - pointer to device information + * bp - pointer to board information * bar_start - pointer to store the start addresses * bar_len - pointer to store the lengths of the areas * @@ -431,13 +428,13 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) * Side Effects: * None */ -static void dfx_get_bars(struct device *bdev, +static void dfx_get_bars(DFX_board_t *bp, resource_size_t *bar_start, resource_size_t *bar_len) { + struct device *bdev = bp->bus_dev; int dfx_bus_pci = dev_is_pci(bdev); int dfx_bus_eisa = DFX_BUS_EISA(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; if (dfx_bus_pci) { int num = dfx_use_mmio ? 0 : 1; @@ -495,6 +492,13 @@ static const struct net_device_ops dfx_netdev_ops = { .ndo_set_mac_address = dfx_ctl_set_mac_address, }; +static void dfx_register_res_err(const char *print_name, bool mmio, + unsigned long start, unsigned long len) +{ + pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n", + print_name, mmio ? 
"MMIO" : "I/O", len, start); +} + /* * ================ * = dfx_register = @@ -528,8 +532,6 @@ static int dfx_register(struct device *bdev) static int version_disp; int dfx_bus_pci = dev_is_pci(bdev); int dfx_bus_eisa = DFX_BUS_EISA(bdev); - int dfx_bus_tc = DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; const char *print_name = dev_name(bdev); struct net_device *dev; DFX_board_t *bp; /* board pointer */ @@ -567,46 +569,48 @@ static int dfx_register(struct device *bdev) bp->bus_dev = bdev; dev_set_drvdata(bdev, dev); - dfx_get_bars(bdev, bar_start, bar_len); - if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) { - pr_err("%s: Cannot use MMIO, no address set, aborting\n", - print_name); - pr_err("%s: Run ECU and set adapter's MMIO location\n", - print_name); - pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\"" - "\n", print_name); - err = -ENXIO; - goto err_out; + bp->mmio = true; + + dfx_get_bars(bp, bar_start, bar_len); + if (bar_len[0] == 0 || + (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) { + bp->mmio = false; + dfx_get_bars(bp, bar_start, bar_len); } - if (dfx_use_mmio) + if (dfx_use_mmio) { region = request_mem_region(bar_start[0], bar_len[0], - print_name); - else - region = request_region(bar_start[0], bar_len[0], print_name); + bdev->driver->name); + if (!region && (dfx_bus_eisa || dfx_bus_pci)) { + bp->mmio = false; + dfx_get_bars(bp, bar_start, bar_len); + } + } + if (!dfx_use_mmio) + region = request_region(bar_start[0], bar_len[0], + bdev->driver->name); if (!region) { - pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, " - "aborting\n", dfx_use_mmio ? "MMIO" : "I/O", print_name, - (long)bar_len[0], (long)bar_start[0]); + dfx_register_res_err(print_name, dfx_use_mmio, + bar_start[0], bar_len[0]); err = -EBUSY; goto err_out_disable; } if (bar_start[1] != 0) { - region = request_region(bar_start[1], bar_len[1], print_name); + region = request_region(bar_start[1], bar_len[1], + bdev->driver->name); if (!region) { - pr_err("%s: Cannot reserve I/O resource " - "0x%lx @ 0x%lx, aborting\n", print_name, - (long)bar_len[1], (long)bar_start[1]); + dfx_register_res_err(print_name, 0, + bar_start[1], bar_len[1]); err = -EBUSY; goto err_out_csr_region; } } if (bar_start[2] != 0) { - region = request_region(bar_start[2], bar_len[2], print_name); + region = request_region(bar_start[2], bar_len[2], + bdev->driver->name); if (!region) { - pr_err("%s: Cannot reserve I/O resource " - "0x%lx @ 0x%lx, aborting\n", print_name, - (long)bar_len[2], (long)bar_start[2]); + dfx_register_res_err(print_name, 0, + bar_start[2], bar_len[2]); err = -EBUSY; goto err_out_bh_region; } @@ -721,7 +725,6 @@ static void dfx_bus_init(struct net_device *dev) int dfx_bus_pci = dev_is_pci(bdev); int dfx_bus_eisa = DFX_BUS_EISA(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; u8 val; DBG_printk("In dfx_bus_init...\n"); @@ -1041,7 +1044,6 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name, int dfx_bus_pci = dev_is_pci(bdev); int dfx_bus_eisa = DFX_BUS_EISA(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; int alloc_size; /* total buffer size needed */ char *top_v, *curr_v; /* virtual addrs into memory block */ dma_addr_t top_p, curr_p; /* physical addrs into memory block */ @@ -3695,8 +3697,6 @@ static void dfx_unregister(struct device *bdev) struct net_device *dev = dev_get_drvdata(bdev); DFX_board_t *bp = netdev_priv(dev); int dfx_bus_pci = dev_is_pci(bdev); - int dfx_bus_tc = 
DFX_BUS_TC(bdev); - int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; resource_size_t bar_start[3] = {0}; /* pointers to ports */ resource_size_t bar_len[3] = {0}; /* resource lengths */ int alloc_size; /* total buffer size used */ @@ -3716,7 +3716,7 @@ static void dfx_unregister(struct device *bdev) dfx_bus_uninit(dev); - dfx_get_bars(bdev, bar_start, bar_len); + dfx_get_bars(bp, bar_start, bar_len); if (bar_start[2] != 0) release_region(bar_start[2], bar_len[2]); if (bar_start[1] != 0) @@ -3748,7 +3748,7 @@ static const struct pci_device_id dfx_pci_table[] = { MODULE_DEVICE_TABLE(pci, dfx_pci_table); static struct pci_driver dfx_pci_driver = { - .name = "defxx", + .name = DRV_NAME, .id_table = dfx_pci_table, .probe = dfx_pci_register, .remove = dfx_pci_unregister, @@ -3779,7 +3779,7 @@ MODULE_DEVICE_TABLE(eisa, dfx_eisa_table); static struct eisa_driver dfx_eisa_driver = { .id_table = dfx_eisa_table, .driver = { - .name = "defxx", + .name = DRV_NAME, .bus = &eisa_bus_type, .probe = dfx_dev_register, .remove = dfx_dev_unregister, @@ -3800,7 +3800,7 @@ MODULE_DEVICE_TABLE(tc, dfx_tc_table); static struct tc_driver dfx_tc_driver = { .id_table = dfx_tc_table, .driver = { - .name = "defxx", + .name = DRV_NAME, .bus = &tc_bus_type, .probe = dfx_dev_register, .remove = dfx_dev_unregister, diff --git a/drivers/net/fddi/defxx.h b/drivers/net/fddi/defxx.h index 9d30fde2ef3c..8193713e12db 100644 --- a/drivers/net/fddi/defxx.h +++ b/drivers/net/fddi/defxx.h @@ -16,7 +16,7 @@ * LVS Lawrence V. Stefani <lstefani@yahoo.com> * * Maintainers: - * macro Maciej W. Rozycki <macro@linux-mips.org> + * macro Maciej W. Rozycki <macro@orcam.me.uk> * * Modification History: * Date Name Description @@ -27,6 +27,7 @@ * 04 Aug 2003 macro Converted to the DMA API. * 23 Oct 2006 macro Big-endian host support. * 14 Dec 2006 macro TURBOchannel support. + * 10 Mar 2021 macro Dynamic MMIO vs port I/O. */ #ifndef _DEFXX_H_ @@ -1776,6 +1777,8 @@ typedef struct DFX_board_tag int port; } base; /* base address */ struct device *bus_dev; + /* Whether to use MMIO or port I/O. */ + bool mmio; u32 full_duplex_enb; /* FDDI Full Duplex enable (1 == on, 2 == off) */ u32 req_ttrt; /* requested TTRT value (in 80ns units) */ u32 burst_size; /* adapter burst size (enumerated) */ diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c index eaf85db53a5e..14f07050b6b1 100644 --- a/drivers/net/fddi/defza.c +++ b/drivers/net/fddi/defza.c @@ -60,7 +60,7 @@ static const char version[] = DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n"; -MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>"); +MODULE_AUTHOR("Maciej W. 
Rozycki <macro@orcam.me.uk>"); MODULE_DESCRIPTION("DEC FDDIcontroller 700 (DEFZA-xx) driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/fddi/skfp/h/smt.h b/drivers/net/fddi/skfp/h/smt.h index a0dbc0f57a55..b19c7a99d32a 100644 --- a/drivers/net/fddi/skfp/h/smt.h +++ b/drivers/net/fddi/skfp/h/smt.h @@ -411,7 +411,7 @@ struct smt_p_reason { #define SMT_RDF_ILLEGAL 0x00000005 /* read only (PMF) */ #define SMT_RDF_NOPARAM 0x6 /* parameter not supported (PMF) */ #define SMT_RDF_RANGE 0x8 /* out of range */ -#define SMT_RDF_AUTHOR 0x9 /* not autohorized */ +#define SMT_RDF_AUTHOR 0x9 /* not authorized */ #define SMT_RDF_LENGTH 0x0a /* length error */ #define SMT_RDF_TOOLONG 0x0b /* length error */ #define SMT_RDF_SBA 0x0d /* SBA denied */ @@ -450,7 +450,7 @@ struct smt_p_version { struct smt_p_0015 { struct smt_para para ; /* generic parameter header */ - u_int res_type ; /* recsource type */ + u_int res_type ; /* resource type */ } ; #define SYNC_BW 0x00000001L /* Synchronous Bandwidth */ @@ -489,7 +489,7 @@ struct smt_p_0017 { struct smt_p_0018 { struct smt_para para ; /* generic parameter header */ int sba_ov_req ; /* total sync bandwidth req for overhead*/ -} ; /* measuered in bytes per T_Neg */ +} ; /* measured in bytes per T_Neg */ /* * P19 : SBA Allocation Address @@ -562,7 +562,7 @@ struct smt_p_fsc { #define FSC_TYPE2 2 /* Special A/C indicator forwarding */ /* - * P00 21 : user defined authoriziation (see pmf.c) + * P00 21 : user defined authorization (see pmf.c) */ #define SMT_P_AUTHOR 0x0021 @@ -764,10 +764,8 @@ struct smt_sif_operation { struct smt_p_setcount setcount ; /* Set Count mandatory */ #endif /* must be last */ - struct smt_p_lem lem[1] ; /* phy lem status */ + struct smt_p_lem lem[]; /* phy lem status */ } ; -#define SIZEOF_SMT_SIF_OPERATION (sizeof(struct smt_sif_operation)- \ - sizeof(struct smt_p_lem)) /* * ECF : echo frame diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c index 774a6e3b0a67..6b68a53f1b38 100644 --- a/drivers/net/fddi/skfp/smt.c +++ b/drivers/net/fddi/skfp/smt.c @@ -1063,9 +1063,9 @@ static void smt_send_sif_operation(struct s_smc *smc, struct fddi_addr *dest, #endif if (!(mb = smt_build_frame(smc,SMT_SIF_OPER,SMT_REPLY, - SIZEOF_SMT_SIF_OPERATION+ports*sizeof(struct smt_p_lem)))) + struct_size(sif, lem, ports)))) return ; - sif = smtod(mb, struct smt_sif_operation *) ; + sif = smtod(mb, typeof(sif)); smt_fill_timestamp(smc,&sif->ts) ; /* set time stamp */ smt_fill_mac_status(smc,&sif->status) ; /* set mac status */ smt_fill_mac_counter(smc,&sif->mc) ; /* set mac counter field */ diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 36eeb80406f2..4690c6a59054 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -2167,7 +2167,6 @@ static void __exit scc_cleanup_driver(void) MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>"); MODULE_DESCRIPTION("AX.25 Device Driver for Z8530 based HDLC cards"); -MODULE_SUPPORTED_DEVICE("Z8530 based SCC cards for Amateur Radio"); MODULE_LICENSE("GPL"); module_init(scc_init_driver); module_exit(scc_cleanup_driver); diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index c64cc7639c39..5bce24731502 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1017,6 +1017,26 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, } /* RCU already held by caller */ +/* Batching/bouncing logic is designed to attempt to optimize + * performance. 
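
The skfp change above drops the open-coded SIZEOF_SMT_SIF_OPERATION arithmetic in favour of struct_size(), the standard helper for sizing a structure that ends in a flexible array member; it computes the header plus n trailing elements and saturates on overflow. The general pattern, sketched with a hypothetical structure:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_rec {
        u32 val;
};

struct example_frame {
        u16 count;
        struct example_rec recs[];      /* flexible array member */
};

static struct example_frame *example_alloc(u16 nrecs)
{
        /* struct_size(f, recs, nrecs) == sizeof(*f) +
         * nrecs * sizeof(f->recs[0]), with overflow checking. */
        struct example_frame *f = kzalloc(struct_size(f, recs, nrecs),
                                          GFP_KERNEL);

        if (f)
                f->count = nrecs;
        return f;
}
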
+ * + * For small, non-LSO packets we copy the packet to a send buffer + * which is pre-registered with the Hyper-V side. This enables the + * hypervisor to avoid remapping the aperture to access the packet + * descriptor and data. + * + * If we already started using a buffer and the netdev is transmitting + * a burst of packets, keep on copying into the buffer until it is + * full or we are done collecting a burst. If there is an existing + * buffer with space for the RNDIS descriptor but not the packet, copy + * the RNDIS descriptor to the buffer, keeping the packet in place. + * + * If we do batching and send more than one packet using a single + * NetVSC message, free the SKBs of the packets copied, except for the + * last packet. This is done to streamline the handling of the case + * where the last packet only had the RNDIS descriptor copied to the + * send buffer, with the data pointers included in the NetVSC message. + */ int netvsc_send(struct net_device *ndev, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 15f262b70489..97b5c9b60503 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1612,34 +1612,23 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) { - memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) + ethtool_sprintf(&p, netvsc_stats[i].name); - for (i = 0; i < ARRAY_SIZE(vf_stats); i++) { - memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(vf_stats); i++) + ethtool_sprintf(&p, vf_stats[i].name); for (i = 0; i < nvdev->num_chn; i++) { - sprintf(p, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_xdp_drop", i); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i); } for_each_present_cpu(cpu) { - for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) { - sprintf(p, pcpu_stats[i].name, cpu); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) + ethtool_sprintf(&p, pcpu_stats[i].name, cpu); } break; diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig index b68f1289b89e..90a90262e0d0 100644 --- a/drivers/net/ipa/Kconfig +++ b/drivers/net/ipa/Kconfig @@ -1,6 +1,6 @@ config QCOM_IPA tristate "Qualcomm IPA support" - depends on 64BIT && NET && QCOM_SMEM + depends on NET && QCOM_SMEM depends on ARCH_QCOM || COMPILE_TEST depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST) select QCOM_MDT_LOADER if ARCH_QCOM diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 390d3403386a..7f2a8fce5e0d 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -351,7 +351,7 @@ void *gsi_ring_virt(struct gsi_ring *ring, u32 index) /* Return the 32-bit DMA address associated with a ring index */ static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index) { - return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE; + return 
lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE; } /* Return the ring index of a 32-bit ring offset */ @@ -708,10 +708,9 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id) * high-order 32 bits of the address of the event ring, * respectively. */ - val = evt_ring->ring.addr & GENMASK(31, 0); + val = lower_32_bits(evt_ring->ring.addr); iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id)); - - val = evt_ring->ring.addr >> 32; + val = upper_32_bits(evt_ring->ring.addr); iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id)); /* Enable interrupt moderation by setting the moderation delay */ @@ -816,10 +815,9 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) * high-order 32 bits of the address of the channel ring, * respectively. */ - val = channel->tre_ring.addr & GENMASK(31, 0); + val = lower_32_bits(channel->tre_ring.addr); iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id)); - - val = channel->tre_ring.addr >> 32; + val = upper_32_bits(channel->tre_ring.addr); iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id)); /* Command channel gets low weighted round-robin priority */ @@ -829,14 +827,14 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */ - /* We enable the doorbell engine for IPA v3.5.1 */ - if (gsi->version == IPA_VERSION_3_5_1 && doorbell) + /* No need to use the doorbell engine starting at IPA v4.0 */ + if (gsi->version < IPA_VERSION_4_0 && doorbell) val |= USE_DB_ENG_FMASK; /* v4.0 introduces an escape buffer for prefetch. We use it * on all but the AP command channel. */ - if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) { + if (gsi->version >= IPA_VERSION_4_0 && !channel->command) { /* If not otherwise set, prefetch buffers are used */ if (gsi->version < IPA_VERSION_4_5) val |= USE_ESCAPE_BUF_ONLY_FMASK; @@ -975,7 +973,7 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell) gsi_channel_reset_command(channel); /* Due to a hardware quirk we may need to reset RX channels twice. */ - if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa) + if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa) gsi_channel_reset_command(channel); gsi_channel_program(channel, doorbell); @@ -1337,10 +1335,9 @@ static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev) int ret; ret = platform_get_irq_byname(pdev, "gsi"); - if (ret <= 0) { - dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret); + if (ret <= 0) return ret ? 
: -EINVAL; - } + irq = ret; ret = request_irq(irq, gsi_isr, 0, "gsi", gsi); @@ -1366,7 +1363,7 @@ static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel, u32 tre_index; /* Event xfer_ptr records the TRE it's associated with */ - tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0); + tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr)); tre_index = gsi_ring_index(&channel->tre_ring, tre_offset); return gsi_channel_trans_mapped(channel, tre_index); @@ -1439,15 +1436,18 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index) /* Initialize a ring, including allocating DMA memory for its entries */ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) { - size_t size = count * GSI_RING_ELEMENT_SIZE; + u32 size = count * GSI_RING_ELEMENT_SIZE; struct device *dev = gsi->dev; dma_addr_t addr; - /* Hardware requires a 2^n ring size, with alignment equal to size */ + /* Hardware requires a 2^n ring size, with alignment equal to size. + * The size is a power of 2, so we can check alignment using just + * the bottom 32 bits for a DMA address of any size. + */ ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); - if (ring->virt && addr % size) { + if (ring->virt && lower_32_bits(addr) % size) { dma_free_coherent(dev, size, ring->virt, addr); - dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n", + dev_err(dev, "unable to alloc 0x%x-aligned ring buffer\n", size); return -EINVAL; /* Not a good error value, but distinct */ } else if (!ring->virt) { diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h index efc980f96109..d5996bdb20ef 100644 --- a/drivers/net/ipa/gsi.h +++ b/drivers/net/ipa/gsi.h @@ -16,8 +16,8 @@ #include "ipa_version.h" /* Maximum number of channels and event rings supported by the driver */ -#define GSI_CHANNEL_COUNT_MAX 17 -#define GSI_EVT_RING_COUNT_MAX 13 +#define GSI_CHANNEL_COUNT_MAX 23 +#define GSI_EVT_RING_COUNT_MAX 20 /* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */ #define GSI_TLV_MAX 64 diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index 35e35852c25c..2ac6dd8413de 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -71,13 +71,12 @@ struct ipa_cmd_hw_hdr_init_local { /* IPA_CMD_REGISTER_WRITE */ -/* For IPA v4.0+, this opcode gets modified with pipeline clear options */ - +/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */ #define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK GENMASK(8, 8) #define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK GENMASK(10, 9) struct ipa_cmd_register_write { - __le16 flags; /* Unused/reserved for IPA v3.5.1 */ + __le16 flags; /* Unused/reserved prior to IPA v4.0 */ __le16 offset; __le32 value; __le32 value_mask; @@ -85,12 +84,12 @@ struct ipa_cmd_register_write { }; /* Field masks for ipa_cmd_register_write structure fields */ -/* The next field is present for IPA v4.0 and above */ +/* The next field is present for IPA v4.0+ */ #define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK GENMASK(14, 11) -/* The next field is present for IPA v3.5.1 only */ +/* The next field is not present for IPA v4.0+ */ #define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK GENMASK(15, 15) -/* The next field and its values are present for IPA v3.5.1 only */ +/* The next field and its values are not present for IPA v4.0+ */ #define REGISTER_WRITE_CLEAR_OPTIONS_FMASK GENMASK(1, 0) /* IPA_CMD_IP_PACKET_INIT */ @@ -123,7 +122,7 @@ struct ipa_cmd_hw_dma_mem_mem { /* Field masks for ipa_cmd_hw_dma_mem_mem structure 
fields */ #define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK GENMASK(0, 0) -/* The next two fields are present for IPA v3.5.1 only. */ +/* The next two fields are not present for IPA v4.0+ */ #define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK GENMASK(1, 1) #define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK GENMASK(3, 2) @@ -175,21 +174,23 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK); if (mem->offset > offset_max || ipa->mem_offset > offset_max - mem->offset) { - dev_err(dev, "IPv%c %s%s table region offset too large " - "(0x%04x + 0x%04x > 0x%04x)\n", - ipv6 ? '6' : '4', hashed ? "hashed " : "", - route ? "route" : "filter", - ipa->mem_offset, mem->offset, offset_max); + dev_err(dev, "IPv%c %s%s table region offset too large\n", + ipv6 ? '6' : '4', hashed ? "hashed " : "", + route ? "route" : "filter"); + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", + ipa->mem_offset, mem->offset, offset_max); + return false; } if (mem->offset > ipa->mem_size || mem->size > ipa->mem_size - mem->offset) { - dev_err(dev, "IPv%c %s%s table region out of range " - "(0x%04x + 0x%04x > 0x%04x)\n", - ipv6 ? '6' : '4', hashed ? "hashed " : "", - route ? "route" : "filter", - mem->offset, mem->size, ipa->mem_size); + dev_err(dev, "IPv%c %s%s table region out of range\n", + ipv6 ? '6' : '4', hashed ? "hashed " : "", + route ? "route" : "filter"); + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", + mem->offset, mem->size, ipa->mem_size); + return false; } @@ -205,22 +206,36 @@ static bool ipa_cmd_header_valid(struct ipa *ipa) u32 size_max; u32 size; + /* In ipa_cmd_hdr_init_local_add() we record the offset and size + * of the header table memory area. Make sure the offset and size + * fit in the fields that need to hold them, and that the entire + * range is within the overall IPA memory range. + */ offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK); if (mem->offset > offset_max || ipa->mem_offset > offset_max - mem->offset) { - dev_err(dev, "header table region offset too large " - "(0x%04x + 0x%04x > 0x%04x)\n", - ipa->mem_offset + mem->offset, offset_max); + dev_err(dev, "header table region offset too large\n"); + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", + ipa->mem_offset, mem->offset, offset_max); + return false; } size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK); size = ipa->mem[IPA_MEM_MODEM_HEADER].size; size += ipa->mem[IPA_MEM_AP_HEADER].size; - if (mem->offset > ipa->mem_size || size > ipa->mem_size - mem->offset) { - dev_err(dev, "header table region out of range " - "(0x%04x + 0x%04x > 0x%04x)\n", - mem->offset, size, ipa->mem_size); + + if (size > size_max) { + dev_err(dev, "header table region size too large\n"); + dev_err(dev, " (0x%04x > 0x%08x)\n", size, size_max); + + return false; + } + if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) { + dev_err(dev, "header table region out of range\n"); + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", + mem->offset, size, ipa->mem_size); + return false; } @@ -237,11 +252,12 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa, u32 bit_count; /* The maximum offset in a register_write immediate command depends - * on the version of IPA. IPA v3.5.1 supports a 16 bit offset, but - * newer versions allow some additional high-order bits. + * on the version of IPA. A 16 bit offset is always supported, + * but starting with IPA v4.0 some additional high-order bits are + * allowed. 
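
Concretely, the encoding this comment describes packs the low 16 bits of the register offset into the payload's offset field and, on IPA v4.0+, the next four bits into the flags word. A sketch using the structure and mask defined above (helper name hypothetical; it assumes the offset was already checked by ipa_cmd_register_write_offset_valid()):

static void example_encode_offset(struct ipa_cmd_register_write *payload,
                                  u32 offset)
{
        u16 offset_high = offset >> 16;         /* bits 19:16, IPA v4.0+ */

        payload->offset = cpu_to_le16(offset & GENMASK(15, 0));
        payload->flags = cpu_to_le16(u16_encode_bits(offset_high,
                                REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK));
}
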
*/ bit_count = BITS_PER_BYTE * sizeof(payload->offset); - if (ipa->version != IPA_VERSION_3_5_1) + if (ipa->version >= IPA_VERSION_4_0) bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK); BUILD_BUG_ON(bit_count > 32); offset_max = ~0U >> (32 - bit_count); @@ -440,7 +456,11 @@ void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value, /* pipeline_clear_src_grp is not used */ clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps; - if (ipa->version != IPA_VERSION_3_5_1) { + /* IPA v4.0+ represents the pipeline clear options in the opcode. It + * also supports a larger offset by encoding additional high-order + * bits in the payload flags field. + */ + if (ipa->version >= IPA_VERSION_4_0) { u16 offset_high; u32 val; diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c index 997b51ceb7d7..621ad15c9e67 100644 --- a/drivers/net/ipa/ipa_data-sc7180.c +++ b/drivers/net/ipa/ipa_data-sc7180.c @@ -9,6 +9,15 @@ #include "ipa_endpoint.h" #include "ipa_mem.h" +/* QSB configuration for the SC7180 SoC. */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 8, + .max_reads = 12, + /* no outstanding read byte (beat) limit */ + }, +}; + /* Endpoint configuration for the SC7180 SoC. */ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { [IPA_ENDPOINT_AP_COMMAND_TX] = { @@ -22,11 +31,13 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .tlv_count = 20, }, .endpoint = { - .seq_type = IPA_SEQ_DMA_ONLY, .config = { .resource_group = 0, .dma_mode = true, .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, }, }, }, @@ -41,7 +52,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .tlv_count = 6, }, .endpoint = { - .seq_type = IPA_SEQ_INVALID, .config = { .resource_group = 0, .aggregation = true, @@ -64,14 +74,14 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { }, .endpoint = { .filter_support = true, - .seq_type = - IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP, .config = { .resource_group = 0, .checksum = true, .qmap = true, .status_enable = true, .tx = { + .seq_type = IPA_SEQ_1_PASS_SKIP_LAST_UC, + .seq_rep_type = IPA_SEQ_REP_DMA_PARSER, .status_endpoint = IPA_ENDPOINT_MODEM_AP_RX, }, @@ -89,7 +99,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .tlv_count = 6, }, .endpoint = { - .seq_type = IPA_SEQ_INVALID, .config = { .resource_group = 0, .checksum = true, @@ -206,7 +215,7 @@ static const struct ipa_mem ipa_mem_local_data[] = { [IPA_MEM_UC_INFO] = { .offset = 0x0080, .size = 0x0200, - .canary_count = 2, + .canary_count = 0, }, [IPA_MEM_V4_FILTER_HASHED] = { .offset = 0x0288, @@ -253,11 +262,6 @@ static const struct ipa_mem ipa_mem_local_data[] = { .size = 0x0140, .canary_count = 2, }, - [IPA_MEM_AP_HEADER] = { - .offset = 0x05e8, - .size = 0x0000, - .canary_count = 0, - }, [IPA_MEM_MODEM_PROC_CTX] = { .offset = 0x05f0, .size = 0x0200, @@ -273,7 +277,7 @@ static const struct ipa_mem ipa_mem_local_data[] = { .size = 0x0050, .canary_count = 2, }, - [IPA_MEM_STATS_QUOTA] = { + [IPA_MEM_STATS_QUOTA_MODEM] = { .offset = 0x0a50, .size = 0x0060, .canary_count = 2, @@ -283,11 +287,6 @@ static const struct ipa_mem ipa_mem_local_data[] = { .size = 0x0140, .canary_count = 0, }, - [IPA_MEM_STATS_DROP] = { - .offset = 0x0bf0, - .size = 0, - .canary_count = 0, - }, [IPA_MEM_MODEM] = { .offset = 0x0bf0, .size = 0x140c, @@ -300,7 +299,7 @@ static const struct ipa_mem 
ipa_mem_local_data[] = { }, }; -static struct ipa_mem_data ipa_mem_data = { +static const struct ipa_mem_data ipa_mem_data = { .local_count = ARRAY_SIZE(ipa_mem_local_data), .local = ipa_mem_local_data, .imem_addr = 0x146a8000, @@ -310,7 +309,7 @@ static struct ipa_mem_data ipa_mem_data = { }; /* Interconnect bandwidths are in 1000 byte/second units */ -static struct ipa_interconnect_data ipa_interconnect_data[] = { +static const struct ipa_interconnect_data ipa_interconnect_data[] = { { .name = "memory", .peak_bandwidth = 465000, /* 465 MBps */ @@ -329,7 +328,7 @@ static struct ipa_interconnect_data ipa_interconnect_data[] = { }, }; -static struct ipa_clock_data ipa_clock_data = { +static const struct ipa_clock_data ipa_clock_data = { .core_clock_rate = 100 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -338,6 +337,8 @@ static struct ipa_clock_data ipa_clock_data = { /* Configuration data for the SC7180 SoC. */ const struct ipa_data ipa_data_sc7180 = { .version = IPA_VERSION_4_2, + .qsb_count = ARRAY_SIZE(ipa_qsb_data), + .qsb_data = ipa_qsb_data, .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data), .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c index 88c9c3562ab7..6b5173f47444 100644 --- a/drivers/net/ipa/ipa_data-sdm845.c +++ b/drivers/net/ipa/ipa_data-sdm845.c @@ -11,6 +11,18 @@ #include "ipa_endpoint.h" #include "ipa_mem.h" +/* QSB configuration for the SDM845 SoC. */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 8, + .max_reads = 8, + }, + [IPA_QSB_MASTER_PCIE] = { + .max_writes = 4, + .max_reads = 12, + }, +}; + /* Endpoint configuration for the SDM845 SoC. 
*/ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { [IPA_ENDPOINT_AP_COMMAND_TX] = { @@ -24,11 +36,13 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .tlv_count = 20, }, .endpoint = { - .seq_type = IPA_SEQ_DMA_ONLY, .config = { .resource_group = 1, .dma_mode = true, .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, }, }, }, @@ -43,7 +57,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .tlv_count = 8, }, .endpoint = { - .seq_type = IPA_SEQ_INVALID, .config = { .resource_group = 1, .aggregation = true, @@ -66,14 +79,13 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { }, .endpoint = { .filter_support = true, - .seq_type = - IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, .config = { .resource_group = 1, .checksum = true, .qmap = true, .status_enable = true, .tx = { + .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC, .status_endpoint = IPA_ENDPOINT_MODEM_AP_RX, }, @@ -91,7 +103,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .tlv_count = 8, }, .endpoint = { - .seq_type = IPA_SEQ_INVALID, .config = { .resource_group = 1, .checksum = true, @@ -293,11 +304,6 @@ static const struct ipa_mem ipa_mem_local_data[] = { .size = 0x0140, .canary_count = 2, }, - [IPA_MEM_AP_HEADER] = { - .offset = 0x07c8, - .size = 0x0000, - .canary_count = 0, - }, [IPA_MEM_MODEM_PROC_CTX] = { .offset = 0x07d0, .size = 0x0200, @@ -320,7 +326,7 @@ static const struct ipa_mem ipa_mem_local_data[] = { }, }; -static struct ipa_mem_data ipa_mem_data = { +static const struct ipa_mem_data ipa_mem_data = { .local_count = ARRAY_SIZE(ipa_mem_local_data), .local = ipa_mem_local_data, .imem_addr = 0x146bd000, @@ -330,7 +336,7 @@ static struct ipa_mem_data ipa_mem_data = { }; /* Interconnect bandwidths are in 1000 byte/second units */ -static struct ipa_interconnect_data ipa_interconnect_data[] = { +static const struct ipa_interconnect_data ipa_interconnect_data[] = { { .name = "memory", .peak_bandwidth = 600000, /* 600 MBps */ @@ -349,7 +355,7 @@ static struct ipa_interconnect_data ipa_interconnect_data[] = { }, }; -static struct ipa_clock_data ipa_clock_data = { +static const struct ipa_clock_data ipa_clock_data = { .core_clock_rate = 75 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -358,6 +364,8 @@ static struct ipa_clock_data ipa_clock_data = { /* Configuration data for the SDM845 SoC. */ const struct ipa_data ipa_data_sdm845 = { .version = IPA_VERSION_3_5_1, + .qsb_count = ARRAY_SIZE(ipa_qsb_data), + .qsb_data = ipa_qsb_data, .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data), .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index b476fc373f7f..7816583fc14a 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -18,8 +18,9 @@ * Boot-time configuration data is used to define the configuration of the * IPA and GSI resources to use for a given platform. This data is supplied * via the Device Tree match table, associated with a particular compatible - * string. The data defines information about resources, endpoints, and - * channels. + * string. The data defines information about how resources, endpoints and + * channels, memory, clocking and so on are allocated and used for the + * platform. * * Resources are data structures used internally by the IPA hardware. 
The * configuration data defines the number (or limits of the number) of various @@ -49,6 +50,24 @@ #define IPA_RESOURCE_GROUP_SRC_MAX 5 #define IPA_RESOURCE_GROUP_DST_MAX 5 +/** enum ipa_qsb_master_id - array index for IPA QSB configuration data */ +enum ipa_qsb_master_id { + IPA_QSB_MASTER_DDR, + IPA_QSB_MASTER_PCIE, +}; + +/** + * struct ipa_qsb_data - Qualcomm System Bus configuration data + * @max_writes: Maximum outstanding write requests for this master + * @max_reads: Maximum outstanding read requests for this master + * @max_reads_beats: Max outstanding read bytes in 8-byte "beats" (if non-zero) + */ +struct ipa_qsb_data { + u8 max_writes; + u8 max_reads; + u8 max_reads_beats; /* Not present for IPA v3.5.1 */ +}; + /** * struct gsi_channel_data - GSI channel configuration data * @tre_count: number of TREs in the channel ring @@ -57,10 +76,10 @@ * * A GSI channel is a unidirectional means of transferring data to or * from (and through) the IPA. A GSI channel has a ring buffer made - * up of "transfer elements" (TREs) that specify individual data transfers - * or IPA immediate commands. TREs are filled by the AP, and control - * is passed to IPA hardware by writing the last written element - * into a doorbell register. + * up of "transfer ring elements" (TREs) that specify individual data + * transfers or IPA immediate commands. TREs are filled by the AP, + * and control is passed to IPA hardware by writing the last written + * element into a doorbell register. * * When data transfer commands have completed the GSI generates an * event (a structure of data) and optionally signals the AP with @@ -79,12 +98,16 @@ struct gsi_channel_data { /** * struct ipa_endpoint_tx_data - configuration data for TX endpoints + * @seq_type: primary packet processing sequencer type + * @seq_rep_type: sequencer type for replication processing * @status_endpoint: endpoint to which status elements are sent * * The @status_endpoint is only valid if the endpoint's @status_enable * flag is set. */ struct ipa_endpoint_tx_data { + enum ipa_seq_type seq_type; + enum ipa_seq_rep_type seq_rep_type; enum ipa_endpoint_name status_endpoint; }; @@ -136,7 +159,6 @@ struct ipa_endpoint_config_data { /** * struct ipa_endpoint_data - IPA endpoint configuration data * @filter_support: whether endpoint supports filtering - * @seq_type: hardware sequencer type used for endpoint * @config: hardware configuration (see above) * * Not all endpoints support the IPA filtering capability. A filter table @@ -146,14 +168,10 @@ struct ipa_endpoint_config_data { * in the system, and indicate whether they support filtering. * * The remaining endpoint configuration data applies only to AP endpoints. - * The IPA hardware is implemented by sequencers, and the AP must program - * the type(s) of these sequencers at initialization time. The remaining - * endpoint configuration data is defined above. 
*/ struct ipa_endpoint_data { bool filter_support; - /* The next two are specified only for AP endpoints */ - enum ipa_seq_type seq_type; + /* Everything else is specified only for AP endpoints */ struct ipa_endpoint_config_data config; }; @@ -247,7 +265,7 @@ struct ipa_resource_data { * @imem_addr: physical address of IPA region within IMEM * @imem_size: size in bytes of IPA IMEM region * @smem_id: item identifier for IPA region within SMEM memory - * @imem_size: size in bytes of the IPA SMEM region + * @smem_size: size in bytes of the IPA SMEM region */ struct ipa_mem_data { u32 local_count; @@ -285,15 +303,19 @@ struct ipa_clock_data { /** * struct ipa_data - combined IPA/GSI configuration data * @version: IPA hardware version - * @endpoint_count: number of entries in endpoint_data array + * @qsb_count: number of entries in the qsb_data array + * @qsb_data: Qualcomm System Bus configuration data + * @endpoint_count: number of entries in the endpoint_data array * @endpoint_data: IPA endpoint/GSI channel data * @resource_data: IPA resource configuration data - * @mem_count: number of entries in mem_data array - * @mem_data: IPA-local shared memory region data + * @mem_data: IPA memory region data + * @clock_data: IPA clock and interconnect data */ struct ipa_data { enum ipa_version version; - u32 endpoint_count; /* # entries in endpoint_data[] */ + u32 qsb_count; /* number of entries in qsb_data[] */ + const struct ipa_qsb_data *qsb_data; + u32 endpoint_count; /* number of entries in endpoint_data[] */ const struct ipa_gsi_endpoint_data *endpoint_data; const struct ipa_resource_data *resource_data; const struct ipa_mem_data *mem_data; diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 7209ee3c3124..38e83cd467b5 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2021 Linaro Ltd. */ #include <linux/types.h> @@ -266,7 +266,7 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) * if (endpoint->toward_ipa) * assert(ipa->version != IPA_VERSION_4.2); * else - * assert(ipa->version == IPA_VERSION_3_5_1); + * assert(ipa->version < IPA_VERSION_4_0); */ mask = endpoint->toward_ipa ? 
ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; @@ -347,7 +347,7 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) { bool suspended; - if (endpoint->ipa->version != IPA_VERSION_3_5_1) + if (endpoint->ipa->version >= IPA_VERSION_4_0) return enable; /* For IPA v4.0+, no change made */ /* assert(!endpoint->toward_ipa); */ @@ -468,6 +468,20 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) iowrite32(val, endpoint->ipa->reg_virt + offset); } +static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint) +{ + u32 offset; + u32 val; + + if (!endpoint->toward_ipa) + return; + + offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id); + val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK); + + iowrite32(val, endpoint->ipa->reg_virt + offset); +} + /** * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register * @endpoint: Endpoint pointer @@ -515,7 +529,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) /* Where IPA will write the length */ offset = offsetof(struct rmnet_map_header, pkt_len); /* Upper bits are stored in HDR_EXT with IPA v4.5 */ - if (version == IPA_VERSION_4_5) + if (version >= IPA_VERSION_4_5) offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK); val |= HDR_OFST_PKT_SIZE_VALID_FMASK; @@ -562,7 +576,7 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) /* IPA v4.5 adds some most-significant bits to a few fields, * two of which are defined in the HDR (not HDR_EXT) register. */ - if (ipa->version == IPA_VERSION_4_5) { + if (ipa->version >= IPA_VERSION_4_5) { /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */ if (endpoint->data->qmap && !endpoint->toward_ipa) { u32 offset; @@ -776,7 +790,7 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds) if (!microseconds) return 0; /* Nothing to compute if timer period is 0 */ - if (ipa->version == IPA_VERSION_4_5) + if (ipa->version >= IPA_VERSION_4_5) return hol_block_timer_qtime_val(ipa, microseconds); /* Use 64 bit arithmetic to avoid overflow... */ @@ -884,18 +898,17 @@ static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint) static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) { u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id); - u32 seq_type = endpoint->seq_type; u32 val = 0; if (!endpoint->toward_ipa) return; /* Register not valid for RX endpoints */ - /* Sequencer type is made up of four nibbles */ - val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK); - val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK); - /* The second two apply to replicated packets */ - val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK); - val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK); + /* Low-order byte configures primary packet processing */ + val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK); + + /* Second byte configures replicated packet processing */ + val |= u32_encode_bits(endpoint->data->tx.seq_rep_type, + SEQ_REP_TYPE_FMASK); iowrite32(val, endpoint->ipa->reg_virt + offset); } @@ -1469,8 +1482,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) * is active, we need to handle things specially to recover. * All other cases just need to reset the underlying GSI channel. 
*/ - special = ipa->version == IPA_VERSION_3_5_1 && - !endpoint->toward_ipa && + special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && endpoint->data->aggregation; if (special && ipa_endpoint_aggr_active(endpoint)) ret = ipa_endpoint_reset_rx_aggr(endpoint); @@ -1490,6 +1502,7 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint) else (void)ipa_endpoint_program_suspend(endpoint, false); ipa_endpoint_init_cfg(endpoint); + ipa_endpoint_init_nat(endpoint); ipa_endpoint_init_hdr(endpoint); ipa_endpoint_init_hdr_ext(endpoint); ipa_endpoint_init_hdr_metadata_mask(endpoint); @@ -1568,8 +1581,10 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) (void)ipa_endpoint_program_suspend(endpoint, true); } - /* IPA v3.5.1 doesn't use channel stop for suspend */ - stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; + /* Starting with IPA v4.0, endpoints are suspended by stopping the + * underlying GSI channel rather than using endpoint suspend mode. + */ + stop_channel = endpoint->ipa->version >= IPA_VERSION_4_0; ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel); if (ret) dev_err(dev, "error %d suspending channel %u\n", ret, @@ -1589,8 +1604,10 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) if (!endpoint->toward_ipa) (void)ipa_endpoint_program_suspend(endpoint, false); - /* IPA v3.5.1 doesn't use channel start for resume */ - start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; + /* Starting with IPA v4.0, the underlying GSI channel must be + * restarted for resume. + */ + start_channel = endpoint->ipa->version >= IPA_VERSION_4_0; ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel); if (ret) dev_err(dev, "error %d resuming channel %u\n", ret, @@ -1766,7 +1783,6 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, endpoint->ipa = ipa; endpoint->ee_id = data->ee_id; - endpoint->seq_type = data->endpoint.seq_type; endpoint->channel_id = data->channel_id; endpoint->endpoint_id = data->endpoint_id; endpoint->toward_ipa = data->toward_ipa; diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 881ecc27bd6e..c6c55ea35394 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -46,7 +46,6 @@ enum ipa_endpoint_name { */ struct ipa_endpoint { struct ipa *ipa; - enum ipa_seq_type seq_type; enum gsi_ee_id ee_id; u32 channel_id; u32 endpoint_id; diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 97c1b55405cb..ba1bfc30210a 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -227,8 +227,8 @@ static void ipa_hardware_config_comp(struct ipa *ipa) { u32 val; - /* Nothing to configure for IPA v3.5.1 */ - if (ipa->version == IPA_VERSION_3_5_1) + /* Nothing to configure prior to IPA v4.0 */ + if (ipa->version < IPA_VERSION_4_0) return; val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET); @@ -249,56 +249,59 @@ static void ipa_hardware_config_comp(struct ipa *ipa) iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET); } -/* Configure DDR and PCIe max read/write QSB values */ -static void ipa_hardware_config_qsb(struct ipa *ipa) +/* Configure DDR and (possibly) PCIe max read/write QSB values */ +static void +ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data) { - enum ipa_version version = ipa->version; - u32 max0; - u32 max1; + const struct ipa_qsb_data *data0; + const struct ipa_qsb_data *data1; u32 val; - /* QMB_0 represents DDR; QMB_1 represents PCIe */ - val = 
u32_encode_bits(8, GEN_QMB_0_MAX_WRITES_FMASK); - switch (version) { - case IPA_VERSION_4_2: - max1 = 0; /* PCIe not present */ - break; - case IPA_VERSION_4_5: - max1 = 8; - break; - default: - max1 = 4; - break; - } - val |= u32_encode_bits(max1, GEN_QMB_1_MAX_WRITES_FMASK); + /* assert(data->qsb_count > 0); */ + /* assert(data->qsb_count < 3); */ + + /* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */ + data0 = &data->qsb_data[IPA_QSB_MASTER_DDR]; + if (data->qsb_count > 1) + data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE]; + + /* Max outstanding write accesses for QSB masters */ + val = u32_encode_bits(data0->max_writes, GEN_QMB_0_MAX_WRITES_FMASK); + if (data->qsb_count > 1) + val |= u32_encode_bits(data1->max_writes, + GEN_QMB_1_MAX_WRITES_FMASK); iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET); - max1 = 12; - switch (version) { - case IPA_VERSION_3_5_1: - max0 = 8; - break; - case IPA_VERSION_4_0: - case IPA_VERSION_4_1: - max0 = 12; - break; - case IPA_VERSION_4_2: - max0 = 12; - max1 = 0; /* PCIe not present */ - break; - case IPA_VERSION_4_5: - max0 = 0; /* No limit (hardware maximum) */ - break; - } - val = u32_encode_bits(max0, GEN_QMB_0_MAX_READS_FMASK); - val |= u32_encode_bits(max1, GEN_QMB_1_MAX_READS_FMASK); - if (version != IPA_VERSION_3_5_1) { - /* GEN_QMB_0_MAX_READS_BEATS is 0 */ - /* GEN_QMB_1_MAX_READS_BEATS is 0 */ + /* Max outstanding read accesses for QSB masters */ + val = u32_encode_bits(data0->max_reads, GEN_QMB_0_MAX_READS_FMASK); + if (ipa->version >= IPA_VERSION_4_0) + val |= u32_encode_bits(data0->max_reads_beats, + GEN_QMB_0_MAX_READS_BEATS_FMASK); + if (data->qsb_count > 1) { + val |= u32_encode_bits(data1->max_reads, + GEN_QMB_1_MAX_READS_FMASK); + if (ipa->version >= IPA_VERSION_4_0) + val |= u32_encode_bits(data1->max_reads_beats, + GEN_QMB_1_MAX_READS_BEATS_FMASK); } iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET); } +/* The internal inactivity timer clock is used for the aggregation timer */ +#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */ + +/* Compute the value to use in the COUNTER_CFG register AGGR_GRANULARITY + * field to represent the given number of microseconds. The value is one + * less than the number of timer ticks in the requested period. 0 is not + * a valid granularity value. + */ +static u32 ipa_aggr_granularity_val(u32 usec) +{ + /* assert(usec != 0); */ + + return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1; +} + /* IPA uses unified Qtime starting at IPA v4.5, implementing various * timestamps and timers independent of the IPA core clock rate. 
The * Qtimer is based on a 56-bit timestamp incremented at each tick of @@ -385,8 +388,9 @@ static void ipa_hardware_dcd_deconfig(struct ipa *ipa) /** * ipa_hardware_config() - Primitive hardware initialization * @ipa: IPA pointer + * @data: IPA configuration data */ -static void ipa_hardware_config(struct ipa *ipa) +static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data) { enum ipa_version version = ipa->version; u32 granularity; @@ -399,7 +403,7 @@ static void ipa_hardware_config(struct ipa *ipa) } /* Implement some hardware workarounds */ - if (version != IPA_VERSION_3_5_1 && version < IPA_VERSION_4_5) { + if (version >= IPA_VERSION_4_0 && version < IPA_VERSION_4_5) { /* Enable open global clocks (not needed for IPA v4.5) */ val = GLOBAL_FMASK; val |= GLOBAL_2X_CLK_FMASK; @@ -414,7 +418,7 @@ static void ipa_hardware_config(struct ipa *ipa) ipa_hardware_config_comp(ipa); /* Configure system bus limits */ - ipa_hardware_config_qsb(ipa); + ipa_hardware_config_qsb(ipa, data); if (version < IPA_VERSION_4_5) { /* Configure aggregation timer granularity */ @@ -610,7 +614,7 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data) */ ipa_clock_get(ipa); - ipa_hardware_config(ipa); + ipa_hardware_config(ipa, data); ret = ipa_endpoint_config(ipa); if (ret) @@ -735,8 +739,14 @@ MODULE_DEVICE_TABLE(of, ipa_match); static void ipa_validate_build(void) { #ifdef IPA_VALIDATE - /* We assume we're working on 64-bit hardware */ - BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT)); + /* At one time we assumed a 64-bit build, allowing some do_div() + * calls to be replaced by simple division or modulo operations. + * We currently only perform divide and modulo operations on u32, + * u16, or size_t objects, and of those only size_t has any chance + * of being a 64-bit value. (It should be guaranteed 32 bits wide + * on a 32-bit build, but there is no harm in verifying that.) + */ + BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT) && sizeof(size_t) != 4); /* Code assumes the EE ID for the AP is 0 (zeroed structure field) */ BUILD_BUG_ON(GSI_EE_AP != 0); diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c index f25029b9ec85..32907dde5dc6 100644 --- a/drivers/net/ipa/ipa_mem.c +++ b/drivers/net/ipa/ipa_mem.c @@ -61,6 +61,7 @@ int ipa_mem_setup(struct ipa *ipa) struct gsi_trans *trans; u32 offset; u16 size; + u32 val; /* Get a transaction to define the header memory region and to zero * the processing context and modem memory regions. @@ -89,8 +90,9 @@ int ipa_mem_setup(struct ipa *ipa) gsi_trans_commit_wait(trans); /* Tell the hardware where the processing context area is located */ - iowrite32(ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset, - ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET); + offset = ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset; + val = proc_cntxt_base_addr_encoded(ipa->version, offset); + iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET); return 0; } diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h index f99180f84f0d..f82e8939622b 100644 --- a/drivers/net/ipa/ipa_mem.h +++ b/drivers/net/ipa/ipa_mem.h @@ -28,6 +28,7 @@ struct ipa_mem_data; * The set of memory regions is defined in configuration data. 
They are * subject to these constraints: * - a zero offset and zero size represents and undefined region + * - a region's size does not include space for its "canary" values * - a region's offset is defined to be *past* all "canary" values * - offset must be large enough to account for all canaries * - a region's size may be zero, but may still have canaries @@ -56,9 +57,16 @@ enum ipa_mem_id { IPA_MEM_AP_HEADER, /* 0 canaries */ IPA_MEM_MODEM_PROC_CTX, /* 2 canaries */ IPA_MEM_AP_PROC_CTX, /* 0 canaries */ + IPA_MEM_NAT_TABLE, /* 4 canaries (IPA v4.5 and above) */ IPA_MEM_PDN_CONFIG, /* 2 canaries (IPA v4.0 and above) */ - IPA_MEM_STATS_QUOTA, /* 2 canaries (IPA v4.0 and above) */ + IPA_MEM_STATS_QUOTA_MODEM, /* 2 canaries (IPA v4.0 and above) */ + IPA_MEM_STATS_QUOTA_AP, /* 0 canaries (IPA v4.0 and above) */ IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0 and above) */ + IPA_MEM_STATS_V4_FILTER, /* 0 canaries (IPA v4.0-v4.2) */ + IPA_MEM_STATS_V6_FILTER, /* 0 canaries (IPA v4.0-v4.2) */ + IPA_MEM_STATS_V4_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */ + IPA_MEM_STATS_V6_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */ + IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5 and above) */ IPA_MEM_STATS_DROP, /* 0 canaries (IPA v4.0 and above) */ IPA_MEM_MODEM, /* 0 canaries */ IPA_MEM_UC_EVENT_RING, /* 1 canary */ diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index 2fc64483f275..ccdb4a6a4c75 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -249,6 +249,7 @@ static const struct qmi_msg_handler ipa_server_msg_handlers[] = { .decoded_size = IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ, .fn = ipa_server_driver_init_complete, }, + { }, }; /* Handle an INIT_DRIVER response message from the modem. */ @@ -269,6 +270,7 @@ static const struct qmi_msg_handler ipa_client_msg_handlers[] = { .decoded_size = IPA_QMI_INIT_DRIVER_RSP_SZ, .fn = ipa_client_init_driver, }, + { }, }; /* Return a pointer to an init modem driver request structure, which contains @@ -377,8 +379,8 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) /* None of the stats fields are valid (IPA v4.0 and above) */ - if (ipa->version != IPA_VERSION_3_5_1) { - mem = &ipa->mem[IPA_MEM_STATS_QUOTA]; + if (ipa->version >= IPA_VERSION_4_0) { + mem = &ipa->mem[IPA_MEM_STATS_QUOTA_MODEM]; if (mem->size) { req.hw_stats_quota_base_addr_valid = 1; req.hw_stats_quota_base_addr = diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c index 73413371e3d3..6838e8065072 100644 --- a/drivers/net/ipa/ipa_qmi_msg.c +++ b/drivers/net/ipa/ipa_qmi_msg.c @@ -56,7 +56,7 @@ struct qmi_elem_info ipa_indication_register_req_ei[] = { .elem_size = sizeof_field(struct ipa_indication_register_req, ipa_mhi_ready_ind_valid), - .tlv_type = 0x11, + .tlv_type = 0x12, .offset = offsetof(struct ipa_indication_register_req, ipa_mhi_ready_ind_valid), }, @@ -66,11 +66,51 @@ struct qmi_elem_info ipa_indication_register_req_ei[] = { .elem_size = sizeof_field(struct ipa_indication_register_req, ipa_mhi_ready_ind), - .tlv_type = 0x11, + .tlv_type = 0x12, .offset = offsetof(struct ipa_indication_register_req, ipa_mhi_ready_ind), }, { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = + sizeof_field(struct ipa_indication_register_req, + endpoint_desc_ind_valid), + .tlv_type = 0x13, + .offset = offsetof(struct ipa_indication_register_req, + endpoint_desc_ind_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = + sizeof_field(struct ipa_indication_register_req, + endpoint_desc_ind), + .tlv_type = 0x13, + 
.offset = offsetof(struct ipa_indication_register_req, + endpoint_desc_ind), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = + sizeof_field(struct ipa_indication_register_req, + bw_change_ind_valid), + .tlv_type = 0x14, + .offset = offsetof(struct ipa_indication_register_req, + bw_change_ind_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = + sizeof_field(struct ipa_indication_register_req, + bw_change_ind), + .tlv_type = 0x14, + .offset = offsetof(struct ipa_indication_register_req, + bw_change_ind), + }, + { .data_type = QMI_EOTI, }, }; @@ -530,7 +570,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { hw_stats_quota_base_addr_valid), }, { - .data_type = QMI_SIGNED_4_BYTE_ENUM, + .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof_field(struct ipa_init_modem_driver_req, @@ -545,17 +585,17 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .elem_size = sizeof_field(struct ipa_init_modem_driver_req, hw_stats_quota_size_valid), - .tlv_type = 0x1f, + .tlv_type = 0x20, .offset = offsetof(struct ipa_init_modem_driver_req, hw_stats_quota_size_valid), }, { - .data_type = QMI_SIGNED_4_BYTE_ENUM, + .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof_field(struct ipa_init_modem_driver_req, hw_stats_quota_size), - .tlv_type = 0x1f, + .tlv_type = 0x20, .offset = offsetof(struct ipa_init_modem_driver_req, hw_stats_quota_size), }, @@ -564,18 +604,38 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .elem_len = 1, .elem_size = sizeof_field(struct ipa_init_modem_driver_req, + hw_stats_drop_base_addr_valid), + .tlv_type = 0x21, + .offset = offsetof(struct ipa_init_modem_driver_req, + hw_stats_drop_base_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = + sizeof_field(struct ipa_init_modem_driver_req, + hw_stats_drop_base_addr), + .tlv_type = 0x21, + .offset = offsetof(struct ipa_init_modem_driver_req, + hw_stats_drop_base_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = + sizeof_field(struct ipa_init_modem_driver_req, hw_stats_drop_size_valid), - .tlv_type = 0x1f, + .tlv_type = 0x22, .offset = offsetof(struct ipa_init_modem_driver_req, hw_stats_drop_size_valid), }, { - .data_type = QMI_SIGNED_4_BYTE_ENUM, + .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof_field(struct ipa_init_modem_driver_req, hw_stats_drop_size), - .tlv_type = 0x1f, + .tlv_type = 0x22, .offset = offsetof(struct ipa_init_modem_driver_req, hw_stats_drop_size), }, diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h index 12b6621f4b0e..3233d145fd87 100644 --- a/drivers/net/ipa/ipa_qmi_msg.h +++ b/drivers/net/ipa/ipa_qmi_msg.h @@ -24,7 +24,7 @@ * information for each field. The qmi_send_*() interfaces require * the message size to be provided. 
*/ -#define IPA_QMI_INDICATION_REGISTER_REQ_SZ 12 /* -> server handle */ +#define IPA_QMI_INDICATION_REGISTER_REQ_SZ 20 /* -> server handle */ #define IPA_QMI_INDICATION_REGISTER_RSP_SZ 7 /* <- server handle */ #define IPA_QMI_INIT_DRIVER_REQ_SZ 162 /* client handle -> */ #define IPA_QMI_INIT_DRIVER_RSP_SZ 25 /* client handle <- */ @@ -44,6 +44,10 @@ struct ipa_indication_register_req { u8 data_usage_quota_reached; u8 ipa_mhi_ready_ind_valid; u8 ipa_mhi_ready_ind; + u8 endpoint_desc_ind_valid; + u8 endpoint_desc_ind; + u8 bw_change_ind_valid; + u8 bw_change_ind; }; /* The response to a IPA_QMI_INDICATION_REGISTER request consists only of diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h index 732e691e9aa6..86fe2978e810 100644 --- a/drivers/net/ipa/ipa_reg.h +++ b/drivers/net/ipa/ipa_reg.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. + * Copyright (C) 2018-2021 Linaro Ltd. */ #ifndef _IPA_REG_H_ #define _IPA_REG_H_ @@ -217,8 +217,18 @@ static inline u32 ipa_reg_bcr_val(enum ipa_version version) return 0x00000000; } -/* The value of the next register must be a multiple of 8 */ -#define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET 0x000001e8 +/* The value of the next register must be a multiple of 8 (bottom 3 bits 0) */ +#define IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET 0x000001e8 + +/* Encoded value for LOCAL_PKT_PROC_CNTXT register BASE_ADDR field */ +static inline u32 proc_cntxt_base_addr_encoded(enum ipa_version version, + u32 addr) +{ + if (version < IPA_VERSION_4_5) + return u32_encode_bits(addr, GENMASK(16, 0)); + + return u32_encode_bits(addr, GENMASK(17, 0)); +} /* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */ #define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec @@ -227,18 +237,6 @@ static inline u32 ipa_reg_bcr_val(enum ipa_version version) #define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0 #define AGGR_GRANULARITY_FMASK GENMASK(8, 4) -/* The internal inactivity timer clock is used for the aggregation timer */ -#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */ - -/* Compute the value to use in the AGGR_GRANULARITY field representing the - * given number of microseconds. The value is one less than the number of - * timer ticks in the requested period. 0 not a valid granularity value. 
- */ -static inline u32 ipa_aggr_granularity_val(u32 usec) -{ - return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1; -} - /* The next register is not present for IPA v4.5 */ #define IPA_REG_TX_CFG_OFFSET 0x000001fc /* The first three fields are present for IPA v3.5.1 only */ @@ -388,6 +386,18 @@ enum ipa_cs_offload_en { IPA_CS_OFFLOAD_DL = 0x2, }; +/* Valid only for TX (IPA consumer) endpoints */ +#define IPA_REG_ENDP_INIT_NAT_N_OFFSET(ep) \ + (0x0000080c + 0x0070 * (ep)) +#define NAT_EN_FMASK GENMASK(1, 0) + +/** enum ipa_nat_en - ENDP_INIT_NAT register NAT_EN field value */ +enum ipa_nat_en { + IPA_NAT_BYPASS = 0x0, + IPA_NAT_SRC = 0x1, + IPA_NAT_DST = 0x2, +}; + #define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \ (0x00000810 + 0x0070 * (ep)) #define HDR_LEN_FMASK GENMASK(5, 0) @@ -580,28 +590,40 @@ static inline u32 rsrc_grp_encoded(enum ipa_version version, u32 rsrc_grp) /* Valid only for TX (IPA consumer) endpoints */ #define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \ (0x0000083c + 0x0070 * (txep)) -#define HPS_SEQ_TYPE_FMASK GENMASK(3, 0) -#define DPS_SEQ_TYPE_FMASK GENMASK(7, 4) -#define HPS_REP_SEQ_TYPE_FMASK GENMASK(11, 8) -#define DPS_REP_SEQ_TYPE_FMASK GENMASK(15, 12) +#define SEQ_TYPE_FMASK GENMASK(7, 0) +#define SEQ_REP_TYPE_FMASK GENMASK(15, 8) /** - * enum ipa_seq_type - HPS and DPS sequencer type fields in ENDP_INIT_SEQ_N - * @IPA_SEQ_DMA_ONLY: only DMA is performed - * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP: - * second packet processing pass + no decipher + microcontroller - * @IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP: - * packet processing + no decipher + no uCP + HPS REP DMA parser - * @IPA_SEQ_INVALID: invalid sequencer type + * enum ipa_seq_type - HPS and DPS sequencer type + * + * The low-order byte of the sequencer type register defines the number of + * passes a packet takes through the IPA pipeline. The last pass through can + * optionally skip the microprocessor. Deciphering is optional for all types; + * if enabled, an additional mask (two bits) is added to the type value. * - * The values defined here are broken into 4-bit nibbles that are written - * into fields of the ENDP_INIT_SEQ registers. + * Note: not all combinations of ipa_seq_type and ipa_seq_rep_type are + * supported (or meaningful). */ +#define IPA_SEQ_DECIPHER 0x11 enum ipa_seq_type { - IPA_SEQ_DMA_ONLY = 0x0000, - IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004, - IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806, - IPA_SEQ_INVALID = 0xffff, + IPA_SEQ_DMA = 0x00, + IPA_SEQ_1_PASS = 0x02, + IPA_SEQ_2_PASS_SKIP_LAST_UC = 0x04, + IPA_SEQ_1_PASS_SKIP_LAST_UC = 0x06, + IPA_SEQ_2_PASS = 0x0a, + IPA_SEQ_3_PASS_SKIP_LAST_UC = 0x0c, +}; + +/** + * enum ipa_seq_rep_type - replicated packet sequencer type + * + * This goes in the second byte of the endpoint sequencer type register. + * + * Note: not all combinations of ipa_seq_type and ipa_seq_rep_type are + * supported (or meaningful). + */ +enum ipa_seq_rep_type { + IPA_SEQ_REP_DMA_PARSER = 0x08, }; #define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \ diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index baaab3dd0e63..4236a50ff03a 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -118,21 +118,15 @@ /* Check things that can be validated at build time. */ static void ipa_table_validate_build(void) { - /* IPA hardware accesses memory 128 bytes at a time. Addresses - * referred to by entries in filter and route tables must be - * aligned on 128-byte byte boundaries. 
The only rule address - * ever use is the "zero rule", and it's aligned at the base - * of a coherent DMA allocation. + /* Filter and route tables contain DMA addresses that refer + * to filter or route rules. But the size of a table entry + * is 64 bits regardless of what the size of an AP DMA address + * is. A fixed constant defines the size of an entry, and + * code in ipa_table_init() uses a pointer to __le64 to + * initialize tables. */ - BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN); - - /* Filter and route tables contain DMA addresses that refer to - * filter or route rules. We use a fixed constant to represent - * the size of either type of table entry. Code in ipa_table_init() - * uses a pointer to __le64 to initialize table entriews. - */ - BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t)); - BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64)); + BUILD_BUG_ON(sizeof(dma_addr_t) > IPA_TABLE_ENTRY_SIZE); + BUILD_BUG_ON(sizeof(__le64) != IPA_TABLE_ENTRY_SIZE); /* A "zero rule" is used to represent no filtering or no routing. * It is a 64-bit block of zeroed memory. Code in ipa_table_init() @@ -239,11 +233,6 @@ static void ipa_table_validate_build(void) #endif /* !IPA_VALIDATE */ -bool ipa_table_hash_support(struct ipa *ipa) -{ - return ipa->version != IPA_VERSION_4_2; -} - /* Zero entry count means no table, so just return a 0 address */ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count) { @@ -668,6 +657,21 @@ int ipa_table_init(struct ipa *ipa) if (!virt) return -ENOMEM; + /* We put the "zero rule" at the base of our table area. The IPA + * hardware requires route and filter table rules to be aligned + * on a 128-byte boundary. As long as the alignment constraint + * is a power of 2, we can check alignment using just the bottom + * 32 bits for a DMA address of any size. 
+ */ + BUILD_BUG_ON(!is_power_of_2(IPA_TABLE_ALIGN)); + if (lower_32_bits(addr) % IPA_TABLE_ALIGN) { + dev_err(dev, "table address %pad not %u-byte aligned\n", + &addr, IPA_TABLE_ALIGN); + dma_free_coherent(dev, size, virt, addr); + + return -ERANGE; + } + ipa->table_virt = virt; ipa->table_addr = addr; diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h index 1a68d20f19d6..889c2e93b122 100644 --- a/drivers/net/ipa/ipa_table.h +++ b/drivers/net/ipa/ipa_table.h @@ -55,7 +55,10 @@ static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask) * ipa_table_hash_support() - Return true if hashed tables are supported * @ipa: IPA pointer */ -bool ipa_table_hash_support(struct ipa *ipa); +static inline bool ipa_table_hash_support(struct ipa *ipa) +{ + return ipa->version != IPA_VERSION_4_2; +} /** * ipa_table_reset() - Reset filter and route tables entries to "none" diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h index 2944e2a89023..ee2b3d02f3cd 100644 --- a/drivers/net/ipa/ipa_version.h +++ b/drivers/net/ipa/ipa_version.h @@ -8,17 +8,32 @@ /** * enum ipa_version + * @IPA_VERSION_3_0: IPA version 3.0/GSI version 1.0 + * @IPA_VERSION_3_1: IPA version 3.1/GSI version 1.1 + * @IPA_VERSION_3_5: IPA version 3.5/GSI version 1.2 + * @IPA_VERSION_3_5_1: IPA version 3.5.1/GSI version 1.3 + * @IPA_VERSION_4_0: IPA version 4.0/GSI version 2.0 + * @IPA_VERSION_4_1: IPA version 4.1/GSI version 2.0 + * @IPA_VERSION_4_2: IPA version 4.2/GSI version 2.2 + * @IPA_VERSION_4_5: IPA version 4.5/GSI version 2.5 + * @IPA_VERSION_4_7: IPA version 4.7/GSI version 2.7 + * @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9 + * @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1) * * Defines the version of IPA (and GSI) hardware present on the platform. - * It seems this might be better defined elsewhere, but having it here gets - * it where it's needed. */ enum ipa_version { - IPA_VERSION_3_5_1, /* GSI version 1.3.0 */ - IPA_VERSION_4_0, /* GSI version 2.0 */ - IPA_VERSION_4_1, /* GSI version 2.1 */ - IPA_VERSION_4_2, /* GSI version 2.2 */ - IPA_VERSION_4_5, /* GSI version 2.5 */ + IPA_VERSION_3_0, + IPA_VERSION_3_1, + IPA_VERSION_3_5, + IPA_VERSION_3_5_1, + IPA_VERSION_4_0, + IPA_VERSION_4_1, + IPA_VERSION_4_2, + IPA_VERSION_4_5, + IPA_VERSION_4_7, + IPA_VERSION_4_9, + IPA_VERSION_4_11, }; #endif /* _IPA_VERSION_H_ */ diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig index a10cc460d7cf..d06e06f5e31a 100644 --- a/drivers/net/mdio/Kconfig +++ b/drivers/net/mdio/Kconfig @@ -200,6 +200,17 @@ config MDIO_BUS_MUX_MESON_G12A the amlogic g12a SoC. The multiplexers connects either the external or the internal MDIO bus to the parent bus. +config MDIO_BUS_MUX_BCM6368 + tristate "Broadcom BCM6368 MDIO bus multiplexers" + depends on OF && OF_MDIO && (BMIPS_GENERIC || COMPILE_TEST) + select MDIO_BUS_MUX + default BMIPS_GENERIC + help + This module provides a driver for MDIO bus multiplexers found in + BCM6368 based Broadcom SoCs. This multiplexer connects one of several + child MDIO buses to a parent bus. Buses can be internal as well as + external, and the selection logic lies inside the same multiplexer.
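For orientation: MDIO mux drivers such as the BCM6368 one enabled by the entry above (its full source appears later in this diff) all plug into the mdio-mux core the same way. The driver registers a parent MII bus, then hands the core a callback that reprograms the selection hardware whenever a different child bus has to be reached. Below is a minimal sketch of that pattern only; the example_* names and the single selection register are invented for illustration, and nothing beyond the mdio_mux_init() signature used elsewhere in this diff is assumed.

/* Hypothetical mdio-mux consumer skeleton; illustrative only. */
#include <linux/io.h>
#include <linux/mdio-mux.h>

struct example_mux {
	void *mux_handle;	/* opaque handle filled in by mdio_mux_init() */
	void __iomem *base;	/* bus-selection register (assumed layout) */
};

/* Called by the mdio-mux core before it touches a child bus; program
 * the hardware so that @desired_child is routed to the parent bus.
 */
static int example_mux_switch_fn(int current_child, int desired_child,
				 void *data)
{
	struct example_mux *mux = data;

	writel(desired_child, mux->base);	/* register layout is assumed */

	return 0;
}

/* From probe, once the parent mii_bus has been registered:
 *
 *	rc = mdio_mux_init(dev, dev->of_node, example_mux_switch_fn,
 *			   &mux->mux_handle, mux, parent_mii_bus);
 */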
+ config MDIO_BUS_MUX_BCM_IPROC tristate "Broadcom iProc based MDIO bus multiplexers" depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST) diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile index 5c498dde463f..c3ec0ef989df 100644 --- a/drivers/net/mdio/Makefile +++ b/drivers/net/mdio/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o +obj-$(CONFIG_MDIO_BUS_MUX_BCM6368) += mdio-mux-bcm6368.o obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c index fbd36891ee64..5d171e7f118d 100644 --- a/drivers/net/mdio/mdio-bcm-unimac.c +++ b/drivers/net/mdio/mdio-bcm-unimac.c @@ -5,20 +5,18 @@ * Copyright (C) 2014-2017 Broadcom */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> #include <linux/kernel.h> -#include <linux/phy.h> -#include <linux/platform_device.h> -#include <linux/sched.h> #include <linux/module.h> -#include <linux/io.h> -#include <linux/delay.h> -#include <linux/clk.h> - #include <linux/of.h> -#include <linux/of_platform.h> #include <linux/of_mdio.h> - +#include <linux/of_platform.h> +#include <linux/phy.h> #include <linux/platform_data/mdio-bcm-unimac.h> +#include <linux/platform_device.h> +#include <linux/sched.h> #define MDIO_CMD 0x00 #define MDIO_START_BUSY (1 << 29) diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c index d3915f831854..0f457c436335 100644 --- a/drivers/net/mdio/mdio-bitbang.c +++ b/drivers/net/mdio/mdio-bitbang.c @@ -14,10 +14,10 @@ * Vitaly Bordug <vbordug@ru.mvista.com> */ -#include <linux/module.h> +#include <linux/delay.h> #include <linux/mdio-bitbang.h> +#include <linux/module.h> #include <linux/types.h> -#include <linux/delay.h> #define MDIO_READ 2 #define MDIO_WRITE 1 diff --git a/drivers/net/mdio/mdio-cavium.c b/drivers/net/mdio/mdio-cavium.c index 1afd6fc1a351..95ce274c1be1 100644 --- a/drivers/net/mdio/mdio-cavium.c +++ b/drivers/net/mdio/mdio-cavium.c @@ -4,9 +4,9 @@ */ #include <linux/delay.h> +#include <linux/io.h> #include <linux/module.h> #include <linux/phy.h> -#include <linux/io.h> #include "mdio-cavium.h" diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c index 1b00235d7dc5..56c8f914f893 100644 --- a/drivers/net/mdio/mdio-gpio.c +++ b/drivers/net/mdio/mdio-gpio.c @@ -17,15 +17,15 @@ * Vitaly Bordug <vbordug@ru.mvista.com> */ -#include <linux/module.h> -#include <linux/slab.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> -#include <linux/platform_device.h> -#include <linux/platform_data/mdio-gpio.h> #include <linux/mdio-bitbang.h> #include <linux/mdio-gpio.h> -#include <linux/gpio/consumer.h> +#include <linux/module.h> #include <linux/of_mdio.h> +#include <linux/platform_data/mdio-gpio.h> +#include <linux/platform_device.h> +#include <linux/slab.h> struct mdio_gpio_info { struct mdiobb_ctrl ctrl; diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index 25c25ea6da66..9cd71d896963 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -3,10 +3,10 @@ /* Copyright (c) 2020 Sartura Ltd. 
*/ #include <linux/delay.h> -#include <linux/kernel.h> -#include <linux/module.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/phy.h> diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c index 1bd18857e1c5..8fe8f0119fc1 100644 --- a/drivers/net/mdio/mdio-ipq8064.c +++ b/drivers/net/mdio/mdio-ipq8064.c @@ -7,12 +7,12 @@ #include <linux/delay.h> #include <linux/kernel.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> -#include <linux/regmap.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> -#include <linux/mfd/syscon.h> +#include <linux/regmap.h> /* MII address register definitions */ #define MII_ADDR_REG_ADDR 0x10 diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index 11f583fd4611..b36e5ea04ddf 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -6,14 +6,14 @@ * Copyright (c) 2017 Microsemi Corporation */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/phy.h> -#include <linux/platform_device.h> #include <linux/bitops.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/of_mdio.h> +#include <linux/phy.h> +#include <linux/platform_device.h> #define MSCC_MIIM_REG_STATUS 0x0 #define MSCC_MIIM_STATUS_STAT_PENDING BIT(2) diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c index 42fb5f166136..641cfa41f492 100644 --- a/drivers/net/mdio/mdio-mux-bcm-iproc.c +++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c @@ -3,14 +3,14 @@ * Copyright 2016 Broadcom */ #include <linux/clk.h> -#include <linux/platform_device.h> +#include <linux/delay.h> #include <linux/device.h> -#include <linux/of_mdio.h> +#include <linux/iopoll.h> +#include <linux/mdio-mux.h> #include <linux/module.h> +#include <linux/of_mdio.h> #include <linux/phy.h> -#include <linux/mdio-mux.h> -#include <linux/delay.h> -#include <linux/iopoll.h> +#include <linux/platform_device.h> #define MDIO_RATE_ADJ_EXT_OFFSET 0x000 #define MDIO_RATE_ADJ_INT_OFFSET 0x004 diff --git a/drivers/net/mdio/mdio-mux-bcm6368.c b/drivers/net/mdio/mdio-mux-bcm6368.c new file mode 100644 index 000000000000..6dcbf987d61b --- /dev/null +++ b/drivers/net/mdio/mdio-mux-bcm6368.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Broadcom BCM6368 mdiomux bus controller driver + * + * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com> + */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mdio-mux.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/of_mdio.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/sched.h> + +#define MDIOC_REG 0x0 +#define MDIOC_EXT_MASK BIT(16) +#define MDIOC_REG_SHIFT 20 +#define MDIOC_PHYID_SHIFT 25 +#define MDIOC_RD_MASK BIT(30) +#define MDIOC_WR_MASK BIT(31) + +#define MDIOD_REG 0x4 + +struct bcm6368_mdiomux_desc { + void *mux_handle; + void __iomem *base; + struct device *dev; + struct mii_bus *mii_bus; + int ext_phy; +}; + +static int bcm6368_mdiomux_read(struct mii_bus *bus, int phy_id, int loc) +{ + struct bcm6368_mdiomux_desc *md = bus->priv; + uint32_t reg; + int ret; + + __raw_writel(0, md->base + MDIOC_REG); + + reg = MDIOC_RD_MASK | + (phy_id << MDIOC_PHYID_SHIFT) | + (loc << MDIOC_REG_SHIFT); + if 
(md->ext_phy) + reg |= MDIOC_EXT_MASK; + + __raw_writel(reg, md->base + MDIOC_REG); + udelay(50); + ret = __raw_readw(md->base + MDIOD_REG); + + return ret; +} + +static int bcm6368_mdiomux_write(struct mii_bus *bus, int phy_id, int loc, + uint16_t val) +{ + struct bcm6368_mdiomux_desc *md = bus->priv; + uint32_t reg; + + __raw_writel(0, md->base + MDIOC_REG); + + reg = MDIOC_WR_MASK | + (phy_id << MDIOC_PHYID_SHIFT) | + (loc << MDIOC_REG_SHIFT); + if (md->ext_phy) + reg |= MDIOC_EXT_MASK; + reg |= val; + + __raw_writel(reg, md->base + MDIOC_REG); + udelay(50); + + return 0; +} + +static int bcm6368_mdiomux_switch_fn(int current_child, int desired_child, + void *data) +{ + struct bcm6368_mdiomux_desc *md = data; + + md->ext_phy = desired_child; + + return 0; +} + +static int bcm6368_mdiomux_probe(struct platform_device *pdev) +{ + struct bcm6368_mdiomux_desc *md; + struct mii_bus *bus; + struct resource *res; + int rc; + + md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL); + if (!md) + return -ENOMEM; + md->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + /* + * Just ioremap, as this MDIO block is usually integrated into an + * Ethernet MAC controller register range + */ + md->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!md->base) { + dev_err(&pdev->dev, "failed to ioremap register\n"); + return -ENOMEM; + } + + md->mii_bus = devm_mdiobus_alloc(&pdev->dev); + if (!md->mii_bus) { + dev_err(&pdev->dev, "mdiomux bus alloc failed\n"); + return -ENOMEM; + } + + bus = md->mii_bus; + bus->priv = md; + bus->name = "BCM6368 MDIO mux bus"; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); + bus->parent = &pdev->dev; + bus->read = bcm6368_mdiomux_read; + bus->write = bcm6368_mdiomux_write; + bus->phy_mask = 0x3f; + bus->dev.of_node = pdev->dev.of_node; + + rc = mdiobus_register(bus); + if (rc) { + dev_err(&pdev->dev, "mdiomux registration failed\n"); + return rc; + } + + platform_set_drvdata(pdev, md); + + rc = mdio_mux_init(md->dev, md->dev->of_node, + bcm6368_mdiomux_switch_fn, &md->mux_handle, md, + md->mii_bus); + if (rc) { + dev_info(md->dev, "mdiomux initialization failed\n"); + goto out_register; + } + + dev_info(&pdev->dev, "Broadcom BCM6368 MDIO mux bus\n"); + + return 0; + +out_register: + mdiobus_unregister(bus); + return rc; +} + +static int bcm6368_mdiomux_remove(struct platform_device *pdev) +{ + struct bcm6368_mdiomux_desc *md = platform_get_drvdata(pdev); + + mdio_mux_uninit(md->mux_handle); + mdiobus_unregister(md->mii_bus); + + return 0; +} + +static const struct of_device_id bcm6368_mdiomux_ids[] = { + { .compatible = "brcm,bcm6368-mdio-mux", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, bcm6368_mdiomux_ids); + +static struct platform_driver bcm6368_mdiomux_driver = { + .driver = { + .name = "bcm6368-mdio-mux", + .of_match_table = bcm6368_mdiomux_ids, + }, + .probe = bcm6368_mdiomux_probe, + .remove = bcm6368_mdiomux_remove, +}; +module_platform_driver(bcm6368_mdiomux_driver); + +MODULE_AUTHOR("Álvaro Fernández Rojas <noltari@gmail.com>"); +MODULE_DESCRIPTION("BCM6368 mdiomux bus controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c index 10a758fdc9e6..3c7f16f06b45 100644 --- a/drivers/net/mdio/mdio-mux-gpio.c +++ b/drivers/net/mdio/mdio-mux-gpio.c @@ -3,13 +3,13 @@ * Copyright (C) 2011, 2012 Cavium, Inc. 
*/ -#include <linux/platform_device.h> #include <linux/device.h> -#include <linux/of_mdio.h> +#include <linux/gpio/consumer.h> +#include <linux/mdio-mux.h> #include <linux/module.h> +#include <linux/of_mdio.h> #include <linux/phy.h> -#include <linux/mdio-mux.h> -#include <linux/gpio/consumer.h> +#include <linux/platform_device.h> #define DRV_VERSION "1.1" #define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver" diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c index d1a8780e24d8..c02fb2a067ee 100644 --- a/drivers/net/mdio/mdio-mux-mmioreg.c +++ b/drivers/net/mdio/mdio-mux-mmioreg.c @@ -7,13 +7,13 @@ * Copyright 2012 Freescale Semiconductor, Inc. */ -#include <linux/platform_device.h> #include <linux/device.h> +#include <linux/mdio-mux.h> +#include <linux/module.h> #include <linux/of_address.h> #include <linux/of_mdio.h> -#include <linux/module.h> #include <linux/phy.h> -#include <linux/mdio-mux.h> +#include <linux/platform_device.h> struct mdio_mux_mmioreg_state { void *mux_handle; diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c index d6564381aa3e..527acfc3c045 100644 --- a/drivers/net/mdio/mdio-mux-multiplexer.c +++ b/drivers/net/mdio/mdio-mux-multiplexer.c @@ -4,10 +4,10 @@ * Copyright 2019 NXP */ -#include <linux/platform_device.h> #include <linux/mdio-mux.h> #include <linux/module.h> #include <linux/mux/consumer.h> +#include <linux/platform_device.h> struct mdio_mux_multiplexer_state { struct mux_control *muxc; diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c index 6a1d3540210b..110e4ee85785 100644 --- a/drivers/net/mdio/mdio-mux.c +++ b/drivers/net/mdio/mdio-mux.c @@ -3,12 +3,12 @@ * Copyright (C) 2011, 2012 Cavium, Inc. */ -#include <linux/platform_device.h> -#include <linux/mdio-mux.h> -#include <linux/of_mdio.h> #include <linux/device.h> +#include <linux/mdio-mux.h> #include <linux/module.h> +#include <linux/of_mdio.h> #include <linux/phy.h> +#include <linux/platform_device.h> #define DRV_DESCRIPTION "MDIO bus multiplexer driver" diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c index d1e1009d51af..8ce99c4888e1 100644 --- a/drivers/net/mdio/mdio-octeon.c +++ b/drivers/net/mdio/mdio-octeon.c @@ -3,13 +3,13 @@ * Copyright (C) 2009-2015 Cavium, Inc. */ -#include <linux/platform_device.h> +#include <linux/gfp.h> +#include <linux/io.h> +#include <linux/module.h> #include <linux/of_address.h> #include <linux/of_mdio.h> -#include <linux/module.h> -#include <linux/gfp.h> #include <linux/phy.h> -#include <linux/io.h> +#include <linux/platform_device.h> #include "mdio-cavium.h" diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c index 3d7eda99d34e..cb1761693b69 100644 --- a/drivers/net/mdio/mdio-thunder.c +++ b/drivers/net/mdio/mdio-thunder.c @@ -3,14 +3,14 @@ * Copyright (C) 2009-2016 Cavium, Inc. 
*/ -#include <linux/of_address.h> -#include <linux/of_mdio.h> -#include <linux/module.h> +#include <linux/acpi.h> #include <linux/gfp.h> -#include <linux/phy.h> #include <linux/io.h> -#include <linux/acpi.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_mdio.h> #include <linux/pci.h> +#include <linux/phy.h> #include "mdio-cavium.h" diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c index 461207cdf5d6..7ab4e26db08c 100644 --- a/drivers/net/mdio/mdio-xgene.c +++ b/drivers/net/mdio/mdio-xgene.c @@ -13,11 +13,11 @@ #include <linux/io.h> #include <linux/mdio/mdio-xgene.h> #include <linux/module.h> -#include <linux/of_platform.h> -#include <linux/of_net.h> #include <linux/of_mdio.h> -#include <linux/prefetch.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> #include <linux/phy.h> +#include <linux/prefetch.h> #include <net/ip.h> static bool xgene_mdio_status; diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c index ea9d5855fb52..094494a68ddf 100644 --- a/drivers/net/mdio/of_mdio.c +++ b/drivers/net/mdio/of_mdio.c @@ -8,17 +8,17 @@ * out of the OpenFirmware device tree and using it to populate an mii_bus. */ -#include <linux/kernel.h> #include <linux/device.h> -#include <linux/netdevice.h> #include <linux/err.h> -#include <linux/phy.h> -#include <linux/phy_fixed.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/module.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> #define DEFAULT_GPIO_RESET_DELAY 10 /* in microseconds */ diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile index ade086eed955..a1cbfa44a1e1 100644 --- a/drivers/net/netdevsim/Makefile +++ b/drivers/net/netdevsim/Makefile @@ -13,3 +13,7 @@ endif ifneq ($(CONFIG_XFRM_OFFLOAD),) netdevsim-objs += ipsec.o endif + +ifneq ($(CONFIG_PSAMPLE),) +netdevsim-objs += psample.o +endif diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index dbeb29fa16e8..6189a4c0d39e 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -1032,10 +1032,14 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev, if (err) goto err_fib_destroy; - err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count); + err = nsim_dev_psample_init(nsim_dev); if (err) goto err_health_exit; + err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count); + if (err) + goto err_psample_exit; + nsim_dev->take_snapshot = debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, @@ -1043,6 +1047,8 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev, &nsim_dev_take_snapshot_fops); return 0; +err_psample_exit: + nsim_dev_psample_exit(nsim_dev); err_health_exit: nsim_dev_health_exit(nsim_dev); err_fib_destroy: @@ -1118,14 +1124,20 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) if (err) goto err_health_exit; - err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count); + err = nsim_dev_psample_init(nsim_dev); if (err) goto err_bpf_dev_exit; + err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count); + if (err) + goto err_psample_exit; + devlink_params_publish(devlink); devlink_reload_enable(devlink); return 0; +err_psample_exit: + nsim_dev_psample_exit(nsim_dev); err_bpf_dev_exit: nsim_bpf_dev_exit(nsim_dev); err_health_exit: @@ -1158,6 +1170,7 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev) return; 
debugfs_remove(nsim_dev->take_snapshot); nsim_dev_port_del_all(nsim_dev); + nsim_dev_psample_exit(nsim_dev); nsim_dev_health_exit(nsim_dev); nsim_fib_destroy(devlink, nsim_dev->fib_data); nsim_dev_traps_exit(devlink); diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 46fb414f7ca6..fda6f37e7055 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -14,6 +14,7 @@ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. */ +#include <linux/bitmap.h> #include <linux/in6.h> #include <linux/kernel.h> #include <linux/list.h> @@ -47,15 +48,18 @@ struct nsim_fib_data { struct nsim_fib_entry nexthops; struct rhashtable fib_rt_ht; struct list_head fib_rt_list; - struct mutex fib_lock; /* Protects hashtable and list */ + struct mutex fib_lock; /* Protects FIB HT and list */ struct notifier_block nexthop_nb; struct rhashtable nexthop_ht; struct devlink *devlink; struct work_struct fib_event_work; struct list_head fib_event_queue; spinlock_t fib_event_queue_lock; /* Protects fib event queue list */ + struct mutex nh_lock; /* Protects NH HT */ struct dentry *ddir; bool fail_route_offload; + bool fail_res_nexthop_group_replace; + bool fail_nexthop_bucket_replace; }; struct nsim_fib_rt_key { @@ -116,6 +120,7 @@ struct nsim_nexthop { struct rhash_head ht_node; u64 occ; u32 id; + bool is_resilient; }; static const struct rhashtable_params nsim_nexthop_ht_params = { @@ -869,10 +874,8 @@ err_rt_offload_failed_flag_set: return err; } -static int nsim_fib_event(struct nsim_fib_event *fib_event) +static void nsim_fib_event(struct nsim_fib_event *fib_event) { - int err = 0; - switch (fib_event->family) { case AF_INET: nsim_fib4_event(fib_event->data, &fib_event->fen_info, @@ -885,8 +888,6 @@ static int nsim_fib_event(struct nsim_fib_event *fib_event) nsim_fib6_event_fini(&fib_event->fib6_event); break; } - - return err; } static int nsim_fib4_prepare_event(struct fib_notifier_info *info, @@ -1118,6 +1119,10 @@ static struct nsim_nexthop *nsim_nexthop_create(struct nsim_fib_data *data, for (i = 0; i < info->nh_grp->num_nh; i++) occ += info->nh_grp->nh_entries[i].weight; break; + case NH_NOTIFIER_INFO_TYPE_RES_TABLE: + occ = info->nh_res_table->num_nh_buckets; + nexthop->is_resilient = true; + break; default: NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type"); kfree(nexthop); @@ -1160,6 +1165,21 @@ err_num_decrease: } +static void nsim_nexthop_hw_flags_set(struct net *net, + const struct nsim_nexthop *nexthop, + bool trap) +{ + int i; + + nexthop_set_hw_flags(net, nexthop->id, false, trap); + + if (!nexthop->is_resilient) + return; + + for (i = 0; i < nexthop->occ; i++) + nexthop_bucket_set_hw_flags(net, nexthop->id, i, false, trap); +} + static int nsim_nexthop_add(struct nsim_fib_data *data, struct nsim_nexthop *nexthop, struct netlink_ext_ack *extack) @@ -1178,7 +1198,7 @@ static int nsim_nexthop_add(struct nsim_fib_data *data, goto err_nexthop_dismiss; } - nexthop_set_hw_flags(net, nexthop->id, false, true); + nsim_nexthop_hw_flags_set(net, nexthop, true); return 0; @@ -1207,7 +1227,7 @@ static int nsim_nexthop_replace(struct nsim_fib_data *data, goto err_nexthop_dismiss; } - nexthop_set_hw_flags(net, nexthop->id, false, true); + nsim_nexthop_hw_flags_set(net, nexthop, true); nsim_nexthop_account(data, nexthop_old->occ, false, extack); nsim_nexthop_destroy(nexthop_old); @@ -1258,6 +1278,32 @@ static void nsim_nexthop_remove(struct nsim_fib_data *data, nsim_nexthop_destroy(nexthop); } +static int nsim_nexthop_res_table_pre_replace(struct 
nsim_fib_data *data, + struct nh_notifier_info *info) +{ + if (data->fail_res_nexthop_group_replace) { + NL_SET_ERR_MSG_MOD(info->extack, "Failed to replace a resilient nexthop group"); + return -EINVAL; + } + + return 0; +} + +static int nsim_nexthop_bucket_replace(struct nsim_fib_data *data, + struct nh_notifier_info *info) +{ + if (data->fail_nexthop_bucket_replace) { + NL_SET_ERR_MSG_MOD(info->extack, "Failed to replace nexthop bucket"); + return -EINVAL; + } + + nexthop_bucket_set_hw_flags(info->net, info->id, + info->nh_res_bucket->bucket_index, + false, true); + + return 0; +} + static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -1266,8 +1312,7 @@ static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event, struct nh_notifier_info *info = ptr; int err = 0; - ASSERT_RTNL(); - + mutex_lock(&data->nh_lock); switch (event) { case NEXTHOP_EVENT_REPLACE: err = nsim_nexthop_insert(data, info); @@ -1275,10 +1320,17 @@ static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event, case NEXTHOP_EVENT_DEL: nsim_nexthop_remove(data, info); break; + case NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE: + err = nsim_nexthop_res_table_pre_replace(data, info); + break; + case NEXTHOP_EVENT_BUCKET_REPLACE: + err = nsim_nexthop_bucket_replace(data, info); + break; default: break; } + mutex_unlock(&data->nh_lock); return notifier_from_errno(err); } @@ -1289,11 +1341,68 @@ static void nsim_nexthop_free(void *ptr, void *arg) struct net *net; net = devlink_net(data->devlink); - nexthop_set_hw_flags(net, nexthop->id, false, false); + nsim_nexthop_hw_flags_set(net, nexthop, false); nsim_nexthop_account(data, nexthop->occ, false, NULL); nsim_nexthop_destroy(nexthop); } +static ssize_t nsim_nexthop_bucket_activity_write(struct file *file, + const char __user *user_buf, + size_t size, loff_t *ppos) +{ + struct nsim_fib_data *data = file->private_data; + struct net *net = devlink_net(data->devlink); + struct nsim_nexthop *nexthop; + unsigned long *activity; + loff_t pos = *ppos; + u16 bucket_index; + char buf[128]; + int err = 0; + u32 nhid; + + if (pos != 0) + return -EINVAL; + if (size > sizeof(buf)) + return -EINVAL; + if (copy_from_user(buf, user_buf, size)) + return -EFAULT; + if (sscanf(buf, "%u %hu", &nhid, &bucket_index) != 2) + return -EINVAL; + + rtnl_lock(); + + nexthop = rhashtable_lookup_fast(&data->nexthop_ht, &nhid, + nsim_nexthop_ht_params); + if (!nexthop || !nexthop->is_resilient || + bucket_index >= nexthop->occ) { + err = -EINVAL; + goto out; + } + + activity = bitmap_zalloc(nexthop->occ, GFP_KERNEL); + if (!activity) { + err = -ENOMEM; + goto out; + } + + bitmap_set(activity, bucket_index, 1); + nexthop_res_grp_activity_update(net, nhid, nexthop->occ, activity); + bitmap_free(activity); + +out: + rtnl_unlock(); + + *ppos = size; + return err ?: size; +} + +static const struct file_operations nsim_nexthop_bucket_activity_fops = { + .open = simple_open, + .write = nsim_nexthop_bucket_activity_write, + .llseek = no_llseek, + .owner = THIS_MODULE, +}; + static u64 nsim_fib_ipv4_resource_occ_get(void *priv) { struct nsim_fib_data *data = priv; @@ -1383,6 +1492,17 @@ nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev) data->fail_route_offload = false; debugfs_create_bool("fail_route_offload", 0600, data->ddir, &data->fail_route_offload); + + data->fail_res_nexthop_group_replace = false; + debugfs_create_bool("fail_res_nexthop_group_replace", 0600, data->ddir, + &data->fail_res_nexthop_group_replace); 
+ + data->fail_nexthop_bucket_replace = false; + debugfs_create_bool("fail_nexthop_bucket_replace", 0600, data->ddir, + &data->fail_nexthop_bucket_replace); + + debugfs_create_file("nexthop_bucket_activity", 0200, data->ddir, + data, &nsim_nexthop_bucket_activity_fops); return 0; } @@ -1408,6 +1528,7 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink, if (err) goto err_data_free; + mutex_init(&data->nh_lock); err = rhashtable_init(&data->nexthop_ht, &nsim_nexthop_ht_params); if (err) goto err_debugfs_exit; @@ -1473,6 +1594,7 @@ err_rhashtable_nexthop_destroy: data); mutex_destroy(&data->fib_lock); err_debugfs_exit: + mutex_destroy(&data->nh_lock); nsim_fib_debugfs_exit(data); err_data_free: kfree(data); @@ -1501,6 +1623,7 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data) WARN_ON_ONCE(!list_empty(&data->fib_event_queue)); WARN_ON_ONCE(!list_empty(&data->fib_rt_list)); mutex_destroy(&data->fib_lock); + mutex_destroy(&data->nh_lock); nsim_fib_debugfs_exit(data); kfree(data); } diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c index 21e2974660e7..04aebdf85747 100644 --- a/drivers/net/netdevsim/health.c +++ b/drivers/net/netdevsim/health.c @@ -235,15 +235,10 @@ static ssize_t nsim_dev_health_break_write(struct file *file, char *break_msg; int err; - break_msg = kmalloc(count + 1, GFP_KERNEL); - if (!break_msg) - return -ENOMEM; + break_msg = memdup_user_nul(data, count); + if (IS_ERR(break_msg)) + return PTR_ERR(break_msg); - if (copy_from_user(break_msg, data, count)) { - err = -EFAULT; - goto out; - } - break_msg[count] = '\0'; if (break_msg[count - 1] == '\n') break_msg[count - 1] = '\0'; diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 48163c5f2ec9..d735c21def4b 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -180,6 +180,20 @@ struct nsim_dev_health { int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink); void nsim_dev_health_exit(struct nsim_dev *nsim_dev); +#if IS_ENABLED(CONFIG_PSAMPLE) +int nsim_dev_psample_init(struct nsim_dev *nsim_dev); +void nsim_dev_psample_exit(struct nsim_dev *nsim_dev); +#else +static inline int nsim_dev_psample_init(struct nsim_dev *nsim_dev) +{ + return 0; +} + +static inline void nsim_dev_psample_exit(struct nsim_dev *nsim_dev) +{ +} +#endif + struct nsim_dev_port { struct list_head list; struct devlink_port devlink_port; @@ -229,6 +243,7 @@ struct nsim_dev { bool static_iana_vxlan; u32 sleep; } udp_ports; + struct nsim_dev_psample *psample; }; static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev) diff --git a/drivers/net/netdevsim/psample.c b/drivers/net/netdevsim/psample.c new file mode 100644 index 000000000000..5ec3bd7f891b --- /dev/null +++ b/drivers/net/netdevsim/psample.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Mellanox Technologies. 
All rights reserved */ + +#include <linux/debugfs.h> +#include <linux/err.h> +#include <linux/etherdevice.h> +#include <linux/inet.h> +#include <linux/kernel.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <net/devlink.h> +#include <net/ip.h> +#include <net/psample.h> +#include <uapi/linux/ip.h> +#include <uapi/linux/udp.h> + +#include "netdevsim.h" + +#define NSIM_PSAMPLE_REPORT_INTERVAL_MS 100 +#define NSIM_PSAMPLE_INVALID_TC 0xFFFF +#define NSIM_PSAMPLE_L4_DATA_LEN 100 + +struct nsim_dev_psample { + struct delayed_work psample_dw; + struct dentry *ddir; + struct psample_group *group; + u32 rate; + u32 group_num; + u32 trunc_size; + int in_ifindex; + int out_ifindex; + u16 out_tc; + u64 out_tc_occ_max; + u64 latency_max; + bool is_active; +}; + +static struct sk_buff *nsim_dev_psample_skb_build(void) +{ + int tot_len, data_len = NSIM_PSAMPLE_L4_DATA_LEN; + struct sk_buff *skb; + struct udphdr *udph; + struct ethhdr *eth; + struct iphdr *iph; + + skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return NULL; + tot_len = sizeof(struct iphdr) + sizeof(struct udphdr) + data_len; + + skb_reset_mac_header(skb); + eth = skb_put(skb, sizeof(struct ethhdr)); + eth_random_addr(eth->h_dest); + eth_random_addr(eth->h_source); + eth->h_proto = htons(ETH_P_IP); + skb->protocol = htons(ETH_P_IP); + + skb_set_network_header(skb, skb->len); + iph = skb_put(skb, sizeof(struct iphdr)); + iph->protocol = IPPROTO_UDP; + iph->saddr = in_aton("192.0.2.1"); + iph->daddr = in_aton("198.51.100.1"); + iph->version = 0x4; + iph->frag_off = 0; + iph->ihl = 0x5; + iph->tot_len = htons(tot_len); + iph->id = 0; + iph->ttl = 100; + iph->check = 0; + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + + skb_set_transport_header(skb, skb->len); + udph = skb_put_zero(skb, sizeof(struct udphdr) + data_len); + get_random_bytes(&udph->source, sizeof(u16)); + get_random_bytes(&udph->dest, sizeof(u16)); + udph->len = htons(sizeof(struct udphdr) + data_len); + + return skb; +} + +static void nsim_dev_psample_md_prepare(const struct nsim_dev_psample *psample, + struct psample_metadata *md) +{ + md->trunc_size = psample->trunc_size; + md->in_ifindex = psample->in_ifindex; + md->out_ifindex = psample->out_ifindex; + + if (psample->out_tc != NSIM_PSAMPLE_INVALID_TC) { + md->out_tc = psample->out_tc; + md->out_tc_valid = 1; + } + + if (psample->out_tc_occ_max) { + u64 out_tc_occ; + + get_random_bytes(&out_tc_occ, sizeof(u64)); + md->out_tc_occ = out_tc_occ & (psample->out_tc_occ_max - 1); + md->out_tc_occ_valid = 1; + } + + if (psample->latency_max) { + u64 latency; + + get_random_bytes(&latency, sizeof(u64)); + md->latency = latency & (psample->latency_max - 1); + md->latency_valid = 1; + } +} + +static void nsim_dev_psample_report_work(struct work_struct *work) +{ + struct nsim_dev_psample *psample; + struct psample_metadata md = {}; + struct sk_buff *skb; + unsigned long delay; + + psample = container_of(work, struct nsim_dev_psample, psample_dw.work); + + skb = nsim_dev_psample_skb_build(); + if (!skb) + goto out; + + nsim_dev_psample_md_prepare(psample, &md); + psample_sample_packet(psample->group, skb, psample->rate, &md); + consume_skb(skb); + +out: + delay = msecs_to_jiffies(NSIM_PSAMPLE_REPORT_INTERVAL_MS); + schedule_delayed_work(&psample->psample_dw, delay); +} + +static int nsim_dev_psample_enable(struct nsim_dev *nsim_dev) +{ + struct nsim_dev_psample *psample = nsim_dev->psample; + struct devlink *devlink; + unsigned long delay; + + if (psample->is_active) + return -EBUSY; + + devlink 
= priv_to_devlink(nsim_dev); + psample->group = psample_group_get(devlink_net(devlink), + psample->group_num); + if (!psample->group) + return -EINVAL; + + delay = msecs_to_jiffies(NSIM_PSAMPLE_REPORT_INTERVAL_MS); + schedule_delayed_work(&psample->psample_dw, delay); + + psample->is_active = true; + + return 0; +} + +static int nsim_dev_psample_disable(struct nsim_dev *nsim_dev) +{ + struct nsim_dev_psample *psample = nsim_dev->psample; + + if (!psample->is_active) + return -EINVAL; + + psample->is_active = false; + + cancel_delayed_work_sync(&psample->psample_dw); + psample_group_put(psample->group); + + return 0; +} + +static ssize_t nsim_dev_psample_enable_write(struct file *file, + const char __user *data, + size_t count, loff_t *ppos) +{ + struct nsim_dev *nsim_dev = file->private_data; + bool enable; + int err; + + err = kstrtobool_from_user(data, count, &enable); + if (err) + return err; + + if (enable) + err = nsim_dev_psample_enable(nsim_dev); + else + err = nsim_dev_psample_disable(nsim_dev); + + return err ? err : count; +} + +static const struct file_operations nsim_psample_enable_fops = { + .open = simple_open, + .write = nsim_dev_psample_enable_write, + .llseek = generic_file_llseek, + .owner = THIS_MODULE, +}; + +int nsim_dev_psample_init(struct nsim_dev *nsim_dev) +{ + struct nsim_dev_psample *psample; + int err; + + psample = kzalloc(sizeof(*psample), GFP_KERNEL); + if (!psample) + return -ENOMEM; + nsim_dev->psample = psample; + + INIT_DELAYED_WORK(&psample->psample_dw, nsim_dev_psample_report_work); + + psample->ddir = debugfs_create_dir("psample", nsim_dev->ddir); + if (IS_ERR(psample->ddir)) { + err = PTR_ERR(psample->ddir); + goto err_psample_free; + } + + /* Populate sampling parameters with sane defaults. */ + psample->rate = 100; + debugfs_create_u32("rate", 0600, psample->ddir, &psample->rate); + + psample->group_num = 10; + debugfs_create_u32("group_num", 0600, psample->ddir, + &psample->group_num); + + psample->trunc_size = 0; + debugfs_create_u32("trunc_size", 0600, psample->ddir, + &psample->trunc_size); + + psample->in_ifindex = 1; + debugfs_create_u32("in_ifindex", 0600, psample->ddir, + &psample->in_ifindex); + + psample->out_ifindex = 2; + debugfs_create_u32("out_ifindex", 0600, psample->ddir, + &psample->out_ifindex); + + psample->out_tc = 0; + debugfs_create_u16("out_tc", 0600, psample->ddir, &psample->out_tc); + + psample->out_tc_occ_max = 10000; + debugfs_create_u64("out_tc_occ_max", 0600, psample->ddir, + &psample->out_tc_occ_max); + + psample->latency_max = 50; + debugfs_create_u64("latency_max", 0600, psample->ddir, + &psample->latency_max); + + debugfs_create_file("enable", 0200, psample->ddir, nsim_dev, + &nsim_psample_enable_fops); + + return 0; + +err_psample_free: + kfree(nsim_dev->psample); + return err; +} + +void nsim_dev_psample_exit(struct nsim_dev *nsim_dev) +{ + debugfs_remove_recursive(nsim_dev->psample->ddir); + if (nsim_dev->psample->is_active) { + cancel_delayed_work_sync(&nsim_dev->psample->psample_dw); + psample_group_put(nsim_dev->psample->group); + } + kfree(nsim_dev->psample); +} diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 1aa9903d602e..944ba105cac1 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -15,6 +15,7 @@ #define SYNOPSYS_XPCS_USXGMII_ID 0x7996ced0 #define SYNOPSYS_XPCS_10GKR_ID 0x7996ced0 #define SYNOPSYS_XPCS_XLGMII_ID 0x7996ced0 +#define SYNOPSYS_XPCS_SGMII_ID 0x7996ced0 #define SYNOPSYS_XPCS_MASK 0xffffffff /* Vendor regs access */ @@ -57,6 +58,34 @@ #define 
DW_C73_2500KX BIT(0) #define DW_C73_5000KR BIT(1) +/* Clause 37 Defines */ +/* VR MII MMD registers offsets */ +#define DW_VR_MII_DIG_CTRL1 0x8000 +#define DW_VR_MII_AN_CTRL 0x8001 +#define DW_VR_MII_AN_INTR_STS 0x8002 + +/* VR_MII_DIG_CTRL1 */ +#define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9) + +/* VR_MII_AN_CTRL */ +#define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT 3 +#define DW_VR_MII_TX_CONFIG_MASK BIT(3) +#define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1 +#define DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII 0x0 +#define DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT 1 +#define DW_VR_MII_PCS_MODE_MASK GENMASK(2, 1) +#define DW_VR_MII_PCS_MODE_C37_1000BASEX 0x0 +#define DW_VR_MII_PCS_MODE_C37_SGMII 0x2 + +/* VR_MII_AN_INTR_STS */ +#define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1) +#define DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT 2 +#define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2) +#define DW_VR_MII_C37_ANSGM_SP_10 0x0 +#define DW_VR_MII_C37_ANSGM_SP_100 0x1 +#define DW_VR_MII_C37_ANSGM_SP_1000 0x2 +#define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4) + static const int xpcs_usxgmii_features[] = { ETHTOOL_LINK_MODE_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT, @@ -105,6 +134,16 @@ static const int xpcs_xlgmii_features[] = { __ETHTOOL_LINK_MODE_MASK_NBITS, }; +static const int xpcs_sgmii_features[] = { + ETHTOOL_LINK_MODE_10baseT_Half_BIT, + ETHTOOL_LINK_MODE_10baseT_Full_BIT, + ETHTOOL_LINK_MODE_100baseT_Half_BIT, + ETHTOOL_LINK_MODE_100baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + __ETHTOOL_LINK_MODE_MASK_NBITS, +}; + static const phy_interface_t xpcs_usxgmii_interfaces[] = { PHY_INTERFACE_MODE_USXGMII, PHY_INTERFACE_MODE_MAX, @@ -120,27 +159,42 @@ static const phy_interface_t xpcs_xlgmii_interfaces[] = { PHY_INTERFACE_MODE_MAX, }; +static const phy_interface_t xpcs_sgmii_interfaces[] = { + PHY_INTERFACE_MODE_SGMII, + PHY_INTERFACE_MODE_MAX, +}; + static struct xpcs_id { u32 id; u32 mask; const int *supported; const phy_interface_t *interface; + int an_mode; } xpcs_id_list[] = { { .id = SYNOPSYS_XPCS_USXGMII_ID, .mask = SYNOPSYS_XPCS_MASK, .supported = xpcs_usxgmii_features, .interface = xpcs_usxgmii_interfaces, + .an_mode = DW_AN_C73, }, { .id = SYNOPSYS_XPCS_10GKR_ID, .mask = SYNOPSYS_XPCS_MASK, .supported = xpcs_10gkr_features, .interface = xpcs_10gkr_interfaces, + .an_mode = DW_AN_C73, }, { .id = SYNOPSYS_XPCS_XLGMII_ID, .mask = SYNOPSYS_XPCS_MASK, .supported = xpcs_xlgmii_features, .interface = xpcs_xlgmii_interfaces, + .an_mode = DW_AN_C73, + }, { + .id = SYNOPSYS_XPCS_SGMII_ID, + .mask = SYNOPSYS_XPCS_MASK, + .supported = xpcs_sgmii_features, + .interface = xpcs_sgmii_interfaces, + .an_mode = DW_AN_C37_SGMII, }, }; @@ -195,9 +249,20 @@ static int xpcs_poll_reset(struct mdio_xpcs_args *xpcs, int dev) return (ret & MDIO_CTRL1_RESET) ? 
-ETIMEDOUT : 0; } -static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs, int dev) +static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs) { - int ret; + int ret, dev; + + switch (xpcs->an_mode) { + case DW_AN_C73: + dev = MDIO_MMD_PCS; + break; + case DW_AN_C37_SGMII: + dev = MDIO_MMD_VEND2; + break; + default: + return -1; + } ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET); if (ret < 0) @@ -212,8 +277,8 @@ static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs, int dev) dev_warn(&(__xpcs)->bus->dev, ##__args); \ }) -static int xpcs_read_fault(struct mdio_xpcs_args *xpcs, - struct phylink_link_state *state) +static int xpcs_read_fault_c73(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) { int ret; @@ -263,7 +328,7 @@ static int xpcs_read_fault(struct mdio_xpcs_args *xpcs, return 0; } -static int xpcs_read_link(struct mdio_xpcs_args *xpcs, bool an) +static int xpcs_read_link_c73(struct mdio_xpcs_args *xpcs, bool an) { bool link = true; int ret; @@ -357,7 +422,7 @@ static int xpcs_config_usxgmii(struct mdio_xpcs_args *xpcs, int speed) return xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_RST); } -static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs) +static int _xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs) { int ret, adv; @@ -401,11 +466,11 @@ static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs) return xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV1, adv); } -static int xpcs_config_aneg(struct mdio_xpcs_args *xpcs) +static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs) { int ret; - ret = xpcs_config_aneg_c73(xpcs); + ret = _xpcs_config_aneg_c73(xpcs); if (ret < 0) return ret; @@ -418,8 +483,8 @@ static int xpcs_config_aneg(struct mdio_xpcs_args *xpcs) return xpcs_write(xpcs, MDIO_MMD_AN, MDIO_CTRL1, ret); } -static int xpcs_aneg_done(struct mdio_xpcs_args *xpcs, - struct phylink_link_state *state) +static int xpcs_aneg_done_c73(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) { int ret; @@ -434,7 +499,7 @@ static int xpcs_aneg_done(struct mdio_xpcs_args *xpcs, /* Check if Aneg outcome is valid */ if (!(ret & DW_C73_AN_ADV_SF)) { - xpcs_config_aneg(xpcs); + xpcs_config_aneg_c73(xpcs); return 0; } @@ -444,8 +509,8 @@ static int xpcs_aneg_done(struct mdio_xpcs_args *xpcs, return 0; } -static int xpcs_read_lpa(struct mdio_xpcs_args *xpcs, - struct phylink_link_state *state) +static int xpcs_read_lpa_c73(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) { int ret; @@ -493,8 +558,8 @@ static int xpcs_read_lpa(struct mdio_xpcs_args *xpcs, return 0; } -static void xpcs_resolve_lpa(struct mdio_xpcs_args *xpcs, - struct phylink_link_state *state) +static void xpcs_resolve_lpa_c73(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) { int max_speed = xpcs_get_max_usxgmii_speed(state->lp_advertising); @@ -585,32 +650,84 @@ static int xpcs_validate(struct mdio_xpcs_args *xpcs, return 0; } +static int xpcs_config_aneg_c37_sgmii(struct mdio_xpcs_args *xpcs) +{ + int ret; + + /* For AN for C37 SGMII mode, the settings are :- + * 1) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN) + * 2) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII) + * DW xPCS used with DW EQoS MAC is always MAC side SGMII. + * 3) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic + * speed/duplex mode change by HW after SGMII AN complete) + * + * Note: Since it is MAC side SGMII, there is no need to set + * SR_MII_AN_ADV. 
MAC side SGMII receives AN Tx Config from + * PHY about the link state change after C28 AN is completed + * between PHY and Link Partner. There is also no need to + * trigger AN restart for MAC-side SGMII. + */ + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL); + if (ret < 0) + return ret; + + ret &= ~(DW_VR_MII_PCS_MODE_MASK | DW_VR_MII_TX_CONFIG_MASK); + ret |= (DW_VR_MII_PCS_MODE_C37_SGMII << + DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT & + DW_VR_MII_PCS_MODE_MASK); + ret |= (DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII << + DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT & + DW_VR_MII_TX_CONFIG_MASK); + ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret); + if (ret < 0) + return ret; + + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1); + if (ret < 0) + return ret; + + ret |= DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; + + return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret); +} + static int xpcs_config(struct mdio_xpcs_args *xpcs, const struct phylink_link_state *state) { int ret; - if (state->an_enabled) { - ret = xpcs_config_aneg(xpcs); + switch (xpcs->an_mode) { + case DW_AN_C73: + if (state->an_enabled) { + ret = xpcs_config_aneg_c73(xpcs); + if (ret) + return ret; + } + break; + case DW_AN_C37_SGMII: + ret = xpcs_config_aneg_c37_sgmii(xpcs); if (ret) return ret; + break; + default: + return -1; } return 0; } -static int xpcs_get_state(struct mdio_xpcs_args *xpcs, - struct phylink_link_state *state) +static int xpcs_get_state_c73(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) { int ret; /* Link needs to be read first ... */ - state->link = xpcs_read_link(xpcs, state->an_enabled) > 0 ? 1 : 0; + state->link = xpcs_read_link_c73(xpcs, state->an_enabled) > 0 ? 1 : 0; /* ... and then we check the faults. */ - ret = xpcs_read_fault(xpcs, state); + ret = xpcs_read_fault_c73(xpcs, state); if (ret) { - ret = xpcs_soft_reset(xpcs, MDIO_MMD_PCS); + ret = xpcs_soft_reset(xpcs); if (ret) return ret; @@ -619,10 +736,10 @@ static int xpcs_get_state(struct mdio_xpcs_args *xpcs, return xpcs_config(xpcs, state); } - if (state->an_enabled && xpcs_aneg_done(xpcs, state)) { + if (state->an_enabled && xpcs_aneg_done_c73(xpcs, state)) { state->an_complete = true; - xpcs_read_lpa(xpcs, state); - xpcs_resolve_lpa(xpcs, state); + xpcs_read_lpa_c73(xpcs, state); + xpcs_resolve_lpa_c73(xpcs, state); } else if (state->an_enabled) { state->link = 0; } else if (state->link) { @@ -632,6 +749,70 @@ static int xpcs_get_state(struct mdio_xpcs_args *xpcs, return 0; } +static int xpcs_get_state_c37_sgmii(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) +{ + int ret; + + /* Reset link_state */ + state->link = false; + state->speed = SPEED_UNKNOWN; + state->duplex = DUPLEX_UNKNOWN; + state->pause = 0; + + /* For C37 SGMII mode, we check DW_VR_MII_AN_INTR_STS for link + * status, speed and duplex. 
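[Editor's note] As a worked example of the decode implemented just below, using the DW_VR_MII_* masks defined earlier in this file (the status value 0x1a is illustrative only):

    u16 sts = 0x1a;                                     /* sample DW_VR_MII_AN_INTR_STS read */
    bool link = sts & DW_VR_MII_C37_ANSGM_SP_LNKSTS;    /* BIT(4) set -> link up */
    u16 sp = (sts & DW_VR_MII_AN_STS_C37_ANSGM_SP) >>
             DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT;       /* 0x2 == DW_VR_MII_C37_ANSGM_SP_1000 */
    bool full = sts & DW_VR_MII_AN_STS_C37_ANSGM_FD;    /* BIT(1) set -> full duplex */

So a status word of 0x1a resolves to link up at 1000 Mb/s, full duplex.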
+ */ + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS); + if (ret < 0) + return false; + + if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) { + int speed_value; + + state->link = true; + + speed_value = (ret & DW_VR_MII_AN_STS_C37_ANSGM_SP) >> + DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT; + if (speed_value == DW_VR_MII_C37_ANSGM_SP_1000) + state->speed = SPEED_1000; + else if (speed_value == DW_VR_MII_C37_ANSGM_SP_100) + state->speed = SPEED_100; + else + state->speed = SPEED_10; + + if (ret & DW_VR_MII_AN_STS_C37_ANSGM_FD) + state->duplex = DUPLEX_FULL; + else + state->duplex = DUPLEX_HALF; + } + + return 0; +} + +static int xpcs_get_state(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state) +{ + int ret; + + switch (xpcs->an_mode) { + case DW_AN_C73: + ret = xpcs_get_state_c73(xpcs, state); + if (ret) + return ret; + break; + case DW_AN_C37_SGMII: + ret = xpcs_get_state_c37_sgmii(xpcs, state); + if (ret) + return ret; + break; + default: + return -1; + } + + return 0; +} + static int xpcs_link_up(struct mdio_xpcs_args *xpcs, int speed, phy_interface_t interface) { @@ -646,6 +827,7 @@ static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs) int ret; u32 id; + /* First, search C73 PCS using PCS MMD */ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID1); if (ret < 0) return 0xffffffff; @@ -656,7 +838,26 @@ static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs) if (ret < 0) return 0xffffffff; - return id | ret; + /* If Device IDs are not all zeros, we found C73 AN-type device */ + if (id | ret) + return id | ret; + + /* Next, search C37 PCS using Vendor-Specific MII MMD */ + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID1); + if (ret < 0) + return 0xffffffff; + + id = ret << 16; + + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID2); + if (ret < 0) + return 0xffffffff; + + /* If Device IDs are not all zeros, we found C37 AN-type device */ + if (id | ret) + return id | ret; + + return 0xffffffff; } static bool xpcs_check_features(struct mdio_xpcs_args *xpcs, @@ -676,6 +877,8 @@ static bool xpcs_check_features(struct mdio_xpcs_args *xpcs, for (i = 0; match->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++) set_bit(match->supported[i], xpcs->supported); + xpcs->an_mode = match->an_mode; + return true; } @@ -692,7 +895,7 @@ static int xpcs_probe(struct mdio_xpcs_args *xpcs, phy_interface_t interface) match = entry; if (xpcs_check_features(xpcs, match, interface)) - return xpcs_soft_reset(xpcs, MDIO_MMD_PCS); + return xpcs_soft_reset(xpcs); } } diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 698bea312adc..a615b3660b05 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -201,6 +201,12 @@ config MARVELL_10G_PHY help Support for the Marvell Alaska MV88X3310 and compatible PHYs. +config MARVELL_88X2222_PHY + tristate "Marvell 88X2222 PHY" + help + Support for the Marvell 88X2222 Dual-port Multi-speed Ethernet + Transceiver. 
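[Editor's note] For reference, the two-stage probe in xpcs_get_id() above composes one 32-bit device ID from the two 16-bit MII_PHYSID registers, first via the PCS MMD (C73 devices) and then via the VEND2 MMD (C37 devices). A sketch of that composition, with the Synopsys value shared by every xpcs_id_list[] entry (values illustrative):

    u32 id = (u32)0x7996 << 16 | 0xced0;  /* MII_PHYSID1 << 16 | MII_PHYSID2 */
    bool match = (id & SYNOPSYS_XPCS_MASK) == SYNOPSYS_XPCS_SGMII_ID;  /* true */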
+ config MICREL_PHY tristate "Micrel PHYs" help diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index a13e402074cf..de683e3abe63 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -63,6 +63,7 @@ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o obj-$(CONFIG_LXT_PHY) += lxt.o obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o obj-$(CONFIG_MARVELL_PHY) += marvell.o +obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o obj-$(CONFIG_MICREL_PHY) += micrel.o diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index c2aa4c92edde..d7799beb811c 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -751,36 +751,6 @@ static void at803x_link_change_notify(struct phy_device *phydev) } } -static int at803x_aneg_done(struct phy_device *phydev) -{ - int ccr; - - int aneg_done = genphy_aneg_done(phydev); - if (aneg_done != BMSR_ANEGCOMPLETE) - return aneg_done; - - /* - * in SGMII mode, if copper side autoneg is successful, - * also check SGMII side autoneg result - */ - ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); - if ((ccr & AT803X_MODE_CFG_MASK) != AT803X_MODE_CFG_SGMII) - return aneg_done; - - /* switch to SGMII/fiber page */ - phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL); - - /* check if the SGMII link is OK. */ - if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) { - phydev_warn(phydev, "803x_aneg_done: SGMII link is not ok\n"); - aneg_done = 0; - } - /* switch back to copper page */ - phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL); - - return aneg_done; -} - static int at803x_read_status(struct phy_device *phydev) { int ss, err, old_link = phydev->link; @@ -1198,7 +1168,6 @@ static struct phy_driver at803x_driver[] = { .resume = at803x_resume, /* PHY_GBIT_FEATURES */ .read_status = at803x_read_status, - .aneg_done = at803x_aneg_done, .config_intr = &at803x_config_intr, .handle_interrupt = at803x_handle_interrupt, .get_tunable = at803x_get_tunable, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index fa0be591ae79..82fe5f43f0e9 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -342,6 +342,10 @@ static int bcm54xx_config_init(struct phy_device *phydev) bcm54xx_adjust_rxrefclk(phydev); switch (BRCM_PHY_MODEL(phydev)) { + case PHY_ID_BCM50610: + case PHY_ID_BCM50610M: + err = bcm54xx_config_clock_delay(phydev); + break; case PHY_ID_BCM54210E: err = bcm54210e_config_init(phydev); break; @@ -399,6 +403,11 @@ static int bcm54xx_resume(struct phy_device *phydev) if (ret < 0) return ret; + /* Upon exiting power down, the PHY remains in an internal reset state + * for 40us + */ + fsleep(40); + return bcm54xx_config_init(phydev); } diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c new file mode 100644 index 000000000000..eca8c2f20684 --- /dev/null +++ b/drivers/net/phy/marvell-88x2222.c @@ -0,0 +1,519 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Marvell 88x2222 dual-port multi-speed ethernet transceiver. + * + * Supports: + * XAUI on the host side. + * 1000Base-X or 10GBase-R on the line side. + * SGMII over 1000Base-X. 
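[Editor's note] Ahead of the register defines below: this driver selects the line-side mode with a single write of a composed value to MV_PCS_CONFIG in the VEND2 MMD (see mv2222_config_line() later in the file). A sketch of the three values it can write, assuming only the defines introduced next:

    u16 cfg_10g   = MV_PCS_HOST_XAUI | MV_PCS_LINE_10GBR;    /* 0x7173 */
    u16 cfg_1gbx  = MV_PCS_HOST_XAUI | MV_PCS_LINE_1GBX_AN;  /* 0x7b73 */
    u16 cfg_sgmii = MV_PCS_HOST_XAUI | MV_PCS_LINE_SGMII_AN; /* 0x7f73 */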
+ */ +#include <linux/module.h> +#include <linux/phy.h> +#include <linux/gpio.h> +#include <linux/delay.h> +#include <linux/mdio.h> +#include <linux/marvell_phy.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/sfp.h> +#include <linux/netdevice.h> + +/* Port PCS Configuration */ +#define MV_PCS_CONFIG 0xF002 +#define MV_PCS_HOST_XAUI 0x73 +#define MV_PCS_LINE_10GBR (0x71 << 8) +#define MV_PCS_LINE_1GBX_AN (0x7B << 8) +#define MV_PCS_LINE_SGMII_AN (0x7F << 8) + +/* Port Reset and Power Down */ +#define MV_PORT_RST 0xF003 +#define MV_LINE_RST_SW BIT(15) +#define MV_HOST_RST_SW BIT(7) +#define MV_PORT_RST_SW (MV_LINE_RST_SW | MV_HOST_RST_SW) + +/* 1000Base-X/SGMII Control Register */ +#define MV_1GBX_CTRL (0x2000 + MII_BMCR) + +/* 1000BASE-X/SGMII Status Register */ +#define MV_1GBX_STAT (0x2000 + MII_BMSR) + +/* 1000Base-X Auto-Negotiation Advertisement Register */ +#define MV_1GBX_ADVERTISE (0x2000 + MII_ADVERTISE) + +/* 1000Base-X PHY Specific Status Register */ +#define MV_1GBX_PHY_STAT 0xA003 +#define MV_1GBX_PHY_STAT_AN_RESOLVED BIT(11) +#define MV_1GBX_PHY_STAT_DUPLEX BIT(13) +#define MV_1GBX_PHY_STAT_SPEED100 BIT(14) +#define MV_1GBX_PHY_STAT_SPEED1000 BIT(15) + +struct mv2222_data { + phy_interface_t line_interface; + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); +}; + +/* SFI PMA transmit enable */ +static int mv2222_tx_enable(struct phy_device *phydev) +{ + return phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, + MDIO_PMD_TXDIS_GLOBAL); +} + +/* SFI PMA transmit disable */ +static int mv2222_tx_disable(struct phy_device *phydev) +{ + return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, + MDIO_PMD_TXDIS_GLOBAL); +} + +static int mv2222_soft_reset(struct phy_device *phydev) +{ + int val, ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PORT_RST, + MV_PORT_RST_SW); + if (ret < 0) + return ret; + + return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND2, MV_PORT_RST, + val, !(val & MV_PORT_RST_SW), + 5000, 1000000, true); +} + +/* Returns negative on error, 0 if link is down, 1 if link is up */ +static int mv2222_read_status_10g(struct phy_device *phydev) +{ + int val, link = 0; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1); + if (val < 0) + return val; + + if (val & MDIO_STAT1_LSTATUS) { + link = 1; + + /* 10GBASE-R do not support auto-negotiation */ + phydev->autoneg = AUTONEG_DISABLE; + phydev->speed = SPEED_10000; + phydev->duplex = DUPLEX_FULL; + } + + return link; +} + +/* Returns negative on error, 0 if link is down, 1 if link is up */ +static int mv2222_read_status_1g(struct phy_device *phydev) +{ + int val, link = 0; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_STAT); + if (val < 0) + return val; + + if (!(val & BMSR_LSTATUS) || + (phydev->autoneg == AUTONEG_ENABLE && + !(val & BMSR_ANEGCOMPLETE))) + return 0; + + link = 1; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_PHY_STAT); + if (val < 0) + return val; + + if (val & MV_1GBX_PHY_STAT_AN_RESOLVED) { + if (val & MV_1GBX_PHY_STAT_DUPLEX) + phydev->duplex = DUPLEX_FULL; + else + phydev->duplex = DUPLEX_HALF; + + if (val & MV_1GBX_PHY_STAT_SPEED1000) + phydev->speed = SPEED_1000; + else if (val & MV_1GBX_PHY_STAT_SPEED100) + phydev->speed = SPEED_100; + else + phydev->speed = SPEED_10; + } + + return link; +} + +static int mv2222_read_status(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + int link; + + phydev->link = 0; + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + + if 
(priv->line_interface == PHY_INTERFACE_MODE_10GBASER) + link = mv2222_read_status_10g(phydev); + else + link = mv2222_read_status_1g(phydev); + + if (link < 0) + return link; + + phydev->link = link; + + return 0; +} + +static int mv2222_disable_aneg(struct phy_device *phydev) +{ + int ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_CTRL, + BMCR_ANENABLE | BMCR_ANRESTART); + if (ret < 0) + return ret; + + return mv2222_soft_reset(phydev); +} + +static int mv2222_enable_aneg(struct phy_device *phydev) +{ + int ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_CTRL, + BMCR_ANENABLE | BMCR_RESET); + if (ret < 0) + return ret; + + return mv2222_soft_reset(phydev); +} + +static int mv2222_set_sgmii_speed(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + + switch (phydev->speed) { + default: + case SPEED_1000: + if ((linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + priv->supported))) + return phy_modify_mmd(phydev, MDIO_MMD_PCS, + MV_1GBX_CTRL, + BMCR_SPEED1000 | BMCR_SPEED100, + BMCR_SPEED1000); + + fallthrough; + case SPEED_100: + if ((linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + priv->supported))) + return phy_modify_mmd(phydev, MDIO_MMD_PCS, + MV_1GBX_CTRL, + BMCR_SPEED1000 | BMCR_SPEED100, + BMCR_SPEED100); + fallthrough; + case SPEED_10: + if ((linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + priv->supported))) + return phy_modify_mmd(phydev, MDIO_MMD_PCS, + MV_1GBX_CTRL, + BMCR_SPEED1000 | BMCR_SPEED100, + BMCR_SPEED10); + + return -EINVAL; + } +} + +static bool mv2222_is_10g_capable(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + + return (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, + priv->supported)); +} + +static bool mv2222_is_1gbx_capable(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + + return linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + priv->supported); +} + +static int mv2222_config_line(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + + switch (priv->line_interface) { + case PHY_INTERFACE_MODE_10GBASER: + return phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG, + MV_PCS_HOST_XAUI | MV_PCS_LINE_10GBR); + case PHY_INTERFACE_MODE_1000BASEX: + return phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG, + MV_PCS_HOST_XAUI | MV_PCS_LINE_1GBX_AN); + case PHY_INTERFACE_MODE_SGMII: + return phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG, + MV_PCS_HOST_XAUI | MV_PCS_LINE_SGMII_AN); + default: + return -EINVAL; + } +} + +static int mv2222_setup_forced(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + bool changed = false; + int ret; + + switch (priv->line_interface) { + case PHY_INTERFACE_MODE_10GBASER: + if (phydev->speed == SPEED_1000 && + mv2222_is_1gbx_capable(phydev)) { + priv->line_interface = PHY_INTERFACE_MODE_1000BASEX; + changed = true; + } + + 
break; + case PHY_INTERFACE_MODE_1000BASEX: + if (phydev->speed == SPEED_10000 && + mv2222_is_10g_capable(phydev)) { + priv->line_interface = PHY_INTERFACE_MODE_10GBASER; + changed = true; + } + + break; + case PHY_INTERFACE_MODE_SGMII: + ret = mv2222_set_sgmii_speed(phydev); + if (ret < 0) + return ret; + + break; + default: + return -EINVAL; + } + + if (changed) { + ret = mv2222_config_line(phydev); + if (ret < 0) + return ret; + } + + return mv2222_disable_aneg(phydev); +} + +static int mv2222_config_aneg(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + int ret, adv; + + /* SFP is not present, do nothing */ + if (priv->line_interface == PHY_INTERFACE_MODE_NA) + return 0; + + if (phydev->autoneg == AUTONEG_DISABLE || + phydev->speed == SPEED_10000) + return mv2222_setup_forced(phydev); + + if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER && + mv2222_is_1gbx_capable(phydev)) { + priv->line_interface = PHY_INTERFACE_MODE_1000BASEX; + ret = mv2222_config_line(phydev); + if (ret < 0) + return ret; + } + + adv = linkmode_adv_to_mii_adv_x(priv->supported, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT); + + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_ADVERTISE, + ADVERTISE_1000XFULL | + ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM, + adv); + if (ret < 0) + return ret; + + return mv2222_enable_aneg(phydev); +} + +static int mv2222_aneg_done(struct phy_device *phydev) +{ + int ret; + + if (mv2222_is_10g_capable(phydev)) { + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1); + if (ret < 0) + return ret; + + if (ret & MDIO_STAT1_LSTATUS) + return 1; + } + + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_STAT); + if (ret < 0) + return ret; + + return (ret & BMSR_ANEGCOMPLETE); +} + +static int mv2222_resume(struct phy_device *phydev) +{ + return mv2222_tx_enable(phydev); +} + +static int mv2222_suspend(struct phy_device *phydev) +{ + return mv2222_tx_disable(phydev); +} + +static int mv2222_get_features(struct phy_device *phydev) +{ + /* All supported linkmodes are set at probe */ + + return 0; +} + +static int mv2222_config_init(struct phy_device *phydev) +{ + if (phydev->interface != PHY_INTERFACE_MODE_XAUI) + return -EINVAL; + + phydev->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) +{ + struct phy_device *phydev = upstream; + phy_interface_t sfp_interface; + struct mv2222_data *priv; + struct device *dev; + int ret; + + __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_supported) = { 0, }; + + priv = (struct mv2222_data *)phydev->priv; + dev = &phydev->mdio.dev; + + sfp_parse_support(phydev->sfp_bus, id, sfp_supported); + sfp_interface = sfp_select_interface(phydev->sfp_bus, sfp_supported); + + dev_info(dev, "%s SFP module inserted\n", phy_modes(sfp_interface)); + + if (sfp_interface != PHY_INTERFACE_MODE_10GBASER && + sfp_interface != PHY_INTERFACE_MODE_1000BASEX && + sfp_interface != PHY_INTERFACE_MODE_SGMII) { + dev_err(dev, "Incompatible SFP module inserted\n"); + + return -EINVAL; + } + + priv->line_interface = sfp_interface; + linkmode_and(priv->supported, phydev->supported, sfp_supported); + + ret = mv2222_config_line(phydev); + if (ret < 0) + return ret; + + if (mutex_trylock(&phydev->lock)) { + if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER) + ret = mv2222_setup_forced(phydev); + else + ret = mv2222_config_aneg(phydev); + + mutex_unlock(&phydev->lock); + } + + return ret; +} + +static void mv2222_sfp_remove(void *upstream) +{ + struct phy_device *phydev = upstream; + struct 
mv2222_data *priv; + + priv = (struct mv2222_data *)phydev->priv; + + priv->line_interface = PHY_INTERFACE_MODE_NA; + linkmode_zero(priv->supported); +} + +static const struct sfp_upstream_ops sfp_phy_ops = { + .module_insert = mv2222_sfp_insert, + .module_remove = mv2222_sfp_remove, + .attach = phy_sfp_attach, + .detach = phy_sfp_detach, +}; + +static int mv2222_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct mv2222_data *priv = NULL; + + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; + + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, supported); + + linkmode_copy(phydev->supported, supported); + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->line_interface = PHY_INTERFACE_MODE_NA; + phydev->priv = priv; + + return phy_sfp_probe(phydev, &sfp_phy_ops); +} + +static struct phy_driver mv2222_drivers[] = { + { + .phy_id = MARVELL_PHY_ID_88X2222, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88X2222", + .get_features = mv2222_get_features, + .soft_reset = mv2222_soft_reset, + .config_init = mv2222_config_init, + .config_aneg = mv2222_config_aneg, + .aneg_done = mv2222_aneg_done, + .probe = mv2222_probe, + .suspend = mv2222_suspend, + .resume = mv2222_resume, + .read_status = mv2222_read_status, + }, +}; +module_phy_driver(mv2222_drivers); + +static struct mdio_device_id __maybe_unused mv2222_tbl[] = { + { MARVELL_PHY_ID_88X2222, MARVELL_PHY_ID_MASK }, + { } +}; +MODULE_DEVICE_TABLE(mdio, mv2222_tbl); + +MODULE_DESCRIPTION("Marvell 88x2222 ethernet transceiver driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index b1bb9b8e1e4e..74b64e52ffa2 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -781,6 +781,7 @@ static struct phy_driver mv3310_drivers[] = { .get_tunable = mv3310_get_tunable, .set_tunable = mv3310_set_tunable, .remove = mv3310_remove, + .set_loopback = genphy_c45_loopback, }, { .phy_id = MARVELL_PHY_ID_88E2110, @@ -796,6 +797,7 @@ static struct phy_driver mv3310_drivers[] = { .get_tunable = mv3310_get_tunable, .set_tunable = mv3310_set_tunable, .remove = mv3310_remove, + .set_loopback = genphy_c45_loopback, }, }; diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 3a7705228ed5..6e32da28e138 100644 --- a/drivers/net/phy/mscc/mscc_main.c 
+++ b/drivers/net/phy/mscc/mscc_main.c @@ -1362,6 +1362,12 @@ static int vsc8584_config_pre_init(struct phy_device *phydev) u16 crc, reg; int ret; + ret = vsc8584_pll5g_reset(phydev); + if (ret < 0) { + dev_err(dev, "failed LCPLL reset, ret: %d\n", ret); + return ret; + } + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); /* all writes below are broadcasted to all PHYs in the same package */ @@ -1466,6 +1472,24 @@ static int vsc8584_config_pre_init(struct phy_device *phydev) if (ret) goto out; + /* Write patch vector 0, to skip IB cal polling */ + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_GPIO); + reg = MSCC_ROM_TRAP_SERDES_6G_CFG; /* ROM address to trap, for patch vector 0 */ + ret = phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), reg); + if (ret) + goto out; + + reg = MSCC_RAM_TRAP_SERDES_6G_CFG; /* RAM address to jump to, when patch vector 0 enabled */ + ret = phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), reg); + if (ret) + goto out; + + reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL); + reg |= PATCH_VEC_ZERO_EN; /* bit 8, enable patch vector 0 */ + ret = phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg); + if (ret) + goto out; + vsc8584_micro_deassert_reset(phydev, true); out: @@ -1531,62 +1555,81 @@ static void vsc85xx_coma_mode_release(struct phy_device *phydev) vsc85xx_phy_write_page(phydev, MSCC_PHY_PAGE_STANDARD); } -static int vsc8584_config_init(struct phy_device *phydev) +static int vsc8584_config_host_serdes(struct phy_device *phydev) { struct vsc8531_private *vsc8531 = phydev->priv; - int ret, i; + int ret; u16 val; - phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + if (ret) + return ret; - phy_lock_mdio_bus(phydev); + val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK); + val &= ~MAC_CFG_MASK; + if (phydev->interface == PHY_INTERFACE_MODE_QSGMII) { + val |= MAC_CFG_QSGMII; + } else if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + val |= MAC_CFG_SGMII; + } else { + ret = -EINVAL; + return ret; + } - /* Some parts of the init sequence are identical for every PHY in the - * package. Some parts are modifying the GPIO register bank which is a - * set of registers that are affecting all PHYs, a few resetting the - * microprocessor common to all PHYs. The CRC check responsible of the - * checking the firmware within the 8051 microprocessor can only be - * accessed via the PHY whose internal address in the package is 0. - * All PHYs' interrupts mask register has to be zeroed before enabling - * any PHY's interrupt in this register. - * For all these reasons, we need to do the init sequence once and only - * once whatever is the first PHY in the package that is initialized and - * do the correct init sequence for all PHYs that are package-critical - * in this pre-init function. - */ - if (phy_package_init_once(phydev)) { - /* The following switch statement assumes that the lowest - * nibble of the phy_id_mask is always 0. This works because - * the lowest nibble of the PHY_ID's below are also 0. 
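[Editor's note] Distilled from the restructured vsc8584_config_init() later in this hunk, the run-once idiom that the comment here describes looks like this (a minimal sketch; error labels and the PHY-ID switch are elided):

    if (phy_package_init_once(phydev)) {
            /* Shared 8051 firmware check and GPIO-bank setup. */
            ret = vsc8584_config_pre_init(phydev);
            if (ret)
                    goto err;
            /* Package-wide MAC-side SerDes (QSGMII/SGMII) bring-up. */
            ret = vsc8584_config_host_serdes(phydev);
            if (ret)
                    goto err;
    }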
- */ - WARN_ON(phydev->drv->phy_id_mask & 0xf); + ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val); + if (ret) + return ret; - switch (phydev->phy_id & phydev->drv->phy_id_mask) { - case PHY_ID_VSC8504: - case PHY_ID_VSC8552: - case PHY_ID_VSC8572: - case PHY_ID_VSC8574: - ret = vsc8574_config_pre_init(phydev); - break; - case PHY_ID_VSC856X: - case PHY_ID_VSC8575: - case PHY_ID_VSC8582: - case PHY_ID_VSC8584: - ret = vsc8584_config_pre_init(phydev); - break; - default: - ret = -EINVAL; - break; - } + ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_STANDARD); + if (ret) + return ret; - if (ret) - goto err; - } + val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT | + PROC_CMD_READ_MOD_WRITE_PORT; + if (phydev->interface == PHY_INTERFACE_MODE_QSGMII) + val |= PROC_CMD_QSGMII_MAC; + else + val |= PROC_CMD_SGMII_MAC; + + ret = vsc8584_cmd(phydev, val); + if (ret) + return ret; + + usleep_range(10000, 20000); + + /* Disable SerDes for 100Base-FX */ + ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF | + PROC_CMD_FIBER_PORT(vsc8531->addr) | + PROC_CMD_FIBER_DISABLE | + PROC_CMD_READ_MOD_WRITE_PORT | + PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX); + if (ret) + return ret; + + /* Disable SerDes for 1000Base-X */ + ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF | + PROC_CMD_FIBER_PORT(vsc8531->addr) | + PROC_CMD_FIBER_DISABLE | + PROC_CMD_READ_MOD_WRITE_PORT | + PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X); + if (ret) + return ret; + + return vsc85xx_sd6g_config_v2(phydev); +} + +static int vsc8574_config_host_serdes(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + int ret; + u16 val; ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_GPIO); if (ret) - goto err; + return ret; val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK); val &= ~MAC_CFG_MASK; @@ -1598,17 +1641,17 @@ static int vsc8584_config_init(struct phy_device *phydev) val |= MAC_CFG_RGMII; } else { ret = -EINVAL; - goto err; + return ret; } ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val); if (ret) - goto err; + return ret; ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); if (ret) - goto err; + return ret; if (!phy_interface_is_rgmii(phydev)) { val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT | @@ -1620,7 +1663,7 @@ static int vsc8584_config_init(struct phy_device *phydev) ret = vsc8584_cmd(phydev, val); if (ret) - goto err; + return ret; usleep_range(10000, 20000); } @@ -1632,16 +1675,78 @@ static int vsc8584_config_init(struct phy_device *phydev) PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX); if (ret) - goto err; + return ret; /* Disable SerDes for 1000Base-X */ - ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF | - PROC_CMD_FIBER_PORT(vsc8531->addr) | - PROC_CMD_FIBER_DISABLE | - PROC_CMD_READ_MOD_WRITE_PORT | - PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X); - if (ret) - goto err; + return vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF | + PROC_CMD_FIBER_PORT(vsc8531->addr) | + PROC_CMD_FIBER_DISABLE | + PROC_CMD_READ_MOD_WRITE_PORT | + PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X); +} + +static int vsc8584_config_init(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + int ret, i; + u16 val; + + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + + phy_lock_mdio_bus(phydev); + + /* Some parts of the init sequence are identical for every PHY in the + * package. 
Some parts are modifying the GPIO register bank which is a + * set of registers that are affecting all PHYs, a few resetting the + * microprocessor common to all PHYs. The CRC check responsible of the + * checking the firmware within the 8051 microprocessor can only be + * accessed via the PHY whose internal address in the package is 0. + * All PHYs' interrupts mask register has to be zeroed before enabling + * any PHY's interrupt in this register. + * For all these reasons, we need to do the init sequence once and only + * once whatever is the first PHY in the package that is initialized and + * do the correct init sequence for all PHYs that are package-critical + * in this pre-init function. + */ + if (phy_package_init_once(phydev)) { + /* The following switch statement assumes that the lowest + * nibble of the phy_id_mask is always 0. This works because + * the lowest nibble of the PHY_ID's below are also 0. + */ + WARN_ON(phydev->drv->phy_id_mask & 0xf); + + switch (phydev->phy_id & phydev->drv->phy_id_mask) { + case PHY_ID_VSC8504: + case PHY_ID_VSC8552: + case PHY_ID_VSC8572: + case PHY_ID_VSC8574: + ret = vsc8574_config_pre_init(phydev); + if (ret) + goto err; + ret = vsc8574_config_host_serdes(phydev); + if (ret) + goto err; + break; + case PHY_ID_VSC856X: + case PHY_ID_VSC8575: + case PHY_ID_VSC8582: + case PHY_ID_VSC8584: + ret = vsc8584_config_pre_init(phydev); + if (ret) + goto err; + ret = vsc8584_config_host_serdes(phydev); + if (ret) + goto err; + vsc85xx_coma_mode_release(phydev); + break; + default: + ret = -EINVAL; + break; + } + + if (ret) + goto err; + } phy_unlock_mdio_bus(phydev); diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 077f2929c45e..91e3acb9e397 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -560,6 +560,14 @@ int gen10g_config_aneg(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(gen10g_config_aneg); +int genphy_c45_loopback(struct phy_device *phydev, bool enable) +{ + return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, + MDIO_PCS_CTRL1_LOOPBACK, + enable ? 
MDIO_PCS_CTRL1_LOOPBACK : 0); +} +EXPORT_SYMBOL_GPL(genphy_c45_loopback); + struct phy_driver genphy_c45_driver = { .phy_id = 0xffffffff, .phy_id_mask = 0xffffffff, diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index cc38e326405a..a009d1769b08 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -512,10 +512,21 @@ phy_has_fixups_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(phy_has_fixups); +static ssize_t phy_dev_flags_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + + return sprintf(buf, "0x%08x\n", phydev->dev_flags); +} +static DEVICE_ATTR_RO(phy_dev_flags); + static struct attribute *phy_dev_attrs[] = { &dev_attr_phy_id.attr, &dev_attr_phy_interface.attr, &dev_attr_phy_has_fixups.attr, + &dev_attr_phy_dev_flags.attr, NULL, }; ATTRIBUTE_GROUPS(phy_dev); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 053c92e02cd8..96d8e88b4e46 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -271,8 +271,9 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode) pl->cfg_link_an_mode = MLO_AN_FIXED; fwnode_handle_put(dn); - if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 && - strcmp(managed, "in-band-status") == 0) { + if ((fwnode_property_read_string(fwnode, "managed", &managed) == 0 && + strcmp(managed, "in-band-status") == 0) || + pl->config->ovr_an_inband) { if (pl->cfg_link_an_mode == MLO_AN_FIXED) { phylink_err(pl, "can't use both fixed-link and in-band-status\n"); @@ -476,7 +477,7 @@ static void phylink_major_config(struct phylink *pl, bool restart, err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode, state->interface); if (err < 0) - phylink_err(pl, "mac_prepare failed: %pe\n", + phylink_err(pl, "mac_finish failed: %pe\n", ERR_PTR(err)); } } diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index 4406b353123e..e26cf91bdec2 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -516,6 +516,7 @@ plip_receive(unsigned short nibble_timeout, struct net_device *dev, *data_p |= (c0 << 1) & 0xf0; write_data (dev, 0x00); /* send ACK */ *ns_p = PLIP_NB_BEGIN; + break; case PLIP_NB_2: break; } @@ -808,6 +809,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, return HS_TIMEOUT; } } + break; case PLIP_PK_LENGTH_LSB: if (plip_send(nibble_timeout, dev, diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index d445ecb1d0c7..930e49ef15f6 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1560,12 +1560,34 @@ static void ppp_dev_priv_destructor(struct net_device *dev) ppp_destroy_interface(ppp); } +static int ppp_fill_forward_path(struct net_device_path_ctx *ctx, + struct net_device_path *path) +{ + struct ppp *ppp = netdev_priv(ctx->dev); + struct ppp_channel *chan; + struct channel *pch; + + if (ppp->flags & SC_MULTILINK) + return -EOPNOTSUPP; + + if (list_empty(&ppp->channels)) + return -ENODEV; + + pch = list_first_entry(&ppp->channels, struct channel, clist); + chan = pch->chan; + if (!chan->ops->fill_forward_path) + return -EOPNOTSUPP; + + return chan->ops->fill_forward_path(ctx, path, chan); +} + static const struct net_device_ops ppp_netdev_ops = { .ndo_init = ppp_dev_init, .ndo_uninit = ppp_dev_uninit, .ndo_start_xmit = ppp_start_xmit, .ndo_do_ioctl = ppp_net_ioctl, .ndo_get_stats64 = ppp_get_stats64, + .ndo_fill_forward_path = 
ppp_fill_forward_path, }; static struct device_type ppp_type = { diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index d7f50b835050..3619520340b7 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -25,7 +25,7 @@ * in pppoe_release. * 051000 : Initialization cleanup. * 111100 : Fix recvmsg. - * 050101 : Fix PADT procesing. + * 050101 : Fix PADT processing. * 140501 : Use pppoe_rcv_core to handle all backlog. (Alexey) * 170701 : Do not lock_sock with rwlock held. (DaveM) * Ignore discovery frames if user has socket @@ -96,7 +96,7 @@ struct pppoe_net { * we could use _single_ hash table for all * nets by injecting net id into the hash but * it would increase hash chains and add - * a few additional math comparations messy + * a few additional math comparisons messy * as well, moreover in case of SMP less locking * controversy here */ @@ -972,8 +972,31 @@ static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb) return __pppoe_xmit(sk, skb); } +static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx, + struct net_device_path *path, + const struct ppp_channel *chan) +{ + struct sock *sk = (struct sock *)chan->private; + struct pppox_sock *po = pppox_sk(sk); + struct net_device *dev = po->pppoe_dev; + + if (sock_flag(sk, SOCK_DEAD) || + !(sk->sk_state & PPPOX_CONNECTED) || !dev) + return -1; + + path->type = DEV_PATH_PPPOE; + path->encap.proto = htons(ETH_P_PPP_SES); + path->encap.id = be16_to_cpu(po->num); + memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN); + path->dev = ctx->dev; + ctx->dev = dev; + + return 0; +} + static const struct ppp_channel_ops pppoe_chan_ops = { .start_xmit = pppoe_xmit, + .fill_forward_path = pppoe_fill_forward_path, }; static int pppoe_recvmsg(struct socket *sock, struct msghdr *m, diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 02e6bbb17b15..8d1f69dad603 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -387,6 +387,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i err = register_netdev(dev); if (err) { + /* Set disconnected flag so that disconnect() returns early. 
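[Editor's note] Tying together the two forward-path hunks above: a caller (such as the netfilter flowtable offload code) walks the transmit path by invoking ndo_fill_forward_path layer by layer; the PPP layer delegates to its channel, and the PPPoE channel records its encapsulation and swaps ctx->dev to the underlying device, so the walk continues on the real Ethernet interface. A hedged sketch, where ppp_dev stands for an assumed PPP net_device:

    struct net_device_path path = {};
    struct net_device_path_ctx ctx = { .dev = ppp_dev };

    if (!ppp_dev->netdev_ops->ndo_fill_forward_path(&ctx, &path)) {
            /* path.type == DEV_PATH_PPPOE,
             * path.encap.proto == htons(ETH_P_PPP_SES),
             * ctx.dev is now the lower Ethernet device. */
            netdev_info(ctx.dev, "PPPoE session id 0x%04x\n", path.encap.id);
    }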
*/ + pnd->disconnected = 1; usb_driver_release_interface(&usbpn_driver, data_intf); goto out; } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 8acf30115428..8ae565a801b5 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -920,7 +920,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ goto error2; } - usb_set_intfdata(ctx->data, dev); usb_set_intfdata(ctx->control, dev); if (ctx->ether_desc) { diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 90f1c0200042..20fb5638ac65 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -6553,7 +6553,10 @@ static int rtl_ops_init(struct r8152 *tp) ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; - tp->rx_buf_sz = 32 * 1024; + if (tp->udev->speed < USB_SPEED_SUPER) + tp->rx_buf_sz = 16 * 1024; + else + tp->rx_buf_sz = 32 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break; } diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 05abe360603b..91b73db37555 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -302,8 +302,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) if (rxq < rcv->real_num_rx_queues) { rq = &rcv_priv->rq[rxq]; rcv_xdp = rcu_access_pointer(rq->xdp_prog); - if (rcv_xdp) - skb_record_rx_queue(skb, rxq); + skb_record_rx_queue(skb, rxq); } skb_tx_timestamp(skb); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 254369a41540..bb4ea9dbc16b 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -195,6 +195,9 @@ struct virtnet_info { /* # of XDP queue pairs currently used by the driver */ u16 xdp_queue_pairs; + /* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */ + bool xdp_enabled; + /* I like... big packets and I cannot lie! */ bool big_packets; @@ -481,12 +484,41 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, return 0; } -static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi) -{ - unsigned int qp; - - qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); - return &vi->sq[qp]; +/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on + * the current cpu, so it does not need to be locked. + * + * Here we use macro instead of inline functions because we have to deal with + * three issues at the same time: 1. the choice of sq. 2. judge and execute the + * lock/unlock of txq 3. make sparse happy. It is difficult for two inline + * functions to perfectly solve these three problems at the same time.
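[Editor's note] The macro pair defined next is exercised by virtnet_xdp_xmit() and virtnet_poll() further down in this diff; the intended usage pattern is simply:

    struct send_queue *sq;

    sq = virtnet_xdp_get_sq(vi);  /* picks a sq; locks the txq only when queues are shared */
    /* ... enqueue XDP frames on sq->vq and kick if needed ... */
    virtnet_xdp_put_sq(vi, sq);   /* releases/unlocks the matching txq */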
+ */ +#define virtnet_xdp_get_sq(vi) ({ \ struct netdev_queue *txq; \ typeof(vi) v = (vi); \ unsigned int qp; \ \ if (v->curr_queue_pairs > nr_cpu_ids) { \ qp = v->curr_queue_pairs - v->xdp_queue_pairs; \ qp += smp_processor_id(); \ txq = netdev_get_tx_queue(v->dev, qp); \ __netif_tx_acquire(txq); \ } else { \ qp = smp_processor_id() % v->curr_queue_pairs; \ txq = netdev_get_tx_queue(v->dev, qp); \ __netif_tx_lock(txq, raw_smp_processor_id()); \ } \ v->sq + qp; \ +}) + +#define virtnet_xdp_put_sq(vi, q) { \ struct netdev_queue *txq; \ typeof(vi) v = (vi); \ \ txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \ if (v->curr_queue_pairs > nr_cpu_ids) \ __netif_tx_release(txq); \ else \ __netif_tx_unlock(txq); \ } static int virtnet_xdp_xmit(struct net_device *dev, @@ -512,7 +544,7 @@ static int virtnet_xdp_xmit(struct net_device *dev, if (!xdp_prog) return -ENXIO; - sq = virtnet_xdp_sq(vi); + sq = virtnet_xdp_get_sq(vi); if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { ret = -EINVAL; @@ -557,12 +589,13 @@ out: sq->stats.kicks += kicks; u64_stats_update_end(&sq->stats.syncp); + virtnet_xdp_put_sq(vi, sq); return ret; } static unsigned int virtnet_get_headroom(struct virtnet_info *vi) { - return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0; + return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0; } /* We copy the packet for XDP in the following cases: @@ -1459,12 +1492,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget) xdp_do_flush(); if (xdp_xmit & VIRTIO_XDP_TX) { - sq = virtnet_xdp_sq(vi); + sq = virtnet_xdp_get_sq(vi); if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { u64_stats_update_begin(&sq->stats.syncp); sq->stats.kicks++; u64_stats_update_end(&sq->stats.syncp); } + virtnet_xdp_put_sq(vi, sq); } return received; @@ -1982,7 +2016,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi) } virtqueue_set_affinity(vi->rq[i].vq, mask); virtqueue_set_affinity(vi->sq[i].vq, mask); - __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false); + __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); cpumask_clear(mask); } @@ -2105,25 +2139,21 @@ static int virtnet_set_channels(struct net_device *dev, static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct virtnet_info *vi = netdev_priv(dev); - char *p = (char *)data; unsigned int i, j; + u8 *p = data; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < vi->curr_queue_pairs; i++) { - for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { - snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s", - i, virtnet_rq_stats_desc[j].desc); - p += ETH_GSTRING_LEN; - } + for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) + ethtool_sprintf(&p, "rx_queue_%u_%s", i, + virtnet_rq_stats_desc[j].desc); } for (i = 0; i < vi->curr_queue_pairs; i++) { - for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { - snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s", - i, virtnet_sq_stats_desc[j].desc); - p += ETH_GSTRING_LEN; - } + for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) + ethtool_sprintf(&p, "tx_queue_%u_%s", i, + virtnet_sq_stats_desc[j].desc); } break; } @@ -2419,10 +2449,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, /* XDP requires extra queues for XDP_TX */ if (curr_qp + xdp_qp > vi->max_queue_pairs) { - NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available"); - netdev_warn(dev, "request %i queues but max is %i\n", + netdev_warn(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", curr_qp + xdp_qp, vi->max_queue_pairs); - return -ENOMEM; + xdp_qp = 0; } old_prog = rtnl_dereference(vi->rq[0].xdp_prog); @@ -2456,11 +2485,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, vi->xdp_queue_pairs = xdp_qp; if (prog) { + vi->xdp_enabled = true; for (i = 0; i < vi->max_queue_pairs; i++) { rcu_assign_pointer(vi->rq[i].xdp_prog, prog); if (i == 0 && !old_prog) virtnet_clear_guest_offloads(vi); } + } else { + vi->xdp_enabled = false; } for (i = 0; i < vi->max_queue_pairs; i++) { @@ -2528,7 +2560,7 @@ static int virtnet_set_features(struct net_device *dev, int err; if ((dev->features ^ features) & NETIF_F_LRO) { - if (vi->xdp_queue_pairs) + if (vi->xdp_enabled) return -EBUSY; if (features & NETIF_F_LRO)
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 7ec8652f2c26..c0bd9cbc43b1 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -218,43 +218,28 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) static void vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) { - struct vmxnet3_adapter *adapter = netdev_priv(netdev); - if (stringset == ETH_SS_STATS) { - int i, j; - for (j = 0; j < adapter->num_tx_queues; j++) { - for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { - memcpy(buf, vmxnet3_tq_dev_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; - } - for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); - i++) { - memcpy(buf, vmxnet3_tq_driver_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; - } - } + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + int i, j; - for (j = 0; j < adapter->num_rx_queues; j++) { - for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { - memcpy(buf, vmxnet3_rq_dev_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; - } - for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); - i++) { - memcpy(buf, vmxnet3_rq_driver_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; - } - } + if (stringset != ETH_SS_STATS) + return; - for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { - memcpy(buf, vmxnet3_global_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; - } + for (j = 0; j < adapter->num_tx_queues; j++) { + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) + ethtool_sprintf(&buf, vmxnet3_tq_dev_stats[i].desc); + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) + ethtool_sprintf(&buf, vmxnet3_tq_driver_stats[i].desc); + } + + for (j = 0; j < adapter->num_rx_queues; j++) { + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) + ethtool_sprintf(&buf, vmxnet3_rq_dev_stats[i].desc); + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) + ethtool_sprintf(&buf, vmxnet3_rq_driver_stats[i].desc); } + + for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) + ethtool_sprintf(&buf, vmxnet3_global_stats[i].desc); } netdev_features_t vmxnet3_fix_features(struct net_device *netdev, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 666dd201c3d5..7665817f3cb6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -3703,6 +3703,7 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, #if IS_ENABLED(CONFIG_IPV6) if (use_ipv6) { struct inet6_dev *idev = __in6_dev_get(lowerdev); + if (idev && idev->cnf.disable_ipv6) { NL_SET_ERR_MSG(extack, "IPv6 support disabled by administrator");
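The virtio_net change above replaces the old fixed per-CPU queue mapping with a get/put pair: when there are more queue pairs than CPUs, every CPU owns a dedicated XDP send queue and no lock is taken; otherwise CPUs share send queues and the per-txq lock serializes them. A minimal sketch of that selection policy follows (illustrative only; choose_xdp_sq and struct xdp_sq_choice are hypothetical names, not part of the patch):

/* Sketch: the queue-selection policy behind virtnet_xdp_get_sq()/put_sq(). */
struct xdp_sq_choice {
	unsigned int qp;	/* index of the chosen send queue */
	bool needs_lock;	/* true when several CPUs may share it */
};

static struct xdp_sq_choice choose_xdp_sq(unsigned int curr_queue_pairs,
					  unsigned int xdp_queue_pairs,
					  unsigned int cpu, unsigned int ncpus)
{
	struct xdp_sq_choice c;

	if (curr_queue_pairs > ncpus) {
		/* A dedicated XDP tx queue per CPU: lock-free fast path. */
		c.qp = curr_queue_pairs - xdp_queue_pairs + cpu;
		c.needs_lock = false;
	} else {
		/* Fewer queues than CPUs: share a queue and take the txq lock. */
		c.qp = cpu % curr_queue_pairs;
		c.needs_lock = true;
	}
	return c;
}

This is also why loading XDP can now proceed with xdp_qp == 0: instead of failing with -ENOMEM, the driver falls back to the slower locked tx mode the warning describes.

diff --git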
a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index 4aaa6388b9ee..5a6a945f6c81 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -23,6 +23,8 @@ struct x25_state { x25_hdlc_proto settings; + bool up; + spinlock_t up_lock; /* Protects "up" */ }; static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); @@ -104,6 +106,8 @@ static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb) static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) { + hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); int result; /* There should be a pseudo header of 1 byte added by upper layers. @@ -114,11 +118,19 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } + spin_lock_bh(&x25st->up_lock); + if (!x25st->up) { + spin_unlock_bh(&x25st->up_lock); + kfree_skb(skb); + return NETDEV_TX_OK; + } + switch (skb->data[0]) { case X25_IFACE_DATA: /* Data to be transmitted */ skb_pull(skb, 1); if ((result = lapb_data_request(dev, skb)) != LAPB_OK) dev_kfree_skb(skb); + spin_unlock_bh(&x25st->up_lock); return NETDEV_TX_OK; case X25_IFACE_CONNECT: @@ -147,6 +159,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) break; } + spin_unlock_bh(&x25st->up_lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -164,6 +177,7 @@ static int x25_open(struct net_device *dev) .data_transmit = x25_data_transmit, }; hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); struct lapb_parms_struct params; int result; @@ -190,6 +204,10 @@ static int x25_open(struct net_device *dev) if (result != LAPB_OK) return -EINVAL; + spin_lock_bh(&x25st->up_lock); + x25st->up = true; + spin_unlock_bh(&x25st->up_lock); + return 0; } @@ -197,6 +215,13 @@ static int x25_open(struct net_device *dev) static void x25_close(struct net_device *dev) { + hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); + + spin_lock_bh(&x25st->up_lock); + x25st->up = false; + spin_unlock_bh(&x25st->up_lock); + lapb_unregister(dev); } @@ -205,15 +230,28 @@ static void x25_close(struct net_device *dev) static int x25_rx(struct sk_buff *skb) { struct net_device *dev = skb->dev; + hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { dev->stats.rx_dropped++; return NET_RX_DROP; } - if (lapb_data_received(dev, skb) == LAPB_OK) + spin_lock_bh(&x25st->up_lock); + if (!x25st->up) { + spin_unlock_bh(&x25st->up_lock); + kfree_skb(skb); + dev->stats.rx_dropped++; + return NET_RX_DROP; + } + + if (lapb_data_received(dev, skb) == LAPB_OK) { + spin_unlock_bh(&x25st->up_lock); return NET_RX_SUCCESS; + } + spin_unlock_bh(&x25st->up_lock); dev->stats.rx_errors++; dev_kfree_skb_any(skb); return NET_RX_DROP; @@ -298,6 +336,8 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) return result; memcpy(&state(hdlc)->settings, &new_settings, size); + state(hdlc)->up = false; + spin_lock_init(&state(hdlc)->up_lock); /* There's no header_ops so hard_header_len should be 0. 
*/ dev->hard_header_len = 0; diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index c3372498f4f1..45d74285265a 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -51,6 +51,8 @@ struct lapbethdev { struct list_head node; struct net_device *ethdev; /* link to ethernet device */ struct net_device *axdev; /* lapbeth device (lapb#) */ + bool up; + spinlock_t up_lock; /* Protects "up" */ }; static LIST_HEAD(lapbeth_devices); @@ -101,8 +103,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe rcu_read_lock(); lapbeth = lapbeth_get_x25_dev(dev); if (!lapbeth) - goto drop_unlock; - if (!netif_running(lapbeth->axdev)) + goto drop_unlock_rcu; + spin_lock_bh(&lapbeth->up_lock); + if (!lapbeth->up) goto drop_unlock; len = skb->data[0] + skb->data[1] * 256; @@ -117,11 +120,14 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe goto drop_unlock; } out: + spin_unlock_bh(&lapbeth->up_lock); rcu_read_unlock(); return 0; drop_unlock: kfree_skb(skb); goto out; +drop_unlock_rcu: + rcu_read_unlock(); drop: kfree_skb(skb); return 0; @@ -151,13 +157,11 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, struct net_device *dev) { + struct lapbethdev *lapbeth = netdev_priv(dev); int err; - /* - * Just to be *really* sure not to send anything if the interface - * is down, the ethernet device may have gone. - */ - if (!netif_running(dev)) + spin_lock_bh(&lapbeth->up_lock); + if (!lapbeth->up) goto drop; /* There should be a pseudo header of 1 byte added by upper layers. @@ -194,6 +198,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, goto drop; } out: + spin_unlock_bh(&lapbeth->up_lock); return NETDEV_TX_OK; drop: kfree_skb(skb); @@ -285,6 +290,7 @@ static const struct lapb_register_struct lapbeth_callbacks = { */ static int lapbeth_open(struct net_device *dev) { + struct lapbethdev *lapbeth = netdev_priv(dev); int err; if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) { @@ -292,13 +298,22 @@ static int lapbeth_open(struct net_device *dev) return -ENODEV; } + spin_lock_bh(&lapbeth->up_lock); + lapbeth->up = true; + spin_unlock_bh(&lapbeth->up_lock); + return 0; } static int lapbeth_close(struct net_device *dev) { + struct lapbethdev *lapbeth = netdev_priv(dev); int err; + spin_lock_bh(&lapbeth->up_lock); + lapbeth->up = false; + spin_unlock_bh(&lapbeth->up_lock); + if ((err = lapb_unregister(dev)) != LAPB_OK) pr_err("lapb_unregister error: %d\n", err); @@ -356,6 +371,9 @@ static int lapbeth_new_device(struct net_device *dev) dev_hold(dev); lapbeth->ethdev = dev; + lapbeth->up = false; + spin_lock_init(&lapbeth->up_lock); + rc = -EIO; if (register_netdevice(ndev)) goto fail; @@ -403,8 +421,8 @@ static int lapbeth_device_event(struct notifier_block *this, if (lapbeth_get_x25_dev(dev) == NULL) lapbeth_new_device(dev); break; - case NETDEV_DOWN: - /* ethernet device closed -> close LAPB interface */ + case NETDEV_GOING_DOWN: + /* ethernet device closes -> close LAPB interface */ lapbeth = lapbeth_get_x25_dev(dev); if (lapbeth) dev_close(lapbeth->axdev); diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c index c41e72508d3d..2db9c948c0fc 100644 --- a/drivers/net/wireless/admtek/adm8211.c +++ b/drivers/net/wireless/admtek/adm8211.c @@ -28,7 +28,6 @@ MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); MODULE_AUTHOR("Jouni Malinen <j@w1.fi>"); 
MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211"); -MODULE_SUPPORTED_DEVICE("ADM8211"); MODULE_LICENSE("GPL"); static unsigned int tx_ring_size __read_mostly = 16; diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 4c6e57f9976d..cef17f33c69e 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -90,7 +90,6 @@ MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state"); MODULE_AUTHOR("Jiri Slaby"); MODULE_AUTHOR("Nick Kossifidis"); MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards."); -MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); static int ath5k_init(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index b66eeb577272..5abc2a5526ec 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -34,7 +34,6 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); -MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); static void ath9k_hw_set_clockrate(struct ath_hw *ah) diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 42a208787f5a..01f9c26f9bf3 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -37,7 +37,6 @@ static char *dev_info = "ath9k"; MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); -MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); static unsigned int ath9k_debug = ATH_DBG_DEFAULT; diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index 707fe66727f8..febce4e8b3dd 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -75,7 +75,6 @@ MODULE_AUTHOR("Simon Kelley"); MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("Atmel at76c50x wireless cards"); /* The name of the firmware file to be loaded over-rides any automatic selection */ diff --git a/drivers/net/wireless/atmel/atmel_cs.c b/drivers/net/wireless/atmel/atmel_cs.c index 368eebefa741..453bb84cb338 100644 --- a/drivers/net/wireless/atmel/atmel_cs.c +++ b/drivers/net/wireless/atmel/atmel_cs.c @@ -57,7 +57,6 @@ MODULE_AUTHOR("Simon Kelley"); MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards"); /*====================================================================*/ diff --git a/drivers/net/wireless/atmel/atmel_pci.c b/drivers/net/wireless/atmel/atmel_pci.c index 47f7ccb32414..f428dc79d916 100644 --- a/drivers/net/wireless/atmel/atmel_pci.c +++ b/drivers/net/wireless/atmel/atmel_pci.c @@ -16,7 +16,6 @@ MODULE_AUTHOR("Simon Kelley"); MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards"); static const struct pci_device_id card_ids[] = { { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID }, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index 
818e523f6025..39f3af2d0439 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -87,7 +87,6 @@ static int n_adapters_found; MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); /* This needs to be adjusted when brcms_firmwares changes */ MODULE_FIRMWARE("brcm/bcm43xx-0.fw"); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c index 4c84c3001c3f..e87e68cc46e2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c @@ -12,7 +12,6 @@ MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver utilities."); -MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); struct sk_buff *brcmu_pkt_buf_get_skb(uint len) diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index e35e1380ae43..60db38c38960 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -251,7 +251,6 @@ MODULE_AUTHOR("Benjamin Reed"); MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet cards. " "Direct support for ISA/PCI/MPI cards and support for PCMCIA when used with airo_cs."); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350"); module_param_hw_array(io, int, ioport, NULL, 0); module_param_hw_array(irq, int, irq, NULL, 0); module_param_array(rates, int, NULL, 0); diff --git a/drivers/net/wireless/cisco/airo_cs.c b/drivers/net/wireless/cisco/airo_cs.c index 3718f958c0fc..fcfe4c6d62f0 100644 --- a/drivers/net/wireless/cisco/airo_cs.c +++ b/drivers/net/wireless/cisco/airo_cs.c @@ -47,7 +47,6 @@ MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet " "cards. 
This is the module that links the PCMCIA card " "with the airo module."); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards"); /*====================================================================*/ diff --git a/drivers/net/wireless/intersil/hostap/hostap_cs.c b/drivers/net/wireless/intersil/hostap/hostap_cs.c index 1a748670835a..ec7db2badc40 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_cs.c +++ b/drivers/net/wireless/intersil/hostap/hostap_cs.c @@ -26,7 +26,6 @@ static char *dev_info = "hostap_cs"; MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN " "cards (PC Card)."); -MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PC Card)"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/intersil/hostap/hostap_pci.c b/drivers/net/wireless/intersil/hostap/hostap_pci.c index 101887e6bd0f..52d77506effd 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_pci.c +++ b/drivers/net/wireless/intersil/hostap/hostap_pci.c @@ -27,7 +27,6 @@ static char *dev_info = "hostap_pci"; MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN " "PCI cards."); -MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/intersil/hostap/hostap_plx.c b/drivers/net/wireless/intersil/hostap/hostap_plx.c index 841cfc68ce84..58247290fcbc 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_plx.c +++ b/drivers/net/wireless/intersil/hostap/hostap_plx.c @@ -30,7 +30,6 @@ static char *dev_info = "hostap_plx"; MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN " "cards (PLX)."); -MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PLX)"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c index 8f860c14da58..dec6ffdf07c4 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c @@ -1821,7 +1821,6 @@ static const struct pci_device_id rt2400pci_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2400 PCI & PCMCIA Wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Ralink RT2460 PCI & PCMCIA chipset based cards"); MODULE_DEVICE_TABLE(pci, rt2400pci_device_table); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c index e940443c52ad..8faa0a80e73a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c @@ -2119,7 +2119,6 @@ static const struct pci_device_id rt2500pci_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2500 PCI & PCMCIA Wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Ralink RT2560 PCI & PCMCIA chipset based cards"); MODULE_DEVICE_TABLE(pci, rt2500pci_device_table); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index fce05fc88aaf..bb5ed6630645 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -1956,7 +1956,6 @@ static const struct usb_device_id rt2500usb_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2500 USB Wireless 
LAN driver."); -MODULE_SUPPORTED_DEVICE("Ralink RT2570 USB chipset based cards"); MODULE_DEVICE_TABLE(usb, rt2500usb_device_table); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c index 9a33baaa6184..1fde0e767ce3 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c @@ -439,7 +439,6 @@ static const struct pci_device_id rt2800pci_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards"); MODULE_FIRMWARE(FIRMWARE_RT2860); MODULE_DEVICE_TABLE(pci, rt2800pci_device_table); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index 36ac18ca8082..b5c67f656cfd 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -1248,7 +1248,6 @@ static const struct usb_device_id rt2800usb_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2800 USB Wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Ralink RT2870 USB chipset based cards"); MODULE_DEVICE_TABLE(usb, rt2800usb_device_table); MODULE_FIRMWARE(FIRMWARE_RT2870); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index 02da5dd37ddd..82cfc2aadc2b 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -2993,8 +2993,6 @@ static const struct pci_device_id rt61pci_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT61 PCI & PCMCIA Wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Ralink RT2561, RT2561s & RT2661 " - "PCI & PCMCIA chipset based cards"); MODULE_DEVICE_TABLE(pci, rt61pci_device_table); MODULE_FIRMWARE(FIRMWARE_RT2561); MODULE_FIRMWARE(FIRMWARE_RT2561s); diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c index e69793773d87..5ff2c740c3ea 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c @@ -2513,7 +2513,6 @@ static const struct usb_device_id rt73usb_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT73 USB Wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Ralink RT2571W & RT2671 USB chipset based cards"); MODULE_DEVICE_TABLE(usb, rt73usb_device_table); MODULE_FIRMWARE(FIRMWARE_RT2571); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index 9a3d2439a8e7..d98483298555 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -441,6 +441,5 @@ module_init(rsi_91x_hal_module_init); module_exit(rsi_91x_hal_module_exit); MODULE_AUTHOR("Redpine Signals Inc"); MODULE_DESCRIPTION("Station driver for RSI 91x devices"); -MODULE_SUPPORTED_DEVICE("RSI-91x"); MODULE_VERSION("0.1"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 592e9dadcb55..fe0287b22a25 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -1571,7 +1571,6 @@ module_exit(rsi_module_exit); MODULE_AUTHOR("Redpine 
Signals Inc"); MODULE_DESCRIPTION("Common SDIO layer for RSI drivers"); -MODULE_SUPPORTED_DEVICE("RSI-91x"); MODULE_DEVICE_TABLE(sdio, rsi_dev_table); MODULE_FIRMWARE(FIRMWARE_RSI9113); MODULE_VERSION("0.1"); diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index a4a533c2a783..3fbe2a3c1455 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -928,7 +928,6 @@ module_usb_driver(rsi_driver); MODULE_AUTHOR("Redpine Signals Inc"); MODULE_DESCRIPTION("Common USB layer for RSI drivers"); -MODULE_SUPPORTED_DEVICE("RSI-91x"); MODULE_DEVICE_TABLE(usb, rsi_dev_table); MODULE_FIRMWARE(FIRMWARE_RSI9113); MODULE_VERSION("0.1"); diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c index 4dc7bd7e02b6..ee2baa2b2fcf 100644 --- a/drivers/nfc/fdp/fdp.c +++ b/drivers/nfc/fdp/fdp.c @@ -176,7 +176,7 @@ static void fdp_nci_set_data_pkt_counter(struct nci_dev *ndev, * * The firmware will be analyzed and applied when we send NCI_OP_PROP_PATCH_CMD * command with NCI_PATCH_TYPE_EOT parameter. The device will send a - * NFCC_PATCH_NTF packaet and a NCI_OP_CORE_RESET_NTF packet. + * NFCC_PATCH_NTF packet and a NCI_OP_CORE_RESET_NTF packet. */ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type) { @@ -236,15 +236,12 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type) static int fdp_nci_open(struct nci_dev *ndev) { - int r; struct fdp_nci_info *info = nci_get_drvdata(ndev); struct device *dev = &info->phy->i2c_dev->dev; dev_dbg(dev, "%s\n", __func__); - r = info->phy_ops->enable(info->phy); - - return r; + return info->phy_ops->enable(info->phy); } static int fdp_nci_close(struct nci_dev *ndev) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index e68a8c4ac5a6..0896e21642be 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -380,6 +380,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved) return true; nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; + nvme_req(req)->flags |= NVME_REQ_CANCELLED; blk_mq_complete_request(req); return true; } @@ -1225,28 +1226,12 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ); } -static int nvme_keep_alive(struct nvme_ctrl *ctrl) -{ - struct request *rq; - - rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, - BLK_MQ_REQ_RESERVED); - if (IS_ERR(rq)) - return PTR_ERR(rq); - - rq->timeout = ctrl->kato * HZ; - rq->end_io_data = ctrl; - - blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io); - - return 0; -} - static void nvme_keep_alive_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), struct nvme_ctrl, ka_work); bool comp_seen = ctrl->comp_seen; + struct request *rq; if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { dev_dbg(ctrl->device, @@ -1256,12 +1241,18 @@ static void nvme_keep_alive_work(struct work_struct *work) return; } - if (nvme_keep_alive(ctrl)) { + rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, + BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); + if (IS_ERR(rq)) { /* allocation failure, reset the controller */ - dev_err(ctrl->device, "keep-alive failed\n"); + dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq)); nvme_reset_ctrl(ctrl); return; } + + rq->timeout = ctrl->kato * HZ; + rq->end_io_data = ctrl; + blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io); } static void nvme_start_keep_alive(struct nvme_ctrl 
*ctrl) @@ -1440,7 +1431,7 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, goto out_free_id; } - error = -ENODEV; + error = NVME_SC_INVALID_NS | NVME_SC_DNR; if ((*id)->ncap == 0) /* namespace not allocated or attached */ goto out_free_id; @@ -1963,30 +1954,18 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); } -static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns) +/* + * Even though the NVMe spec explicitly states that MDTS is not applicable to + * write-zeroes, we are cautious and limit the size to the controller's + * max_hw_sectors value, which is based on the MDTS field and possibly other + * limiting factors. + */ +static void nvme_config_write_zeroes(struct request_queue *q, + struct nvme_ctrl *ctrl) { - u64 max_blocks; - - if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) || - (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) - return; - /* - * Even though NVMe spec explicitly states that MDTS is not - * applicable to the write-zeroes:- "The restriction does not apply to - * commands that do not transfer data between the host and the - * controller (e.g., Write Uncorrectable ro Write Zeroes command).". - * In order to be more cautious use controller's max_hw_sectors value - * to configure the maximum sectors for the write-zeroes which is - * configured based on the controller's MDTS field in the - * nvme_init_identify() if available. - */ - if (ns->ctrl->max_hw_sectors == UINT_MAX) - max_blocks = (u64)USHRT_MAX + 1; - else - max_blocks = ns->ctrl->max_hw_sectors + 1; - - blk_queue_max_write_zeroes_sectors(disk->queue, - nvme_lba_to_sect(ns, max_blocks)); + if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) && + !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) + blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors); } static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) @@ -2158,7 +2137,7 @@ static void nvme_update_disk_info(struct gendisk *disk, set_capacity_and_notify(disk, capacity); nvme_config_discard(disk, ns); - nvme_config_write_zeroes(disk, ns); + nvme_config_write_zeroes(disk->queue, ns->ctrl); set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) || test_bit(NVME_NS_FORCE_RO, &ns->flags)); @@ -4038,7 +4017,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids) { struct nvme_id_ns *id; - int ret = -ENODEV; + int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; if (test_bit(NVME_NS_DEAD, &ns->flags)) goto out; @@ -4047,7 +4026,7 @@ static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids) if (ret) goto out; - ret = -ENODEV; + ret = NVME_SC_INVALID_NS | NVME_SC_DNR; if (!nvme_ns_ids_equal(&ns->head->ids, ids)) { dev_err(ns->ctrl->device, "identifiers changed for nsid %d\n", ns->head->ns_id); @@ -4065,7 +4044,7 @@ out: * * TODO: we should probably schedule a delayed retry here. */ - if (ret && ret != -ENOMEM && !(ret > 0 && !(ret & NVME_SC_DNR))) + if (ret > 0 && (ret & NVME_SC_DNR)) nvme_ns_remove(ns); } @@ -4095,6 +4074,12 @@ static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) nsid); break; } + if (!nvme_multi_css(ctrl)) { + dev_warn(ctrl->device, + "command set not reported for nsid: %d\n", + nsid); + break; + } nvme_alloc_ns(ctrl, nsid, &ids); break; default:
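A subtlety in the nvme core hunks above: nvme_validate_ns() now mixes negative kernel errnos (transient failures such as -ENOMEM) with positive NVMe status words, and only a positive status carrying the DNR (Do Not Retry) bit leads to namespace removal. A hedged sketch of that convention (the helper name is hypothetical, not from the patch):

/* Sketch: interpreting the mixed errno/NVMe-status return of nvme_validate_ns(). */
static inline bool nvme_status_means_remove(int ret)
{
	/* Negative errnos and retryable NVMe statuses keep the namespace. */
	return ret > 0 && (ret & NVME_SC_DNR);
}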
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index 733010d2eafd..888b108d87a4 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -19,6 +19,13 @@ #define NVMF_DEF_FAIL_FAST_TMO -1 /* + * Reserve one command for internal use. This command is used for sending + * the connect command, as well as for the keep alive command on the admin + * queue once live. + */ +#define NVMF_RESERVED_TAGS 1 + +/* * Define a host as seen by the target. We allocate one at boot, but also * allow overriding it when creating controllers. This is both to provide * persistence of the Host NQN over multiple boots, and to allow using diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 20dadd86e981..6ffa8de2a0d7 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1956,7 +1956,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) sizeof(op->rsp_iu), DMA_FROM_DEVICE); if (opstate == FCPOP_STATE_ABORTED) - status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); + status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1); else if (freq->status) { status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); dev_info(ctrl->ctrl.device, @@ -2055,7 +2055,7 @@ done: nvme_fc_complete_rq(rq); check_error: - if (terminate_assoc) + if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING) queue_work(nvme_reset_wq, &ctrl->ioerr_work); } @@ -2443,6 +2443,7 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved) struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); + op->nreq.flags |= NVME_REQ_CANCELLED; __nvme_fc_abort_op(ctrl, op); return true; } @@ -2862,7 +2863,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); ctrl->tag_set.ops = &nvme_fc_mq_ops; ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; - ctrl->tag_set.reserved_tags = 1; /* fabric connect */ + ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS; ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; ctrl->tag_set.cmd_size = @@ -3484,7 +3485,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; - ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ + ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS; ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; ctrl->admin_tag_set.cmd_size = struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 17ab3320d28b..7249ae74f71f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3246,6 +3246,7 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | + NVME_QUIRK_DISABLE_WRITE_ZEROES | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1987, 0x5016), /*
Phison E16 */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 53ac4d7442ba..be905d4fdb47 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -736,8 +736,11 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) return ret; ctrl->ctrl.queue_count = nr_io_queues + 1; - if (ctrl->ctrl.queue_count < 2) - return 0; + if (ctrl->ctrl.queue_count < 2) { + dev_err(ctrl->ctrl.device, + "unable to set any I/O queues\n"); + return -ENOMEM; + } dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); @@ -798,7 +801,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, memset(set, 0, sizeof(*set)); set->ops = &nvme_rdma_admin_mq_ops; set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; - set->reserved_tags = 2; /* connect + keep-alive */ + set->reserved_tags = NVMF_RESERVED_TAGS; set->numa_node = nctrl->numa_node; set->cmd_size = sizeof(struct nvme_rdma_request) + NVME_RDMA_DATA_SGL_SIZE; @@ -811,7 +814,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, memset(set, 0, sizeof(*set)); set->ops = &nvme_rdma_mq_ops; set->queue_depth = nctrl->sqsize + 1; - set->reserved_tags = 1; /* fabric connect */ + set->reserved_tags = NVMF_RESERVED_TAGS; set->numa_node = nctrl->numa_node; set->flags = BLK_MQ_F_SHOULD_MERGE; set->cmd_size = sizeof(struct nvme_rdma_request) + diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 69f59d2c5799..a0f00cb8f9f3 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -287,7 +287,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, * directly, otherwise queue io_work. Also, only do that if we * are on the same cpu, so we don't introduce contention. 
*/ - if (queue->io_cpu == __smp_processor_id() && + if (queue->io_cpu == raw_smp_processor_id() && sync && empty && mutex_trylock(&queue->send_mutex)) { queue->more_requests = !last; nvme_tcp_send_all(queue); @@ -568,6 +568,13 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, req->pdu_len = le32_to_cpu(pdu->r2t_length); req->pdu_sent = 0; + if (unlikely(!req->pdu_len)) { + dev_err(queue->ctrl->ctrl.device, + "req %d r2t len is %u, probably a bug...\n", + rq->tag, req->pdu_len); + return -EPROTO; + } + if (unlikely(req->data_sent + req->pdu_len > req->data_len)) { dev_err(queue->ctrl->ctrl.device, "req %d r2t len %u exceeded data len %u (%zu sent)\n", @@ -1575,7 +1582,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl, memset(set, 0, sizeof(*set)); set->ops = &nvme_tcp_admin_mq_ops; set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; - set->reserved_tags = 2; /* connect + keep-alive */ + set->reserved_tags = NVMF_RESERVED_TAGS; set->numa_node = nctrl->numa_node; set->flags = BLK_MQ_F_BLOCKING; set->cmd_size = sizeof(struct nvme_tcp_request); @@ -1587,7 +1594,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl, memset(set, 0, sizeof(*set)); set->ops = &nvme_tcp_mq_ops; set->queue_depth = nctrl->sqsize + 1; - set->reserved_tags = 1; /* fabric connect */ + set->reserved_tags = NVMF_RESERVED_TAGS; set->numa_node = nctrl->numa_node; set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; set->cmd_size = sizeof(struct nvme_tcp_request); @@ -1745,8 +1752,11 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) return ret; ctrl->queue_count = nr_io_queues + 1; - if (ctrl->queue_count < 2) - return 0; + if (ctrl->queue_count < 2) { + dev_err(ctrl->device, + "unable to set any I/O queues\n"); + return -ENOMEM; + } dev_info(ctrl->device, "creating %d I/O queues.\n", nr_io_queues); diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index c7e3ec561ba0..bc2f344f0ae0 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -9,7 +9,13 @@ int nvme_revalidate_zones(struct nvme_ns *ns) { - return blk_revalidate_disk_zones(ns->disk, NULL); + struct request_queue *q = ns->queue; + int ret; + + ret = blk_revalidate_disk_zones(ns->disk, NULL); + if (!ret) + blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append); + return ret; } static int nvme_set_max_append(struct nvme_ctrl *ctrl) @@ -107,7 +113,6 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf) blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1); blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1); - blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append); free_data: kfree(id); return status; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index be6fcdaf51a7..a027433b8be8 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -1118,9 +1118,20 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) { lockdep_assert_held(&ctrl->lock); - if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || - nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES || - nvmet_cc_mps(ctrl->cc) != 0 || + /* + * Only I/O controllers should verify iosqes,iocqes. + * Strictly speaking, the spec says a discovery controller + * should verify iosqes,iocqes are zeroed, however that + * would break backwards compatibility, so don't enforce it. 
+ */ + if (ctrl->subsys->type != NVME_NQN_DISC && + (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || + nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) { + ctrl->csts = NVME_CSTS_CFS; + return; + } + + if (nvmet_cc_mps(ctrl->cc) != 0 || nvmet_cc_ams(ctrl->cc) != 0 || nvmet_cc_css(ctrl->cc) != 0) { ctrl->csts = NVME_CSTS_CFS; diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index cb6f86572b24..3e189e753bcf 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -349,7 +349,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops; ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; - ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */ + ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS; ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) + NVME_INLINE_SG_CNT * sizeof(struct scatterlist); @@ -520,7 +520,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); ctrl->tag_set.ops = &nvme_loop_mq_ops; ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; - ctrl->tag_set.reserved_tags = 1; /* fabric connect */ + ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS; ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) + diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 26c587ccd152..2798944899b7 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -50,9 +50,9 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req) /* * nvmet_passthru_map_sg is limited to using a single bio so limit - * the mdts based on BIO_MAX_PAGES as well + * the mdts based on BIO_MAX_VECS as well */ - max_hw_sectors = min_not_zero(BIO_MAX_PAGES << (PAGE_SHIFT - 9), + max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9), max_hw_sectors); page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12; @@ -191,7 +191,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) struct bio *bio; int i; - if (req->sg_cnt > BIO_MAX_PAGES) + if (req->sg_cnt > BIO_MAX_VECS) return -EINVAL; if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) { diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 06b6b742bb21..6c1f3ab7649c 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -802,9 +802,8 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc) nvmet_req_uninit(&rsp->req); nvmet_rdma_release_rsp(rsp); if (wc->status != IB_WC_WR_FLUSH_ERR) { - pr_info("RDMA WRITE for CQE 0x%p failed with status %s (%d).\n", - wc->wr_cqe, ib_wc_status_msg(wc->status), - wc->status); + pr_info("RDMA WRITE for CQE failed with status %s (%d).\n", - ib_wc_status_msg(wc->status), wc->status); nvmet_rdma_error_comp(queue); } return; diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 8b0485ada315..d658c6e8263a 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -1098,11 +1098,11 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) cmd->rbytes_done += ret; } + nvmet_tcp_unmap_pdu_iovec(cmd); if (queue->data_digest) { nvmet_tcp_prep_recv_ddgst(cmd); return 0; } - nvmet_tcp_unmap_pdu_iovec(cmd); if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) && cmd->rbytes_done == cmd->req.transfer_len) {
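The reserved-tag consolidation visible here and in the earlier fc/rdma/tcp hunks rests on one observation: a fabrics connect command only runs on a freshly created queue (or after all commands on it have been torn down), so it can never be in flight at the same time as the admin queue's keep-alive, and a single reserved tag suffices where two were reserved before. A sketch of the now-shared setup (nvmf_init_tag_set is a hypothetical helper, not in the patch):

/* Sketch: tag-set initialization shared by the fabrics transports. */
static void nvmf_init_tag_set(struct blk_mq_tag_set *set, unsigned int depth)
{
	memset(set, 0, sizeof(*set));
	set->queue_depth = depth;
	set->reserved_tags = NVMF_RESERVED_TAGS;	/* one tag covers connect and keep-alive */
}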
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c index 6e411821583e..bc0a27de69d4 100644 --- a/drivers/of/of_net.c +++ b/drivers/of/of_net.c @@ -79,6 +79,9 @@ static const void *of_get_mac_addr_nvmem(struct device_node *np) } /** + * of_get_mac_address() + * @np: Caller's Device Node + * * Search the device tree for the best MAC address to use. 'mac-address' is * checked first, because that is supposed to contain the "most recent" MAC * address. If that isn't set, then 'local-mac-address' is checked next, diff --git a/drivers/opp/core.c b/drivers/opp/core.c index c2689386a906..1556998425d5 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1492,7 +1492,11 @@ static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table, mutex_lock(&opp_table->lock); list_for_each_entry(temp, &opp_table->opp_list, node) { - if (dynamic == temp->dynamic) { + /* + * The refcount must be dropped only once for each OPP by the OPP + * core; do that with the help of the "removed" flag. + */ + if (!temp->removed && dynamic == temp->dynamic) { opp = temp; break; } @@ -1502,10 +1506,27 @@ static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table, return opp; } -bool _opp_remove_all_static(struct opp_table *opp_table) +/* + * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to + * happen lockless to avoid circular dependency issues. This routine must be + * called without the opp_table->lock held. + */ +static void _opp_remove_all(struct opp_table *opp_table, bool dynamic) { struct dev_pm_opp *opp; + while ((opp = _opp_get_next(opp_table, dynamic))) { + opp->removed = true; + dev_pm_opp_put(opp); + + /* Drop the references taken by dev_pm_opp_add() */ + if (dynamic) + dev_pm_opp_put_opp_table(opp_table); + } +} + +bool _opp_remove_all_static(struct opp_table *opp_table) +{ mutex_lock(&opp_table->lock); if (!opp_table->parsed_static_opps) { @@ -1520,13 +1541,7 @@ bool _opp_remove_all_static(struct opp_table *opp_table) mutex_unlock(&opp_table->lock); - /* - * Can't remove the OPP from under the lock, debugfs removal needs to - * happen lock less to avoid circular dependency issues. - */ - while ((opp = _opp_get_next(opp_table, false))) - dev_pm_opp_put(opp); - + _opp_remove_all(opp_table, false); return true; } @@ -1539,25 +1554,12 @@ bool _opp_remove_all_static(struct opp_table *opp_table) void dev_pm_opp_remove_all_dynamic(struct device *dev) { struct opp_table *opp_table; - struct dev_pm_opp *opp; - int count = 0; opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return; - /* - * Can't remove the OPP from under the lock, debugfs removal needs to - * happen lock less to avoid circular dependency issues. - */ - while ((opp = _opp_get_next(opp_table, true))) { - dev_pm_opp_put(opp); - count++; - } - - /* Drop the references taken by dev_pm_opp_add() */ - while (count--) - dev_pm_opp_put_opp_table(opp_table); + _opp_remove_all(opp_table, true); /* Drop the reference taken by _find_opp_table() */ dev_pm_opp_put_opp_table(opp_table); diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 50fb9dced3c5..407c3bfe51d9 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -56,6 +56,7 @@ extern struct list_head opp_tables, lazy_opp_tables; * @dynamic: not-created from static DT entries. * @turbo: true if turbo (boost) OPP * @suspend: true if suspend OPP + * @removed: flag indicating that the OPP's reference was dropped by the OPP core. * @pstate: Device's power domain's performance state.
* @rate: Frequency in hertz * @level: Performance level @@ -78,6 +79,7 @@ struct dev_pm_opp { bool dynamic; bool turbo; bool suspend; + bool removed; unsigned int pstate; unsigned long rate; unsigned int level; diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c index 1e88bcfe0d7b..84d5701d606c 100644 --- a/drivers/parport/parport_amiga.c +++ b/drivers/parport/parport_amiga.c @@ -241,6 +241,5 @@ module_platform_driver_probe(amiga_parallel_driver, amiga_parallel_probe); MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>"); MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port"); -MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:amiga-parallel"); diff --git a/drivers/parport/parport_atari.c b/drivers/parport/parport_atari.c index 2ff0fe053e6e..1623f010cdcc 100644 --- a/drivers/parport/parport_atari.c +++ b/drivers/parport/parport_atari.c @@ -218,7 +218,6 @@ static void __exit parport_atari_exit(void) MODULE_AUTHOR("Andreas Schwab"); MODULE_DESCRIPTION("Parport Driver for Atari builtin Port"); -MODULE_SUPPORTED_DEVICE("Atari builtin Parallel Port"); MODULE_LICENSE("GPL"); module_init(parport_atari_init) diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c index 9228e8f90309..1e43b3f399a8 100644 --- a/drivers/parport/parport_gsc.c +++ b/drivers/parport/parport_gsc.c @@ -41,7 +41,6 @@ MODULE_AUTHOR("Helge Deller <deller@gmx.de>"); MODULE_DESCRIPTION("HP-PARISC PC-style parallel port driver"); -MODULE_SUPPORTED_DEVICE("integrated PC-style parallel port"); MODULE_LICENSE("GPL"); diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c index d6bbe8446301..f4d0da741e85 100644 --- a/drivers/parport/parport_mfc3.c +++ b/drivers/parport/parport_mfc3.c @@ -359,7 +359,6 @@ static void __exit parport_mfc3_exit(void) MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>"); MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Parallel Port"); -MODULE_SUPPORTED_DEVICE("Multiface 3 Parallel Port"); MODULE_LICENSE("GPL"); module_init(parport_mfc3_init) diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c index e840c1b5ab90..865fc41dbb6c 100644 --- a/drivers/parport/parport_sunbpp.c +++ b/drivers/parport/parport_sunbpp.c @@ -377,6 +377,5 @@ module_platform_driver(bpp_sbus_driver); MODULE_AUTHOR("Derrick J Brashear"); MODULE_DESCRIPTION("Parport Driver for Sparc bidirectional Port"); -MODULE_SUPPORTED_DEVICE("Sparc Bidirectional Parallel Port"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c index cdbfa5df3a51..dbfa0b55d31a 100644 --- a/drivers/pci/hotplug/rpadlpar_sysfs.c +++ b/drivers/pci/hotplug/rpadlpar_sysfs.c @@ -34,12 +34,11 @@ static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr, if (nbytes >= MAX_DRC_NAME_LEN) return 0; - memcpy(drc_name, buf, nbytes); + strscpy(drc_name, buf, nbytes + 1); end = strchr(drc_name, '\n'); - if (!end) - end = &drc_name[nbytes]; - *end = '\0'; + if (end) + *end = '\0'; rc = dlpar_add_slot(drc_name); if (rc) @@ -65,12 +64,11 @@ static ssize_t remove_slot_store(struct kobject *kobj, if (nbytes >= MAX_DRC_NAME_LEN) return 0; - memcpy(drc_name, buf, nbytes); + strscpy(drc_name, buf, nbytes + 1); end = strchr(drc_name, '\n'); - if (!end) - end = &drc_name[nbytes]; - *end = '\0'; + if (end) + *end = '\0'; rc = dlpar_remove_slot(drc_name); if (rc) diff --git a/drivers/pci/hotplug/s390_pci_hpc.c 
b/drivers/pci/hotplug/s390_pci_hpc.c index c9e790c74051..a047c421debe 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -93,8 +93,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) pci_dev_put(pdev); return -EBUSY; } + pci_dev_put(pdev); - zpci_remove_device(zdev); + zpci_remove_device(zdev, false); rc = zpci_disable_device(zdev); if (rc) diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index c6fe0cfec0f6..2d7502648219 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -26,7 +26,7 @@ #include <xen/platform_pci.h> #include <asm/xen/swiotlb-xen.h> -#define INVALID_GRANT_REF (0) + #define INVALID_EVTCHN (-1) struct pci_bus_entry { @@ -42,7 +42,7 @@ struct pcifront_device { struct list_head root_buses; int evtchn; - int gnt_ref; + grant_ref_t gnt_ref; int irq; diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c index 66ad5b3ece19..f2a85500258d 100644 --- a/drivers/perf/arm_dmc620_pmu.c +++ b/drivers/perf/arm_dmc620_pmu.c @@ -681,6 +681,7 @@ static int dmc620_pmu_device_probe(struct platform_device *pdev) if (!name) { dev_err(&pdev->dev, "Create name failed, PMU @%pa\n", &res->start); + ret = -ENOMEM; goto out_teardown_dev; } diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index ad4e630e73e2..461ec61530eb 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1173,15 +1173,20 @@ config INTEL_PMC_CORE depends on PCI help The Intel Platform Controller Hub for Intel Core SoCs provides access - to Power Management Controller registers via a PCI interface. This + to Power Management Controller registers via various interfaces. This driver can utilize debugging capabilities and supported features as - exposed by the Power Management Controller. + exposed by the Power Management Controller. It also may perform some + tasks in the PMC in order to enable transition into the SLPS0 state. + It should be selected on all Intel platforms supported by the driver. 
Supported features: - SLP_S0_RESIDENCY counter - PCH IP Power Gating status - - LTR Ignore + - LTR Ignore / LTR Show - MPHY/PLL gating status (Sunrisepoint PCH only) + - SLPS0 Debug registers (Cannonlake/Icelake PCH) + - Low Power Mode registers (Tigerlake and beyond) + - PMC quirks as needed to enable SLPS0/S0ix config INTEL_PMT_CLASS tristate diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c index 80f4b7785c6c..091e48c217ed 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c @@ -185,5 +185,8 @@ void exit_enum_attributes(void) sysfs_remove_group(wmi_priv.enumeration_data[instance_id].attr_name_kobj, &enumeration_attr_group); } + wmi_priv.enumeration_instances_count = 0; + kfree(wmi_priv.enumeration_data); + wmi_priv.enumeration_data = NULL; } diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c index 75aedbb733be..8a49ba6e44f9 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c @@ -175,5 +175,8 @@ void exit_int_attributes(void) sysfs_remove_group(wmi_priv.integer_data[instance_id].attr_name_kobj, &integer_attr_group); } + wmi_priv.integer_instances_count = 0; + kfree(wmi_priv.integer_data); + wmi_priv.integer_data = NULL; } diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c index 3abcd95477c0..834b3e82ad9f 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c @@ -183,5 +183,8 @@ void exit_po_attributes(void) sysfs_remove_group(wmi_priv.po_data[instance_id].attr_name_kobj, &po_attr_group); } + wmi_priv.po_instances_count = 0; + kfree(wmi_priv.po_data); + wmi_priv.po_data = NULL; } diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c index ac75dce88a4c..552537852459 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c @@ -155,5 +155,8 @@ void exit_str_attributes(void) sysfs_remove_group(wmi_priv.str_data[instance_id].attr_name_kobj, &str_attr_group); } + wmi_priv.str_instances_count = 0; + kfree(wmi_priv.str_data); + wmi_priv.str_data = NULL; } diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c index cb81010ba1a2..7410ccae650c 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c @@ -210,25 +210,17 @@ static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot); */ static int create_attributes_level_sysfs_files(void) { - int ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); + int ret; - if (ret) { - pr_debug("could not create reset_bios file\n"); + ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); + if (ret) return ret; - } ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr); - if (ret) { - pr_debug("could not create changing_pending_reboot file\n"); - sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); - } - return ret; -} + if (ret) + return ret; -static void 
release_reset_bios_data(void) -{ - sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); - sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr); + return 0; } static ssize_t wmi_sysman_attr_show(struct kobject *kobj, struct attribute *attr, @@ -373,8 +365,6 @@ static void destroy_attribute_objs(struct kset *kset) */ static void release_attributes_data(void) { - release_reset_bios_data(); - mutex_lock(&wmi_priv.mutex); exit_enum_attributes(); exit_int_attributes(); @@ -386,11 +376,13 @@ static void release_attributes_data(void) wmi_priv.authentication_dir_kset = NULL; } if (wmi_priv.main_dir_kset) { + sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); + sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr); destroy_attribute_objs(wmi_priv.main_dir_kset); kset_unregister(wmi_priv.main_dir_kset); + wmi_priv.main_dir_kset = NULL; } mutex_unlock(&wmi_priv.mutex); - } /** @@ -497,7 +489,6 @@ nextobj: err_attr_init: mutex_unlock(&wmi_priv.mutex); - release_attributes_data(); kfree(obj); return retval; } @@ -513,102 +504,91 @@ static int __init sysman_init(void) } ret = init_bios_attr_set_interface(); - if (ret || !wmi_priv.bios_attr_wdev) { - pr_debug("failed to initialize set interface\n"); - goto fail_set_interface; - } + if (ret) + return ret; ret = init_bios_attr_pass_interface(); - if (ret || !wmi_priv.password_attr_wdev) { - pr_debug("failed to initialize pass interface\n"); - goto fail_pass_interface; + if (ret) + goto err_exit_bios_attr_set_interface; + + if (!wmi_priv.bios_attr_wdev || !wmi_priv.password_attr_wdev) { + pr_debug("failed to find set or pass interface\n"); + ret = -ENODEV; + goto err_exit_bios_attr_pass_interface; } ret = class_register(&firmware_attributes_class); if (ret) - goto fail_class; + goto err_exit_bios_attr_pass_interface; wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0), NULL, "%s", DRIVER_NAME); if (IS_ERR(wmi_priv.class_dev)) { ret = PTR_ERR(wmi_priv.class_dev); - goto fail_classdev; + goto err_unregister_class; } wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL, &wmi_priv.class_dev->kobj); if (!wmi_priv.main_dir_kset) { ret = -ENOMEM; - goto fail_main_kset; + goto err_destroy_classdev; } wmi_priv.authentication_dir_kset = kset_create_and_add("authentication", NULL, &wmi_priv.class_dev->kobj); if (!wmi_priv.authentication_dir_kset) { ret = -ENOMEM; - goto fail_authentication_kset; + goto err_release_attributes_data; } ret = create_attributes_level_sysfs_files(); if (ret) { pr_debug("could not create reset BIOS attribute\n"); - goto fail_reset_bios; + goto err_release_attributes_data; } ret = init_bios_attributes(ENUM, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID); if (ret) { pr_debug("failed to populate enumeration type attributes\n"); - goto fail_create_group; + goto err_release_attributes_data; } ret = init_bios_attributes(INT, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID); if (ret) { pr_debug("failed to populate integer type attributes\n"); - goto fail_create_group; + goto err_release_attributes_data; } ret = init_bios_attributes(STR, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID); if (ret) { pr_debug("failed to populate string type attributes\n"); - goto fail_create_group; + goto err_release_attributes_data; } ret = init_bios_attributes(PO, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID); if (ret) { pr_debug("failed to populate pass object type attributes\n"); - goto fail_create_group; + goto err_release_attributes_data; } return 0; -fail_create_group: 
+err_release_attributes_data: release_attributes_data(); -fail_reset_bios: - if (wmi_priv.authentication_dir_kset) { - kset_unregister(wmi_priv.authentication_dir_kset); - wmi_priv.authentication_dir_kset = NULL; - } - -fail_authentication_kset: - if (wmi_priv.main_dir_kset) { - kset_unregister(wmi_priv.main_dir_kset); - wmi_priv.main_dir_kset = NULL; - } - -fail_main_kset: +err_destroy_classdev: device_destroy(&firmware_attributes_class, MKDEV(0, 0)); -fail_classdev: +err_unregister_class: class_unregister(&firmware_attributes_class); -fail_class: +err_exit_bios_attr_pass_interface: exit_bios_attr_pass_interface(); -fail_pass_interface: +err_exit_bios_attr_set_interface: exit_bios_attr_set_interface(); -fail_set_interface: return ret; } diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index 2f5b8d09143e..57cc92891a57 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -90,6 +90,13 @@ static const struct dmi_system_id button_array_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"), }, }, + { + .ident = "Lenovo ThinkPad X1 Tablet Gen 2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"), + }, + }, { } }; diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index 8a8017f9ca91..3fdf4cbec9ad 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -48,8 +48,16 @@ static const struct key_entry intel_vbtn_keymap[] = { }; static const struct key_entry intel_vbtn_switchmap[] = { - { KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */ - { KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */ + /* + * SW_DOCK should only be reported for docking stations, but DSDTs using the + * intel-vbtn code always seem to use this for 2-in-1s / convertibles and set + * SW_DOCK=1 when in laptop-mode (in tandem with setting SW_TABLET_MODE=0). + * This causes userspace to think the laptop is docked to a port-replicator + * and to disable suspend-on-lid-close, which is undesirable. + * Map the dock events to KE_IGNORE to avoid this broken SW_DOCK reporting. + */ + { KE_IGNORE, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */ + { KE_IGNORE, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */ { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */ { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */ { KE_END } diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index ee2f757515b0..b5888aeb4bcf 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -863,34 +863,45 @@ out_unlock: } DEFINE_SHOW_ATTRIBUTE(pmc_core_pll); -static ssize_t pmc_core_ltr_ignore_write(struct file *file, - const char __user *userbuf, - size_t count, loff_t *ppos) +static int pmc_core_send_ltr_ignore(u32 value) { struct pmc_dev *pmcdev = &pmc; const struct pmc_reg_map *map = pmcdev->map; - u32 val, buf_size, fd; - int err; - - buf_size = count < 64 ? 
count : 64; - - err = kstrtou32_from_user(userbuf, buf_size, 10, &val); - if (err) - return err; + u32 reg; + int err = 0; mutex_lock(&pmcdev->lock); - if (val > map->ltr_ignore_max) { + if (value > map->ltr_ignore_max) { err = -EINVAL; goto out_unlock; } - fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset); - fd |= (1U << val); - pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd); + reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset); + reg |= BIT(value); + pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg); out_unlock: mutex_unlock(&pmcdev->lock); + + return err; +} + +static ssize_t pmc_core_ltr_ignore_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + u32 buf_size, value; + int err; + + buf_size = min_t(u32, count, 64); + + err = kstrtou32_from_user(userbuf, buf_size, 10, &value); + if (err) + return err; + + err = pmc_core_send_ltr_ignore(value); + return err == 0 ? count : err; } @@ -1244,6 +1255,15 @@ static int pmc_core_probe(struct platform_device *pdev) pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(); dmi_check_system(pmc_core_dmi_table); + /* + * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when + * a cable is attached. Tell the PMC to ignore it. + */ + if (pmcdev->map == &tgl_reg_map) { + dev_dbg(&pdev->dev, "ignoring GBE LTR\n"); + pmc_core_send_ltr_ignore(3); + } + pmc_core_dbgfs_register(pmcdev); device_initialized = true; diff --git a/drivers/platform/x86/intel_pmt_class.c b/drivers/platform/x86/intel_pmt_class.c index c8939fba4509..ee2b3bbeb83d 100644 --- a/drivers/platform/x86/intel_pmt_class.c +++ b/drivers/platform/x86/intel_pmt_class.c @@ -173,7 +173,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns, struct device *parent) { - struct resource res; + struct resource res = {0}; struct device *dev; int ret; diff --git a/drivers/platform/x86/intel_pmt_crashlog.c b/drivers/platform/x86/intel_pmt_crashlog.c index 97dd749c8290..92d315a16cfd 100644 --- a/drivers/platform/x86/intel_pmt_crashlog.c +++ b/drivers/platform/x86/intel_pmt_crashlog.c @@ -23,18 +23,17 @@ #define CRASH_TYPE_OOBMSM 1 /* Control Flags */ -#define CRASHLOG_FLAG_DISABLE BIT(27) +#define CRASHLOG_FLAG_DISABLE BIT(28) /* - * Bits 28 and 29 control the state of bit 31. + * Bits 29 and 30 control the state of bit 31. * - * Bit 28 will clear bit 31, if set, allowing a new crashlog to be captured. - * Bit 29 will immediately trigger a crashlog to be generated, setting bit 31. - * Bit 30 is read-only and reserved as 0. + * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured. + * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31. * Bit 31 is the read-only status with a 1 indicating log is complete. 
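Splitting pmc_core_send_ltr_ignore() out of the debugfs write handler, as done above, lets probe-time quirks (such as the Tigerlake GBE workaround later in this file) set an LTR-ignore bit without going through the file interface. A sketch of the shared-helper pattern, where reg_read()/reg_write(), LTR_IGNORE_OFFSET and LTR_IGNORE_MAX are hypothetical stand-ins for the driver's register map:

#include <linux/bits.h>
#include <linux/fs.h>
#include <linux/kernel.h>

#define LTR_IGNORE_OFFSET	0x100	/* placeholder offset */
#define LTR_IGNORE_MAX		22	/* placeholder per-platform limit */

static u32 example_regs[0x200 / 4];	/* fake register file for the sketch */
static u32 reg_read(u32 offset) { return example_regs[offset / 4]; }
static void reg_write(u32 offset, u32 val) { example_regs[offset / 4] = val; }

static int send_ltr_ignore(u32 value)
{
	if (value > LTR_IGNORE_MAX)
		return -EINVAL;

	/* set the ignore bit for the IP index that was requested */
	reg_write(LTR_IGNORE_OFFSET, reg_read(LTR_IGNORE_OFFSET) | BIT(value));
	return 0;
}

static ssize_t ltr_ignore_write(struct file *file, const char __user *ubuf,
				size_t count, loff_t *ppos)
{
	u32 value;
	int err;

	/* bound the parse length, as the driver does with min_t() */
	err = kstrtou32_from_user(ubuf, min_t(u32, count, 64), 10, &value);
	if (err)
		return err;

	err = send_ltr_ignore(value);
	return err ? err : count;	/* claim the whole write on success */
}

The real helper also serializes the read-modify-write under the device mutex, which is elided here.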
*/ -#define CRASHLOG_FLAG_TRIGGER_CLEAR BIT(28) -#define CRASHLOG_FLAG_TRIGGER_EXECUTE BIT(29) +#define CRASHLOG_FLAG_TRIGGER_CLEAR BIT(29) +#define CRASHLOG_FLAG_TRIGGER_EXECUTE BIT(30) #define CRASHLOG_FLAG_TRIGGER_COMPLETE BIT(31) #define CRASHLOG_FLAG_TRIGGER_MASK GENMASK(31, 28) diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index b881044b31b0..0d9e2ddbf904 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -4081,13 +4081,19 @@ static bool hotkey_notify_6xxx(const u32 hkey, case TP_HKEY_EV_KEY_NUMLOCK: case TP_HKEY_EV_KEY_FN: - case TP_HKEY_EV_KEY_FN_ESC: /* key press events, we just ignore them as long as the EC * is still reporting them in the normal keyboard stream */ *send_acpi_ev = false; *ignore_acpi_ev = true; return true; + case TP_HKEY_EV_KEY_FN_ESC: + /* Get the media key status to force the status LED to update */ + acpi_evalf(hkey_handle, NULL, "GMKS", "v"); + *send_acpi_ev = false; + *ignore_acpi_ev = true; + return true; + case TP_HKEY_EV_TABLET_CHANGED: tpacpi_input_send_tabletsw(); hotkey_tablet_mode_notify_change(); @@ -9845,6 +9851,11 @@ static struct ibm_struct lcdshadow_driver_data = { * Thinkpad sensor interfaces */ +#define DYTC_CMD_QUERY 0 /* To get DYTC status - enable/revision */ +#define DYTC_QUERY_ENABLE_BIT 8 /* Bit 8 - 0 = disabled, 1 = enabled */ +#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */ +#define DYTC_QUERY_REV_BIT 28 /* Bits 28 - 31 - revision */ + #define DYTC_CMD_GET 2 /* To get current IC function and mode */ #define DYTC_GET_LAPMODE_BIT 17 /* Set when in lapmode */ @@ -9855,6 +9866,7 @@ static bool has_palmsensor; static bool has_lapsensor; static bool palm_state; static bool lap_state; +static int dytc_version; static int dytc_command(int command, int *output) { @@ -9869,6 +9881,33 @@ static int dytc_command(int command, int *output) return 0; } +static int dytc_get_version(void) +{ + int err, output; + + /* Check if we've been called before - and just return cached value */ + if (dytc_version) + return dytc_version; + + /* Otherwise query DYTC and extract version information */ + err = dytc_command(DYTC_CMD_QUERY, &output); + /* + * If support isn't available (ENODEV) then don't return an error + * and don't create the sysfs group + */ + if (err == -ENODEV) + return 0; + /* For all other errors we can flag the failure */ + if (err) + return err; + + /* Check DYTC is enabled and supports mode setting */ + if (output & BIT(DYTC_QUERY_ENABLE_BIT)) + dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF; + + return 0; +} + static int lapsensor_get(bool *present, bool *state) { int output, err; @@ -9974,7 +10013,18 @@ static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm) if (err) return err; } - if (has_lapsensor) { + + /* Check if we know the DYTC version; if we don't, then get it */ + if (!dytc_version) { + err = dytc_get_version(); + if (err) + return err; + } + /* + * Platforms before DYTC version 5 claim to have a lap sensor, but it doesn't work, so we + * ignore them + */ + if (has_lapsensor && (dytc_version >= 5)) { err = sysfs_create_file(&tpacpi_pdev->dev.kobj, &dev_attr_dytc_lapmode.attr); if (err) return err; @@ -9999,14 +10049,9 @@ static struct ibm_struct proxsensor_driver_data = { * DYTC Platform Profile interface */ -#define DYTC_CMD_QUERY 0 /* To get DYTC status - enable/revision */ #define DYTC_CMD_SET 1 /* To enable/disable IC function mode */ #define DYTC_CMD_RESET 0x1ff /* To reset back to default */ -#define 
DYTC_QUERY_ENABLE_BIT 8 /* Bit 8 - 0 = disabled, 1 = enabled */ -#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */ -#define DYTC_QUERY_REV_BIT 28 /* Bits 28 - 31 - revision */ - #define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */ #define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */ @@ -10142,8 +10187,13 @@ static int dytc_profile_set(struct platform_profile_handler *pprof, return err; if (profile == PLATFORM_PROFILE_BALANCED) { - /* To get back to balanced mode we just issue a reset command */ - err = dytc_command(DYTC_CMD_RESET, &output); + /* + * To get back to balanced mode we need to issue a reset command. + * Note we still need to disable CQL mode beforehand and re-enable + * it afterwards, otherwise dytc_lapmode gets reset to 0 and stays + * stuck at 0 for approx. 30 minutes. + */ + err = dytc_cql_command(DYTC_CMD_RESET, &output); if (err) goto unlock; } else { @@ -10211,28 +10261,28 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm) if (err) return err; + /* Check if we know the DYTC version; if we don't, then get it */ + if (!dytc_version) { + err = dytc_get_version(); + if (err) + return err; + } /* Check DYTC is enabled and supports mode setting */ - if (output & BIT(DYTC_QUERY_ENABLE_BIT)) { - /* Only DYTC v5.0 and later has this feature. */ - int dytc_version; - - dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF; - if (dytc_version >= 5) { - dbg_printk(TPACPI_DBG_INIT, - "DYTC version %d: thermal mode available\n", dytc_version); - /* Create platform_profile structure and register */ - err = platform_profile_register(&dytc_profile); - /* - * If for some reason platform_profiles aren't enabled - * don't quit terminally. - */ - if (err) - return 0; + if (dytc_version >= 5) { + dbg_printk(TPACPI_DBG_INIT, + "DYTC version %d: thermal mode available\n", dytc_version); + /* Create platform_profile structure and register */ + err = platform_profile_register(&dytc_profile); + /* + * If for some reason platform_profiles aren't enabled, + * don't quit terminally. 
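dytc_get_version() above is shared by the proximity-sensor and platform-profile init paths: it issues the query once, caches the decoded revision, and both callers then gate their registration on version >= 5. A compact sketch of the decode, assuming the same layout (enable flag in bit 8, revision in bits 28-31):

#include <linux/bits.h>
#include <linux/types.h>

static int cached_version;	/* 0 = not queried yet */

static int example_get_version(u32 query_output)
{
	if (cached_version)
		return cached_version;	/* later callers reuse the result */

	if (query_output & BIT(8))	/* firmware reports DYTC enabled */
		cached_version = (query_output >> 28) & 0xF;

	return cached_version;
}

With version 4 firmware the decode yields 4, so both the lap-sensor sysfs file and the platform profile stay unregistered.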
+ */ + if (err) + return 0; - dytc_profile_available = true; - /* Ensure initial values are correct */ - dytc_profile_refresh(); - } + dytc_profile_available = true; + /* Ensure initial values are correct */ + dytc_profile_refresh(); } return 0; } diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c index 75463c2e2b86..fa636951169e 100644 --- a/drivers/ptp/ptp_clockmatrix.c +++ b/drivers/ptp/ptp_clockmatrix.c @@ -1404,8 +1404,8 @@ static int idtcm_set_pll_mode(struct idtcm_channel *channel, /* PTP Hardware Clock interface */ -/** - * @brief Maximum absolute value for write phase offset in picoseconds +/* + * Maximum absolute value for write phase offset in picoseconds * * Destination signed register is 32-bit register in resolution of 50ps * diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c index ce10ecd41ba0..a17e8cc642c5 100644 --- a/drivers/ptp/ptp_pch.c +++ b/drivers/ptp/ptp_pch.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/ptp_clock_kernel.h> +#include <linux/ptp_pch.h> #include <linux/slab.h> #define STATION_ADDR_LEN 20 @@ -36,7 +37,8 @@ enum pch_status { PCH_FAILED, PCH_UNSUPPORTED, }; -/** + +/* * struct pch_ts_regs - IEEE 1588 registers */ struct pch_ts_regs { @@ -102,7 +104,8 @@ struct pch_ts_regs { #define PCH_IEEE1588_ETH (1 << 0) #define PCH_IEEE1588_CAN (1 << 1) -/** + +/* * struct pch_dev - Driver private data */ struct pch_dev { @@ -119,7 +122,7 @@ struct pch_dev { spinlock_t register_lock; }; -/** +/* * struct pch_params - 1588 module parameter */ struct pch_params { @@ -179,17 +182,6 @@ static inline void pch_block_reset(struct pch_dev *chip) iowrite32(val, (&chip->regs->control)); } -u32 pch_ch_control_read(struct pci_dev *pdev) -{ - struct pch_dev *chip = pci_get_drvdata(pdev); - u32 val; - - val = ioread32(&chip->regs->ch_control); - - return val; -} -EXPORT_SYMBOL(pch_ch_control_read); - void pch_ch_control_write(struct pci_dev *pdev, u32 val) { struct pch_dev *chip = pci_get_drvdata(pdev); @@ -296,6 +288,7 @@ static void pch_reset(struct pch_dev *chip) * IEEE 1588 hardware when looking at PTP * traffic on the ethernet interface * @addr: dress which contain the column separated address to be used. + * @pdev: PCI device. */ int pch_set_station_address(u8 *addr, struct pci_dev *pdev) { diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c index beb5f74944cd..08f4cf0ad9e3 100644 --- a/drivers/ptp/ptp_qoriq.c +++ b/drivers/ptp/ptp_qoriq.c @@ -189,15 +189,16 @@ int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) tmr_add = ptp_qoriq->tmr_add; adj = tmr_add; - /* calculate diff as adj*(scaled_ppm/65536)/1000000 - * and round() to the nearest integer + /* + * Calculate diff and round() to the nearest integer + * + * diff = adj * (ppb / 1000000000) + * = adj * scaled_ppm / 65536000000 */ - adj *= scaled_ppm; - diff = div_u64(adj, 8000000); - diff = (diff >> 13) + ((diff >> 12) & 1); + diff = mul_u64_u64_div_u64(adj, scaled_ppm, 32768000000); + diff = DIV64_U64_ROUND_UP(diff, 2); tmr_add = neg_adj ? 
tmr_add - diff : tmr_add + diff; - ptp_qoriq->write(&regs->ctrl_regs->tmr_add, tmr_add); return 0; diff --git a/drivers/regulator/mt6315-regulator.c b/drivers/regulator/mt6315-regulator.c index d49a1534d8e9..9edc34981ee0 100644 --- a/drivers/regulator/mt6315-regulator.c +++ b/drivers/regulator/mt6315-regulator.c @@ -41,7 +41,7 @@ struct mt6315_chip { .type = REGULATOR_VOLTAGE, \ .id = _bid, \ .owner = THIS_MODULE, \ - .n_voltages = 0xbf, \ + .n_voltages = 0xc0, \ .linear_ranges = mt_volt_range1, \ .n_linear_ranges = ARRAY_SIZE(mt_volt_range1), \ .vsel_reg = _vsel, \ @@ -69,7 +69,7 @@ static unsigned int mt6315_map_mode(u32 mode) case MT6315_BUCK_MODE_LP: return REGULATOR_MODE_IDLE; default: - return -EINVAL; + return REGULATOR_MODE_INVALID; } } diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c index 833d398c6aa2..2f7ee212cb8c 100644 --- a/drivers/regulator/pca9450-regulator.c +++ b/drivers/regulator/pca9450-regulator.c @@ -797,6 +797,14 @@ static int pca9450_i2c_probe(struct i2c_client *i2c, return ret; } + /* Clear PRESET_EN bit in BUCK123_DVS to use DVS registers */ + ret = regmap_clear_bits(pca9450->regmap, PCA9450_REG_BUCK123_DVS, + BUCK123_PRESET_EN); + if (ret) { + dev_err(&i2c->dev, "Failed to clear PRESET_EN bit: %d\n", ret); + return ret; + } + /* Set reset behavior on assertion of WDOG_B signal */ ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_RESET_CTRL, WDOG_B_CFG_MASK, WDOG_B_CFG_COLD_LDO12); @@ -814,7 +822,7 @@ static int pca9450_i2c_probe(struct i2c_client *i2c, if (IS_ERR(pca9450->sd_vsel_gpio)) { dev_err(&i2c->dev, "Failed to get SD_VSEL GPIO\n"); - return ret; + return PTR_ERR(pca9450->sd_vsel_gpio); } dev_info(&i2c->dev, "%s probed.\n", diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index 79a554f1029d..65a108c9121f 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -726,8 +726,8 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = { static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000), - .n_voltages = 5, + .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000), + .n_voltages = 236, .pmic_mode_map = pmic_mode_map_pmic5_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, }; @@ -901,7 +901,7 @@ static const struct rpmh_vreg_init_data pm8350_vreg_data[] = { }; static const struct rpmh_vreg_init_data pm8350c_vreg_data[] = { - RPMH_VREG("smps1", "smp%s1", &pmic5_hfsmps510, "vdd-s1"), + RPMH_VREG("smps1", "smp%s1", &pmic5_hfsmps515, "vdd-s1"), RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps510, "vdd-s2"), RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps510, "vdd-s3"), RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps510, "vdd-s4"), diff --git a/drivers/regulator/rt4831-regulator.c b/drivers/regulator/rt4831-regulator.c index 3d4695ded629..e3aaac90d238 100644 --- a/drivers/regulator/rt4831-regulator.c +++ b/drivers/regulator/rt4831-regulator.c @@ -153,9 +153,9 @@ static int rt4831_regulator_probe(struct platform_device *pdev) int i, ret; regmap = dev_get_regmap(pdev->dev.parent, NULL); - if (IS_ERR(regmap)) { + if (!regmap) { dev_err(&pdev->dev, "Failed to init regmap\n"); - return PTR_ERR(regmap); + return -ENODEV; } /* Configure DSV mode to normal by default */ diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 28c04a4efa66..3a945abf268c 100644 --- a/drivers/s390/block/dasd.c +++ 
b/drivers/s390/block/dasd.c @@ -63,7 +63,6 @@ void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); MODULE_DESCRIPTION("Linux on S/390 DASD device driver," " Copyright IBM Corp. 2000"); -MODULE_SUPPORTED_DEVICE("dasd"); MODULE_LICENSE("GPL"); /* @@ -3052,7 +3051,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, basedev = block->base; spin_lock_irq(&dq->lock); - if (basedev->state < DASD_STATE_READY) { + if (basedev->state < DASD_STATE_READY || + test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) { DBF_DEV_EVENT(DBF_ERR, basedev, "device not ready for request %p", req); rc = BLK_STS_IOERR; @@ -3487,8 +3487,6 @@ void dasd_generic_remove(struct ccw_device *cdev) struct dasd_device *device; struct dasd_block *block; - cdev->handler = NULL; - device = dasd_device_from_cdev(cdev); if (IS_ERR(device)) { dasd_remove_sysfs_files(cdev); @@ -3507,6 +3505,7 @@ void dasd_generic_remove(struct ccw_device *cdev) * no quite down yet. */ dasd_set_target_state(device, DASD_STATE_NEW); + cdev->handler = NULL; /* dasd_delete_device destroys the device reference. */ block = device->block; dasd_delete_device(device); diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 15692449a1c3..307a80f85c07 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -424,8 +424,10 @@ tty3270_update(struct timer_list *t) * last output position matches the start address * of this line. */ - if (s->string[1] == sba[0] && s->string[2] == sba[1]) - str += 3, len -= 3; + if (s->string[1] == sba[0] && s->string[2] == sba[1]) { + str += 3; + len -= 3; + } if (raw3270_request_add_data(wrq, str, len) != 0) break; list_del_init(&s->update); diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 1515fdc3c1ab..bd3c724bf695 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/debugfs.h> +#include <linux/reboot.h> #include <asm/asm-offsets.h> #include <asm/ipl.h> @@ -238,6 +239,28 @@ static int __init zcore_reipl_init(void) return 0; } +static int zcore_reboot_and_on_panic_handler(struct notifier_block *self, + unsigned long event, + void *data) +{ + if (hsa_available) + release_hsa(); + + return NOTIFY_OK; +} + +static struct notifier_block zcore_reboot_notifier = { + .notifier_call = zcore_reboot_and_on_panic_handler, + /* we need to be notified before reipl and kdump */ + .priority = INT_MAX, +}; + +static struct notifier_block zcore_on_panic_notifier = { + .notifier_call = zcore_reboot_and_on_panic_handler, + /* we need to be notified before reipl and kdump */ + .priority = INT_MAX, +}; + static int __init zcore_init(void) { unsigned char arch; @@ -293,28 +316,15 @@ static int __init zcore_init(void) goto fail; zcore_dir = debugfs_create_dir("zcore" , NULL); - if (!zcore_dir) { - rc = -ENOMEM; - goto fail; - } zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir, NULL, &zcore_reipl_fops); - if (!zcore_reipl_file) { - rc = -ENOMEM; - goto fail_dir; - } zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir, NULL, &zcore_hsa_fops); - if (!zcore_hsa_file) { - rc = -ENOMEM; - goto fail_reipl_file; - } - return 0; -fail_reipl_file: - debugfs_remove(zcore_reipl_file); -fail_dir: - debugfs_remove(zcore_dir); + register_reboot_notifier(&zcore_reboot_notifier); + atomic_notifier_chain_register(&panic_notifier_list, &zcore_on_panic_notifier); + + return 0; 
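Instead of unwinding when a debugfs file cannot be created (those return values are deliberately ignored now), zcore hooks the reboot and panic notifier chains so the HSA is released before a re-IPL or kdump takes over; INT_MAX priority runs these handlers ahead of the notifiers that start those paths. A minimal sketch of registering one handler on both chains; two notifier_block instances are required, since a block can only be linked into one chain (panic_notifier_list is assumed to be exposed via <linux/kernel.h>, as on kernels of this vintage):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int example_shutdown_handler(struct notifier_block *self,
				    unsigned long event, void *data)
{
	/* release shared hardware state before reipl/kdump runs */
	return NOTIFY_OK;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call	= example_shutdown_handler,
	.priority	= INT_MAX,	/* run before reipl and kdump */
};

static struct notifier_block example_panic_nb = {
	.notifier_call	= example_shutdown_handler,
	.priority	= INT_MAX,
};

static int __init example_init(void)
{
	register_reboot_notifier(&example_reboot_nb);
	atomic_notifier_chain_register(&panic_notifier_list,
				       &example_panic_nb);
	return 0;
}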
fail: diag308(DIAG308_REL_HSA, NULL); return rc; diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 6420b197bb05..05e136cfb8be 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -47,7 +47,7 @@ static void ccw_timeout_log(struct ccw_device *cdev) orb = &private->orb; cc = stsch(sch->schid, &schib); - printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " + printk(KERN_WARNING "cio: ccw device timeout occurred at %lx, " "device information:\n", get_tod_clock()); printk(KERN_WARNING "cio: orb:\n"); print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index 68106be4ba7a..767ac41686fe 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -543,7 +543,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, if (ret) return ret; - return copy_to_user((void __user *)arg, &info, minsz); + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; } case VFIO_DEVICE_GET_REGION_INFO: { @@ -561,7 +561,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, if (ret) return ret; - return copy_to_user((void __user *)arg, &info, minsz); + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; } case VFIO_DEVICE_GET_IRQ_INFO: { @@ -582,7 +582,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, if (info.count == -1) return -EINVAL; - return copy_to_user((void __user *)arg, &info, minsz); + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; } case VFIO_DEVICE_SET_IRQS: { diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 41fc2e4135fe..1ffdd411201c 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -1286,7 +1286,7 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg) info.num_regions = 0; info.num_irqs = 0; - return copy_to_user((void __user *)arg, &info, minsz); + return copy_to_user((void __user *)arg, &info, minsz) ? 
-EFAULT : 0; } static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index a814698387bc..6954d4e831a3 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1453,7 +1453,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, static void qeth_tx_complete_pending_bufs(struct qeth_card *card, struct qeth_qdio_out_q *queue, - bool drain) + bool drain, int budget) { struct qeth_qdio_out_buffer *buf, *tmp; @@ -1465,7 +1465,7 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card, if (drain) qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR); - qeth_tx_complete_buf(buf, drain, 0); + qeth_tx_complete_buf(buf, drain, budget); list_del(&buf->list_entry); kmem_cache_free(qeth_qdio_outbuf_cache, buf); @@ -1477,7 +1477,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free) { int j; - qeth_tx_complete_pending_bufs(q->card, q, true); + qeth_tx_complete_pending_bufs(q->card, q, true, 0); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (!q->bufs[j]) @@ -2590,11 +2590,12 @@ static int qeth_ulp_setup(struct qeth_card *card) return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL); } -static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) +static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx, + gfp_t gfp) { struct qeth_qdio_out_buffer *newbuf; - newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); + newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp); if (!newbuf) return -ENOMEM; @@ -2629,7 +2630,7 @@ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) goto err_qdio_bufs; for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { - if (qeth_init_qdio_out_buf(q, i)) + if (qeth_alloc_out_buf(q, i, GFP_KERNEL)) goto err_out_bufs; } @@ -6088,7 +6089,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, /* Prepare the queue slot for immediate re-use: */ qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); - if (qeth_init_qdio_out_buf(queue, bidx)) { + if (qeth_alloc_out_buf(queue, bidx, + GFP_ATOMIC)) { QETH_CARD_TEXT(card, 2, "outofbuf"); qeth_schedule_recovery(card); } @@ -6150,7 +6152,7 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget) unsigned int bytes = 0; int completed; - qeth_tx_complete_pending_bufs(card, queue, false); + qeth_tx_complete_pending_bufs(card, queue, false, budget); if (qeth_out_queue_is_empty(queue)) { napi_complete(napi); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index dd441eaec66e..35b42275a06c 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1123,24 +1123,6 @@ out: return 0; } -static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, - __be16 proto, u16 vid) -{ - struct qeth_card *card = dev->ml_priv; - - QETH_CARD_TEXT_(card, 4, "aid:%d", vid); - return 0; -} - -static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, - __be16 proto, u16 vid) -{ - struct qeth_card *card = dev->ml_priv; - - QETH_CARD_TEXT_(card, 4, "kid:%d", vid); - return 0; -} - static void qeth_l3_set_promisc_mode(struct qeth_card *card) { bool enable = card->dev->flags & IFF_PROMISC; @@ -1861,8 +1843,6 @@ static const struct net_device_ops qeth_l3_netdev_ops = { .ndo_do_ioctl = qeth_do_ioctl, .ndo_fix_features = qeth_fix_features, .ndo_set_features = qeth_set_features, - .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = 
qeth_l3_vlan_rx_kill_vid, .ndo_tx_timeout = qeth_tx_timeout, }; @@ -1878,8 +1858,6 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_do_ioctl = qeth_do_ioctl, .ndo_fix_features = qeth_fix_features, .ndo_set_features = qeth_set_features, - .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, .ndo_tx_timeout = qeth_tx_timeout, .ndo_neigh_setup = qeth_l3_neigh_setup, }; @@ -1933,8 +1911,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->needed_headroom = headroom; card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_RX; netif_keep_dst(card->dev); if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c index 00e72b97d0b6..d93595b39afa 100644 --- a/drivers/sbus/char/display7seg.c +++ b/drivers/sbus/char/display7seg.c @@ -50,7 +50,6 @@ MODULE_PARM_DESC(sol_compat, MODULE_AUTHOR("Eric Brower <ebrower@usa.net>"); MODULE_DESCRIPTION("7-Segment Display driver for Sun Microsystems CP1400/1500"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("d7s"); struct d7s { void __iomem *regs; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 38369766511c..f135a10f582b 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -80,7 +80,6 @@ MODULE_AUTHOR("Hewlett-Packard Company"); MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \ HPSA_DRIVER_VERSION); -MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); MODULE_VERSION(HPSA_DRIVER_VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("cciss"); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index e663085a8944..f140eafc0d6a 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -21,6 +21,7 @@ #include <linux/bsg-lib.h> #include <asm/firmware.h> #include <asm/irq.h> +#include <asm/rtas.h> #include <asm/vio.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -158,6 +159,9 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *); static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *); static void ibmvfc_tgt_move_login(struct ibmvfc_target *); +static void ibmvfc_release_sub_crqs(struct ibmvfc_host *); +static void ibmvfc_init_sub_crqs(struct ibmvfc_host *); + static const char *unknown_error = "unknown error"; static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba, @@ -899,6 +903,9 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) { int rc = 0; struct vio_dev *vdev = to_vio_dev(vhost->dev); + unsigned long flags; + + ibmvfc_release_sub_crqs(vhost); /* Re-enable the CRQ */ do { @@ -910,6 +917,15 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) if (rc) dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc); + spin_lock_irqsave(vhost->host->host_lock, flags); + spin_lock(vhost->crq.q_lock); + vhost->do_enquiry = 1; + vhost->using_channels = 0; + spin_unlock(vhost->crq.q_lock); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + ibmvfc_init_sub_crqs(vhost); + return rc; } @@ -926,8 +942,8 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) unsigned long flags; struct vio_dev *vdev = to_vio_dev(vhost->dev); struct ibmvfc_queue *crq = &vhost->crq; - struct ibmvfc_queue *scrq; - int i; + + ibmvfc_release_sub_crqs(vhost); /* Close the CRQ */ do { @@ -947,16 +963,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) memset(crq->msgs.crq, 0, 
PAGE_SIZE); crq->cur = 0; - if (vhost->scsi_scrqs.scrqs) { - for (i = 0; i < nr_scsi_hw_queues; i++) { - scrq = &vhost->scsi_scrqs.scrqs[i]; - spin_lock(scrq->q_lock); - memset(scrq->msgs.scrq, 0, PAGE_SIZE); - scrq->cur = 0; - spin_unlock(scrq->q_lock); - } - } - /* And re-open it again */ rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, crq->msg_token, PAGE_SIZE); @@ -966,9 +972,12 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) dev_warn(vhost->dev, "Partner adapter not ready\n"); else if (rc != 0) dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc); + spin_unlock(vhost->crq.q_lock); spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_init_sub_crqs(vhost); + return rc; } @@ -5642,7 +5651,8 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE, &scrq->cookie, &scrq->hw_irq); - if (rc) { + /* H_CLOSED indicates successful register, but no CRQ partner */ + if (rc && rc != H_CLOSED) { dev_warn(dev, "Error registering sub-crq: %d\n", rc); if (rc == H_PARAMETER) dev_warn_once(dev, "Firmware may not support MQ\n"); @@ -5675,8 +5685,8 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, irq_failed: do { - plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie); - } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie); + } while (rtas_busy_delay(rc)); reg_failed: ibmvfc_free_queue(vhost, scrq); LEAVE; @@ -5694,6 +5704,7 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index) free_irq(scrq->irq, scrq); irq_dispose_mapping(scrq->irq); + scrq->irq = 0; do { rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, @@ -5707,17 +5718,21 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index) LEAVE; } -static int ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) +static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) { int i, j; ENTER; + if (!vhost->mq_enabled) + return; vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues, sizeof(*vhost->scsi_scrqs.scrqs), GFP_KERNEL); - if (!vhost->scsi_scrqs.scrqs) - return -1; + if (!vhost->scsi_scrqs.scrqs) { + vhost->do_enquiry = 0; + return; + } for (i = 0; i < nr_scsi_hw_queues; i++) { if (ibmvfc_register_scsi_channel(vhost, i)) { @@ -5726,13 +5741,12 @@ static int ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) kfree(vhost->scsi_scrqs.scrqs); vhost->scsi_scrqs.scrqs = NULL; vhost->scsi_scrqs.active_queues = 0; - LEAVE; - return -1; + vhost->do_enquiry = 0; + break; } } LEAVE; - return 0; } static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost) @@ -5770,6 +5784,8 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost) vhost->disc_buf_dma); dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf), vhost->login_buf, vhost->login_buf_dma); + dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf), + vhost->channel_setup_buf, vhost->channel_setup_dma); dma_pool_destroy(vhost->sg_pool); ibmvfc_free_queue(vhost, async_q); LEAVE; @@ -5999,11 +6015,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) goto remove_shost; } - if (vhost->mq_enabled) { - rc = ibmvfc_init_sub_crqs(vhost); - if (rc) - dev_warn(dev, "Failed to allocate Sub-CRQs. 
rc=%d\n", rc); - } + ibmvfc_init_sub_crqs(vhost); if (shost_to_fc_host(shost)->rqst_q) blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index bc79a017e1a2..46a8f2d1d2b8 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -2421,7 +2421,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, memset(dstbuf, 0, 33); size = (nbytes < 32) ? nbytes : 32; if (copy_from_user(dstbuf, buf, size)) - return 0; + return -EFAULT; if (dent == phba->debug_InjErrLBA) { if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') && @@ -2430,7 +2430,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, } if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp))) - return 0; + return -EINVAL; if (dent == phba->debug_writeGuard) phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index ffca03064797..6aa6de729187 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -413,7 +413,7 @@ mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, * And add this object to port_table_list. */ if (!ioc->multipath_on_hba) { - port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); + port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC); if (!port) return NULL; diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index 4adf9ded296a..329fd025c718 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -2273,12 +2273,12 @@ static void myrs_cleanup(struct myrs_hba *cs) if (cs->mmio_base) { cs->disable_intr(cs); iounmap(cs->mmio_base); + cs->mmio_base = NULL; } if (cs->irq) free_irq(cs->irq, cs); if (cs->io_addr) release_region(cs->io_addr, 0x80); - iounmap(cs->mmio_base); pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); scsi_host_put(cs->host); diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 5d5f50d6a02d..ac89002646a3 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -55,7 +55,6 @@ MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>"); MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module"); -MODULE_SUPPORTED_DEVICE("sd,sr,sg,st"); MODULE_LICENSE("GPL"); #include "nsp_io.h" diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 10e5e6c8087d..01620f3eab39 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -116,7 +116,6 @@ (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \ QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0)) #endif -#endif #define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \ ? 
le16_to_cpu((iocb)->u.isp2x.target.extended) \ @@ -244,6 +243,7 @@ struct ctio_to_2xxx { #ifndef CTIO_RET_TYPE #define CTIO_RET_TYPE 0x17 /* CTIO return entry */ #define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */ +#endif struct fcp_hdr { uint8_t r_ctl; diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index ee558675eab4..994f1b8e3504 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -280,27 +280,28 @@ static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx, static void sd_zbc_update_wp_offset_workfn(struct work_struct *work) { struct scsi_disk *sdkp; + unsigned long flags; unsigned int zno; int ret; sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work); - spin_lock_bh(&sdkp->zones_wp_offset_lock); + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); for (zno = 0; zno < sdkp->nr_zones; zno++) { if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST) continue; - spin_unlock_bh(&sdkp->zones_wp_offset_lock); + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf, SD_BUF_SIZE, zno * sdkp->zone_blocks, true); - spin_lock_bh(&sdkp->zones_wp_offset_lock); + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); if (!ret) sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64, zno, sd_zbc_update_wp_offset_cb, sdkp); } - spin_unlock_bh(&sdkp->zones_wp_offset_lock); + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); scsi_device_put(sdkp->device); } @@ -324,6 +325,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, struct request *rq = cmd->request; struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); unsigned int wp_offset, zno = blk_rq_zone_no(rq); + unsigned long flags; blk_status_t ret; ret = sd_zbc_cmnd_checks(cmd); @@ -337,7 +339,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, if (!blk_req_zone_write_trylock(rq)) return BLK_STS_ZONE_RESOURCE; - spin_lock_bh(&sdkp->zones_wp_offset_lock); + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); wp_offset = sdkp->zones_wp_offset[zno]; switch (wp_offset) { case SD_ZBC_INVALID_WP_OFST: @@ -366,7 +368,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, *lba += wp_offset; } - spin_unlock_bh(&sdkp->zones_wp_offset_lock); + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); if (ret) blk_req_zone_write_unlock(rq); return ret; @@ -445,6 +447,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd, struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); unsigned int zno = blk_rq_zone_no(rq); enum req_opf op = req_op(rq); + unsigned long flags; /* * If we got an error for a command that needs updating the write @@ -452,7 +455,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd, * invalid to force an update from disk the next time a zone append * command is issued. 
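The sd_zbc hunks here and below move the write-pointer cache lock from the _bh variants to spin_lock_irqsave()/spin_unlock_irqrestore(): the lock is no longer taken only from softirq or process context, so the interrupt-disabling form with saved flags is the safe common denominator. The idiom in isolation, with a hypothetical lock standing in for zones_wp_offset_lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(wp_lock);	/* hypothetical */

static void example_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wp_lock, flags);
	/* ... read or update the cached write-pointer offsets ... */
	spin_unlock_irqrestore(&wp_lock, flags);
}

Unlike spin_lock_bh(), this also excludes hard-irq callers, and restoring the saved flags keeps it correct even on paths that already run with interrupts disabled.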
*/ - spin_lock_bh(&sdkp->zones_wp_offset_lock); + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); if (result && op != REQ_OP_ZONE_RESET_ALL) { if (op == REQ_OP_ZONE_APPEND) { @@ -496,7 +499,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd, } unlock_wp_offset: - spin_unlock_bh(&sdkp->zones_wp_offset_lock); + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); return good_bytes; } diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index c53f456fbd09..a1dacb6e993e 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -48,7 +48,6 @@ MODULE_AUTHOR("Microsemi"); MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version " DRIVER_VERSION); -MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers"); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 841ad2fc369a..9ca536aae784 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -1269,8 +1269,8 @@ static int st_open(struct inode *inode, struct file *filp) spin_lock(&st_use_lock); if (STp->in_use) { spin_unlock(&st_use_lock); - scsi_tape_put(STp); DEBC_printk(STp, "Device already in use.\n"); + scsi_tape_put(STp); return (-EBUSY); } diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index c55202b92a43..a981f261b304 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -911,7 +911,7 @@ static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm) if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc) return; - if (lpm & !hba->vreg_info.vcc->enabled) + if (lpm && !hba->vreg_info.vcc->enabled) regulator_set_mode(hba->vreg_info.vccq2->reg, REGULATOR_MODE_IDLE); else if (!lpm) diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index f97d7b0ae3b6..a9dc8d7c9f78 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -253,12 +253,17 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba) { int ret = 0; struct ufs_qcom_host *host = ufshcd_get_variant(hba); + bool reenable_intr = false; if (!host->core_reset) { dev_warn(hba->dev, "%s: reset control not set\n", __func__); goto out; } + reenable_intr = hba->is_irq_enabled; + disable_irq(hba->irq); + hba->is_irq_enabled = false; + ret = reset_control_assert(host->core_reset); if (ret) { dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n", @@ -280,6 +285,11 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba) usleep_range(1000, 1100); + if (reenable_intr) { + enable_irq(hba->irq); + hba->is_irq_enabled = true; + } + out: return ret; } diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 77161750c9fb..c86760788c72 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -95,8 +95,6 @@ 16, 4, buf, __len, false); \ } while (0) -static bool early_suspend; - int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, const char *prefix) { @@ -1535,7 +1533,7 @@ static ssize_t ufshcd_clkscale_enable_show(struct device *dev, { struct ufs_hba *hba = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled); + return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled); } static ssize_t ufshcd_clkscale_enable_store(struct device *dev, @@ -4987,6 +4985,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) * UFS device needs urgent BKOPs. 
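The ufs-qcom hunk above brackets the controller reset with disable_irq()/enable_irq(); because disable_irq() also waits for a handler that is already running, the ISR can never observe the host mid-reset. A sketch of the bracket under the assumption of a reset_control-based reset (the delay values are placeholders):

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reset.h>

static int example_host_reset(struct reset_control *rst, int irq,
			      bool *irq_enabled)
{
	bool reenable = *irq_enabled;
	int ret;

	disable_irq(irq);		/* masks and waits for running handlers */
	*irq_enabled = false;

	ret = reset_control_assert(rst);
	if (!ret) {
		usleep_range(200, 210);	/* let the reset propagate */
		ret = reset_control_deassert(rst);
	}

	if (reenable) {
		enable_irq(irq);
		*irq_enabled = true;
	}
	return ret;
}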
*/ if (!hba->pm_op_in_progress && + !ufshcd_eh_in_progress(hba) && ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) && schedule_work(&hba->eeh_work)) { /* @@ -5784,13 +5783,20 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba) ufshcd_suspend_clkscaling(hba); ufshcd_clk_scaling_allow(hba, false); } + ufshcd_scsi_block_requests(hba); + /* Drain ufshcd_queuecommand() */ + down_write(&hba->clk_scaling_lock); + up_write(&hba->clk_scaling_lock); + cancel_work_sync(&hba->eeh_work); } static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) { + ufshcd_scsi_unblock_requests(hba); ufshcd_release(hba); if (ufshcd_is_clkscaling_supported(hba)) ufshcd_clk_scaling_suspend(hba, false); + ufshcd_clear_ua_wluns(hba); pm_runtime_put(hba->dev); } @@ -5882,8 +5888,8 @@ static void ufshcd_err_handler(struct work_struct *work) spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_err_handling_prepare(hba); spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_scsi_block_requests(hba); - hba->ufshcd_state = UFSHCD_STATE_RESET; + if (hba->ufshcd_state != UFSHCD_STATE_ERROR) + hba->ufshcd_state = UFSHCD_STATE_RESET; /* Complete requests that have door-bell cleared by h/w */ ufshcd_complete_requests(hba); @@ -6042,12 +6048,8 @@ skip_err_handling: } ufshcd_clear_eh_in_progress(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); - ufshcd_scsi_unblock_requests(hba); ufshcd_err_handling_unprepare(hba); up(&hba->host_sem); - - if (!err && needs_reset) - ufshcd_clear_ua_wluns(hba); } /** @@ -7858,6 +7860,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async) unsigned long flags; ktime_t start = ktime_get(); + hba->ufshcd_state = UFSHCD_STATE_RESET; + ret = ufshcd_link_startup(hba); if (ret) goto out; @@ -8972,11 +8976,6 @@ int ufshcd_system_suspend(struct ufs_hba *hba) int ret = 0; ktime_t start = ktime_get(); - if (!hba) { - early_suspend = true; - return 0; - } - down(&hba->host_sem); if (!hba->is_powered) @@ -9028,14 +9027,6 @@ int ufshcd_system_resume(struct ufs_hba *hba) int ret = 0; ktime_t start = ktime_get(); - if (!hba) - return -EINVAL; - - if (unlikely(early_suspend)) { - early_suspend = false; - down(&hba->host_sem); - } - if (!hba->is_powered || pm_runtime_suspended(hba->dev)) /* * Let the runtime resume take care of resuming @@ -9068,9 +9059,6 @@ int ufshcd_runtime_suspend(struct ufs_hba *hba) int ret = 0; ktime_t start = ktime_get(); - if (!hba) - return -EINVAL; - if (!hba->is_powered) goto out; else @@ -9109,9 +9097,6 @@ int ufshcd_runtime_resume(struct ufs_hba *hba) int ret = 0; ktime_t start = ktime_get(); - if (!hba) - return -EINVAL; - if (!hba->is_powered) goto out; else diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 081f54ab7d86..8a79605d9652 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -17,8 +17,6 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * - * Maintained by: Jim Gill <jgill@vmware.com> - * */ #include <linux/kernel.h> diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index 75966d3f326e..51a82f7803d3 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h @@ -17,8 +17,6 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
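ufshcd_err_handling_prepare() above uses an immediate down_write()/up_write() pair on clk_scaling_lock purely as a barrier: acquiring an rwsem for write waits for every reader-side critical section in flight, so ufshcd_queuecommand() is drained without adding anything to the fast path. The pattern in isolation, with a hypothetical lock:

#include <linux/rwsem.h>

static DECLARE_RWSEM(io_lock);	/* readers are the hot submission path */

static void submit_io(void)
{
	down_read(&io_lock);
	/* ... build and issue one command ... */
	up_read(&io_lock);
}

static void drain_io(void)
{
	/* write acquisition blocks until all current readers have left */
	down_write(&io_lock);
	up_write(&io_lock);	/* held for no work: it is a pure barrier */
}

Readers arriving while drain_io() holds the semaphore simply wait and then proceed; only the submissions already in flight are flushed.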
* - * Maintained by: Jim Gill <jgill@vmware.com> - * */ #ifndef _VMW_PVSCSI_H_ diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index e5d7fb81ad66..bd0fbcdbdefe 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c @@ -30,7 +30,6 @@ MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>"); MODULE_DESCRIPTION("Maple bus driver for Dreamcast"); MODULE_LICENSE("GPL v2"); -MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}"); static void maple_dma_handler(struct work_struct *work); static void maple_vblank_handler(struct work_struct *work); diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 442cc7c53a47..52ddb3255d88 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1433,6 +1433,7 @@ static int cqspi_probe(struct platform_device *pdev) cqspi = spi_master_get_devdata(master); cqspi->pdev = pdev; + platform_set_drvdata(pdev, cqspi); /* Obtain configuration from OF. */ ret = cqspi_of_get_pdata(cqspi); diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index b22f73d7bfc4..6e798229fe25 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -78,8 +78,6 @@ source "drivers/staging/clocking-wizard/Kconfig" source "drivers/staging/fbtft/Kconfig" -source "drivers/staging/fsl-dpaa2/Kconfig" - source "drivers/staging/most/Kconfig" source "drivers/staging/ks7010/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 2245059e69c7..8d4d9812ecdf 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -29,7 +29,6 @@ obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/ obj-$(CONFIG_UNISYSSPAR) += unisys/ obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/ obj-$(CONFIG_FB_TFT) += fbtft/ -obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/ obj-$(CONFIG_MOST) += most/ obj-$(CONFIG_KS7010) += ks7010/ obj-$(CONFIG_GREYBUS) += greybus/ diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c index 35b75f0c9200..81a246fbcc01 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1032.c +++ b/drivers/staging/comedi/drivers/addi_apci_1032.c @@ -260,6 +260,7 @@ static irqreturn_t apci1032_interrupt(int irq, void *d) struct apci1032_private *devpriv = dev->private; struct comedi_subdevice *s = dev->read_subdev; unsigned int ctrl; + unsigned short val; /* check interrupt is from this device */ if ((inl(devpriv->amcc_iobase + AMCC_OP_REG_INTCSR) & @@ -275,7 +276,8 @@ static irqreturn_t apci1032_interrupt(int irq, void *d) outl(ctrl & ~APCI1032_CTRL_INT_ENA, dev->iobase + APCI1032_CTRL_REG); s->state = inl(dev->iobase + APCI1032_STATUS_REG) & 0xffff; - comedi_buf_write_samples(s, &s->state, 1); + val = s->state; + comedi_buf_write_samples(s, &val, 1); comedi_handle_events(dev, s); /* enable the interrupt */ diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c index 11efb21555e3..b04c15dcfb57 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1500.c +++ b/drivers/staging/comedi/drivers/addi_apci_1500.c @@ -208,7 +208,7 @@ static irqreturn_t apci1500_interrupt(int irq, void *d) struct comedi_device *dev = d; struct apci1500_private *devpriv = dev->private; struct comedi_subdevice *s = dev->read_subdev; - unsigned int status = 0; + unsigned short status = 0; unsigned int val; val = inl(devpriv->amcc + AMCC_OP_REG_INTCSR); @@ -238,14 +238,14 @@ static irqreturn_t apci1500_interrupt(int irq, void *d) * * Mask Meaning * ---------- 
------------------------------------------ - * 0x00000001 Event 1 has occurred - * 0x00000010 Event 2 has occurred - * 0x00000100 Counter/timer 1 has run down (not implemented) - * 0x00001000 Counter/timer 2 has run down (not implemented) - * 0x00010000 Counter 3 has run down (not implemented) - * 0x00100000 Watchdog has run down (not implemented) - * 0x01000000 Voltage error - * 0x10000000 Short-circuit error + * 0b00000001 Event 1 has occurred + * 0b00000010 Event 2 has occurred + * 0b00000100 Counter/timer 1 has run down (not implemented) + * 0b00001000 Counter/timer 2 has run down (not implemented) + * 0b00010000 Counter 3 has run down (not implemented) + * 0b00100000 Watchdog has run down (not implemented) + * 0b01000000 Voltage error + * 0b10000000 Short-circuit error */ comedi_buf_write_samples(s, &status, 1); comedi_handle_events(dev, s); diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index 692893c7e5c3..090607760be6 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c @@ -300,11 +300,11 @@ static int pci1710_ai_eoc(struct comedi_device *dev, static int pci1710_ai_read_sample(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int cur_chan, - unsigned int *val) + unsigned short *val) { const struct boardtype *board = dev->board_ptr; struct pci1710_private *devpriv = dev->private; - unsigned int sample; + unsigned short sample; unsigned int chan; sample = inw(dev->iobase + PCI171X_AD_DATA_REG); @@ -345,7 +345,7 @@ static int pci1710_ai_insn_read(struct comedi_device *dev, pci1710_ai_setup_chanlist(dev, s, &insn->chanspec, 1, 1); for (i = 0; i < insn->n; i++) { - unsigned int val; + unsigned short val; /* start conversion */ outw(0, dev->iobase + PCI171X_SOFTTRG_REG); @@ -395,7 +395,7 @@ static void pci1710_handle_every_sample(struct comedi_device *dev, { struct comedi_cmd *cmd = &s->async->cmd; unsigned int status; - unsigned int val; + unsigned short val; int ret; status = inw(dev->iobase + PCI171X_STATUS_REG); @@ -455,7 +455,7 @@ static void pci1710_handle_fifo(struct comedi_device *dev, } for (i = 0; i < devpriv->max_samples; i++) { - unsigned int val; + unsigned short val; int ret; ret = pci1710_ai_read_sample(dev, s, s->async->cur_chan, &val); diff --git a/drivers/staging/comedi/drivers/amplc_pc236_common.c b/drivers/staging/comedi/drivers/amplc_pc236_common.c index 043752663188..981d281e87a1 100644 --- a/drivers/staging/comedi/drivers/amplc_pc236_common.c +++ b/drivers/staging/comedi/drivers/amplc_pc236_common.c @@ -126,7 +126,9 @@ static irqreturn_t pc236_interrupt(int irq, void *d) handled = pc236_intr_check(dev); if (dev->attached && handled) { - comedi_buf_write_samples(s, &s->state, 1); + unsigned short val = 0; + + comedi_buf_write_samples(s, &val, 1); comedi_handle_events(dev, s); } return IRQ_RETVAL(handled); diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c index d740c4782775..2f20bd56ec6c 100644 --- a/drivers/staging/comedi/drivers/cb_pcidas.c +++ b/drivers/staging/comedi/drivers/cb_pcidas.c @@ -1281,7 +1281,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev, devpriv->amcc + AMCC_OP_REG_INTCSR); ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED, - dev->board_name, dev); + "cb_pcidas", dev); if (ret) { dev_dbg(dev->class_dev, "unable to allocate irq %d\n", pcidev->irq); diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c 
b/drivers/staging/comedi/drivers/cb_pcidas64.c index fa987bb0e7cd..6d3ba399a7f0 100644 --- a/drivers/staging/comedi/drivers/cb_pcidas64.c +++ b/drivers/staging/comedi/drivers/cb_pcidas64.c @@ -4035,7 +4035,7 @@ static int auto_attach(struct comedi_device *dev, init_stc_registers(dev); retval = request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED, - dev->board_name, dev); + "cb_pcidas64", dev); if (retval) { dev_dbg(dev->class_dev, "unable to allocate irq %u\n", pcidev->irq); diff --git a/drivers/staging/comedi/drivers/comedi_parport.c b/drivers/staging/comedi/drivers/comedi_parport.c index 9361b2dcf949..5338b5eea440 100644 --- a/drivers/staging/comedi/drivers/comedi_parport.c +++ b/drivers/staging/comedi/drivers/comedi_parport.c @@ -210,12 +210,13 @@ static irqreturn_t parport_interrupt(int irq, void *d) struct comedi_device *dev = d; struct comedi_subdevice *s = dev->read_subdev; unsigned int ctrl; + unsigned short val = 0; ctrl = inb(dev->iobase + PARPORT_CTRL_REG); if (!(ctrl & PARPORT_CTRL_IRQ_ENA)) return IRQ_NONE; - comedi_buf_write_samples(s, &s->state, 1); + comedi_buf_write_samples(s, &val, 1); comedi_handle_events(dev, s); return IRQ_HANDLED; diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c index 04e224f8b779..96f4107b8054 100644 --- a/drivers/staging/comedi/drivers/das6402.c +++ b/drivers/staging/comedi/drivers/das6402.c @@ -186,7 +186,7 @@ static irqreturn_t das6402_interrupt(int irq, void *d) if (status & DAS6402_STATUS_FFULL) { async->events |= COMEDI_CB_OVERFLOW; } else if (status & DAS6402_STATUS_FFNE) { - unsigned int val; + unsigned short val; val = das6402_ai_read_sample(dev, s); comedi_buf_write_samples(s, &val, 1); diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c index 4ea100ff6930..2881808d6606 100644 --- a/drivers/staging/comedi/drivers/das800.c +++ b/drivers/staging/comedi/drivers/das800.c @@ -427,7 +427,7 @@ static irqreturn_t das800_interrupt(int irq, void *d) struct comedi_cmd *cmd; unsigned long irq_flags; unsigned int status; - unsigned int val; + unsigned short val; bool fifo_empty; bool fifo_overflow; int i; diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c index 17e6018918bb..56682f01242f 100644 --- a/drivers/staging/comedi/drivers/dmm32at.c +++ b/drivers/staging/comedi/drivers/dmm32at.c @@ -404,7 +404,7 @@ static irqreturn_t dmm32at_isr(int irq, void *d) { struct comedi_device *dev = d; unsigned char intstat; - unsigned int val; + unsigned short val; int i; if (!dev->attached) { diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c index 726e40dc17b6..0d3d4cafce2e 100644 --- a/drivers/staging/comedi/drivers/me4000.c +++ b/drivers/staging/comedi/drivers/me4000.c @@ -924,7 +924,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id) struct comedi_subdevice *s = dev->read_subdev; int i; int c = 0; - unsigned int lval; + unsigned short lval; if (!dev->attached) return IRQ_NONE; diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c index 99e744172f4d..f1a45cf7342a 100644 --- a/drivers/staging/comedi/drivers/ni_6527.c +++ b/drivers/staging/comedi/drivers/ni_6527.c @@ -195,7 +195,9 @@ static irqreturn_t ni6527_interrupt(int irq, void *d) return IRQ_NONE; if (status & NI6527_STATUS_EDGE) { - comedi_buf_write_samples(s, &s->state, 1); + unsigned short val = 0; + + comedi_buf_write_samples(s, &val, 1); comedi_handle_events(dev, s); } diff 
--git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c index eb3f9f7109da..7cd8497420f2 100644 --- a/drivers/staging/comedi/drivers/ni_65xx.c +++ b/drivers/staging/comedi/drivers/ni_65xx.c @@ -472,6 +472,7 @@ static irqreturn_t ni_65xx_interrupt(int irq, void *d) struct comedi_device *dev = d; struct comedi_subdevice *s = dev->read_subdev; unsigned int status; + unsigned short val = 0; status = readb(dev->mmio + NI_65XX_STATUS_REG); if ((status & NI_65XX_STATUS_INT) == 0) @@ -482,7 +483,7 @@ static irqreturn_t ni_65xx_interrupt(int irq, void *d) writeb(NI_65XX_CLR_EDGE_INT | NI_65XX_CLR_OVERFLOW_INT, dev->mmio + NI_65XX_CLR_REG); - comedi_buf_write_samples(s, &s->state, 1); + comedi_buf_write_samples(s, &val, 1); comedi_handle_events(dev, s); return IRQ_HANDLED; diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c index 2dbf69e30965..bd6f42fe9e3c 100644 --- a/drivers/staging/comedi/drivers/pcl711.c +++ b/drivers/staging/comedi/drivers/pcl711.c @@ -184,7 +184,7 @@ static irqreturn_t pcl711_interrupt(int irq, void *d) struct comedi_device *dev = d; struct comedi_subdevice *s = dev->read_subdev; struct comedi_cmd *cmd = &s->async->cmd; - unsigned int data; + unsigned short data; if (!dev->attached) { dev_err(dev->class_dev, "spurious interrupt\n"); diff --git a/drivers/staging/comedi/drivers/pcl726.c b/drivers/staging/comedi/drivers/pcl726.c index 64eb649c9813..88f25d7e76f7 100644 --- a/drivers/staging/comedi/drivers/pcl726.c +++ b/drivers/staging/comedi/drivers/pcl726.c @@ -220,9 +220,11 @@ static irqreturn_t pcl726_interrupt(int irq, void *d) struct pcl726_private *devpriv = dev->private; if (devpriv->cmd_running) { + unsigned short val = 0; + pcl726_intr_cancel(dev, s); - comedi_buf_write_samples(s, &s->state, 1); + comedi_buf_write_samples(s, &val, 1); comedi_handle_events(dev, s); } diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c index 63e3011158f2..f4b4a686c710 100644 --- a/drivers/staging/comedi/drivers/pcl818.c +++ b/drivers/staging/comedi/drivers/pcl818.c @@ -423,7 +423,7 @@ static int pcl818_ai_eoc(struct comedi_device *dev, static bool pcl818_ai_write_sample(struct comedi_device *dev, struct comedi_subdevice *s, - unsigned int chan, unsigned int val) + unsigned int chan, unsigned short val) { struct pcl818_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c index 7956abcbae22..9f920819cd74 100644 --- a/drivers/staging/comedi/drivers/vmk80xx.c +++ b/drivers/staging/comedi/drivers/vmk80xx.c @@ -877,5 +877,4 @@ module_comedi_usb_driver(vmk80xx_driver, vmk80xx_usb_driver); MODULE_AUTHOR("Manuel Gebele <forensixs@gmx.de>"); MODULE_DESCRIPTION("Velleman USB Board Low-Level Driver"); -MODULE_SUPPORTED_DEVICE("K8055/K8061 aka VM110/VM140"); MODULE_LICENSE("GPL"); diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig deleted file mode 100644 index 244237bb068a..000000000000 --- a/drivers/staging/fsl-dpaa2/Kconfig +++ /dev/null @@ -1,19 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers -# - -config FSL_DPAA2 - bool "Freescale DPAA2 devices" - depends on FSL_MC_BUS - help - Build drivers for Freescale DataPath Acceleration - Architecture (DPAA2) family of SoCs. 
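The comedi interrupt-handler changes above all address the same issue: comedi_buf_write_samples() copies each sample at the subdevice's sample size, which is sizeof(unsigned short) for subdevices that do not set SDF_LSAMPL. Passing the address of an unsigned int, or of s->state (also unsigned int), happens to work on little-endian machines but copies the wrong half of the 32-bit value on big-endian ones, so each handler now goes through an unsigned short local. A minimal sketch of the corrected pattern, with foo_irq_pending() and foo_read_sample() standing in for the driver-specific register accesses (both hypothetical):

#include <linux/interrupt.h>
#include "../comedidev.h"

static irqreturn_t foo_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;
	unsigned short val;	/* matches the 16-bit sample size */

	if (!foo_irq_pending(dev))	/* hypothetical status check */
		return IRQ_NONE;

	val = foo_read_sample(dev, s);	/* hypothetical data read */
	comedi_buf_write_samples(s, &val, 1);
	comedi_handle_events(dev, s);

	return IRQ_HANDLED;
}

For the digital-I/O drivers (ni_6527, ni_65xx, pcl726, comedi_parport) the sample value itself is irrelevant, so a zero-initialized unsigned short local replaces &s->state for the same reason.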
- -config FSL_DPAA2_ETHSW - tristate "Freescale DPAA2 Ethernet Switch" - depends on FSL_DPAA2 - depends on NET_SWITCHDEV - help - Driver for Freescale DPAA2 Ethernet Switch. Select - BRIDGE to have support for bridge tools. diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile deleted file mode 100644 index 9645db7689c9..000000000000 --- a/drivers/staging/fsl-dpaa2/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers -# - -obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/ diff --git a/drivers/staging/fsl-dpaa2/ethsw/Makefile b/drivers/staging/fsl-dpaa2/ethsw/Makefile deleted file mode 100644 index f6f2cf798faf..000000000000 --- a/drivers/staging/fsl-dpaa2/ethsw/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for the Freescale DPAA2 Ethernet Switch -# -# Copyright 2014-2017 Freescale Semiconductor Inc. -# Copyright 2017-2018 NXP - -obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o - -dpaa2-ethsw-objs := ethsw.o ethsw-ethtool.o dpsw.o diff --git a/drivers/staging/fsl-dpaa2/ethsw/README b/drivers/staging/fsl-dpaa2/ethsw/README deleted file mode 100644 index b48dcbf7c5fb..000000000000 --- a/drivers/staging/fsl-dpaa2/ethsw/README +++ /dev/null @@ -1,106 +0,0 @@ -DPAA2 Ethernet Switch driver -============================ - -This file provides documentation for the DPAA2 Ethernet Switch driver - - -Contents -======== - Supported Platforms - Architecture Overview - Creating an Ethernet Switch - Features - - - Supported Platforms -=================== -This driver provides networking support for Freescale LS2085A, LS2088A -DPAA2 SoCs. - - -Architecture Overview -===================== -The Ethernet Switch in the DPAA2 architecture consists of several hardware -resources that provide the functionality. These are allocated and -configured via the Management Complex (MC) portals. MC abstracts most of -these resources as DPAA2 objects and exposes ABIs through which they can -be configured and controlled. - -For a more detailed description of the DPAA2 architecture and its object -abstractions see: - drivers/staging/fsl-mc/README.txt - -The Ethernet Switch is built on top of a Datapath Switch (DPSW) object. - -Configuration interface: - - --------------------- - | DPAA2 Switch driver | - --------------------- - . - . - ---------- - | DPSW API | - ---------- - . software - ================= . ============== - . hardware - --------------------- - | MC hardware portals | - --------------------- - . - . - ------ - | DPSW | - ------ - -Driver uses the switch device driver model and exposes each switch port as -a network interface, which can be included in a bridge. Traffic switched -between ports is offloaded into the hardware. Exposed network interfaces -are not used for I/O, they are used just for configuration. This -limitation is going to be addressed in the future. - -The DPSW can have ports connected to DPNIs or to PHYs via DPMACs. 
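Internally, all of this configuration is driven through MC commands. Every dpsw_*() call declared in dpsw.h further below follows the same convention: dpsw_open() resolves a DPSW object ID into a token, that token authorizes subsequent commands issued through an MC portal (struct fsl_mc_io), and dpsw_close() releases it. A minimal sketch, assuming mc_io and dpsw_id were obtained from the fsl-mc bus device (foo_probe_dpsw() is a hypothetical name):

#include "dpsw.h"

static int foo_probe_dpsw(struct fsl_mc_io *mc_io, struct device *dev,
			  int dpsw_id)
{
	struct dpsw_attr attr;
	u16 token;
	int err;

	err = dpsw_open(mc_io, 0, dpsw_id, &token);	/* ID -> token */
	if (err)
		return err;

	err = dpsw_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		dev_info(dev, "DPSW %d has %u interfaces\n",
			 attr.id, attr.num_ifs);

	dpsw_close(mc_io, 0, token);	/* release the token */
	return err;
}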
- - - [ethA] [ethB] [ethC] [ethD] [ethE] [ethF] - : : : : : : - : : : : : : -[eth drv] [eth drv] [ ethsw drv ] - : : : : : : kernel -======================================================================== - : : : : : : hardware - [DPNI] [DPNI] [============= DPSW =================] - | | | | | | - | ---------- | [DPMAC] [DPMAC] - ------------------------------- | | - | | - [PHY] [PHY] - -For a more detailed description of the Ethernet switch device driver model -see: - Documentation/networking/switchdev.rst - -Creating an Ethernet Switch -=========================== -A device is created for the switch objects probed on the MC bus. Each DPSW -has a number of properties which determine the configuration options and -associated hardware resources. - -A DPSW object (and the other DPAA2 objects needed for a DPAA2 switch) can -be added to a container on the MC bus in one of two ways: statically, -through a Datapath Layout Binary file (DPL) that is parsed by MC at boot -time; or created dynamically at runtime, via the DPAA2 objects APIs. - -Features -======== -Driver configures DPSW to perform hardware switching offload of -unicast/multicast/broadcast (VLAN tagged or untagged) traffic between its -ports. - -It allows configuration of hardware learning, flooding, multicast groups, -port VLAN configuration and STP state. - -Static entries can be added/removed from the FDB. - -Hardware statistics for each port are provided through ethtool -S option. diff --git a/drivers/staging/fsl-dpaa2/ethsw/TODO b/drivers/staging/fsl-dpaa2/ethsw/TODO deleted file mode 100644 index 4d46857b0b2b..000000000000 --- a/drivers/staging/fsl-dpaa2/ethsw/TODO +++ /dev/null @@ -1,13 +0,0 @@ -* Add I/O capabilities on switch port netdevices. This will allow control -traffic to reach the CPU. -* Add ACL to redirect control traffic to CPU. -* Add support for multiple FDBs and switch port partitioning -* MC firmware uprev; the DPAA2 objects used by the Ethernet Switch driver -need to be kept in sync with binary interface changes in MC -* refine README file -* cleanup - -NOTE: At least first three of the above are required before getting the -DPAA2 Ethernet Switch driver out of staging. Another requirement is that -dpio driver is moved to drivers/soc (this is required for I/O). - diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h deleted file mode 100644 index 9cfd8a8e0197..000000000000 --- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h +++ /dev/null @@ -1,594 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright 2014-2016 Freescale Semiconductor Inc. 
- * Copyright 2017-2018 NXP - * - */ - -#ifndef __FSL_DPSW_H -#define __FSL_DPSW_H - -/* Data Path L2-Switch API - * Contains API for handling DPSW topology and functionality - */ - -struct fsl_mc_io; - -/** - * DPSW general definitions - */ - -/** - * Maximum number of traffic class priorities - */ -#define DPSW_MAX_PRIORITIES 8 -/** - * Maximum number of interfaces - */ -#define DPSW_MAX_IF 64 - -int dpsw_open(struct fsl_mc_io *mc_io, - u32 cmd_flags, - int dpsw_id, - u16 *token); - -int dpsw_close(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token); - -/** - * DPSW options - */ - -/** - * Disable flooding - */ -#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL -/** - * Disable Multicast - */ -#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL -/** - * Support control interface - */ -#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL -/** - * Disable flooding metering - */ -#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL -/** - * Enable metering - */ -#define DPSW_OPT_METERING_EN 0x0000000000000040ULL - -/** - * enum dpsw_component_type - component type of a bridge - * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an - * enterprise VLAN bridge or of a Provider Bridge used - * to process C-tagged frames - * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a - * Provider Bridge - * - */ -enum dpsw_component_type { - DPSW_COMPONENT_TYPE_C_VLAN = 0, - DPSW_COMPONENT_TYPE_S_VLAN -}; - -int dpsw_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token); - -int dpsw_disable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token); - -int dpsw_reset(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token); - -/** - * DPSW IRQ Index and Events - */ - -#define DPSW_IRQ_INDEX_IF 0x0000 -#define DPSW_IRQ_INDEX_L2SW 0x0001 - -/** - * IRQ event - Indicates that the link state changed - */ -#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001 - -/** - * struct dpsw_irq_cfg - IRQ configuration - * @addr: Address that must be written to signal a message-based interrupt - * @val: Value to write into irq_addr address - * @irq_num: A user defined number associated with this IRQ - */ -struct dpsw_irq_cfg { - u64 addr; - u32 val; - int irq_num; -}; - -int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u8 en); - -int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 mask); - -int dpsw_get_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 *status); - -int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 status); - -/** - * struct dpsw_attr - Structure representing DPSW attributes - * @id: DPSW object ID - * @options: Enable/Disable DPSW features - * @max_vlans: Maximum Number of VLANs - * @max_meters_per_if: Number of meters per interface - * @max_fdbs: Maximum Number of FDBs - * @max_fdb_entries: Number of FDB entries for default FDB table; - * 0 - indicates default 1024 entries. 
- * @fdb_aging_time: Default FDB aging time for default FDB table; - * 0 - indicates default 300 seconds - * @max_fdb_mc_groups: Number of multicast groups in each FDB table; - * 0 - indicates default 32 - * @mem_size: DPSW frame storage memory size - * @num_ifs: Number of interfaces - * @num_vlans: Current number of VLANs - * @num_fdbs: Current number of FDBs - * @component_type: Component type of this bridge - */ -struct dpsw_attr { - int id; - u64 options; - u16 max_vlans; - u8 max_meters_per_if; - u8 max_fdbs; - u16 max_fdb_entries; - u16 fdb_aging_time; - u16 max_fdb_mc_groups; - u16 num_ifs; - u16 mem_size; - u16 num_vlans; - u8 num_fdbs; - enum dpsw_component_type component_type; -}; - -int dpsw_get_attributes(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - struct dpsw_attr *attr); - -/** - * enum dpsw_action - Action selection for special/control frames - * @DPSW_ACTION_DROP: Drop frame - * @DPSW_ACTION_REDIRECT: Redirect frame to control port - */ -enum dpsw_action { - DPSW_ACTION_DROP = 0, - DPSW_ACTION_REDIRECT = 1 -}; - -/** - * Enable auto-negotiation - */ -#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL -/** - * Enable half-duplex mode - */ -#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -/** - * Enable pause frames - */ -#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL -/** - * Enable a-symmetric pause frames - */ -#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL - -/** - * struct dpsw_link_cfg - Structure representing DPSW link configuration - * @rate: Rate - * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values - */ -struct dpsw_link_cfg { - u32 rate; - u64 options; -}; - -int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - struct dpsw_link_cfg *cfg); -/** - * struct dpsw_link_state - Structure representing DPSW link state - * @rate: Rate - * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values - * @up: 0 - covers two cases: down and disconnected, 1 - up - */ -struct dpsw_link_state { - u32 rate; - u64 options; - u8 up; -}; - -int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - struct dpsw_link_state *state); - -int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - u8 en); - -int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - u8 en); - -/** - * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration - * @pcp: Priority Code Point (PCP): a 3-bit field which refers - * to the IEEE 802.1p priority - * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used - * separately or in conjunction with PCP to indicate frames - * eligible to be dropped in the presence of congestion - * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN - * to which the frame belongs. 
The hexadecimal values - * of 0x000 and 0xFFF are reserved; - * all other values may be used as VLAN identifiers, - * allowing up to 4,094 VLANs - */ -struct dpsw_tci_cfg { - u8 pcp; - u8 dei; - u16 vlan_id; -}; - -int dpsw_if_set_tci(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - const struct dpsw_tci_cfg *cfg); - -int dpsw_if_get_tci(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - struct dpsw_tci_cfg *cfg); - -/** - * enum dpsw_stp_state - Spanning Tree Protocol (STP) states - * @DPSW_STP_STATE_BLOCKING: Blocking state - * @DPSW_STP_STATE_LISTENING: Listening state - * @DPSW_STP_STATE_LEARNING: Learning state - * @DPSW_STP_STATE_FORWARDING: Forwarding state - * - */ -enum dpsw_stp_state { - DPSW_STP_STATE_DISABLED = 0, - DPSW_STP_STATE_LISTENING = 1, - DPSW_STP_STATE_LEARNING = 2, - DPSW_STP_STATE_FORWARDING = 3, - DPSW_STP_STATE_BLOCKING = 0 -}; - -/** - * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration - * @vlan_id: VLAN ID STP state - * @state: STP state - */ -struct dpsw_stp_cfg { - u16 vlan_id; - enum dpsw_stp_state state; -}; - -int dpsw_if_set_stp(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - const struct dpsw_stp_cfg *cfg); - -/** - * enum dpsw_accepted_frames - Types of frames to accept - * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and - * priority tagged frames - * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or - * Priority-Tagged frames received on this interface. - * - */ -enum dpsw_accepted_frames { - DPSW_ADMIT_ALL = 1, - DPSW_ADMIT_ONLY_VLAN_TAGGED = 3 -}; - -/** - * enum dpsw_counter - Counters types - * @DPSW_CNT_ING_FRAME: Counts ingress frames - * @DPSW_CNT_ING_BYTE: Counts ingress bytes - * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames - * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame - * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames - * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes - * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames - * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes - * @DPSW_CNT_EGR_FRAME: Counts egress frames - * @DPSW_CNT_EGR_BYTE: Counts egress bytes - * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames - * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames - * @DPSW_CNT_ING_NO_BUFF_DISCARD: Counts ingress no buffer discarded frames - */ -enum dpsw_counter { - DPSW_CNT_ING_FRAME = 0x0, - DPSW_CNT_ING_BYTE = 0x1, - DPSW_CNT_ING_FLTR_FRAME = 0x2, - DPSW_CNT_ING_FRAME_DISCARD = 0x3, - DPSW_CNT_ING_MCAST_FRAME = 0x4, - DPSW_CNT_ING_MCAST_BYTE = 0x5, - DPSW_CNT_ING_BCAST_FRAME = 0x6, - DPSW_CNT_ING_BCAST_BYTES = 0x7, - DPSW_CNT_EGR_FRAME = 0x8, - DPSW_CNT_EGR_BYTE = 0x9, - DPSW_CNT_EGR_FRAME_DISCARD = 0xa, - DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb, - DPSW_CNT_ING_NO_BUFF_DISCARD = 0xc, -}; - -int dpsw_if_get_counter(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - enum dpsw_counter type, - u64 *counter); - -int dpsw_if_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id); - -int dpsw_if_disable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id); - -int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 if_id, - u16 frame_length); - -/** - * struct dpsw_vlan_cfg - VLAN Configuration - * @fdb_id: Forwarding Data Base - */ -struct dpsw_vlan_cfg { - u16 fdb_id; -}; - -int dpsw_vlan_add(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - 
u16 vlan_id, - const struct dpsw_vlan_cfg *cfg); - -/** - * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces - * @num_ifs: The number of interfaces that are assigned to the egress - * list for this VLAN - * @if_id: The set of interfaces that are - * assigned to the egress list for this VLAN - */ -struct dpsw_vlan_if_cfg { - u16 num_ifs; - u16 if_id[DPSW_MAX_IF]; -}; - -int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg); - -int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg); - -int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg); - -int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id, - const struct dpsw_vlan_if_cfg *cfg); - -int dpsw_vlan_remove(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 vlan_id); - -/** - * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic - * @DPSW_FDB_ENTRY_STATIC: Static entry - * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry - */ -enum dpsw_fdb_entry_type { - DPSW_FDB_ENTRY_STATIC = 0, - DPSW_FDB_ENTRY_DINAMIC = 1 -}; - -/** - * struct dpsw_fdb_unicast_cfg - Unicast entry configuration - * @type: Select static or dynamic entry - * @mac_addr: MAC address - * @if_egress: Egress interface ID - */ -struct dpsw_fdb_unicast_cfg { - enum dpsw_fdb_entry_type type; - u8 mac_addr[6]; - u16 if_egress; -}; - -int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_unicast_cfg *cfg); - -int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_unicast_cfg *cfg); - -#define DPSW_FDB_ENTRY_TYPE_DYNAMIC BIT(0) -#define DPSW_FDB_ENTRY_TYPE_UNICAST BIT(1) - -/** - * struct fdb_dump_entry - fdb snapshot entry - * @mac_addr: MAC address - * @type: bit0 - DINAMIC(1)/STATIC(0), bit1 - UNICAST(1)/MULTICAST(0) - * @if_info: unicast - egress interface, multicast - number of egress interfaces - * @if_mask: multicast - egress interface mask - */ -struct fdb_dump_entry { - u8 mac_addr[6]; - u8 type; - u8 if_info; - u8 if_mask[8]; -}; - -int dpsw_fdb_dump(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - u64 iova_addr, - u32 iova_size, - u16 *num_entries); - -/** - * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration - * @type: Select static or dynamic entry - * @mac_addr: MAC address - * @num_ifs: Number of external and internal interfaces - * @if_id: Egress interface IDs - */ -struct dpsw_fdb_multicast_cfg { - enum dpsw_fdb_entry_type type; - u8 mac_addr[6]; - u16 num_ifs; - u16 if_id[DPSW_MAX_IF]; -}; - -int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_multicast_cfg *cfg); - -int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - const struct dpsw_fdb_multicast_cfg *cfg); - -/** - * enum dpsw_fdb_learning_mode - Auto-learning modes - * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning - * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning - * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable None secure learning by CPU - * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU - * - * NONE - SECURE LEARNING - * SMAC found DMAC found CTLU Action - * v v Forward frame to - * 1. 
DMAC destination - * - v Forward frame to - * 1. DMAC destination - * 2. Control interface - * v - Forward frame to - * 1. Flooding list of interfaces - * - - Forward frame to - * 1. Flooding list of interfaces - * 2. Control interface - * SECURE LEARING - * SMAC found DMAC found CTLU Action - * v v Forward frame to - * 1. DMAC destination - * - v Forward frame to - * 1. Control interface - * v - Forward frame to - * 1. Flooding list of interfaces - * - - Forward frame to - * 1. Control interface - */ -enum dpsw_fdb_learning_mode { - DPSW_FDB_LEARNING_MODE_DIS = 0, - DPSW_FDB_LEARNING_MODE_HW = 1, - DPSW_FDB_LEARNING_MODE_NON_SECURE = 2, - DPSW_FDB_LEARNING_MODE_SECURE = 3 -}; - -int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u16 fdb_id, - enum dpsw_fdb_learning_mode mode); - -/** - * struct dpsw_fdb_attr - FDB Attributes - * @max_fdb_entries: Number of FDB entries - * @fdb_aging_time: Aging time in seconds - * @learning_mode: Learning mode - * @num_fdb_mc_groups: Current number of multicast groups - * @max_fdb_mc_groups: Maximum number of multicast groups - */ -struct dpsw_fdb_attr { - u16 max_fdb_entries; - u16 fdb_aging_time; - enum dpsw_fdb_learning_mode learning_mode; - u16 num_fdb_mc_groups; - u16 max_fdb_mc_groups; -}; - -int dpsw_get_api_version(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 *major_ver, - u16 *minor_ver); - -int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, - u16 if_id, u8 mac_addr[6]); - -int dpsw_if_get_primary_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, - u16 token, u16 if_id, u8 mac_addr[6]); - -int dpsw_if_set_primary_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, - u16 token, u16 if_id, u8 mac_addr[6]); - -#endif /* __FSL_DPSW_H */ diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c deleted file mode 100644 index 703055e063ff..000000000000 --- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c +++ /dev/null @@ -1,1839 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DPAA2 Ethernet Switch driver - * - * Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright 2017-2018 NXP - * - */ - -#include <linux/module.h> - -#include <linux/interrupt.h> -#include <linux/msi.h> -#include <linux/kthread.h> -#include <linux/workqueue.h> - -#include <linux/fsl/mc.h> - -#include "ethsw.h" - -/* Minimal supported DPSW version */ -#define DPSW_MIN_VER_MAJOR 8 -#define DPSW_MIN_VER_MINOR 1 - -#define DEFAULT_VLAN_ID 1 - -static int dpaa2_switch_add_vlan(struct ethsw_core *ethsw, u16 vid) -{ - int err; - - struct dpsw_vlan_cfg vcfg = { - .fdb_id = 0, - }; - - err = dpsw_vlan_add(ethsw->mc_io, 0, - ethsw->dpsw_handle, vid, &vcfg); - if (err) { - dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err); - return err; - } - ethsw->vlans[vid] = ETHSW_VLAN_MEMBER; - - return 0; -} - -static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv) -{ - struct net_device *netdev = port_priv->netdev; - struct dpsw_link_state state; - int err; - - err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, &state); - if (err) { - netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); - return true; - } - - WARN_ONCE(state.up > 1, "Garbage read into link_state"); - - return state.up ? 
true : false; -} - -static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid) -{ - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct net_device *netdev = port_priv->netdev; - struct dpsw_tci_cfg tci_cfg = { 0 }; - bool up; - int err, ret; - - err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, - port_priv->idx, &tci_cfg); - if (err) { - netdev_err(netdev, "dpsw_if_get_tci err %d\n", err); - return err; - } - - tci_cfg.vlan_id = pvid; - - /* Interface needs to be down to change PVID */ - up = dpaa2_switch_port_is_up(port_priv); - if (up) { - err = dpsw_if_disable(ethsw->mc_io, 0, - ethsw->dpsw_handle, - port_priv->idx); - if (err) { - netdev_err(netdev, "dpsw_if_disable err %d\n", err); - return err; - } - } - - err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, - port_priv->idx, &tci_cfg); - if (err) { - netdev_err(netdev, "dpsw_if_set_tci err %d\n", err); - goto set_tci_error; - } - - /* Delete previous PVID info and mark the new one */ - port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID; - port_priv->vlans[pvid] |= ETHSW_VLAN_PVID; - port_priv->pvid = pvid; - -set_tci_error: - if (up) { - ret = dpsw_if_enable(ethsw->mc_io, 0, - ethsw->dpsw_handle, - port_priv->idx); - if (ret) { - netdev_err(netdev, "dpsw_if_enable err %d\n", ret); - return ret; - } - } - - return err; -} - -static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv, - u16 vid, u16 flags) -{ - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct net_device *netdev = port_priv->netdev; - struct dpsw_vlan_if_cfg vcfg; - int err; - - if (port_priv->vlans[vid]) { - netdev_warn(netdev, "VLAN %d already configured\n", vid); - return -EEXIST; - } - - vcfg.num_ifs = 1; - vcfg.if_id[0] = port_priv->idx; - err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg); - if (err) { - netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err); - return err; - } - - port_priv->vlans[vid] = ETHSW_VLAN_MEMBER; - - if (flags & BRIDGE_VLAN_INFO_UNTAGGED) { - err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0, - ethsw->dpsw_handle, - vid, &vcfg); - if (err) { - netdev_err(netdev, - "dpsw_vlan_add_if_untagged err %d\n", err); - return err; - } - port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED; - } - - if (flags & BRIDGE_VLAN_INFO_PVID) { - err = dpaa2_switch_port_set_pvid(port_priv, vid); - if (err) - return err; - } - - return 0; -} - -static int dpaa2_switch_set_learning(struct ethsw_core *ethsw, bool enable) -{ - enum dpsw_fdb_learning_mode learn_mode; - int err; - - if (enable) - learn_mode = DPSW_FDB_LEARNING_MODE_HW; - else - learn_mode = DPSW_FDB_LEARNING_MODE_DIS; - - err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0, - learn_mode); - if (err) { - dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err); - return err; - } - ethsw->learning = enable; - - return 0; -} - -static int dpaa2_switch_port_set_flood(struct ethsw_port_priv *port_priv, bool enable) -{ - int err; - - err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, enable); - if (err) { - netdev_err(port_priv->netdev, - "dpsw_if_set_flooding err %d\n", err); - return err; - } - port_priv->flood = enable; - - return 0; -} - -static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state) -{ - struct dpsw_stp_cfg stp_cfg = { - .state = state, - }; - int err; - u16 vid; - - if (!netif_running(port_priv->netdev) || state == port_priv->stp_state) - return 0; /* Nothing to do */ - - for (vid = 0; 
vid <= VLAN_VID_MASK; vid++) { - if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { - stp_cfg.vlan_id = vid; - err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, &stp_cfg); - if (err) { - netdev_err(port_priv->netdev, - "dpsw_if_set_stp err %d\n", err); - return err; - } - } - } - - port_priv->stp_state = state; - - return 0; -} - -static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid) -{ - struct ethsw_port_priv *ppriv_local = NULL; - int i, err; - - if (!ethsw->vlans[vid]) - return -ENOENT; - - err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid); - if (err) { - dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err); - return err; - } - ethsw->vlans[vid] = 0; - - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { - ppriv_local = ethsw->ports[i]; - ppriv_local->vlans[vid] = 0; - } - - return 0; -} - -static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv, - const unsigned char *addr) -{ - struct dpsw_fdb_unicast_cfg entry = {0}; - int err; - - entry.if_egress = port_priv->idx; - entry.type = DPSW_FDB_ENTRY_STATIC; - ether_addr_copy(entry.mac_addr, addr); - - err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - 0, &entry); - if (err) - netdev_err(port_priv->netdev, - "dpsw_fdb_add_unicast err %d\n", err); - return err; -} - -static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv, - const unsigned char *addr) -{ - struct dpsw_fdb_unicast_cfg entry = {0}; - int err; - - entry.if_egress = port_priv->idx; - entry.type = DPSW_FDB_ENTRY_STATIC; - ether_addr_copy(entry.mac_addr, addr); - - err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - 0, &entry); - /* Silently discard error for calling multiple times the del command */ - if (err && err != -ENXIO) - netdev_err(port_priv->netdev, - "dpsw_fdb_remove_unicast err %d\n", err); - return err; -} - -static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv, - const unsigned char *addr) -{ - struct dpsw_fdb_multicast_cfg entry = {0}; - int err; - - ether_addr_copy(entry.mac_addr, addr); - entry.type = DPSW_FDB_ENTRY_STATIC; - entry.num_ifs = 1; - entry.if_id[0] = port_priv->idx; - - err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - 0, &entry); - /* Silently discard error for calling multiple times the add command */ - if (err && err != -ENXIO) - netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n", - err); - return err; -} - -static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv, - const unsigned char *addr) -{ - struct dpsw_fdb_multicast_cfg entry = {0}; - int err; - - ether_addr_copy(entry.mac_addr, addr); - entry.type = DPSW_FDB_ENTRY_STATIC; - entry.num_ifs = 1; - entry.if_id[0] = port_priv->idx; - - err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - 0, &entry); - /* Silently discard error for calling multiple times the del command */ - if (err && err != -ENAVAIL) - netdev_err(port_priv->netdev, - "dpsw_fdb_remove_multicast err %d\n", err); - return err; -} - -static int dpaa2_switch_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, const unsigned char *addr, - u16 vid, u16 flags, - struct netlink_ext_ack *extack) -{ - if (is_unicast_ether_addr(addr)) - return dpaa2_switch_port_fdb_add_uc(netdev_priv(dev), - addr); - else - return 
dpaa2_switch_port_fdb_add_mc(netdev_priv(dev), - addr); -} - -static int dpaa2_switch_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 vid) -{ - if (is_unicast_ether_addr(addr)) - return dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), - addr); - else - return dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), - addr); -} - -static void dpaa2_switch_port_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - u64 tmp; - int err; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_ING_FRAME, &stats->rx_packets); - if (err) - goto error; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_EGR_FRAME, &stats->tx_packets); - if (err) - goto error; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_ING_BYTE, &stats->rx_bytes); - if (err) - goto error; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_EGR_BYTE, &stats->tx_bytes); - if (err) - goto error; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_ING_FRAME_DISCARD, - &stats->rx_dropped); - if (err) - goto error; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_ING_FLTR_FRAME, - &tmp); - if (err) - goto error; - stats->rx_dropped += tmp; - - err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - DPSW_CNT_EGR_FRAME_DISCARD, - &stats->tx_dropped); - if (err) - goto error; - - return; - -error: - netdev_err(netdev, "dpsw_if_get_counter err %d\n", err); -} - -static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev, - int attr_id) -{ - return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT); -} - -static int dpaa2_switch_port_get_offload_stats(int attr_id, - const struct net_device *netdev, - void *sp) -{ - switch (attr_id) { - case IFLA_OFFLOAD_XSTATS_CPU_HIT: - dpaa2_switch_port_get_stats((struct net_device *)netdev, sp); - return 0; - } - - return -EINVAL; -} - -static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io, - 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, - (u16)ETHSW_L2_MAX_FRM(mtu)); - if (err) { - netdev_err(netdev, - "dpsw_if_set_max_frame_length() err %d\n", err); - return err; - } - - netdev->mtu = mtu; - return 0; -} - -static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - struct dpsw_link_state state; - int err; - - /* Interrupts are received even though no one issued an 'ifconfig up' - * on the switch interface. 
Ignore these link state update interrupts - */ - if (!netif_running(netdev)) - return 0; - - err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx, &state); - if (err) { - netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); - return err; - } - - WARN_ONCE(state.up > 1, "Garbage read into link_state"); - - if (state.up != port_priv->link_state) { - if (state.up) - netif_carrier_on(netdev); - else - netif_carrier_off(netdev); - port_priv->link_state = state.up; - } - - return 0; -} - -static int dpaa2_switch_port_open(struct net_device *netdev) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - /* No need to allow Tx as control interface is disabled */ - netif_tx_stop_all_queues(netdev); - - /* Explicitly set carrier off, otherwise - * netif_carrier_ok() will return true and cause 'ip link show' - * to report the LOWER_UP flag, even though the link - * notification wasn't even received. - */ - netif_carrier_off(netdev); - - err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx); - if (err) { - netdev_err(netdev, "dpsw_if_enable err %d\n", err); - return err; - } - - /* sync carrier state */ - err = dpaa2_switch_port_carrier_state_sync(netdev); - if (err) { - netdev_err(netdev, - "dpaa2_switch_port_carrier_state_sync err %d\n", err); - goto err_carrier_sync; - } - - return 0; - -err_carrier_sync: - dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx); - return err; -} - -static int dpaa2_switch_port_stop(struct net_device *netdev) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx); - if (err) { - netdev_err(netdev, "dpsw_if_disable err %d\n", err); - return err; - } - - return 0; -} - -static netdev_tx_t dpaa2_switch_port_dropframe(struct sk_buff *skb, - struct net_device *netdev) -{ - /* we don't support I/O for now, drop the frame */ - dev_kfree_skb_any(skb); - - return NETDEV_TX_OK; -} - -static int dpaa2_switch_port_parent_id(struct net_device *dev, - struct netdev_phys_item_id *ppid) -{ - struct ethsw_port_priv *port_priv = netdev_priv(dev); - - ppid->id_len = 1; - ppid->id[0] = port_priv->ethsw_data->dev_id; - - return 0; -} - -static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name, - size_t len) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - err = snprintf(name, len, "p%d", port_priv->idx); - if (err >= len) - return -EINVAL; - - return 0; -} - -struct ethsw_dump_ctx { - struct net_device *dev; - struct sk_buff *skb; - struct netlink_callback *cb; - int idx; -}; - -static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry, - struct ethsw_dump_ctx *dump) -{ - int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC; - u32 portid = NETLINK_CB(dump->cb->skb).portid; - u32 seq = dump->cb->nlh->nlmsg_seq; - struct nlmsghdr *nlh; - struct ndmsg *ndm; - - if (dump->idx < dump->cb->args[2]) - goto skip; - - nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, - sizeof(*ndm), NLM_F_MULTI); - if (!nlh) - return -EMSGSIZE; - - ndm = nlmsg_data(nlh); - ndm->ndm_family = AF_BRIDGE; - ndm->ndm_pad1 = 0; - ndm->ndm_pad2 = 0; - ndm->ndm_flags = NTF_SELF; - ndm->ndm_type = 0; - ndm->ndm_ifindex = dump->dev->ifindex; - ndm->ndm_state = is_dynamic ? 
NUD_REACHABLE : NUD_NOARP; - - if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr)) - goto nla_put_failure; - - nlmsg_end(dump->skb, nlh); - -skip: - dump->idx++; - return 0; - -nla_put_failure: - nlmsg_cancel(dump->skb, nlh); - return -EMSGSIZE; -} - -static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry, - struct ethsw_port_priv *port_priv) -{ - int idx = port_priv->idx; - int valid; - - if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST) - valid = entry->if_info == port_priv->idx; - else - valid = entry->if_mask[idx / 8] & BIT(idx % 8); - - return valid; -} - -static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct net_device *net_dev, - struct net_device *filter_dev, int *idx) -{ - struct ethsw_port_priv *port_priv = netdev_priv(net_dev); - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct device *dev = net_dev->dev.parent; - struct fdb_dump_entry *fdb_entries; - struct fdb_dump_entry fdb_entry; - struct ethsw_dump_ctx dump = { - .dev = net_dev, - .skb = skb, - .cb = cb, - .idx = *idx, - }; - dma_addr_t fdb_dump_iova; - u16 num_fdb_entries; - u32 fdb_dump_size; - int err = 0, i; - u8 *dma_mem; - - fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry); - dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL); - if (!dma_mem) - return -ENOMEM; - - fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size, - DMA_FROM_DEVICE); - if (dma_mapping_error(dev, fdb_dump_iova)) { - netdev_err(net_dev, "dma_map_single() failed\n"); - err = -ENOMEM; - goto err_map; - } - - err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, 0, - fdb_dump_iova, fdb_dump_size, &num_fdb_entries); - if (err) { - netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err); - goto err_dump; - } - - dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE); - - fdb_entries = (struct fdb_dump_entry *)dma_mem; - for (i = 0; i < num_fdb_entries; i++) { - fdb_entry = fdb_entries[i]; - - if (!dpaa2_switch_port_fdb_valid_entry(&fdb_entry, port_priv)) - continue; - - err = dpaa2_switch_fdb_dump_nl(&fdb_entry, &dump); - if (err) - goto end; - } - -end: - *idx = dump.idx; - - kfree(dma_mem); - - return 0; - -err_dump: - dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_TO_DEVICE); -err_map: - kfree(dma_mem); - return err; -} - -static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv) -{ - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct net_device *net_dev = port_priv->netdev; - struct device *dev = net_dev->dev.parent; - u8 mac_addr[ETH_ALEN]; - int err; - - if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR)) - return 0; - - /* Get firmware address, if any */ - err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle, - port_priv->idx, mac_addr); - if (err) { - dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n"); - return err; - } - - /* First check if firmware has any address configured by bootloader */ - if (!is_zero_ether_addr(mac_addr)) { - memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); - } else { - /* No MAC address configured, fill in net_dev->dev_addr - * with a random one - */ - eth_hw_addr_random(net_dev); - dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); - - /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all - * practical purposes, this will be our "permanent" mac address, - * at least until the next reboot. This move will also permit - * register_netdevice() to properly fill up net_dev->perm_addr. 
- */ - net_dev->addr_assign_type = NET_ADDR_PERM; - } - - return 0; -} - -static const struct net_device_ops dpaa2_switch_port_ops = { - .ndo_open = dpaa2_switch_port_open, - .ndo_stop = dpaa2_switch_port_stop, - - .ndo_set_mac_address = eth_mac_addr, - .ndo_get_stats64 = dpaa2_switch_port_get_stats, - .ndo_change_mtu = dpaa2_switch_port_change_mtu, - .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats, - .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats, - .ndo_fdb_add = dpaa2_switch_port_fdb_add, - .ndo_fdb_del = dpaa2_switch_port_fdb_del, - .ndo_fdb_dump = dpaa2_switch_port_fdb_dump, - - .ndo_start_xmit = dpaa2_switch_port_dropframe, - .ndo_get_port_parent_id = dpaa2_switch_port_parent_id, - .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name, -}; - -static bool dpaa2_switch_port_dev_check(const struct net_device *netdev, - struct notifier_block *nb) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - - if (netdev->netdev_ops == &dpaa2_switch_port_ops && - (!nb || &port_priv->ethsw_data->port_nb == nb || - &port_priv->ethsw_data->port_switchdev_nb == nb || - &port_priv->ethsw_data->port_switchdevb_nb == nb)) - return true; - - return false; -} - -static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw) -{ - int i; - - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { - dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev); - dpaa2_switch_port_set_mac_addr(ethsw->ports[i]); - } -} - -static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) -{ - struct device *dev = (struct device *)arg; - struct ethsw_core *ethsw = dev_get_drvdata(dev); - - /* Mask the events and the if_id reserved bits to be cleared on read */ - u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000; - int err; - - err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, &status); - if (err) { - dev_err(dev, "Can't get irq status (err %d)\n", err); - - err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, 0xFFFFFFFF); - if (err) - dev_err(dev, "Can't clear irq status (err %d)\n", err); - goto out; - } - - if (status & DPSW_IRQ_EVENT_LINK_CHANGED) - dpaa2_switch_links_state_update(ethsw); - -out: - return IRQ_HANDLED; -} - -static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev) -{ - struct device *dev = &sw_dev->dev; - struct ethsw_core *ethsw = dev_get_drvdata(dev); - u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED; - struct fsl_mc_device_irq *irq; - int err; - - err = fsl_mc_allocate_irqs(sw_dev); - if (err) { - dev_err(dev, "MC irqs allocation failed\n"); - return err; - } - - if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) { - err = -EINVAL; - goto free_irq; - } - - err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, 0); - if (err) { - dev_err(dev, "dpsw_set_irq_enable err %d\n", err); - goto free_irq; - } - - irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF]; - - err = devm_request_threaded_irq(dev, irq->msi_desc->irq, - NULL, - dpaa2_switch_irq0_handler_thread, - IRQF_NO_SUSPEND | IRQF_ONESHOT, - dev_name(dev), dev); - if (err) { - dev_err(dev, "devm_request_threaded_irq(): %d\n", err); - goto free_irq; - } - - err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, mask); - if (err) { - dev_err(dev, "dpsw_set_irq_mask(): %d\n", err); - goto free_devm_irq; - } - - err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, 1); - if (err) { - dev_err(dev, "dpsw_set_irq_enable(): %d\n", err); - goto 
free_devm_irq; - } - - return 0; - -free_devm_irq: - devm_free_irq(dev, irq->msi_desc->irq, dev); -free_irq: - fsl_mc_free_irqs(sw_dev); - return err; -} - -static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev) -{ - struct device *dev = &sw_dev->dev; - struct ethsw_core *ethsw = dev_get_drvdata(dev); - int err; - - err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, 0); - if (err) - dev_err(dev, "dpsw_set_irq_enable err %d\n", err); - - fsl_mc_free_irqs(sw_dev); -} - -static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev, - u8 state) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - - return dpaa2_switch_port_set_stp_state(port_priv, state); -} - -static int -dpaa2_switch_port_attr_br_flags_pre_set(struct net_device *netdev, - struct switchdev_brport_flags flags) -{ - if (flags.mask & ~(BR_LEARNING | BR_FLOOD)) - return -EINVAL; - - return 0; -} - -static int -dpaa2_switch_port_attr_br_flags_set(struct net_device *netdev, - struct switchdev_brport_flags flags) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err = 0; - - if (flags.mask & BR_LEARNING) { - /* Learning is enabled per switch */ - err = dpaa2_switch_set_learning(port_priv->ethsw_data, - !!(flags.val & BR_LEARNING)); - if (err) - return err; - } - - if (flags.mask & BR_FLOOD) { - err = dpaa2_switch_port_set_flood(port_priv, - !!(flags.val & BR_FLOOD)); - if (err) - return err; - } - - return 0; -} - -static int dpaa2_switch_port_attr_set(struct net_device *netdev, - const struct switchdev_attr *attr) -{ - int err = 0; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_STP_STATE: - err = dpaa2_switch_port_attr_stp_state_set(netdev, - attr->u.stp_state); - break; - case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: - err = dpaa2_switch_port_attr_br_flags_pre_set(netdev, - attr->u.brport_flags); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - err = dpaa2_switch_port_attr_br_flags_set(netdev, - attr->u.brport_flags); - break; - case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: - /* VLANs are supported by default */ - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int dpaa2_switch_port_vlans_add(struct net_device *netdev, - const struct switchdev_obj_port_vlan *vlan) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct dpsw_attr *attr = ðsw->sw_attr; - int err = 0; - - /* Make sure that the VLAN is not already configured - * on the switch port - */ - if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER) - return -EEXIST; - - /* Check if there is space for a new VLAN */ - err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, - ðsw->sw_attr); - if (err) { - netdev_err(netdev, "dpsw_get_attributes err %d\n", err); - return err; - } - if (attr->max_vlans - attr->num_vlans < 1) - return -ENOSPC; - - /* Check if there is space for a new VLAN */ - err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, - ðsw->sw_attr); - if (err) { - netdev_err(netdev, "dpsw_get_attributes err %d\n", err); - return err; - } - if (attr->max_vlans - attr->num_vlans < 1) - return -ENOSPC; - - if (!port_priv->ethsw_data->vlans[vlan->vid]) { - /* this is a new VLAN */ - err = dpaa2_switch_add_vlan(port_priv->ethsw_data, vlan->vid); - if (err) - return err; - - port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL; - } - - return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags); -} - -static int 
dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc, - const unsigned char *addr) -{ - struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc; - struct netdev_hw_addr *ha; - - netif_addr_lock_bh(netdev); - list_for_each_entry(ha, &list->list, list) { - if (ether_addr_equal(ha->addr, addr)) { - netif_addr_unlock_bh(netdev); - return 1; - } - } - netif_addr_unlock_bh(netdev); - return 0; -} - -static int dpaa2_switch_port_mdb_add(struct net_device *netdev, - const struct switchdev_obj_port_mdb *mdb) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - /* Check if address is already set on this port */ - if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) - return -EEXIST; - - err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr); - if (err) - return err; - - err = dev_mc_add(netdev, mdb->addr); - if (err) { - netdev_err(netdev, "dev_mc_add err %d\n", err); - dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); - } - - return err; -} - -static int dpaa2_switch_port_obj_add(struct net_device *netdev, - const struct switchdev_obj *obj) -{ - int err; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = dpaa2_switch_port_vlans_add(netdev, - SWITCHDEV_OBJ_PORT_VLAN(obj)); - break; - case SWITCHDEV_OBJ_ID_PORT_MDB: - err = dpaa2_switch_port_mdb_add(netdev, - SWITCHDEV_OBJ_PORT_MDB(obj)); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid) -{ - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct net_device *netdev = port_priv->netdev; - struct dpsw_vlan_if_cfg vcfg; - int i, err; - - if (!port_priv->vlans[vid]) - return -ENOENT; - - if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) { - err = dpaa2_switch_port_set_pvid(port_priv, 0); - if (err) - return err; - } - - vcfg.num_ifs = 1; - vcfg.if_id[0] = port_priv->idx; - if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) { - err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, - ethsw->dpsw_handle, - vid, &vcfg); - if (err) { - netdev_err(netdev, - "dpsw_vlan_remove_if_untagged err %d\n", - err); - } - port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED; - } - - if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { - err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, - vid, &vcfg); - if (err) { - netdev_err(netdev, - "dpsw_vlan_remove_if err %d\n", err); - return err; - } - port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER; - - /* Delete VLAN from switch if it is no longer configured on - * any port - */ - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) - if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER) - return 0; /* Found a port member in VID */ - - ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL; - - err = dpaa2_switch_dellink(ethsw, vid); - if (err) - return err; - } - - return 0; -} - -static int dpaa2_switch_port_vlans_del(struct net_device *netdev, - const struct switchdev_obj_port_vlan *vlan) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - - if (netif_is_bridge_master(vlan->obj.orig_dev)) - return -EOPNOTSUPP; - - return dpaa2_switch_port_del_vlan(port_priv, vlan->vid); -} - -static int dpaa2_switch_port_mdb_del(struct net_device *netdev, - const struct switchdev_obj_port_mdb *mdb) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) - return -ENOENT; - - err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); - if (err) - return err; - - err = dev_mc_del(netdev, 
mdb->addr); - if (err) { - netdev_err(netdev, "dev_mc_del err %d\n", err); - return err; - } - - return err; -} - -static int dpaa2_switch_port_obj_del(struct net_device *netdev, - const struct switchdev_obj *obj) -{ - int err; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj)); - break; - case SWITCHDEV_OBJ_ID_PORT_MDB: - err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj)); - break; - default: - err = -EOPNOTSUPP; - break; - } - return err; -} - -static int dpaa2_switch_port_attr_set_event(struct net_device *netdev, - struct switchdev_notifier_port_attr_info - *port_attr_info) -{ - int err; - - err = dpaa2_switch_port_attr_set(netdev, port_attr_info->attr); - - port_attr_info->handled = true; - return notifier_from_errno(err); -} - -/* For the moment, only flood setting needs to be updated */ -static int dpaa2_switch_port_bridge_join(struct net_device *netdev, - struct net_device *upper_dev) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct ethsw_port_priv *other_port_priv; - struct net_device *other_dev; - struct list_head *iter; - int i, err; - - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) - if (ethsw->ports[i]->bridge_dev && - (ethsw->ports[i]->bridge_dev != upper_dev)) { - netdev_err(netdev, - "Only one bridge supported per DPSW object!\n"); - return -EINVAL; - } - - netdev_for_each_lower_dev(upper_dev, other_dev, iter) { - if (!dpaa2_switch_port_dev_check(other_dev, NULL)) - continue; - - other_port_priv = netdev_priv(other_dev); - if (other_port_priv->ethsw_data != port_priv->ethsw_data) { - netdev_err(netdev, - "Interface from a different DPSW is in the bridge already!\n"); - return -EINVAL; - } - } - - /* Enable flooding */ - err = dpaa2_switch_port_set_flood(port_priv, 1); - if (!err) - port_priv->bridge_dev = upper_dev; - - return err; -} - -static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int err; - - /* Disable flooding */ - err = dpaa2_switch_port_set_flood(port_priv, 0); - if (!err) - port_priv->bridge_dev = NULL; - - return err; -} - -static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb, - unsigned long event, void *ptr) -{ - struct net_device *netdev = netdev_notifier_info_to_dev(ptr); - struct netdev_notifier_changeupper_info *info = ptr; - struct net_device *upper_dev; - int err = 0; - - if (!dpaa2_switch_port_dev_check(netdev, nb)) - return NOTIFY_DONE; - - /* Handle just upper dev link/unlink for the moment */ - if (event == NETDEV_CHANGEUPPER) { - upper_dev = info->upper_dev; - if (netif_is_bridge_master(upper_dev)) { - if (info->linking) - err = dpaa2_switch_port_bridge_join(netdev, upper_dev); - else - err = dpaa2_switch_port_bridge_leave(netdev); - } - } - - return notifier_from_errno(err); -} - -struct ethsw_switchdev_event_work { - struct work_struct work; - struct switchdev_notifier_fdb_info fdb_info; - struct net_device *dev; - unsigned long event; -}; - -static void dpaa2_switch_event_work(struct work_struct *work) -{ - struct ethsw_switchdev_event_work *switchdev_work = - container_of(work, struct ethsw_switchdev_event_work, work); - struct net_device *dev = switchdev_work->dev; - struct switchdev_notifier_fdb_info *fdb_info; - int err; - - rtnl_lock(); - fdb_info = &switchdev_work->fdb_info; - - switch (switchdev_work->event) { - case SWITCHDEV_FDB_ADD_TO_DEVICE: - if 
(!fdb_info->added_by_user) - break; - if (is_unicast_ether_addr(fdb_info->addr)) - err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev), - fdb_info->addr); - else - err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev), - fdb_info->addr); - if (err) - break; - fdb_info->offloaded = true; - call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev, - &fdb_info->info, NULL); - break; - case SWITCHDEV_FDB_DEL_TO_DEVICE: - if (!fdb_info->added_by_user) - break; - if (is_unicast_ether_addr(fdb_info->addr)) - dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr); - else - dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr); - break; - } - - rtnl_unlock(); - kfree(switchdev_work->fdb_info.addr); - kfree(switchdev_work); - dev_put(dev); -} - -/* Called under rcu_read_lock() */ -static int dpaa2_switch_port_event(struct notifier_block *nb, - unsigned long event, void *ptr) -{ - struct net_device *dev = switchdev_notifier_info_to_dev(ptr); - struct ethsw_port_priv *port_priv = netdev_priv(dev); - struct ethsw_switchdev_event_work *switchdev_work; - struct switchdev_notifier_fdb_info *fdb_info = ptr; - struct ethsw_core *ethsw = port_priv->ethsw_data; - - if (!dpaa2_switch_port_dev_check(dev, nb)) - return NOTIFY_DONE; - - if (event == SWITCHDEV_PORT_ATTR_SET) - return dpaa2_switch_port_attr_set_event(dev, ptr); - - switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); - if (!switchdev_work) - return NOTIFY_BAD; - - INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work); - switchdev_work->dev = dev; - switchdev_work->event = event; - - switch (event) { - case SWITCHDEV_FDB_ADD_TO_DEVICE: - case SWITCHDEV_FDB_DEL_TO_DEVICE: - memcpy(&switchdev_work->fdb_info, ptr, - sizeof(switchdev_work->fdb_info)); - switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); - if (!switchdev_work->fdb_info.addr) - goto err_addr_alloc; - - ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, - fdb_info->addr); - - /* Take a reference on the device to avoid being freed. 
*/ - dev_hold(dev); - break; - default: - kfree(switchdev_work); - return NOTIFY_DONE; - } - - queue_work(ethsw->workqueue, &switchdev_work->work); - - return NOTIFY_DONE; - -err_addr_alloc: - kfree(switchdev_work); - return NOTIFY_BAD; -} - -static int dpaa2_switch_port_obj_event(unsigned long event, - struct net_device *netdev, - struct switchdev_notifier_port_obj_info *port_obj_info) -{ - int err = -EOPNOTSUPP; - - switch (event) { - case SWITCHDEV_PORT_OBJ_ADD: - err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj); - break; - case SWITCHDEV_PORT_OBJ_DEL: - err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj); - break; - } - - port_obj_info->handled = true; - return notifier_from_errno(err); -} - -static int dpaa2_switch_port_blocking_event(struct notifier_block *nb, - unsigned long event, void *ptr) -{ - struct net_device *dev = switchdev_notifier_info_to_dev(ptr); - - if (!dpaa2_switch_port_dev_check(dev, nb)) - return NOTIFY_DONE; - - switch (event) { - case SWITCHDEV_PORT_OBJ_ADD: - case SWITCHDEV_PORT_OBJ_DEL: - return dpaa2_switch_port_obj_event(event, dev, ptr); - case SWITCHDEV_PORT_ATTR_SET: - return dpaa2_switch_port_attr_set_event(dev, ptr); - } - - return NOTIFY_DONE; -} - -static int dpaa2_switch_register_notifier(struct device *dev) -{ - struct ethsw_core *ethsw = dev_get_drvdata(dev); - int err; - - ethsw->port_nb.notifier_call = dpaa2_switch_port_netdevice_event; - err = register_netdevice_notifier(&ethsw->port_nb); - if (err) { - dev_err(dev, "Failed to register netdev notifier\n"); - return err; - } - - ethsw->port_switchdev_nb.notifier_call = dpaa2_switch_port_event; - err = register_switchdev_notifier(&ethsw->port_switchdev_nb); - if (err) { - dev_err(dev, "Failed to register switchdev notifier\n"); - goto err_switchdev_nb; - } - - ethsw->port_switchdevb_nb.notifier_call = dpaa2_switch_port_blocking_event; - err = register_switchdev_blocking_notifier(&ethsw->port_switchdevb_nb); - if (err) { - dev_err(dev, "Failed to register switchdev blocking notifier\n"); - goto err_switchdev_blocking_nb; - } - - return 0; - -err_switchdev_blocking_nb: - unregister_switchdev_notifier(&ethsw->port_switchdev_nb); -err_switchdev_nb: - unregister_netdevice_notifier(&ethsw->port_nb); - return err; -} - -static void dpaa2_switch_detect_features(struct ethsw_core *ethsw) -{ - ethsw->features = 0; - - if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6)) - ethsw->features |= ETHSW_FEATURE_MAC_ADDR; -} - -static int dpaa2_switch_init(struct fsl_mc_device *sw_dev) -{ - struct device *dev = &sw_dev->dev; - struct ethsw_core *ethsw = dev_get_drvdata(dev); - struct dpsw_stp_cfg stp_cfg; - int err; - u16 i; - - ethsw->dev_id = sw_dev->obj_desc.id; - - err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle); - if (err) { - dev_err(dev, "dpsw_open err %d\n", err); - return err; - } - - err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, - &ethsw->sw_attr); - if (err) { - dev_err(dev, "dpsw_get_attributes err %d\n", err); - goto err_close; - } - - err = dpsw_get_api_version(ethsw->mc_io, 0, - &ethsw->major, - &ethsw->minor); - if (err) { - dev_err(dev, "dpsw_get_api_version err %d\n", err); - goto err_close; - } - - /* Minimum supported DPSW version check */ - if (ethsw->major < DPSW_MIN_VER_MAJOR || - (ethsw->major == DPSW_MIN_VER_MAJOR && - ethsw->minor < DPSW_MIN_VER_MINOR)) { - dev_err(dev, "DPSW version %d:%d not supported. 
Use %d.%d or greater.\n", - ethsw->major, - ethsw->minor, - DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR); - err = -ENOTSUPP; - goto err_close; - } - - dpaa2_switch_detect_features(ethsw); - - err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle); - if (err) { - dev_err(dev, "dpsw_reset err %d\n", err); - goto err_close; - } - - err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0, - DPSW_FDB_LEARNING_MODE_HW); - if (err) { - dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err); - goto err_close; - } - - stp_cfg.vlan_id = DEFAULT_VLAN_ID; - stp_cfg.state = DPSW_STP_STATE_FORWARDING; - - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { - err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i, - &stp_cfg); - if (err) { - dev_err(dev, "dpsw_if_set_stp err %d for port %d\n", - err, i); - goto err_close; - } - - err = dpsw_if_set_broadcast(ethsw->mc_io, 0, - ethsw->dpsw_handle, i, 1); - if (err) { - dev_err(dev, - "dpsw_if_set_broadcast err %d for port %d\n", - err, i); - goto err_close; - } - } - - ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered", - WQ_MEM_RECLAIM, "ethsw", - ethsw->sw_attr.id); - if (!ethsw->workqueue) { - err = -ENOMEM; - goto err_close; - } - - err = dpaa2_switch_register_notifier(dev); - if (err) - goto err_destroy_ordered_workqueue; - - return 0; - -err_destroy_ordered_workqueue: - destroy_workqueue(ethsw->workqueue); - -err_close: - dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); - return err; -} - -static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) -{ - struct net_device *netdev = port_priv->netdev; - struct ethsw_core *ethsw = port_priv->ethsw_data; - struct dpsw_vlan_if_cfg vcfg; - int err; - - /* Switch starts with all ports configured to VLAN 1. Need to - * remove this setting to allow configuration at bridge join - */ - vcfg.num_ifs = 1; - vcfg.if_id[0] = port_priv->idx; - - err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle, - DEFAULT_VLAN_ID, &vcfg); - if (err) { - netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n", - err); - return err; - } - - err = dpaa2_switch_port_set_pvid(port_priv, 0); - if (err) - return err; - - err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, - DEFAULT_VLAN_ID, &vcfg); - if (err) - netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err); - - return err; -} - -static void dpaa2_switch_unregister_notifier(struct device *dev) -{ - struct ethsw_core *ethsw = dev_get_drvdata(dev); - struct notifier_block *nb; - int err; - - nb = &ethsw->port_switchdevb_nb; - err = unregister_switchdev_blocking_notifier(nb); - if (err) - dev_err(dev, - "Failed to unregister switchdev blocking notifier (%d)\n", - err); - - err = unregister_switchdev_notifier(&ethsw->port_switchdev_nb); - if (err) - dev_err(dev, - "Failed to unregister switchdev notifier (%d)\n", err); - - err = unregister_netdevice_notifier(&ethsw->port_nb); - if (err) - dev_err(dev, - "Failed to unregister netdev notifier (%d)\n", err); -} - -static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev) -{ - struct device *dev = &sw_dev->dev; - struct ethsw_core *ethsw = dev_get_drvdata(dev); - int err; - - dpaa2_switch_unregister_notifier(dev); - - err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); - if (err) - dev_warn(dev, "dpsw_close err %d\n", err); -} - -static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) -{ - struct ethsw_port_priv *port_priv; - struct ethsw_core *ethsw; - struct device *dev; - int i; - - dev = &sw_dev->dev; - ethsw = dev_get_drvdata(dev); - - 
dpaa2_switch_teardown_irqs(sw_dev); - - dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); - - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { - port_priv = ethsw->ports[i]; - unregister_netdev(port_priv->netdev); - free_netdev(port_priv->netdev); - } - kfree(ethsw->ports); - - dpaa2_switch_takedown(sw_dev); - - destroy_workqueue(ethsw->workqueue); - - fsl_mc_portal_free(ethsw->mc_io); - - kfree(ethsw); - - dev_set_drvdata(dev, NULL); - - return 0; -} - -static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, - u16 port_idx) -{ - struct ethsw_port_priv *port_priv; - struct device *dev = ethsw->dev; - struct net_device *port_netdev; - int err; - - port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv)); - if (!port_netdev) { - dev_err(dev, "alloc_etherdev error\n"); - return -ENOMEM; - } - - port_priv = netdev_priv(port_netdev); - port_priv->netdev = port_netdev; - port_priv->ethsw_data = ethsw; - - port_priv->idx = port_idx; - port_priv->stp_state = BR_STATE_FORWARDING; - - /* Flooding is implicitly enabled */ - port_priv->flood = true; - - SET_NETDEV_DEV(port_netdev, dev); - port_netdev->netdev_ops = &dpaa2_switch_port_ops; - port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops; - - /* Set MTU limits */ - port_netdev->min_mtu = ETH_MIN_MTU; - port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH; - - err = dpaa2_switch_port_init(port_priv, port_idx); - if (err) - goto err_port_probe; - - err = dpaa2_switch_port_set_mac_addr(port_priv); - if (err) - goto err_port_probe; - - err = register_netdev(port_netdev); - if (err < 0) { - dev_err(dev, "register_netdev error %d\n", err); - goto err_port_probe; - } - - ethsw->ports[port_idx] = port_priv; - - return 0; - -err_port_probe: - free_netdev(port_netdev); - - return err; -} - -static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) -{ - struct device *dev = &sw_dev->dev; - struct ethsw_core *ethsw; - int i, err; - - /* Allocate switch core*/ - ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL); - - if (!ethsw) - return -ENOMEM; - - ethsw->dev = dev; - dev_set_drvdata(dev, ethsw); - - err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, - &ethsw->mc_io); - if (err) { - if (err == -ENXIO) - err = -EPROBE_DEFER; - else - dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); - goto err_free_drvdata; - } - - err = dpaa2_switch_init(sw_dev); - if (err) - goto err_free_cmdport; - - /* DEFAULT_VLAN_ID is implicitly configured on the switch */ - ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER; - - /* Learning is implicitly enabled */ - ethsw->learning = true; - - ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports), - GFP_KERNEL); - if (!(ethsw->ports)) { - err = -ENOMEM; - goto err_takedown; - } - - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { - err = dpaa2_switch_probe_port(ethsw, i); - if (err) - goto err_free_ports; - } - - err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); - if (err) { - dev_err(ethsw->dev, "dpsw_enable err %d\n", err); - goto err_free_ports; - } - - /* Make sure the switch ports are disabled at probe time */ - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) - dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i); - - /* Setup IRQs */ - err = dpaa2_switch_setup_irqs(sw_dev); - if (err) - goto err_stop; - - dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs); - return 0; - -err_stop: - dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); - -err_free_ports: - /* Cleanup registered ports only */ - for (i--; i >= 0; i--) { - unregister_netdev(ethsw->ports[i]->netdev); - 
free_netdev(ethsw->ports[i]->netdev); - } - kfree(ethsw->ports); - -err_takedown: - dpaa2_switch_takedown(sw_dev); - -err_free_cmdport: - fsl_mc_portal_free(ethsw->mc_io); - -err_free_drvdata: - kfree(ethsw); - dev_set_drvdata(dev, NULL); - - return err; -} - -static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = { - { - .vendor = FSL_MC_VENDOR_FREESCALE, - .obj_type = "dpsw", - }, - { .vendor = 0x0 } -}; -MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table); - -static struct fsl_mc_driver dpaa2_switch_drv = { - .driver = { - .name = KBUILD_MODNAME, - .owner = THIS_MODULE, - }, - .probe = dpaa2_switch_probe, - .remove = dpaa2_switch_remove, - .match_id_table = dpaa2_switch_match_id_table -}; - -module_fsl_mc_driver(dpaa2_switch_drv); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver"); diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h deleted file mode 100644 index 5f9211ccb1ef..000000000000 --- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h +++ /dev/null @@ -1,80 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * DPAA2 Ethernet Switch declarations - * - * Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright 2017-2018 NXP - * - */ - -#ifndef __ETHSW_H -#define __ETHSW_H - -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/rtnetlink.h> -#include <linux/if_vlan.h> -#include <uapi/linux/if_bridge.h> -#include <net/switchdev.h> -#include <linux/if_bridge.h> - -#include "dpsw.h" - -/* Number of IRQs supported */ -#define DPSW_IRQ_NUM 2 - -/* Port is member of VLAN */ -#define ETHSW_VLAN_MEMBER 1 -/* VLAN to be treated as untagged on egress */ -#define ETHSW_VLAN_UNTAGGED 2 -/* Untagged frames will be assigned to this VLAN */ -#define ETHSW_VLAN_PVID 4 -/* VLAN configured on the switch */ -#define ETHSW_VLAN_GLOBAL 8 - -/* Maximum Frame Length supported by HW (currently 10k) */ -#define DPAA2_MFL (10 * 1024) -#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN) -#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN) - -#define ETHSW_FEATURE_MAC_ADDR BIT(0) - -extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops; - -struct ethsw_core; - -/* Per port private data */ -struct ethsw_port_priv { - struct net_device *netdev; - u16 idx; - struct ethsw_core *ethsw_data; - u8 link_state; - u8 stp_state; - bool flood; - - u8 vlans[VLAN_VID_MASK + 1]; - u16 pvid; - struct net_device *bridge_dev; -}; - -/* Switch data */ -struct ethsw_core { - struct device *dev; - struct fsl_mc_io *mc_io; - u16 dpsw_handle; - struct dpsw_attr sw_attr; - u16 major, minor; - unsigned long features; - int dev_id; - struct ethsw_port_priv **ports; - - u8 vlans[VLAN_VID_MASK + 1]; - bool learning; - - struct notifier_block port_nb; - struct notifier_block port_switchdev_nb; - struct notifier_block port_switchdevb_nb; - struct workqueue_struct *workqueue; -}; - -#endif /* __ETHSW_H */ diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c index dc09cc6e1c47..09e7b4cd0138 100644 --- a/drivers/staging/ks7010/ks_wlan_net.c +++ b/drivers/staging/ks7010/ks_wlan_net.c @@ -1120,6 +1120,7 @@ static int ks_wlan_set_scan(struct net_device *dev, { struct ks_wlan_private *priv = netdev_priv(dev); struct iw_scan_req *req = NULL; + int len; if (priv->sleep_mode == SLP_SLEEP) return -EPERM; @@ -1129,8 +1130,9 @@ static int ks_wlan_set_scan(struct net_device *dev, if (wrqu->data.length == sizeof(struct iw_scan_req) && wrqu->data.flags & 
IW_SCAN_THIS_ESSID) { req = (struct iw_scan_req *)extra; - priv->scan_ssid_len = req->essid_len; - memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len); + len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); + priv->scan_ssid_len = len; + memcpy(priv->scan_ssid, req->essid, len); } else { priv->scan_ssid_len = 0; } diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c index fa1e34a0d456..182bb944c9b3 100644 --- a/drivers/staging/rtl8188eu/core/rtw_ap.c +++ b/drivers/staging/rtl8188eu/core/rtw_ap.c @@ -791,6 +791,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, WLAN_EID_SSID, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_); if (p && ie_len > 0) { + ie_len = min_t(int, ie_len, sizeof(pbss_network->ssid.ssid)); memset(&pbss_network->ssid, 0, sizeof(struct ndis_802_11_ssid)); memcpy(pbss_network->ssid.ssid, p + 2, ie_len); pbss_network->ssid.ssid_length = ie_len; @@ -811,6 +812,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, WLAN_EID_SUPP_RATES, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_); if (p) { + ie_len = min_t(int, ie_len, NDIS_802_11_LENGTH_RATES_EX); memcpy(supportRate, p + 2, ie_len); supportRateNum = ie_len; } @@ -819,6 +821,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, WLAN_EID_EXT_SUPP_RATES, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_); if (p) { + ie_len = min_t(int, ie_len, + NDIS_802_11_LENGTH_RATES_EX - supportRateNum); memcpy(supportRate + supportRateNum, p + 2, ie_len); supportRateNum += ie_len; } @@ -934,6 +938,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) pht_cap->mcs.rx_mask[0] = 0xff; pht_cap->mcs.rx_mask[1] = 0x0; + ie_len = min_t(int, ie_len, sizeof(pmlmepriv->htpriv.ht_cap)); memcpy(&pmlmepriv->htpriv.ht_cap, p + 2, ie_len); } diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index bf22f130d3e1..58954b88a817 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c @@ -1133,9 +1133,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, break; } sec_len = *(pos++); len -= 1; - if (sec_len > 0 && sec_len <= len) { + if (sec_len > 0 && + sec_len <= len && + sec_len <= 32) { ssid[ssid_index].ssid_length = sec_len; - memcpy(ssid[ssid_index].ssid, pos, ssid[ssid_index].ssid_length); + memcpy(ssid[ssid_index].ssid, pos, sec_len); ssid_index++; } pos += sec_len; diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig index 963a2ffbc1fb..39f5a6a7346a 100644 --- a/drivers/staging/rtl8192e/Kconfig +++ b/drivers/staging/rtl8192e/Kconfig @@ -27,6 +27,7 @@ config RTLLIB_CRYPTO_CCMP config RTLLIB_CRYPTO_TKIP tristate "Support for rtllib TKIP crypto" depends on RTLLIB + select CRYPTO select CRYPTO_LIB_ARC4 select CRYPTO_MICHAEL_MIC default y diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c index 16bcee13f64b..407effde5e71 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c @@ -406,9 +406,10 @@ static int _rtl92e_wx_set_scan(struct net_device *dev, struct iw_scan_req *req = (struct iw_scan_req *)b; if (req->essid_len) { - ieee->current_network.ssid_len = req->essid_len; - memcpy(ieee->current_network.ssid, 
req->essid, - req->essid_len); + int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); + + ieee->current_network.ssid_len = len; + memcpy(ieee->current_network.ssid, req->essid, len); } } diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c index d853586705fc..77bf88696a84 100644 --- a/drivers/staging/rtl8192u/r8192U_wx.c +++ b/drivers/staging/rtl8192u/r8192U_wx.c @@ -331,8 +331,10 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a, struct iw_scan_req *req = (struct iw_scan_req *)b; if (req->essid_len) { - ieee->current_network.ssid_len = req->essid_len; - memcpy(ieee->current_network.ssid, req->essid, req->essid_len); + int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); + + ieee->current_network.ssid_len = len; + memcpy(ieee->current_network.ssid, req->essid, len); } } diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c index 18116469bd31..75716f59044d 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.c +++ b/drivers/staging/rtl8712/rtl871x_cmd.c @@ -192,8 +192,10 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter, psurveyPara->ss_ssidlen = 0; memset(psurveyPara->ss_ssid, 0, IW_ESSID_MAX_SIZE + 1); if (pssid && pssid->SsidLength) { - memcpy(psurveyPara->ss_ssid, pssid->Ssid, pssid->SsidLength); - psurveyPara->ss_ssidlen = cpu_to_le32(pssid->SsidLength); + int len = min_t(int, pssid->SsidLength, IW_ESSID_MAX_SIZE); + + memcpy(psurveyPara->ss_ssid, pssid->Ssid, len); + psurveyPara->ss_ssidlen = cpu_to_le32(len); } set_fwstate(pmlmepriv, _FW_UNDER_SURVEY); r8712_enqueue_cmd(pcmdpriv, ph2c); diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c index 81de5a9e6b67..60dd798a6e51 100644 --- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c @@ -924,7 +924,7 @@ static int r871x_wx_set_priv(struct net_device *dev, struct iw_point *dwrq = (struct iw_point *)awrq; len = dwrq->length; - ext = memdup_user(dwrq->pointer, len); + ext = strndup_user(dwrq->pointer, len); if (IS_ERR(ext)) return PTR_ERR(ext); diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h index e7061d383306..c3c2c1566882 100644 --- a/drivers/staging/vt6655/rxtx.h +++ b/drivers/staging/vt6655/rxtx.h @@ -150,7 +150,7 @@ struct vnt_cts { u16 reserved; struct ieee80211_cts data; u16 reserved2; -} __packed; +} __packed __aligned(2); struct vnt_cts_fb { struct vnt_phy_field b; @@ -160,7 +160,7 @@ struct vnt_cts_fb { __le16 cts_duration_ba_f1; struct ieee80211_cts data; u16 reserved2; -} __packed; +} __packed __aligned(2); struct vnt_tx_fifo_head { u8 tx_key[WLAN_KEY_LEN_CCMP]; diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c index cd6bcfdfbe9a..ed53d0b45592 100644 --- a/drivers/staging/wfx/bh.c +++ b/drivers/staging/wfx/bh.c @@ -5,6 +5,7 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
* Copyright (c) 2010, ST-Ericsson */ +#include <linux/gpio/consumer.h> #include <net/mac80211.h> #include "bh.h" diff --git a/drivers/staging/wfx/bh.h b/drivers/staging/wfx/bh.h index 92ef3298d4ac..78c49329e22a 100644 --- a/drivers/staging/wfx/bh.h +++ b/drivers/staging/wfx/bh.h @@ -8,6 +8,10 @@ #ifndef WFX_BH_H #define WFX_BH_H +#include <linux/atomic.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + struct wfx_dev; struct wfx_hif { diff --git a/drivers/staging/wfx/bus.h b/drivers/staging/wfx/bus.h index ea3911485307..ca04b3da6204 100644 --- a/drivers/staging/wfx/bus.h +++ b/drivers/staging/wfx/bus.h @@ -8,6 +8,9 @@ #ifndef WFX_BUS_H #define WFX_BUS_H +#include <linux/mmc/sdio_func.h> +#include <linux/spi/spi.h> + #define WFX_REG_CONFIG 0x0 #define WFX_REG_CONTROL 0x1 #define WFX_REG_IN_OUT_QUEUE 0x2 diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c index 588edce44854..e06d7e1ebe9c 100644 --- a/drivers/staging/wfx/bus_sdio.c +++ b/drivers/staging/wfx/bus_sdio.c @@ -5,13 +5,19 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/module.h> #include <linux/mmc/sdio.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/card.h> +#include <linux/interrupt.h> #include <linux/of_irq.h> +#include <linux/irq.h> #include "bus.h" #include "wfx.h" +#include "hwio.h" +#include "main.h" +#include "bh.h" static const struct wfx_platform_data wfx_sdio_pdata = { .file_fw = "wfm_wf200", diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c index f89855abe9f8..a99125d1a30d 100644 --- a/drivers/staging/wfx/bus_spi.c +++ b/drivers/staging/wfx/bus_spi.c @@ -6,12 +6,19 @@ * Copyright (c) 2011, Sagrad Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> #include <linux/spi/spi.h> +#include <linux/interrupt.h> #include <linux/irq.h> #include <linux/of.h> #include "bus.h" #include "wfx.h" +#include "hwio.h" +#include "main.h" +#include "bh.h" #define SET_WRITE 0x7FFF /* usage: and operation */ #define SET_READ 0x8000 /* usage: or operation */ diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c index 2cfa16279220..385f2d42a0e2 100644 --- a/drivers/staging/wfx/data_rx.c +++ b/drivers/staging/wfx/data_rx.c @@ -5,8 +5,13 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
* Copyright (c) 2010, ST-Ericsson */ +#include <linux/etherdevice.h> +#include <net/mac80211.h> + #include "data_rx.h" #include "wfx.h" +#include "bh.h" +#include "sta.h" static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt) { diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c index 76f26e3c4381..77fb104efdec 100644 --- a/drivers/staging/wfx/data_tx.c +++ b/drivers/staging/wfx/data_tx.c @@ -6,9 +6,14 @@ * Copyright (c) 2010, ST-Ericsson */ #include <net/mac80211.h> +#include <linux/etherdevice.h> +#include "data_tx.h" #include "wfx.h" +#include "bh.h" #include "sta.h" +#include "queue.h" +#include "debug.h" #include "traces.h" #include "hif_tx_mib.h" diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h index 6b3020097efa..401363d6b563 100644 --- a/drivers/staging/wfx/data_tx.h +++ b/drivers/staging/wfx/data_tx.h @@ -8,6 +8,9 @@ #ifndef WFX_DATA_TX_H #define WFX_DATA_TX_H +#include <linux/list.h> +#include <net/mac80211.h> + #include "hif_api_cmd.h" #include "hif_api_mib.h" diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c index 3e87d13eb358..eedada78c25f 100644 --- a/drivers/staging/wfx/debug.c +++ b/drivers/staging/wfx/debug.c @@ -5,9 +5,15 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/crc32.h> + #include "debug.h" #include "wfx.h" #include "sta.h" +#include "main.h" +#include "hif_tx.h" #include "hif_tx_mib.h" #define CREATE_TRACE_POINTS diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c index 1bb9054871c4..1b8aec02d169 100644 --- a/drivers/staging/wfx/fwio.c +++ b/drivers/staging/wfx/fwio.c @@ -6,6 +6,8 @@ * Copyright (c) 2010, ST-Ericsson */ #include <linux/firmware.h> +#include <linux/slab.h> +#include <linux/mm.h> #include <linux/bitfield.h> #include "fwio.h" diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h index 8b671c9ab97c..58c9bb036011 100644 --- a/drivers/staging/wfx/hif_api_cmd.h +++ b/drivers/staging/wfx/hif_api_cmd.h @@ -8,6 +8,10 @@ #ifndef WFX_HIF_API_CMD_H #define WFX_HIF_API_CMD_H +#include <linux/ieee80211.h> + +#include "hif_api_general.h" + enum hif_requests_ids { HIF_REQ_ID_RESET = 0x0a, HIF_REQ_ID_READ_MIB = 0x05, diff --git a/drivers/staging/wfx/hif_api_general.h b/drivers/staging/wfx/hif_api_general.h index 70b253d0265d..24188945718d 100644 --- a/drivers/staging/wfx/hif_api_general.h +++ b/drivers/staging/wfx/hif_api_general.h @@ -8,6 +8,15 @@ #ifndef WFX_HIF_API_GENERAL_H #define WFX_HIF_API_GENERAL_H +#ifdef __KERNEL__ +#include <linux/types.h> +#include <linux/if_ether.h> +#else +#include <net/ethernet.h> +#include <stdint.h> +#define __packed __attribute__((__packed__)) +#endif + #define HIF_ID_IS_INDICATION 0x80 #define HIF_COUNTER_MAX 7 diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c index 17dc13321978..63b437261eb7 100644 --- a/drivers/staging/wfx/hif_tx.c +++ b/drivers/staging/wfx/hif_tx.c @@ -6,7 +6,11 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
* Copyright (c) 2010, ST-Ericsson */ +#include <linux/etherdevice.h> + +#include "hif_tx.h" #include "wfx.h" +#include "bh.h" #include "hwio.h" #include "debug.h" #include "sta.h" diff --git a/drivers/staging/wfx/hif_tx_mib.c b/drivers/staging/wfx/hif_tx_mib.c index 6432ed86505c..1926cf1b62be 100644 --- a/drivers/staging/wfx/hif_tx_mib.c +++ b/drivers/staging/wfx/hif_tx_mib.c @@ -6,8 +6,13 @@ * Copyright (c) 2010, ST-Ericsson * Copyright (C) 2010, ST-Ericsson SA */ + +#include <linux/etherdevice.h> + #include "wfx.h" +#include "hif_tx.h" #include "hif_tx_mib.h" +#include "hif_api_mib.h" int hif_set_output_power(struct wfx_vif *wvif, int val) { diff --git a/drivers/staging/wfx/hwio.c b/drivers/staging/wfx/hwio.c index 089bb41be149..36fbc5b5d64c 100644 --- a/drivers/staging/wfx/hwio.c +++ b/drivers/staging/wfx/hwio.c @@ -5,10 +5,13 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/kernel.h> +#include <linux/delay.h> #include <linux/slab.h> #include "hwio.h" #include "wfx.h" +#include "bus.h" #include "traces.h" /* diff --git a/drivers/staging/wfx/hwio.h b/drivers/staging/wfx/hwio.h index 8bb9bcfc3182..0b8e4f7157df 100644 --- a/drivers/staging/wfx/hwio.h +++ b/drivers/staging/wfx/hwio.h @@ -8,6 +8,8 @@ #ifndef WFX_HWIO_H #define WFX_HWIO_H +#include <linux/types.h> + struct wfx_dev; int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t buf_len); diff --git a/drivers/staging/wfx/key.c b/drivers/staging/wfx/key.c index c93d07dcdc10..2ab82bed4c1b 100644 --- a/drivers/staging/wfx/key.c +++ b/drivers/staging/wfx/key.c @@ -5,10 +5,12 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/etherdevice.h> #include <net/mac80211.h> #include "key.h" #include "wfx.h" +#include "hif_tx_mib.h" static int wfx_alloc_key(struct wfx_dev *wdev) { diff --git a/drivers/staging/wfx/key.h b/drivers/staging/wfx/key.h index 4dc9feadaba2..70a44d0ca35e 100644 --- a/drivers/staging/wfx/key.h +++ b/drivers/staging/wfx/key.h @@ -8,6 +8,8 @@ #ifndef WFX_KEY_H #define WFX_KEY_H +#include <net/mac80211.h> + struct wfx_dev; struct wfx_vif; diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c index b9ea9a93fe1a..e7bc1988124a 100644 --- a/drivers/staging/wfx/main.c +++ b/drivers/staging/wfx/main.c @@ -10,21 +10,28 @@ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c) 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. 
*/ +#include <linux/module.h> #include <linux/of.h> #include <linux/of_net.h> +#include <linux/gpio/consumer.h> #include <linux/mmc/sdio_func.h> #include <linux/spi/spi.h> +#include <linux/etherdevice.h> #include <linux/firmware.h> +#include "main.h" #include "wfx.h" #include "fwio.h" #include "hwio.h" #include "bus.h" +#include "bh.h" #include "sta.h" #include "key.h" #include "scan.h" #include "debug.h" +#include "data_tx.h" #include "hif_tx_mib.h" +#include "hif_api_cmd.h" #define WFX_PDS_MAX_SIZE 1500 diff --git a/drivers/staging/wfx/main.h b/drivers/staging/wfx/main.h index 086bcc041b90..a0db322383a3 100644 --- a/drivers/staging/wfx/main.h +++ b/drivers/staging/wfx/main.h @@ -10,8 +10,11 @@ #ifndef WFX_MAIN_H #define WFX_MAIN_H +#include <linux/device.h> #include <linux/gpio/consumer.h> +#include "hif_api_general.h" + struct wfx_dev; struct hwbus_ops; diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c index 3bddf282a4ce..31c37f69c295 100644 --- a/drivers/staging/wfx/queue.c +++ b/drivers/staging/wfx/queue.c @@ -5,9 +5,13 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/sched.h> #include <net/mac80211.h> +#include "queue.h" #include "wfx.h" +#include "sta.h" +#include "data_tx.h" #include "traces.h" void wfx_tx_lock(struct wfx_dev *wdev) diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h index e43aa9dfbc45..80ba19455ef3 100644 --- a/drivers/staging/wfx/queue.h +++ b/drivers/staging/wfx/queue.h @@ -8,6 +8,9 @@ #ifndef WFX_QUEUE_H #define WFX_QUEUE_H +#include <linux/skbuff.h> +#include <linux/atomic.h> + struct wfx_dev; struct wfx_vif; diff --git a/drivers/staging/wfx/scan.h b/drivers/staging/wfx/scan.h index e5b7eef78858..c7496a766478 100644 --- a/drivers/staging/wfx/scan.h +++ b/drivers/staging/wfx/scan.h @@ -8,6 +8,8 @@ #ifndef WFX_SCAN_H #define WFX_SCAN_H +#include <net/mac80211.h> + struct wfx_dev; struct wfx_vif; diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c index 5585f9e876e1..196779a1b89a 100644 --- a/drivers/staging/wfx/sta.c +++ b/drivers/staging/wfx/sta.c @@ -5,11 +5,17 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/etherdevice.h> #include <net/mac80211.h> #include "sta.h" #include "wfx.h" +#include "fwio.h" +#include "bh.h" +#include "key.h" #include "scan.h" +#include "debug.h" +#include "hif_tx.h" #include "hif_tx_mib.h" #define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2 diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h index a3fb9fc93fa4..d7b5df5ea4e6 100644 --- a/drivers/staging/wfx/sta.h +++ b/drivers/staging/wfx/sta.h @@ -8,6 +8,8 @@ #ifndef WFX_STA_H #define WFX_STA_H +#include <net/mac80211.h> + struct wfx_dev; struct wfx_vif; diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h index afe1074e09b3..e34c7a538c65 100644 --- a/drivers/staging/wfx/traces.h +++ b/drivers/staging/wfx/traces.h @@ -12,8 +12,11 @@ #define _WFX_TRACE_H #include <linux/tracepoint.h> +#include <net/mac80211.h> #include "bus.h" +#include "hif_api_cmd.h" +#include "hif_api_mib.h" /* The hell below need some explanations. For each symbolic number, we need to * define it with TRACE_DEFINE_ENUM() and in a list for __print_symbolic. 
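The traces.h comment above refers to ftrace's symbolic-name convention: every enum value printed through __print_symbolic() must also be exported once with TRACE_DEFINE_ENUM(), otherwise user-space tools such as trace-cmd only see raw numbers. A minimal sketch of the pairing follows; the hif_msg event and its enum values are hypothetical stand-ins, not the wfx driver's real symbols, and the usual TRACE_SYSTEM/CREATE_TRACE_POINTS header boilerplate is omitted.

#include <linux/tracepoint.h>

/* Hypothetical message IDs, for illustration only. */
enum hif_msg_id {
	HIF_MSG_TX = 0x04,
	HIF_MSG_RX = 0x24,
};

/* Export each value so trace tools can map numbers back to names. */
TRACE_DEFINE_ENUM(HIF_MSG_TX);
TRACE_DEFINE_ENUM(HIF_MSG_RX);

/* The same values again, in the table __print_symbolic() consumes. */
#define show_hif_msg_id(id)			\
	__print_symbolic(id,			\
		{ HIF_MSG_TX, "TX" },		\
		{ HIF_MSG_RX, "RX" })

TRACE_EVENT(hif_msg,
	TP_PROTO(int msg_id),
	TP_ARGS(msg_id),
	TP_STRUCT__entry(
		__field(int, msg_id)
	),
	TP_fast_assign(
		__entry->msg_id = msg_id;
	),
	TP_printk("msg=%s", show_hif_msg_id(__entry->msg_id))
);

Keeping the two lists in sync by hand is the error-prone part the wfx comment warns about, which is why such headers usually generate both from a single macro list.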
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h index a185b82795c4..94898680ccde 100644 --- a/drivers/staging/wfx/wfx.h +++ b/drivers/staging/wfx/wfx.h @@ -10,6 +10,9 @@ #ifndef WFX_H #define WFX_H +#include <linux/completion.h> +#include <linux/workqueue.h> +#include <linux/mutex.h> #include <linux/nospec.h> #include <net/mac80211.h> diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index cf4718c6d35d..319a1e701163 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -747,7 +747,6 @@ module_platform_driver(optee_driver); MODULE_AUTHOR("Linaro"); MODULE_DESCRIPTION("OP-TEE driver"); -MODULE_SUPPORTED_DEVICE(""); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:optee"); diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index 345917a58f2f..1c4aac8464a7 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -674,6 +674,9 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev, { struct cooling_dev_stats *stats = cdev->stats; + if (!stats) + return; + spin_lock(&stats->lock); if (stats->state == new_state) diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index b63fecca6c2a..2a95b4ce06c0 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -768,12 +768,6 @@ static int tb_init_port(struct tb_port *port) tb_dump_port(port->sw->tb, &port->config); - /* Control port does not need HopID allocation */ - if (port->port) { - ida_init(&port->in_hopids); - ida_init(&port->out_hopids); - } - INIT_LIST_HEAD(&port->list); return 0; @@ -1842,10 +1836,8 @@ static void tb_switch_release(struct device *dev) dma_port_free(sw->dma_port); tb_switch_for_each_port(sw, port) { - if (!port->disabled) { - ida_destroy(&port->in_hopids); - ida_destroy(&port->out_hopids); - } + ida_destroy(&port->in_hopids); + ida_destroy(&port->out_hopids); } kfree(sw->uuid); @@ -2025,6 +2017,12 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, /* minimum setup for tb_find_cap and tb_drom_read to work */ sw->ports[i].sw = sw; sw->ports[i].port = i; + + /* Control port does not need HopID allocation */ + if (i) { + ida_init(&sw->ports[i].in_hopids); + ida_init(&sw->ports[i].out_hopids); + } } ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 1f000ac1728b..c348b1fc0efc 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -138,6 +138,10 @@ static void tb_discover_tunnels(struct tb_switch *sw) parent->boot = true; parent = tb_switch_parent(parent); } + } else if (tb_tunnel_is_dp(tunnel)) { + /* Keep the domain from powering down */ + pm_runtime_get_sync(&tunnel->src_port->sw->dev); + pm_runtime_get_sync(&tunnel->dst_port->sw->dev); } list_add_tail(&tunnel->list, &tcm->tunnel_list); diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 8b2797b6ee44..5e2374580e27 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -66,8 +66,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp) wake_up_interruptible(&tty->link->read_wait); wake_up_interruptible(&tty->link->write_wait); if (tty->driver->subtype == PTY_TYPE_MASTER) { - struct file *f; - + set_bit(TTY_OTHER_CLOSED, &tty->flags); #ifdef CONFIG_UNIX98_PTYS if (tty->driver == ptm_driver) { mutex_lock(&devpts_mutex); @@ -76,17 +75,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp) mutex_unlock(&devpts_mutex); } #endif - - /* - * 
This hack is required because a program can open a - * pty and redirect a console to it, but if the pty is - * closed and the console is not released, then the - * slave side will never close. So release the - * redirect when the master closes. - */ - f = tty_release_redirect(tty->link); - if (f) - fput(f); + tty_vhangup(tty->link); } } diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c index 9a872750581c..94af7a5ea497 100644 --- a/drivers/tty/serial/icom.c +++ b/drivers/tty/serial/icom.c @@ -1639,8 +1639,6 @@ module_exit(icom_exit); MODULE_AUTHOR("Michael Anderson <mjanders@us.ibm.com>"); MODULE_DESCRIPTION("IBM iSeries Serial IOA driver"); -MODULE_SUPPORTED_DEVICE - ("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("icom_call_setup.bin"); MODULE_FIRMWARE("icom_res_dce.bin"); diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c index cd30da0ef083..0ea799bf8dbb 100644 --- a/drivers/tty/serial/jsm/jsm_driver.c +++ b/drivers/tty/serial/jsm/jsm_driver.c @@ -19,7 +19,6 @@ MODULE_AUTHOR("Digi International, https://www.digi.com"); MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("jsm"); #define JSM_DRIVER_NAME "jsm" #define NR_PORTS 32 diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index 9795b2e8b0b2..1b61d26bb7af 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -1056,9 +1056,9 @@ static int max310x_startup(struct uart_port *port) max310x_port_update(port, MAX310X_MODE1_REG, MAX310X_MODE1_TRNSCVCTRL_BIT, 0); - /* Reset FIFOs */ - max310x_port_write(port, MAX310X_MODE2_REG, - MAX310X_MODE2_FIFORST_BIT); + /* Configure MODE2 register & Reset FIFOs*/ + val = MAX310X_MODE2_RXEMPTINV_BIT | MAX310X_MODE2_FIFORST_BIT; + max310x_port_write(port, MAX310X_MODE2_REG, val); max310x_port_update(port, MAX310X_MODE2_REG, MAX310X_MODE2_FIFORST_BIT, 0); @@ -1086,27 +1086,8 @@ static int max310x_startup(struct uart_port *port) /* Clear IRQ status register */ max310x_port_read(port, MAX310X_IRQSTS_REG); - /* - * Let's ask for an interrupt after a timeout equivalent to - * the receiving time of 4 characters after the last character - * has been received. - */ - max310x_port_write(port, MAX310X_RXTO_REG, 4); - - /* - * Make sure we also get RX interrupts when the RX FIFO is - * filling up quickly, so get an interrupt when half of the RX - * FIFO has been filled in. - */ - max310x_port_write(port, MAX310X_FIFOTRIGLVL_REG, - MAX310X_FIFOTRIGLVL_RX(MAX310X_FIFO_SIZE / 2)); - - /* Enable RX timeout interrupt in LSR */ - max310x_port_write(port, MAX310X_LSR_IRQEN_REG, - MAX310X_LSR_RXTO_BIT); - - /* Enable LSR, RX FIFO trigger, CTS change interrupts */ - val = MAX310X_IRQ_LSR_BIT | MAX310X_IRQ_RXFIFO_BIT | MAX310X_IRQ_TXEMPTY_BIT; + /* Enable RX, TX, CTS change interrupts */ + val = MAX310X_IRQ_RXEMPTY_BIT | MAX310X_IRQ_TXEMPTY_BIT; max310x_port_write(port, MAX310X_IRQEN_REG, val | MAX310X_IRQ_CTS_BIT); return 0; diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 74733ec8f565..391bada4cedb 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -544,9 +544,7 @@ EXPORT_SYMBOL_GPL(tty_wakeup); * @tty: tty device * * This is available to the pty code so if the master closes, if the - * slave is a redirect it can release the redirect. 
It returns the - * filp for the redirect, which must be fput when the operations on - * the tty are completed. + * slave is a redirect it can release the redirect. */ struct file *tty_release_redirect(struct tty_struct *tty) { @@ -561,6 +559,7 @@ struct file *tty_release_redirect(struct tty_struct *tty) return f; } +EXPORT_SYMBOL_GPL(tty_release_redirect); /** * __tty_hangup - actual handler for hangup events diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index f9170d177a89..5f0513c96c04 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -2197,7 +2197,10 @@ static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev, * inverted in the first TDs isoc TRB. */ field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) | - start_cycle ? 0 : 1 | TRB_SIA | TRB_TBC(burst_count); + TRB_SIA | TRB_TBC(burst_count); + + if (!start_cycle) + field |= TRB_CYCLE; /* Fill the rest of the TRB fields, and remaining normal TRBs. */ for (i = 0; i < trbs_per_td; i++) { diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 37f824b59daa..39ddb5585ded 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1935,6 +1935,11 @@ static const struct usb_device_id acm_ids[] = { .driver_info = SEND_ZERO_PACKET, }, + /* Exclude Goodix Fingerprint Reader */ + { USB_DEVICE(0x27c6, 0x5395), + .driver_info = IGNORE_DEVICE, + }, + /* control interfaces without any protocol set */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_PROTO_NONE) }, diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index c9f6e9758288..f27b4aecff3d 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -494,16 +494,24 @@ static int usblp_release(struct inode *inode, struct file *file) /* No kernel lock - fine */ static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait) { - __poll_t ret; + struct usblp *usblp = file->private_data; + __poll_t ret = 0; unsigned long flags; - struct usblp *usblp = file->private_data; /* Should we check file->f_mode & FMODE_WRITE before poll_wait()? */ poll_wait(file, &usblp->rwait, wait); poll_wait(file, &usblp->wwait, wait); + + mutex_lock(&usblp->mut); + if (!usblp->present) + ret |= EPOLLHUP; + mutex_unlock(&usblp->mut); + spin_lock_irqsave(&usblp->lock, flags); - ret = ((usblp->bidir && usblp->rcomplete) ? EPOLLIN | EPOLLRDNORM : 0) | - ((usblp->no_paper || usblp->wcomplete) ? EPOLLOUT | EPOLLWRNORM : 0); + if (usblp->bidir && usblp->rcomplete) + ret |= EPOLLIN | EPOLLRDNORM; + if (usblp->no_paper || usblp->wcomplete) + ret |= EPOLLOUT | EPOLLWRNORM; spin_unlock_irqrestore(&usblp->lock, flags); return ret; } diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 8f07b0516100..a566bb494e24 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -748,6 +748,38 @@ void usb_put_intf(struct usb_interface *intf) } EXPORT_SYMBOL_GPL(usb_put_intf); +/** + * usb_intf_get_dma_device - acquire a reference on the usb interface's DMA endpoint + * @intf: the usb interface + * + * While a USB device cannot perform DMA operations by itself, many USB + * controllers can. A call to usb_intf_get_dma_device() returns the DMA endpoint + * for the given USB interface, if any. The returned device structure must be + * released with put_device(). + * + * See also usb_get_dma_device(). + * + * Returns: A reference to the usb interface's DMA endpoint; or NULL if none + * exists. 
+ */ +struct device *usb_intf_get_dma_device(struct usb_interface *intf) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct device *dmadev; + + if (!udev->bus) + return NULL; + + dmadev = get_device(udev->bus->sysdev); + if (!dmadev || !dmadev->dma_mask) { + put_device(dmadev); + return NULL; + } + + return dmadev; +} +EXPORT_SYMBOL_GPL(usb_intf_get_dma_device); + /* USB device locking * * USB devices and interfaces are locked using the semaphore in their diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 846a47be6df7..fcaf04483ad0 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -358,8 +358,10 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom) if (ret) dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret); + if (device_may_wakeup(qcom->dev)) + dwc3_qcom_enable_interrupts(qcom); + qcom->is_suspended = true; - dwc3_qcom_enable_interrupts(qcom); return 0; } @@ -372,7 +374,8 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom) if (!qcom->is_suspended) return 0; - dwc3_qcom_disable_interrupts(qcom); + if (device_may_wakeup(qcom->dev)) + dwc3_qcom_disable_interrupts(qcom); for (i = 0; i < qcom->num_clocks; i++) { ret = clk_prepare_enable(qcom->clks[i]); @@ -650,16 +653,19 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev) ret = of_platform_populate(np, NULL, NULL, dev); if (ret) { dev_err(dev, "failed to register dwc3 core - %d\n", ret); - return ret; + goto node_put; } qcom->dwc3 = of_find_device_by_node(dwc3_np); if (!qcom->dwc3) { + ret = -ENODEV; dev_err(dev, "failed to get dwc3 platform device\n"); - return -ENODEV; } - return 0; +node_put: + of_node_put(dwc3_np); + + return ret; } static struct platform_device * @@ -938,6 +944,8 @@ static const struct dwc3_acpi_pdata sdm845_acpi_urs_pdata = { static const struct acpi_device_id dwc3_qcom_acpi_match[] = { { "QCOM2430", (unsigned long)&sdm845_acpi_pdata }, { "QCOM0304", (unsigned long)&sdm845_acpi_urs_pdata }, + { "QCOM0497", (unsigned long)&sdm845_acpi_urs_pdata }, + { "QCOM04A6", (unsigned long)&sdm845_acpi_pdata }, { }, }; MODULE_DEVICE_TABLE(acpi, dwc3_qcom_acpi_match); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index aebcf8ec0716..4a337f348651 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -783,8 +783,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) trace_dwc3_gadget_ep_disable(dep); - dwc3_remove_requests(dwc, dep); - /* make sure HW endpoint isn't stalled */ if (dep->flags & DWC3_EP_STALL) __dwc3_gadget_ep_set_halt(dep, 0, false); @@ -803,6 +801,8 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) dep->endpoint.desc = NULL; } + dwc3_remove_requests(dwc, dep); + return 0; } @@ -1617,7 +1617,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) { struct dwc3 *dwc = dep->dwc; - if (!dep->endpoint.desc || !dwc->pullups_connected) { + if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) { dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", dep->name); return -ESHUTDOWN; @@ -2247,6 +2247,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) if (!is_on) { u32 count; + dwc->connected = false; /* * In the Synopsis DesignWare Cores USB3 Databook Rev. 
3.30a * Section 4.1.8 Table 4-7, it states that for a device-initiated @@ -2271,7 +2272,6 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) % dwc->ev_buf->length; } - dwc->connected = false; } else { __dwc3_gadget_start(dwc); } @@ -3321,8 +3321,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) { u32 reg; - dwc->connected = true; - /* * WORKAROUND: DWC3 revisions <1.88a have an issue which * would cause a missing Disconnect Event if there's a @@ -3362,6 +3360,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) * transfers." */ dwc3_stop_active_transfers(dwc); + dwc->connected = true; reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg &= ~DWC3_DCTL_TSTCTRL_MASK; diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 0d56f33d63c2..15a607ccef8a 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -97,6 +97,8 @@ struct gadget_config_name { struct list_head list; }; +#define USB_MAX_STRING_WITH_NULL_LEN (USB_MAX_STRING_LEN+1) + static int usb_string_copy(const char *s, char **s_copy) { int ret; @@ -106,12 +108,16 @@ static int usb_string_copy(const char *s, char **s_copy) if (ret > USB_MAX_STRING_LEN) return -EOVERFLOW; - str = kstrdup(s, GFP_KERNEL); - if (!str) - return -ENOMEM; + if (copy) { + str = copy; + } else { + str = kmalloc(USB_MAX_STRING_WITH_NULL_LEN, GFP_KERNEL); + if (!str) + return -ENOMEM; + } + strcpy(str, s); if (str[ret - 1] == '\n') str[ret - 1] = '\0'; - kfree(copy); *s_copy = str; return 0; } diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c index 00d346965f7a..560382e0a8f3 100644 --- a/drivers/usb/gadget/function/f_uac1.c +++ b/drivers/usb/gadget/function/f_uac1.c @@ -499,6 +499,7 @@ static void f_audio_disable(struct usb_function *f) uac1->as_out_alt = 0; uac1->as_in_alt = 0; + u_audio_stop_playback(&uac1->g_audio); u_audio_stop_capture(&uac1->g_audio); } diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 5d960b6603b6..6f03e944e0e3 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -478,7 +478,7 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, } max_size_bw = num_channels(chmask) * ssize * - DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))); + ((srate / (factor / (1 << (ep_desc->bInterval - 1)))) + 1); ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw, max_size_ep)); diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h index 3dfb460908fa..f558c3139ebe 100644 --- a/drivers/usb/gadget/function/u_ether_configfs.h +++ b/drivers/usb/gadget/function/u_ether_configfs.h @@ -182,12 +182,11 @@ out: \ size_t len) \ { \ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ - int ret; \ + int ret = -EINVAL; \ u8 val; \ \ mutex_lock(&opts->lock); \ - ret = sscanf(page, "%02hhx", &val); \ - if (ret > 0) { \ + if (sscanf(page, "%02hhx", &val) > 0) { \ opts->_n_ = val; \ ret = len; \ } \ diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c index f1ea51476add..1d3ebb07ccd4 100644 --- a/drivers/usb/gadget/udc/s3c2410_udc.c +++ b/drivers/usb/gadget/udc/s3c2410_udc.c @@ -1773,8 +1773,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev) udc_info = dev_get_platdata(&pdev->dev); base_addr = devm_platform_ioremap_resource(pdev, 0); - if (!base_addr) { - retval = -ENOMEM; + if 
(IS_ERR(base_addr)) { + retval = PTR_ERR(base_addr); goto err_mem; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 84da8406d5b4..5bbccc9a0179 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -66,6 +66,7 @@ #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 +#define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242 static const char hcd_name[] = "xhci_hcd"; @@ -276,11 +277,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) xhci->quirks |= XHCI_BROKEN_STREAMS; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) { xhci->quirks |= XHCI_TRUST_TX_LENGTH; + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; + } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && (pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI || - pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI)) + pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI || + pdev->device == PCI_DEVICE_ID_ASMEDIA_3242_XHCI)) xhci->quirks |= XHCI_NO_64BIT_SUPPORT; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && @@ -295,6 +299,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x9026) xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT; + if (pdev->vendor == PCI_VENDOR_ID_AMD && + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2 || + pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4)) + xhci->quirks |= XHCI_NO_SOFT_RETRY; + if (xhci->quirks & XHCI_RESET_ON_RESUME) xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "QUIRK: Resetting on resume"); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 5e548a1c93ab..ce38076901e2 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2484,7 +2484,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, remaining = 0; break; case COMP_USB_TRANSACTION_ERROR: - if ((ep_ring->err_count++ > MAX_SOFT_RETRY) || + if (xhci->quirks & XHCI_NO_SOFT_RETRY || + (ep_ring->err_count++ > MAX_SOFT_RETRY) || le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) break; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index bd27bd670104..1975016f46bf 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -883,44 +883,42 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) xhci_set_cmd_ring_deq(xhci); } -static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) +/* + * Disable port wake bits if do_wakeup is not set. + * + * Also clear a possible internal port wake state left hanging for ports that + * detected termination but never successfully enumerated (trained to 0U). + * Internal wake causes immediate xHCI wake after suspend. 
PORT_CSC write done + * at enumeration clears this wake, force one here as well for unconnected ports + */ + +static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci, + struct xhci_hub *rhub, + bool do_wakeup) { - struct xhci_port **ports; - int port_index; unsigned long flags; u32 t1, t2, portsc; + int i; spin_lock_irqsave(&xhci->lock, flags); - /* disable usb3 ports Wake bits */ - port_index = xhci->usb3_rhub.num_ports; - ports = xhci->usb3_rhub.ports; - while (port_index--) { - t1 = readl(ports[port_index]->addr); - portsc = t1; - t1 = xhci_port_state_to_neutral(t1); - t2 = t1 & ~PORT_WAKE_BITS; - if (t1 != t2) { - writel(t2, ports[port_index]->addr); - xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n", - xhci->usb3_rhub.hcd->self.busnum, - port_index + 1, portsc, t2); - } - } + for (i = 0; i < rhub->num_ports; i++) { + portsc = readl(rhub->ports[i]->addr); + t1 = xhci_port_state_to_neutral(portsc); + t2 = t1; + + /* clear wake bits if do_wake is not set */ + if (!do_wakeup) + t2 &= ~PORT_WAKE_BITS; + + /* Don't touch csc bit if connected or connect change is set */ + if (!(portsc & (PORT_CSC | PORT_CONNECT))) + t2 |= PORT_CSC; - /* disable usb2 ports Wake bits */ - port_index = xhci->usb2_rhub.num_ports; - ports = xhci->usb2_rhub.ports; - while (port_index--) { - t1 = readl(ports[port_index]->addr); - portsc = t1; - t1 = xhci_port_state_to_neutral(t1); - t2 = t1 & ~PORT_WAKE_BITS; if (t1 != t2) { - writel(t2, ports[port_index]->addr); - xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n", - xhci->usb2_rhub.hcd->self.busnum, - port_index + 1, portsc, t2); + writel(t2, rhub->ports[i]->addr); + xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n", + rhub->hcd->self.busnum, i + 1, portsc, t2); } } spin_unlock_irqrestore(&xhci->lock, flags); @@ -983,8 +981,8 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) return -EINVAL; /* Clear root port wake on bits if wakeup not allowed. */ - if (!do_wakeup) - xhci_disable_port_wake_on_bits(xhci); + xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup); + xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup); if (!HCD_HW_ACCESSIBLE(hcd)) return 0; @@ -1088,6 +1086,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) struct usb_hcd *secondary_hcd; int retval = 0; bool comp_timer_running = false; + bool pending_portevent = false; if (!hcd->state) return 0; @@ -1226,13 +1225,22 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) done: if (retval == 0) { - /* Resume root hubs only when have pending events. */ - if (xhci_pending_portevent(xhci)) { + /* + * Resume roothubs only if there are pending events. + * USB 3 devices resend U3 LFPS wake after a 100ms delay if + * the first wake signalling failed, give it that chance. + */ + pending_portevent = xhci_pending_portevent(xhci); + if (!pending_portevent) { + msleep(120); + pending_portevent = xhci_pending_portevent(xhci); + } + + if (pending_portevent) { usb_hcd_resume_root_hub(xhci->shared_hcd); usb_hcd_resume_root_hub(hcd); } } - /* * If system is subject to the Quirk, Compliance Mode Timer needs to * be re-initialized Always after a system resume. 
Ports are subject diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index d41de5dc0452..ca822ad3b65b 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1891,6 +1891,7 @@ struct xhci_hcd { #define XHCI_SKIP_PHY_INIT BIT_ULL(37) #define XHCI_DISABLE_SPARSE BIT_ULL(38) #define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39) +#define XHCI_NO_SOFT_RETRY BIT_ULL(40) unsigned int num_active_eps; unsigned int limit_active_eps; diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 670e4d91e9ca..dcc88df72df4 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -117,7 +117,6 @@ MODULE_DEVICE_TABLE(usb, ld_usb_table); MODULE_AUTHOR("Michael Hund <mhund@ld-didactic.de>"); MODULE_DESCRIPTION("LD USB Driver"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("LD USB Devices"); /* All interrupt in transfers are collected in a ring buffer to * avoid racing conditions and get better performance of the driver. diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c index e7334b7fb3a6..75fff2e4cbc6 100644 --- a/drivers/usb/renesas_usbhs/pipe.c +++ b/drivers/usb/renesas_usbhs/pipe.c @@ -746,6 +746,8 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv, void usbhs_pipe_free(struct usbhs_pipe *pipe) { + usbhsp_pipe_select(pipe); + usbhsp_pipe_cfg_set(pipe, 0xFFFF, 0); usbhsp_put_pipe(pipe); } diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 8d997b71056f..2db917eab799 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, { USB_DEVICE(0x4348, 0x5523) }, + { USB_DEVICE(0x9986, 0x7523) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 9e1c609792fb..a373cd63b3a4 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -145,6 +145,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88D8) }, /* Acuity Brands nLight Air Adapter */ { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */ { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ @@ -201,6 +202,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ + { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */ + { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index a493670c06e6..68401adcffde 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -3003,26 +3003,32 @@ static int edge_startup(struct usb_serial *serial) response = -ENODEV; } - usb_free_urb(edge_serial->interrupt_read_urb); - 
kfree(edge_serial->interrupt_in_buffer); - - usb_free_urb(edge_serial->read_urb); - kfree(edge_serial->bulk_in_buffer); - - kfree(edge_serial); - - return response; + goto error; } /* start interrupt read for this edgeport this interrupt will * continue as long as the edgeport is connected */ response = usb_submit_urb(edge_serial->interrupt_read_urb, GFP_KERNEL); - if (response) + if (response) { dev_err(ddev, "%s - Error %d submitting control urb\n", __func__, response); + + goto error; + } } return response; + +error: + usb_free_urb(edge_serial->interrupt_read_urb); + kfree(edge_serial->interrupt_in_buffer); + + usb_free_urb(edge_serial->read_urb); + kfree(edge_serial->bulk_in_buffer); + + kfree(edge_serial); + + return response; } diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c index 483d07dee19d..0ca04906da4b 100644 --- a/drivers/usb/serial/xr_serial.c +++ b/drivers/usb/serial/xr_serial.c @@ -545,37 +545,13 @@ static void xr_close(struct usb_serial_port *port) static int xr_probe(struct usb_serial *serial, const struct usb_device_id *id) { - struct usb_driver *driver = serial->type->usb_driver; - struct usb_interface *control_interface; - int ret; - /* Don't bind to control interface */ if (serial->interface->cur_altsetting->desc.bInterfaceNumber == 0) return -ENODEV; - /* But claim the control interface during data interface probe */ - control_interface = usb_ifnum_to_if(serial->dev, 0); - if (!control_interface) - return -ENODEV; - - ret = usb_driver_claim_interface(driver, control_interface, NULL); - if (ret) { - dev_err(&serial->interface->dev, "Failed to claim control interface\n"); - return ret; - } - return 0; } -static void xr_disconnect(struct usb_serial *serial) -{ - struct usb_driver *driver = serial->type->usb_driver; - struct usb_interface *control_interface; - - control_interface = usb_ifnum_to_if(serial->dev, 0); - usb_driver_release_interface(driver, control_interface); -} - static const struct usb_device_id id_table[] = { { USB_DEVICE(0x04e2, 0x1410) }, /* XR21V141X */ { } @@ -590,7 +566,6 @@ static struct usb_serial_driver xr_device = { .id_table = id_table, .num_ports = 1, .probe = xr_probe, - .disconnect = xr_disconnect, .open = xr_open, .close = xr_close, .break_ctl = xr_break_ctl, diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index 5eb895b19c55..f4304ce69350 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -656,6 +656,13 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) need_auto_sense = 1; } + /* Some devices (Kindle) require another command after SYNC CACHE */ + if ((us->fflags & US_FL_SENSE_AFTER_SYNC) && + srb->cmnd[0] == SYNCHRONIZE_CACHE) { + usb_stor_dbg(us, "-- sense after SYNC CACHE\n"); + need_auto_sense = 1; + } + /* * If we have a failure, we're going to do a REQUEST_SENSE * automatically. Note that we differentiate between a command diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 5732e9691f08..efa972be2ee3 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2212,6 +2212,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200, US_FL_NO_READ_DISC_INFO ), /* + * Reported by Matthias Schwarzott <zzam@gentoo.org> + * The Amazon Kindle treats SYNCHRONIZE CACHE as an indication that + * the host may be finished with it, and automatically ejects its + * emulated media unless it receives another command within one second. 
+ */ +UNUSUAL_DEV( 0x1949, 0x0004, 0x0000, 0x9999, + "Amazon", + "Kindle", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_SENSE_AFTER_SYNC ), + +/* * Reported by Oliver Neukum <oneukum@suse.com> * This device morphes spontaneously into another device if the access * pattern of Windows isn't followed. Thus writable media would be dirty diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index be0b6469dd3d..ce7af398c7c1 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -942,6 +942,7 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv) port->supply_voltage = mv; port->current_limit = max_ma; + power_supply_changed(port->psy); if (port->tcpc->set_current_limit) ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv); @@ -2928,6 +2929,7 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo, port->pps_data.supported = false; port->usb_type = POWER_SUPPLY_USB_TYPE_PD; + power_supply_changed(port->psy); /* * Select the source PDO providing the most power which has a @@ -2952,6 +2954,7 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo, port->pps_data.supported = true; port->usb_type = POWER_SUPPLY_USB_TYPE_PD_PPS; + power_supply_changed(port->psy); } continue; default: @@ -3109,6 +3112,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port) port->pps_data.out_volt)); port->pps_data.op_curr = min(port->pps_data.max_curr, port->pps_data.op_curr); + power_supply_changed(port->psy); } return src_pdo; @@ -3344,6 +3348,7 @@ static int tcpm_set_charge(struct tcpm_port *port, bool charge) return ret; } port->vbus_charge = charge; + power_supply_changed(port->psy); return 0; } @@ -3523,6 +3528,7 @@ static void tcpm_reset_port(struct tcpm_port *port) port->try_src_count = 0; port->try_snk_count = 0; port->usb_type = POWER_SUPPLY_USB_TYPE_C; + power_supply_changed(port->psy); port->nr_sink_caps = 0; port->sink_cap_done = false; if (port->tcpc->enable_frs) @@ -5167,7 +5173,7 @@ static void tcpm_enable_frs_work(struct kthread_work *work) goto unlock; /* Send when the state machine is idle */ - if (port->state != SNK_READY || port->vdm_state != VDM_STATE_DONE || port->send_discover) + if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover) goto resched; port->upcoming_state = GET_SINK_CAP; @@ -5905,7 +5911,7 @@ static int tcpm_psy_set_prop(struct power_supply *psy, ret = -EINVAL; break; } - + power_supply_changed(port->psy); return ret; } @@ -6058,6 +6064,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) err = devm_tcpm_psy_register(port); if (err) goto out_role_sw_put; + power_supply_changed(port->psy); port->typec_port = typec_register_port(port->dev, &port->typec_caps); if (IS_ERR(port->typec_port)) { diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c index 6e6ef6317523..29bd1c5a283c 100644 --- a/drivers/usb/typec/tps6598x.c +++ b/drivers/usb/typec/tps6598x.c @@ -64,7 +64,6 @@ enum { struct tps6598x_rx_identity_reg { u8 status; struct usb_pd_identity identity; - u32 vdo[3]; } __packed; /* Standard Task return codes */ diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 2305d425e6c9..8f1de1fbbeed 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -46,6 +46,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a int sockfd = 0; struct socket *socket; int rv; + struct task_struct *tcp_rx = NULL; + struct 
task_struct *tcp_tx = NULL; if (!sdev) { dev_err(dev, "sdev is null\n"); @@ -69,23 +71,47 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a } socket = sockfd_lookup(sockfd, &err); - if (!socket) + if (!socket) { + dev_err(dev, "failed to lookup sock"); goto err; + } - sdev->ud.tcp_socket = socket; - sdev->ud.sockfd = sockfd; + if (socket->type != SOCK_STREAM) { + dev_err(dev, "Expecting SOCK_STREAM - found %d", + socket->type); + goto sock_err; + } + /* unlock and create threads and get tasks */ spin_unlock_irq(&sdev->ud.lock); + tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx"); + if (IS_ERR(tcp_rx)) { + sockfd_put(socket); + return -EINVAL; + } + tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx"); + if (IS_ERR(tcp_tx)) { + kthread_stop(tcp_rx); + sockfd_put(socket); + return -EINVAL; + } - sdev->ud.tcp_rx = kthread_get_run(stub_rx_loop, &sdev->ud, - "stub_rx"); - sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud, - "stub_tx"); + /* get task structs now */ + get_task_struct(tcp_rx); + get_task_struct(tcp_tx); + /* lock and update sdev->ud state */ spin_lock_irq(&sdev->ud.lock); + sdev->ud.tcp_socket = socket; + sdev->ud.sockfd = sockfd; + sdev->ud.tcp_rx = tcp_rx; + sdev->ud.tcp_tx = tcp_tx; sdev->ud.status = SDEV_ST_USED; spin_unlock_irq(&sdev->ud.lock); + wake_up_process(sdev->ud.tcp_rx); + wake_up_process(sdev->ud.tcp_tx); + } else { dev_info(dev, "stub down\n"); @@ -100,6 +126,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a return count; +sock_err: + sockfd_put(socket); err: spin_unlock_irq(&sdev->ud.lock); return -EINVAL; diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index 96e5371dc335..c4b4256e5dad 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -312,6 +312,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, struct vhci *vhci; int err; unsigned long flags; + struct task_struct *tcp_rx = NULL; + struct task_struct *tcp_tx = NULL; /* * @rhport: port number of vhci_hcd @@ -349,12 +351,35 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, /* Extract socket from fd. 
*/ socket = sockfd_lookup(sockfd, &err); - if (!socket) + if (!socket) { + dev_err(dev, "failed to lookup sock"); return -EINVAL; + } + if (socket->type != SOCK_STREAM) { + dev_err(dev, "Expecting SOCK_STREAM - found %d", + socket->type); + sockfd_put(socket); + return -EINVAL; + } + + /* create threads before locking */ + tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx"); + if (IS_ERR(tcp_rx)) { + sockfd_put(socket); + return -EINVAL; + } + tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx"); + if (IS_ERR(tcp_tx)) { + kthread_stop(tcp_rx); + sockfd_put(socket); + return -EINVAL; + } - /* now need lock until setting vdev status as used */ + /* get task structs now */ + get_task_struct(tcp_rx); + get_task_struct(tcp_tx); - /* begin a lock */ + /* now begin lock until setting vdev status set */ spin_lock_irqsave(&vhci->lock, flags); spin_lock(&vdev->ud.lock); @@ -364,6 +389,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, spin_unlock_irqrestore(&vhci->lock, flags); sockfd_put(socket); + kthread_stop_put(tcp_rx); + kthread_stop_put(tcp_tx); dev_err(dev, "port %d already used\n", rhport); /* @@ -382,6 +409,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, vdev->speed = speed; vdev->ud.sockfd = sockfd; vdev->ud.tcp_socket = socket; + vdev->ud.tcp_rx = tcp_rx; + vdev->ud.tcp_tx = tcp_tx; vdev->ud.status = VDEV_ST_NOTASSIGNED; usbip_kcov_handle_init(&vdev->ud); @@ -389,8 +418,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, spin_unlock_irqrestore(&vhci->lock, flags); /* end the lock */ - vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx"); - vdev->ud.tcp_tx = kthread_get_run(vhci_tx_loop, &vdev->ud, "vhci_tx"); + wake_up_process(vdev->ud.tcp_rx); + wake_up_process(vdev->ud.tcp_tx); rh_port_connect(vdev, speed); diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c index 100f680c572a..7383a543c6d1 100644 --- a/drivers/usb/usbip/vudc_sysfs.c +++ b/drivers/usb/usbip/vudc_sysfs.c @@ -90,8 +90,9 @@ unlock: } static BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor)); -static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *attr, - const char *in, size_t count) +static ssize_t usbip_sockfd_store(struct device *dev, + struct device_attribute *attr, + const char *in, size_t count) { struct vudc *udc = (struct vudc *) dev_get_drvdata(dev); int rv; @@ -100,6 +101,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a struct socket *socket; unsigned long flags; int ret; + struct task_struct *tcp_rx = NULL; + struct task_struct *tcp_tx = NULL; rv = kstrtoint(in, 0, &sockfd); if (rv != 0) @@ -138,24 +141,54 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a goto unlock_ud; } - udc->ud.tcp_socket = socket; + if (socket->type != SOCK_STREAM) { + dev_err(dev, "Expecting SOCK_STREAM - found %d", + socket->type); + ret = -EINVAL; + goto sock_err; + } + /* unlock and create threads and get tasks */ spin_unlock_irq(&udc->ud.lock); spin_unlock_irqrestore(&udc->lock, flags); - udc->ud.tcp_rx = kthread_get_run(&v_rx_loop, - &udc->ud, "vudc_rx"); - udc->ud.tcp_tx = kthread_get_run(&v_tx_loop, - &udc->ud, "vudc_tx"); + tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx"); + if (IS_ERR(tcp_rx)) { + sockfd_put(socket); + return -EINVAL; + } + tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx"); + if (IS_ERR(tcp_tx)) { + kthread_stop(tcp_rx); + 
sockfd_put(socket); + return -EINVAL; + } + + /* get task structs now */ + get_task_struct(tcp_rx); + get_task_struct(tcp_tx); + /* lock and update udc->ud state */ spin_lock_irqsave(&udc->lock, flags); spin_lock_irq(&udc->ud.lock); + + udc->ud.tcp_socket = socket; + udc->ud.tcp_rx = tcp_rx; + udc->ud.tcp_tx = tcp_tx; udc->ud.status = SDEV_ST_USED; + spin_unlock_irq(&udc->ud.lock); ktime_get_ts64(&udc->start_time); v_start_timer(udc); udc->connected = 1; + + spin_unlock_irqrestore(&udc->lock, flags); + + wake_up_process(udc->ud.tcp_rx); + wake_up_process(udc->ud.tcp_tx); + return count; + } else { if (!udc->connected) { dev_err(dev, "Device not connected"); @@ -177,6 +210,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a return count; +sock_err: + sockfd_put(socket); unlock_ud: spin_unlock_irq(&udc->ud.lock); unlock: diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index 7c8bbfcf6c3e..d555a6a5d1ba 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -431,8 +431,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) } adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, - dev, &ifc_vdpa_ops, - IFCVF_MAX_QUEUE_PAIRS * 2, NULL); + dev, &ifc_vdpa_ops, NULL); if (adapter == NULL) { IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); return -ENOMEM; @@ -456,7 +455,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) vf->vring[i].irq = -EINVAL; - ret = vdpa_register_device(&adapter->vdpa); + ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2); if (ret) { IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus"); goto err; diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 10e9b09932eb..71397fdafa6a 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1982,7 +1982,7 @@ static int mlx5v_probe(struct auxiliary_device *adev, max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS); ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, - 2 * mlx5_vdpa_max_qps(max_vqs), NULL); + NULL); if (IS_ERR(ndev)) return PTR_ERR(ndev); @@ -2009,7 +2009,7 @@ static int mlx5v_probe(struct auxiliary_device *adev, if (err) goto err_res; - err = vdpa_register_device(&mvdev->vdev); + err = vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs)); if (err) goto err_reg; diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index da67f07e24fd..5cffce67cab0 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -69,7 +69,6 @@ static void vdpa_release_dev(struct device *d) * initialized but before registered. * @parent: the parent device * @config: the bus operations that is supported by this device - * @nvqs: number of virtqueues supported by this device * @size: size of the parent structure that contains private data * @name: name of the vdpa device; optional. 
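
Stepping back to the usbip hunks above (stub_dev, vhci_sysfs and vudc_sysfs): all three converge on one attach shape. Validate the socket type first, create the rx/tx kthreads stopped and outside any lock, pin their task_structs, publish socket, threads and status in a single locked section, and only then wake the threads, so a concurrent detach never sees half-initialized state. A hedged sketch of that shape; sketch_dev, rx_loop_stub and tx_loop_stub are invented, while the kthread, task and spinlock calls are real kernel API:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/spinlock.h>

/* Hypothetical device state; stands in for struct usbip_device. */
struct sketch_dev {
	spinlock_t lock;
	int status;
	struct socket *tcp_socket;
	struct task_struct *tcp_rx, *tcp_tx;
};

static int rx_loop_stub(void *data) { return 0; }	/* placeholder loop */
static int tx_loop_stub(void *data) { return 0; }	/* placeholder loop */

static int attach_sketch(struct sketch_dev *ud, struct socket *socket)
{
	struct task_struct *rx, *tx;

	/* Reject anything that is not a stream socket up front. */
	if (socket->type != SOCK_STREAM)
		return -EINVAL;

	/* Create both threads stopped, outside any lock, so a failure
	 * here leaves the device state untouched.
	 */
	rx = kthread_create(rx_loop_stub, ud, "sketch_rx");
	if (IS_ERR(rx))
		return PTR_ERR(rx);

	tx = kthread_create(tx_loop_stub, ud, "sketch_tx");
	if (IS_ERR(tx)) {
		kthread_stop(rx);
		return PTR_ERR(tx);
	}

	/* Pin the task_structs before publishing them. */
	get_task_struct(rx);
	get_task_struct(tx);

	/* Publish socket, threads and status atomically ... */
	spin_lock_irq(&ud->lock);
	ud->tcp_socket = socket;
	ud->tcp_rx = rx;
	ud->tcp_tx = tx;
	ud->status = 1;		/* e.g. SDEV_ST_USED */
	spin_unlock_irq(&ud->lock);

	/* ... and only then let the threads run. */
	wake_up_process(rx);
	wake_up_process(tx);
	return 0;
}
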
* @@ -81,7 +80,7 @@ static void vdpa_release_dev(struct device *d) */ struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, - int nvqs, size_t size, const char *name) + size_t size, const char *name) { struct vdpa_device *vdev; int err = -EINVAL; @@ -107,7 +106,6 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, vdev->index = err; vdev->config = config; vdev->features_valid = false; - vdev->nvqs = nvqs; if (name) err = dev_set_name(&vdev->dev, "%s", name); @@ -136,10 +134,12 @@ static int vdpa_name_match(struct device *dev, const void *data) return (strcmp(dev_name(&vdev->dev), data) == 0); } -static int __vdpa_register_device(struct vdpa_device *vdev) +static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs) { struct device *dev; + vdev->nvqs = nvqs; + lockdep_assert_held(&vdpa_dev_mutex); dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match); if (dev) { @@ -155,15 +155,16 @@ static int __vdpa_register_device(struct vdpa_device *vdev) * Caller must invoke this routine in the management device dev_add() * callback after setting up valid mgmtdev for this vdpa device. * @vdev: the vdpa device to be registered to vDPA bus + * @nvqs: number of virtqueues supported by this device * * Returns an error when fail to add device to vDPA bus */ -int _vdpa_register_device(struct vdpa_device *vdev) +int _vdpa_register_device(struct vdpa_device *vdev, int nvqs) { if (!vdev->mdev) return -EINVAL; - return __vdpa_register_device(vdev); + return __vdpa_register_device(vdev, nvqs); } EXPORT_SYMBOL_GPL(_vdpa_register_device); @@ -171,15 +172,16 @@ EXPORT_SYMBOL_GPL(_vdpa_register_device); * vdpa_register_device - register a vDPA device * Callers must have a succeed call of vdpa_alloc_device() before. 
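
For drivers the API change documented here is mechanical: the queue count moves from allocation time to registration time. A hypothetical probe sketch; sketch_adapter, sketch_vdpa_ops and SKETCH_NUM_VQS are invented, and the error-path put_device() follows the usual vdpa lifetime convention rather than anything shown in this diff:

#include <linux/err.h>
#include <linux/vdpa.h>

#define SKETCH_NUM_VQS	2	/* hypothetical queue count */

struct sketch_adapter {
	struct vdpa_device vdpa;	/* must be first for vdpa_alloc_device() */
};

static const struct vdpa_config_ops sketch_vdpa_ops;	/* ops elided */

static int sketch_probe(struct device *dev)
{
	struct sketch_adapter *adapter;
	int ret;

	/* nvqs is no longer passed here ... */
	adapter = vdpa_alloc_device(struct sketch_adapter, vdpa,
				    dev, &sketch_vdpa_ops, NULL);
	if (IS_ERR(adapter))
		return PTR_ERR(adapter);

	/* ... but at registration time instead. */
	ret = vdpa_register_device(&adapter->vdpa, SKETCH_NUM_VQS);
	if (ret)
		put_device(&adapter->vdpa.dev);

	return ret;
}
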
* @vdev: the vdpa device to be registered to vDPA bus + * @nvqs: number of virtqueues supported by this device * * Returns an error when fail to add to vDPA bus */ -int vdpa_register_device(struct vdpa_device *vdev) +int vdpa_register_device(struct vdpa_device *vdev, int nvqs) { int err; mutex_lock(&vdpa_dev_mutex); - err = __vdpa_register_device(vdev); + err = __vdpa_register_device(vdev, nvqs); mutex_unlock(&vdpa_dev_mutex); return err; } diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index d5942842432d..5b6b2f87d40c 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -235,7 +235,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr) ops = &vdpasim_config_ops; vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, - dev_attr->nvqs, dev_attr->name); + dev_attr->name); if (!vdpasim) goto err_alloc; diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c index d344c5b7c914..a1ab6163f7d1 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c @@ -110,8 +110,7 @@ out: static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config) { - struct virtio_net_config *net_config = - (struct virtio_net_config *)config; + struct virtio_net_config *net_config = config; net_config->mtu = cpu_to_vdpasim16(vdpasim, 1500); net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP); @@ -147,7 +146,7 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name) if (IS_ERR(simdev)) return PTR_ERR(simdev); - ret = _vdpa_register_device(&simdev->vdpa); + ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM); if (ret) goto reg_err; diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index 5533df91b257..67d0bf4efa16 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -21,8 +21,8 @@ config VFIO_VIRQFD menuconfig VFIO tristate "VFIO Non-Privileged userspace driver framework" - depends on IOMMU_API - select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM || ARM64) + select IOMMU_API + select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64) help VFIO provides a framework for secure userspace device drivers. See Documentation/driver-api/vfio.rst for more details. diff --git a/drivers/vfio/platform/Kconfig b/drivers/vfio/platform/Kconfig index dc1a3c44f2c6..ab341108a0be 100644 --- a/drivers/vfio/platform/Kconfig +++ b/drivers/vfio/platform/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config VFIO_PLATFORM tristate "VFIO support for platform devices" - depends on VFIO && EVENTFD && (ARM || ARM64) + depends on VFIO && EVENTFD && (ARM || ARM64 || COMPILE_TEST) select VFIO_VIRQFD help Support for platform devices with VFIO. This is required to make @@ -12,7 +12,7 @@ config VFIO_PLATFORM config VFIO_AMBA tristate "VFIO support for AMBA devices" - depends on VFIO_PLATFORM && ARM_AMBA + depends on VFIO_PLATFORM && (ARM_AMBA || COMPILE_TEST) help Support for ARM AMBA devices with VFIO. 
This is required to make use of ARM AMBA devices present on the system using the VFIO diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 4bb162c1d649..be444407664a 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -189,7 +189,7 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu, } static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu, - dma_addr_t start, size_t size) + dma_addr_t start, u64 size) { struct rb_node *res = NULL; struct rb_node *node = iommu->dma_list.rb_node; @@ -785,7 +785,12 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, return -ENODEV; ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages); - if (ret == 1 && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { + if (ret != 1) + goto out; + + ret = 0; + + if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { ret = vfio_lock_acct(dma, 1, true); if (ret) { put_pfn(*pfn_base, dma->prot); @@ -797,6 +802,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, } } +out: mmput(mm); return ret; } @@ -1288,7 +1294,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, int ret = -EINVAL, retries = 0; unsigned long pgshift; dma_addr_t iova = unmap->iova; - unsigned long size = unmap->size; + u64 size = unmap->size; bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL; bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR; struct rb_node *n, *first_n; @@ -1304,14 +1310,12 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, if (unmap_all) { if (iova || size) goto unlock; - size = SIZE_MAX; - } else if (!size || size & (pgsize - 1)) { + size = U64_MAX; + } else if (!size || size & (pgsize - 1) || + iova + size - 1 < iova || size > SIZE_MAX) { goto unlock; } - if (iova + size - 1 < iova || size > SIZE_MAX) - goto unlock; - /* When dirty tracking is enabled, allow only min supported pgsize */ if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) && (!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) { diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index ef688c8c0e0e..e0a27e336293 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -308,8 +308,10 @@ static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp) static void vhost_vdpa_config_put(struct vhost_vdpa *v) { - if (v->config_ctx) + if (v->config_ctx) { eventfd_ctx_put(v->config_ctx); + v->config_ctx = NULL; + } } static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp) @@ -329,8 +331,12 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp) if (!IS_ERR_OR_NULL(ctx)) eventfd_ctx_put(ctx); - if (IS_ERR(v->config_ctx)) - return PTR_ERR(v->config_ctx); + if (IS_ERR(v->config_ctx)) { + long ret = PTR_ERR(v->config_ctx); + + v->config_ctx = NULL; + return ret; + } v->vdpa->config->set_config_cb(v->vdpa, &cb); @@ -900,14 +906,10 @@ err: static void vhost_vdpa_clean_irq(struct vhost_vdpa *v) { - struct vhost_virtqueue *vq; int i; - for (i = 0; i < v->nvqs; i++) { - vq = &v->vqs[i]; - if (vq->call_ctx.producer.irq) - irq_bypass_unregister_producer(&vq->call_ctx.producer); - } + for (i = 0; i < v->nvqs; i++) + vhost_vdpa_unsetup_vq_irq(v, i); } static int vhost_vdpa_release(struct inode *inode, struct file *filep) diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index a262e12c6dc2..5ccb0705beae 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -332,8 +332,8 @@ static 
void vhost_vq_reset(struct vhost_dev *dev, vq->error_ctx = NULL; vq->kick = NULL; vq->log_ctx = NULL; - vhost_reset_is_le(vq); vhost_disable_cross_endian(vq); + vhost_reset_is_le(vq); vq->busyloop_timeout = 0; vq->umem = NULL; vq->iotlb = NULL; diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h index 551372f9b9aa..465f55beb97f 100644 --- a/drivers/video/fbdev/aty/atyfb.h +++ b/drivers/video/fbdev/aty/atyfb.h @@ -287,11 +287,8 @@ static inline void aty_st_8(int regindex, u8 val, const struct atyfb_par *par) #endif } -#if defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) || \ -defined (CONFIG_FB_ATY_BACKLIGHT) extern void aty_st_lcd(int index, u32 val, const struct atyfb_par *par); extern u32 aty_ld_lcd(int index, const struct atyfb_par *par); -#endif /* * DAC operations diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index e946903a86c2..1aef3d6ebd88 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -133,7 +133,7 @@ #define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args) #if defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_GENERIC_LCD) || \ -defined(CONFIG_FB_ATY_BACKLIGHT) +defined(CONFIG_FB_ATY_BACKLIGHT) || defined (CONFIG_PPC_PMAC) static const u32 lt_lcd_regs[] = { CNFG_PANEL_LG, LCD_GEN_CNTL_LG, @@ -175,8 +175,8 @@ u32 aty_ld_lcd(int index, const struct atyfb_par *par) return aty_ld_le32(LCD_DATA, par); } } -#else /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_BACKLIGHT) \ - defined(CONFIG_FB_ATY_GENERIC_LCD) */ +#else /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_BACKLIGHT) || + defined(CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_PPC_PMAC) */ void aty_st_lcd(int index, u32 val, const struct atyfb_par *par) { } @@ -184,7 +184,8 @@ u32 aty_ld_lcd(int index, const struct atyfb_par *par) { return 0; } -#endif /* defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) */ +#endif /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_BACKLIGHT) || + defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_PPC_PMAC) */ #ifdef CONFIG_FB_ATY_GENERIC_LCD /* diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c index 1f6b7c54a1a4..130e12b8652a 100644 --- a/drivers/virt/acrn/hsm.c +++ b/drivers/virt/acrn/hsm.c @@ -333,7 +333,7 @@ static long acrn_dev_ioctl(struct file *filp, unsigned int cmd, acrn_ioreq_request_clear(vm); break; case ACRN_IOCTL_PM_GET_CPU_STATE: - if (copy_from_user(&cstate_cmd, (void *)ioctl_param, + if (copy_from_user(&cstate_cmd, (void __user *)ioctl_param, sizeof(cstate_cmd))) return -EFAULT; @@ -404,6 +404,14 @@ fail_remove: } static DEVICE_ATTR_WO(remove_cpu); +static umode_t acrn_attr_visible(struct kobject *kobj, struct attribute *a, int n) +{ + if (a == &dev_attr_remove_cpu.attr) + return IS_ENABLED(CONFIG_HOTPLUG_CPU) ? 
a->mode : 0; + + return a->mode; +} + static struct attribute *acrn_attrs[] = { &dev_attr_remove_cpu.attr, NULL @@ -411,6 +419,7 @@ static struct attribute *acrn_attrs[] = { static struct attribute_group acrn_attr_group = { .attrs = acrn_attrs, + .is_visible = acrn_attr_visible, }; static const struct attribute_group *acrn_attr_groups[] = { diff --git a/drivers/virt/acrn/irqfd.c b/drivers/virt/acrn/irqfd.c index a8766d528e29..df5184979b28 100644 --- a/drivers/virt/acrn/irqfd.c +++ b/drivers/virt/acrn/irqfd.c @@ -112,7 +112,7 @@ static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args) { struct eventfd_ctx *eventfd = NULL; struct hsm_irqfd *irqfd, *tmp; - unsigned int events; + __poll_t events; struct fd f; int ret = 0; @@ -158,9 +158,9 @@ static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args) mutex_unlock(&vm->irqfds_lock); /* Check the pending event in this stage */ - events = f.file->f_op->poll(f.file, &irqfd->pt); + events = vfs_poll(f.file, &irqfd->pt); - if (events & POLLIN) + if (events & EPOLLIN) acrn_irqfd_inject(irqfd); fdput(f); diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 42e09cc1b8ac..4b15c00c0a0a 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -141,15 +141,14 @@ void virtio_config_changed(struct virtio_device *dev) } EXPORT_SYMBOL_GPL(virtio_config_changed); -void virtio_config_disable(struct virtio_device *dev) +static void virtio_config_disable(struct virtio_device *dev) { spin_lock_irq(&dev->config_lock); dev->config_enabled = false; spin_unlock_irq(&dev->config_lock); } -EXPORT_SYMBOL_GPL(virtio_config_disable); -void virtio_config_enable(struct virtio_device *dev) +static void virtio_config_enable(struct virtio_device *dev) { spin_lock_irq(&dev->config_lock); dev->config_enabled = true; @@ -158,7 +157,6 @@ void virtio_config_enable(struct virtio_device *dev) dev->config_change_pending = false; spin_unlock_irq(&dev->config_lock); } -EXPORT_SYMBOL_GPL(virtio_config_enable); void virtio_add_status(struct virtio_device *dev, unsigned int status) { diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index a286d22b6551..56128b9c46eb 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -548,8 +548,7 @@ static void virtio_mmio_release_dev(struct device *_d) { struct virtio_device *vdev = container_of(_d, struct virtio_device, dev); - struct virtio_mmio_device *vm_dev = - container_of(vdev, struct virtio_mmio_device, vdev); + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); struct platform_device *pdev = vm_dev->pdev; devm_kfree(&pdev->dev, vm_dev); diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c index 9867a3a936df..688b112e712b 100644 --- a/drivers/watchdog/cpu5wdt.c +++ b/drivers/watchdog/cpu5wdt.c @@ -273,7 +273,6 @@ module_exit(cpu5wdt_exit_module); MODULE_AUTHOR("Heiko Ronsdorf <hero@ihg.uni-duisburg.de>"); MODULE_DESCRIPTION("sma cpu5 watchdog driver"); -MODULE_SUPPORTED_DEVICE("sma cpu5 watchdog"); MODULE_LICENSE("GPL"); module_param_hw(port, int, ioport, 0); diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c index 808eeb4779e4..1eafe0b4d71c 100644 --- a/drivers/watchdog/cpwd.c +++ b/drivers/watchdog/cpwd.c @@ -172,7 +172,6 @@ MODULE_PARM_DESC(wd2_timeout, "Default watchdog2 timeout in 1/10secs"); MODULE_AUTHOR("Eric Brower <ebrower@usa.net>"); MODULE_DESCRIPTION("Hardware watchdog driver for Sun Microsystems CP1400/1500"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("watchdog"); static void 
cpwd_writew(u16 val, void __iomem *addr) { diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c index 7008596a575f..747e346ed06c 100644 --- a/drivers/watchdog/riowd.c +++ b/drivers/watchdog/riowd.c @@ -46,7 +46,6 @@ MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_DESCRIPTION("Hardware watchdog driver for Sun RIO"); -MODULE_SUPPORTED_DEVICE("watchdog"); MODULE_LICENSE("GPL"); #define DRIVER_NAME "riowd" diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c index da87f3a1e351..b8f2f971c2f0 100644 --- a/drivers/xen/events/events_2l.c +++ b/drivers/xen/events/events_2l.c @@ -47,6 +47,11 @@ static unsigned evtchn_2l_max_channels(void) return EVTCHN_2L_NR_CHANNELS; } +static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu) +{ + clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); +} + static void evtchn_2l_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu, unsigned int old_cpu) { @@ -72,12 +77,6 @@ static bool evtchn_2l_is_pending(evtchn_port_t port) return sync_test_bit(port, BM(&s->evtchn_pending[0])); } -static bool evtchn_2l_test_and_set_mask(evtchn_port_t port) -{ - struct shared_info *s = HYPERVISOR_shared_info; - return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0])); -} - static void evtchn_2l_mask(evtchn_port_t port) { struct shared_info *s = HYPERVISOR_shared_info; @@ -355,18 +354,27 @@ static void evtchn_2l_resume(void) EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); } +static int evtchn_2l_percpu_deinit(unsigned int cpu) +{ + memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) * + EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); + + return 0; +} + static const struct evtchn_ops evtchn_ops_2l = { .max_channels = evtchn_2l_max_channels, .nr_channels = evtchn_2l_max_channels, + .remove = evtchn_2l_remove, .bind_to_cpu = evtchn_2l_bind_to_cpu, .clear_pending = evtchn_2l_clear_pending, .set_pending = evtchn_2l_set_pending, .is_pending = evtchn_2l_is_pending, - .test_and_set_mask = evtchn_2l_test_and_set_mask, .mask = evtchn_2l_mask, .unmask = evtchn_2l_unmask, .handle_events = evtchn_2l_handle_events, .resume = evtchn_2l_resume, + .percpu_deinit = evtchn_2l_percpu_deinit, }; void __init xen_evtchn_2l_init(void) diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index adb7260e94b2..8236e2364eeb 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -98,13 +98,19 @@ struct irq_info { short refcnt; u8 spurious_cnt; u8 is_accounted; - enum xen_irq_type type; /* type */ + short type; /* type: IRQT_* */ + u8 mask_reason; /* Why is event channel masked */ +#define EVT_MASK_REASON_EXPLICIT 0x01 +#define EVT_MASK_REASON_TEMPORARY 0x02 +#define EVT_MASK_REASON_EOI_PENDING 0x04 + u8 is_active; /* Is event just being handled? */ unsigned irq; evtchn_port_t evtchn; /* event channel */ unsigned short cpu; /* cpu bound */ unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */ unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */ u64 eoi_time; /* Time in jiffies when to EOI. 
*/ + spinlock_t lock; union { unsigned short virq; @@ -154,6 +160,7 @@ static DEFINE_RWLOCK(evtchn_rwlock); * evtchn_rwlock * IRQ-desc lock * percpu eoi_list_lock + * irq_info->lock */ static LIST_HEAD(xen_irq_list_head); @@ -304,6 +311,8 @@ static int xen_irq_info_common_setup(struct irq_info *info, info->irq = irq; info->evtchn = evtchn; info->cpu = cpu; + info->mask_reason = EVT_MASK_REASON_EXPLICIT; + spin_lock_init(&info->lock); ret = set_evtchn_to_irq(evtchn, irq); if (ret < 0) @@ -377,6 +386,7 @@ static int xen_irq_info_pirq_setup(unsigned irq, static void xen_irq_info_cleanup(struct irq_info *info) { set_evtchn_to_irq(info->evtchn, -1); + xen_evtchn_port_remove(info->evtchn, info->cpu); info->evtchn = 0; channels_on_cpu_dec(info); } @@ -458,6 +468,34 @@ unsigned int cpu_from_evtchn(evtchn_port_t evtchn) return ret; } +static void do_mask(struct irq_info *info, u8 reason) +{ + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + + if (!info->mask_reason) + mask_evtchn(info->evtchn); + + info->mask_reason |= reason; + + spin_unlock_irqrestore(&info->lock, flags); +} + +static void do_unmask(struct irq_info *info, u8 reason) +{ + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + + info->mask_reason &= ~reason; + + if (!info->mask_reason) + unmask_evtchn(info->evtchn); + + spin_unlock_irqrestore(&info->lock, flags); +} + #ifdef CONFIG_X86 static bool pirq_check_eoi_map(unsigned irq) { @@ -604,7 +642,7 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious) } info->eoi_time = 0; - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_EOI_PENDING); } static void xen_irq_lateeoi_worker(struct work_struct *work) @@ -773,6 +811,12 @@ static void xen_evtchn_close(evtchn_port_t port) BUG(); } +static void event_handler_exit(struct irq_info *info) +{ + smp_store_release(&info->is_active, 0); + clear_evtchn(info->evtchn); +} + static void pirq_query_unmask(int irq) { struct physdev_irq_status_query irq_status; @@ -791,14 +835,15 @@ static void pirq_query_unmask(int irq) static void eoi_pirq(struct irq_data *data) { - evtchn_port_t evtchn = evtchn_from_irq(data->irq); + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; int rc = 0; if (!VALID_EVTCHN(evtchn)) return; - clear_evtchn(evtchn); + event_handler_exit(info); if (pirq_needs_eoi(data->irq)) { rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); @@ -849,7 +894,8 @@ static unsigned int __startup_pirq(unsigned int irq) goto err; out: - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_EXPLICIT); + eoi_pirq(irq_get_irq_data(irq)); return 0; @@ -876,7 +922,7 @@ static void shutdown_pirq(struct irq_data *data) if (!VALID_EVTCHN(evtchn)) return; - mask_evtchn(evtchn); + do_mask(info, EVT_MASK_REASON_EXPLICIT); xen_evtchn_close(evtchn); xen_irq_info_cleanup(info); } @@ -1628,6 +1674,8 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl) } info = info_for_irq(irq); + if (xchg_acquire(&info->is_active, 1)) + return; dev = (info->type == IRQT_EVTCHN) ? 
info->u.interdomain : NULL; if (dev) @@ -1720,10 +1768,10 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq) } /* Rebind an evtchn so that it gets delivered to a specific cpu */ -static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu) +static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu) { struct evtchn_bind_vcpu bind_vcpu; - int masked; + evtchn_port_t evtchn = info ? info->evtchn : 0; if (!VALID_EVTCHN(evtchn)) return -1; @@ -1739,7 +1787,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu) * Mask the event while changing the VCPU binding to prevent * it being delivered on an unexpected VCPU. */ - masked = test_and_set_mask(evtchn); + do_mask(info, EVT_MASK_REASON_TEMPORARY); /* * If this fails, it usually just indicates that we're dealing with a @@ -1749,8 +1797,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu) if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) bind_evtchn_to_cpu(evtchn, tcpu, false); - if (!masked) - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_TEMPORARY); return 0; } @@ -1789,7 +1836,7 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, unsigned int tcpu = select_target_cpu(dest); int ret; - ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu); + ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu); if (!ret) irq_data_update_effective_affinity(data, cpumask_of(tcpu)); @@ -1798,28 +1845,29 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, static void enable_dynirq(struct irq_data *data) { - evtchn_port_t evtchn = evtchn_from_irq(data->irq); + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; if (VALID_EVTCHN(evtchn)) - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_EXPLICIT); } static void disable_dynirq(struct irq_data *data) { - evtchn_port_t evtchn = evtchn_from_irq(data->irq); + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; if (VALID_EVTCHN(evtchn)) - mask_evtchn(evtchn); + do_mask(info, EVT_MASK_REASON_EXPLICIT); } static void ack_dynirq(struct irq_data *data) { - evtchn_port_t evtchn = evtchn_from_irq(data->irq); + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; - if (!VALID_EVTCHN(evtchn)) - return; - - clear_evtchn(evtchn); + if (VALID_EVTCHN(evtchn)) + event_handler_exit(info); } static void mask_ack_dynirq(struct irq_data *data) @@ -1828,18 +1876,39 @@ static void mask_ack_dynirq(struct irq_data *data) ack_dynirq(data); } +static void lateeoi_ack_dynirq(struct irq_data *data) +{ + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; + + if (VALID_EVTCHN(evtchn)) { + do_mask(info, EVT_MASK_REASON_EOI_PENDING); + event_handler_exit(info); + } +} + +static void lateeoi_mask_ack_dynirq(struct irq_data *data) +{ + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; + + if (VALID_EVTCHN(evtchn)) { + do_mask(info, EVT_MASK_REASON_EXPLICIT); + event_handler_exit(info); + } +} + static int retrigger_dynirq(struct irq_data *data) { - evtchn_port_t evtchn = evtchn_from_irq(data->irq); - int masked; + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? 
info->evtchn : 0; if (!VALID_EVTCHN(evtchn)) return 0; - masked = test_and_set_mask(evtchn); + do_mask(info, EVT_MASK_REASON_TEMPORARY); set_evtchn(evtchn); - if (!masked) - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_TEMPORARY); return 1; } @@ -1938,10 +2007,11 @@ static void restore_cpu_ipis(unsigned int cpu) /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq) { - evtchn_port_t evtchn = evtchn_from_irq(irq); + struct irq_info *info = info_for_irq(irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; if (VALID_EVTCHN(evtchn)) - clear_evtchn(evtchn); + event_handler_exit(info); } EXPORT_SYMBOL(xen_clear_irq_pending); void xen_set_irq_pending(int irq) @@ -2053,8 +2123,8 @@ static struct irq_chip xen_lateeoi_chip __read_mostly = { .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, - .irq_ack = mask_ack_dynirq, - .irq_mask_ack = mask_ack_dynirq, + .irq_ack = lateeoi_ack_dynirq, + .irq_mask_ack = lateeoi_mask_ack_dynirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index b234f1766810..ad9fe51d3fb3 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c @@ -209,12 +209,6 @@ static bool evtchn_fifo_is_pending(evtchn_port_t port) return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); } -static bool evtchn_fifo_test_and_set_mask(evtchn_port_t port) -{ - event_word_t *word = event_word_from_port(port); - return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); -} - static void evtchn_fifo_mask(evtchn_port_t port) { event_word_t *word = event_word_from_port(port); @@ -423,7 +417,6 @@ static const struct evtchn_ops evtchn_ops_fifo = { .clear_pending = evtchn_fifo_clear_pending, .set_pending = evtchn_fifo_set_pending, .is_pending = evtchn_fifo_is_pending, - .test_and_set_mask = evtchn_fifo_test_and_set_mask, .mask = evtchn_fifo_mask, .unmask = evtchn_fifo_unmask, .handle_events = evtchn_fifo_handle_events, diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h index 0a97c0549db7..4d3398eff9cd 100644 --- a/drivers/xen/events/events_internal.h +++ b/drivers/xen/events/events_internal.h @@ -14,13 +14,13 @@ struct evtchn_ops { unsigned (*nr_channels)(void); int (*setup)(evtchn_port_t port); + void (*remove)(evtchn_port_t port, unsigned int cpu); void (*bind_to_cpu)(evtchn_port_t evtchn, unsigned int cpu, unsigned int old_cpu); void (*clear_pending)(evtchn_port_t port); void (*set_pending)(evtchn_port_t port); bool (*is_pending)(evtchn_port_t port); - bool (*test_and_set_mask)(evtchn_port_t port); void (*mask)(evtchn_port_t port); void (*unmask)(evtchn_port_t port); @@ -54,6 +54,13 @@ static inline int xen_evtchn_port_setup(evtchn_port_t evtchn) return 0; } +static inline void xen_evtchn_port_remove(evtchn_port_t evtchn, + unsigned int cpu) +{ + if (evtchn_ops->remove) + evtchn_ops->remove(evtchn, cpu); +} + static inline void xen_evtchn_port_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu, unsigned int old_cpu) @@ -76,11 +83,6 @@ static inline bool test_evtchn(evtchn_port_t port) return evtchn_ops->is_pending(port); } -static inline bool test_and_set_mask(evtchn_port_t port) -{ - return evtchn_ops->test_and_set_mask(port); -} - static inline void mask_evtchn(evtchn_port_t port) { return evtchn_ops->mask(port); diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 5447c5156b2e..f01d58c7a042 100644 --- a/drivers/xen/gntdev.c +++ 
b/drivers/xen/gntdev.c @@ -133,20 +133,26 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, if (NULL == add) return NULL; - add->grants = kvcalloc(count, sizeof(add->grants[0]), GFP_KERNEL); - add->map_ops = kvcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL); - add->unmap_ops = kvcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL); - add->kmap_ops = kvcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL); - add->kunmap_ops = kvcalloc(count, - sizeof(add->kunmap_ops[0]), GFP_KERNEL); + add->grants = kvmalloc_array(count, sizeof(add->grants[0]), + GFP_KERNEL); + add->map_ops = kvmalloc_array(count, sizeof(add->map_ops[0]), + GFP_KERNEL); + add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]), + GFP_KERNEL); add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); if (NULL == add->grants || NULL == add->map_ops || NULL == add->unmap_ops || - NULL == add->kmap_ops || - NULL == add->kunmap_ops || NULL == add->pages) goto err; + if (use_ptemod) { + add->kmap_ops = kvmalloc_array(count, sizeof(add->kmap_ops[0]), + GFP_KERNEL); + add->kunmap_ops = kvmalloc_array(count, sizeof(add->kunmap_ops[0]), + GFP_KERNEL); + if (NULL == add->kmap_ops || NULL == add->kunmap_ops) + goto err; + } #ifdef CONFIG_XEN_GRANT_DMA_ALLOC add->dma_flags = dma_flags; @@ -183,10 +189,14 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, goto err; for (i = 0; i < count; i++) { - add->map_ops[i].handle = -1; - add->unmap_ops[i].handle = -1; - add->kmap_ops[i].handle = -1; - add->kunmap_ops[i].handle = -1; + add->grants[i].domid = DOMID_INVALID; + add->grants[i].ref = INVALID_GRANT_REF; + add->map_ops[i].handle = INVALID_GRANT_HANDLE; + add->unmap_ops[i].handle = INVALID_GRANT_HANDLE; + if (use_ptemod) { + add->kmap_ops[i].handle = INVALID_GRANT_HANDLE; + add->kunmap_ops[i].handle = INVALID_GRANT_HANDLE; + } } add->index = 0; @@ -274,7 +284,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data) map->grants[pgnr].ref, map->grants[pgnr].domid); gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags, - -1 /* handle */); + INVALID_GRANT_HANDLE); return 0; } @@ -292,7 +302,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) if (!use_ptemod) { /* Note: it could already be mapped */ - if (map->map_ops[0].handle != -1) + if (map->map_ops[0].handle != INVALID_GRANT_HANDLE) return 0; for (i = 0; i < map->count; i++) { unsigned long addr = (unsigned long) @@ -301,7 +311,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) map->grants[i].ref, map->grants[i].domid); gnttab_set_unmap_op(&map->unmap_ops[i], addr, - map->flags, -1 /* handle */); + map->flags, INVALID_GRANT_HANDLE); } } else { /* @@ -327,13 +337,13 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) map->grants[i].ref, map->grants[i].domid); gnttab_set_unmap_op(&map->kunmap_ops[i], address, - flags, -1); + flags, INVALID_GRANT_HANDLE); } } pr_debug("map %d+%d\n", map->index, map->count); - err = gnttab_map_refs(map->map_ops, use_ptemod ? 
map->kmap_ops : NULL,
-		map->pages, map->count);
+	err = gnttab_map_refs(map->map_ops, map->kmap_ops, map->pages,
+			      map->count);
 
 	for (i = 0; i < map->count; i++) {
 		if (map->map_ops[i].status == GNTST_okay)
@@ -385,7 +395,7 @@ static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
 		pr_debug("unmap handle=%d st=%d\n",
 			map->unmap_ops[offset+i].handle,
 			map->unmap_ops[offset+i].status);
-		map->unmap_ops[offset+i].handle = -1;
+		map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
 	}
 	return err;
 }
@@ -401,13 +411,15 @@ static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
 	 * already unmapped some of the grants. Only unmap valid ranges.
 	 */
 	while (pages && !err) {
-		while (pages && map->unmap_ops[offset].handle == -1) {
+		while (pages &&
+		       map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) {
 			offset++;
 			pages--;
 		}
 		range = 0;
 		while (range < pages) {
-			if (map->unmap_ops[offset+range].handle == -1)
+			if (map->unmap_ops[offset + range].handle ==
+			    INVALID_GRANT_HANDLE)
 				break;
 			range++;
 		}
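
A closing aside on the gntdev hunk above: once every handle starts out as INVALID_GRANT_HANDLE, unmap_grant_pages() is essentially a generic scan that skips invalid slots and unmaps each maximal run of valid ones in one batch. A standalone C sketch of that run-batching loop; SKETCH_INVALID and unmap_batch_stub() are invented stand-ins:

#include <stddef.h>
#include <stdint.h>

#define SKETCH_INVALID	((uint32_t)~0U)	/* stands in for INVALID_GRANT_HANDLE */

/* Hypothetical batch operation over entries [offset, offset + range). */
extern int unmap_batch_stub(uint32_t *handles, size_t offset, size_t range);

/* Walk the handle array, skipping invalid slots and processing each
 * maximal run of valid ones with a single batch call, the same shape
 * as unmap_grant_pages() above.
 */
static int unmap_runs_sketch(uint32_t *handles, size_t offset, size_t pages)
{
	int err = 0;

	while (pages && !err) {
		size_t range = 0;

		/* skip leading invalid entries */
		while (pages && handles[offset] == SKETCH_INVALID) {
			offset++;
			pages--;
		}

		/* measure the next run of valid entries */
		while (range < pages && handles[offset + range] != SKETCH_INVALID)
			range++;

		if (range)
			err = unmap_batch_stub(handles, offset, range);

		offset += range;
		pages -= range;
	}

	return err;
}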