Diffstat (limited to 'drivers')
161 files changed, 3892 insertions, 1530 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index f8fecfec5df9..9706613eecf9 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -879,6 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev) #define LPSS_GPIODEF0_DMA_LLP BIT(13) static DEFINE_MUTEX(lpss_iosf_mutex); +static bool lpss_iosf_d3_entered; static void lpss_iosf_enter_d3_state(void) { @@ -921,6 +922,9 @@ static void lpss_iosf_enter_d3_state(void) iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, LPSS_IOSF_GPIODEF0, value1, mask1); + + lpss_iosf_d3_entered = true; + exit: mutex_unlock(&lpss_iosf_mutex); } @@ -935,6 +939,11 @@ static void lpss_iosf_exit_d3_state(void) mutex_lock(&lpss_iosf_mutex); + if (!lpss_iosf_d3_entered) + goto exit; + + lpss_iosf_d3_entered = false; + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, LPSS_IOSF_GPIODEF0, value1, mask1); @@ -944,13 +953,13 @@ static void lpss_iosf_exit_d3_state(void) iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, LPSS_IOSF_PMCSR, value2, mask2); +exit: mutex_unlock(&lpss_iosf_mutex); } -static int acpi_lpss_suspend(struct device *dev, bool runtime) +static int acpi_lpss_suspend(struct device *dev, bool wakeup) { struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - bool wakeup = runtime || device_may_wakeup(dev); int ret; if (pdata->dev_desc->flags & LPSS_SAVE_CTX) @@ -963,14 +972,14 @@ static int acpi_lpss_suspend(struct device *dev, bool runtime) * wrong status for devices being about to be powered off. See * lpss_iosf_enter_d3_state() for further information. */ - if ((runtime || !pm_suspend_via_firmware()) && + if (acpi_target_system_state() == ACPI_STATE_S0 && lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) lpss_iosf_enter_d3_state(); return ret; } -static int acpi_lpss_resume(struct device *dev, bool runtime) +static int acpi_lpss_resume(struct device *dev) { struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); int ret; @@ -979,8 +988,7 @@ static int acpi_lpss_resume(struct device *dev, bool runtime) * This call is kept first to be in symmetry with * acpi_lpss_runtime_suspend() one. */ - if ((runtime || !pm_resume_via_firmware()) && - lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) + if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) lpss_iosf_exit_d3_state(); ret = acpi_dev_resume(dev); @@ -1004,12 +1012,12 @@ static int acpi_lpss_suspend_late(struct device *dev) return 0; ret = pm_generic_suspend_late(dev); - return ret ? ret : acpi_lpss_suspend(dev, false); + return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev)); } static int acpi_lpss_resume_early(struct device *dev) { - int ret = acpi_lpss_resume(dev, false); + int ret = acpi_lpss_resume(dev); return ret ? ret : pm_generic_resume_early(dev); } @@ -1024,7 +1032,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev) static int acpi_lpss_runtime_resume(struct device *dev) { - int ret = acpi_lpss_resume(dev, true); + int ret = acpi_lpss_resume(dev); return ret ? 
ret : pm_generic_runtime_resume(dev); } diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index ee840be150b5..44f35ab3347d 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -709,15 +709,20 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) } else if ((walk_state-> parse_flags & ACPI_PARSE_MODULE_LEVEL) + && status != AE_CTRL_TRANSFER && ACPI_FAILURE(status)) { /* - * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by - * executing it as a control method. However, if we encounter - * an error while loading the table, we need to keep trying to - * load the table rather than aborting the table load. Set the - * status to AE_OK to proceed with the table load. If we get a - * failure at this point, it means that the dispatcher got an - * error while processing Op (most likely an AML operand error. + * ACPI_PARSE_MODULE_LEVEL flag means that we are currently + * loading a table by executing it as a control method. + * However, if we encounter an error while loading the table, + * we need to keep trying to load the table rather than + * aborting the table load (setting the status to AE_OK + * continues the table load). If we get a failure at this + * point, it means that the dispatcher got an error while + * processing Op (most likely an AML operand error) or a + * control method was called from module level and the + * dispatcher returned AE_CTRL_TRANSFER. In the latter case, + * leave the status alone, there's nothing wrong with it. */ status = AE_OK; } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index fa0729c1e776..d81c653b9bf6 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -61,7 +61,7 @@ static int atomic_inc_return_safe(atomic_t *v) { unsigned int counter; - counter = (unsigned int)__atomic_add_unless(v, 1, 0); + counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0); if (counter <= (unsigned int)INT_MAX) return (int)counter; diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 7436b2d27fa3..a390c6d4f72d 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -298,7 +298,8 @@ static void reset_bdev(struct zram *zram) zram->backing_dev = NULL; zram->old_block_size = 0; zram->bdev = NULL; - + zram->disk->queue->backing_dev_info->capabilities |= + BDI_CAP_SYNCHRONOUS_IO; kvfree(zram->bitmap); zram->bitmap = NULL; } @@ -400,6 +401,18 @@ static ssize_t backing_dev_store(struct device *dev, zram->backing_dev = backing_dev; zram->bitmap = bitmap; zram->nr_pages = nr_pages; + /* + * With writeback feature, zram does asynchronous IO so it's no longer + * synchronous device so let's remove synchronous io flag. Othewise, + * upper layer(e.g., swap) could wait IO completion rather than + * (submit and return), which will cause system sluggish. + * Furthermore, when the IO function returns(e.g., swap_readpage), + * upper layer expects IO was done so it could deallocate the page + * freely but in fact, IO is going on so finally could cause + * use-after-free when the IO is really done. 
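The zram comment above explains why BDI_CAP_SYNCHRONOUS_IO has to be dropped once an asynchronous writeback device is attached; the surrounding hunks set the flag back in reset_bdev() and clear it when a backing device is registered. A minimal sketch of that capability toggle under the same assumptions (a driver holding a request_queue pointer; the struct and function names below are illustrative, not the real zram code):

#include <linux/backing-dev.h>
#include <linux/blkdev.h>

/* Illustrative stand-in for the zram device: only the queue matters here. */
struct demo_blkdev {
        struct request_queue *queue;
};

/* Backing device attached: IO may now complete asynchronously. */
static void demo_enable_writeback(struct demo_blkdev *d)
{
        d->queue->backing_dev_info->capabilities &= ~BDI_CAP_SYNCHRONOUS_IO;
}

/* Backing device removed: all IO is served from memory again. */
static void demo_disable_writeback(struct demo_blkdev *d)
{
        d->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
}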
+ */ + zram->disk->queue->backing_dev_info->capabilities &= + ~BDI_CAP_SYNCHRONOUS_IO; up_write(&zram->init_lock); pr_info("setup backing device %s\n", file_name); diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 7513411140b6..9ab3db8b3988 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -35,9 +35,6 @@ static struct clk *__of_clk_get(struct device_node *np, int index, struct clk *clk; int rc; - if (index < 0) - return ERR_PTR(-EINVAL); - rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, &clkspec); if (rc) @@ -199,7 +196,7 @@ struct clk *clk_get(struct device *dev, const char *con_id) const char *dev_id = dev ? dev_name(dev) : NULL; struct clk *clk; - if (dev) { + if (dev && dev->of_node) { clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id); if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER) return clk; diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 00caf37e52f9..c070cc7992e9 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -49,7 +49,7 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o -obj-$(CONFIG_MTK_TIMER) += mtk_timer.o +obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c deleted file mode 100644 index f9b724fd9950..000000000000 --- a/drivers/clocksource/mtk_timer.c +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Mediatek SoCs General-Purpose Timer handling. - * - * Copyright (C) 2014 Matthias Brugger - * - * Matthias Brugger <matthias.bgg@gmail.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
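The clkdev.c change above restricts the device-tree lookup in clk_get() to consumers that actually have an OF node, so purely platform-registered devices go straight to the clkdev table. A reduced sketch of that lookup order using only the public clk API (the real clk_get() uses internal helpers and also matches on dev_id):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>

struct clk *demo_clk_get(struct device *dev, const char *con_id)
{
        const char *dev_id = dev ? dev_name(dev) : NULL;
        struct clk *clk;

        /* Only devices instantiated from DT can carry "clocks" phandles. */
        if (dev && dev->of_node) {
                clk = of_clk_get_by_name(dev->of_node, con_id);
                if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
                        return clk;
        }

        /* Otherwise fall back to the statically registered clkdev table. */
        return clk_get_sys(dev_id, con_id);
}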
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/clk.h> -#include <linux/clockchips.h> -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/irqreturn.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/sched_clock.h> -#include <linux/slab.h> - -#define GPT_IRQ_EN_REG 0x00 -#define GPT_IRQ_ENABLE(val) BIT((val) - 1) -#define GPT_IRQ_ACK_REG 0x08 -#define GPT_IRQ_ACK(val) BIT((val) - 1) - -#define TIMER_CTRL_REG(val) (0x10 * (val)) -#define TIMER_CTRL_OP(val) (((val) & 0x3) << 4) -#define TIMER_CTRL_OP_ONESHOT (0) -#define TIMER_CTRL_OP_REPEAT (1) -#define TIMER_CTRL_OP_FREERUN (3) -#define TIMER_CTRL_CLEAR (2) -#define TIMER_CTRL_ENABLE (1) -#define TIMER_CTRL_DISABLE (0) - -#define TIMER_CLK_REG(val) (0x04 + (0x10 * (val))) -#define TIMER_CLK_SRC(val) (((val) & 0x1) << 4) -#define TIMER_CLK_SRC_SYS13M (0) -#define TIMER_CLK_SRC_RTC32K (1) -#define TIMER_CLK_DIV1 (0x0) -#define TIMER_CLK_DIV2 (0x1) - -#define TIMER_CNT_REG(val) (0x08 + (0x10 * (val))) -#define TIMER_CMP_REG(val) (0x0C + (0x10 * (val))) - -#define GPT_CLK_EVT 1 -#define GPT_CLK_SRC 2 - -struct mtk_clock_event_device { - void __iomem *gpt_base; - u32 ticks_per_jiffy; - struct clock_event_device dev; -}; - -static void __iomem *gpt_sched_reg __read_mostly; - -static u64 notrace mtk_read_sched_clock(void) -{ - return readl_relaxed(gpt_sched_reg); -} - -static inline struct mtk_clock_event_device *to_mtk_clk( - struct clock_event_device *c) -{ - return container_of(c, struct mtk_clock_event_device, dev); -} - -static void mtk_clkevt_time_stop(struct mtk_clock_event_device *evt, u8 timer) -{ - u32 val; - - val = readl(evt->gpt_base + TIMER_CTRL_REG(timer)); - writel(val & ~TIMER_CTRL_ENABLE, evt->gpt_base + - TIMER_CTRL_REG(timer)); -} - -static void mtk_clkevt_time_setup(struct mtk_clock_event_device *evt, - unsigned long delay, u8 timer) -{ - writel(delay, evt->gpt_base + TIMER_CMP_REG(timer)); -} - -static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt, - bool periodic, u8 timer) -{ - u32 val; - - /* Acknowledge interrupt */ - writel(GPT_IRQ_ACK(timer), evt->gpt_base + GPT_IRQ_ACK_REG); - - val = readl(evt->gpt_base + TIMER_CTRL_REG(timer)); - - /* Clear 2 bit timer operation mode field */ - val &= ~TIMER_CTRL_OP(0x3); - - if (periodic) - val |= TIMER_CTRL_OP(TIMER_CTRL_OP_REPEAT); - else - val |= TIMER_CTRL_OP(TIMER_CTRL_OP_ONESHOT); - - writel(val | TIMER_CTRL_ENABLE | TIMER_CTRL_CLEAR, - evt->gpt_base + TIMER_CTRL_REG(timer)); -} - -static int mtk_clkevt_shutdown(struct clock_event_device *clk) -{ - mtk_clkevt_time_stop(to_mtk_clk(clk), GPT_CLK_EVT); - return 0; -} - -static int mtk_clkevt_set_periodic(struct clock_event_device *clk) -{ - struct mtk_clock_event_device *evt = to_mtk_clk(clk); - - mtk_clkevt_time_stop(evt, GPT_CLK_EVT); - mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT); - mtk_clkevt_time_start(evt, true, GPT_CLK_EVT); - return 0; -} - -static int mtk_clkevt_next_event(unsigned long event, - struct clock_event_device *clk) -{ - struct mtk_clock_event_device *evt = to_mtk_clk(clk); - - mtk_clkevt_time_stop(evt, GPT_CLK_EVT); - mtk_clkevt_time_setup(evt, event, GPT_CLK_EVT); - mtk_clkevt_time_start(evt, false, GPT_CLK_EVT); - - return 0; -} - -static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id) -{ - struct mtk_clock_event_device *evt = dev_id; - - /* Acknowledge timer0 irq */ - writel(GPT_IRQ_ACK(GPT_CLK_EVT), evt->gpt_base + GPT_IRQ_ACK_REG); - evt->dev.event_handler(&evt->dev); - - 
return IRQ_HANDLED; -} - -static void -__init mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option) -{ - writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE, - evt->gpt_base + TIMER_CTRL_REG(timer)); - - writel(TIMER_CLK_SRC(TIMER_CLK_SRC_SYS13M) | TIMER_CLK_DIV1, - evt->gpt_base + TIMER_CLK_REG(timer)); - - writel(0x0, evt->gpt_base + TIMER_CMP_REG(timer)); - - writel(TIMER_CTRL_OP(option) | TIMER_CTRL_ENABLE, - evt->gpt_base + TIMER_CTRL_REG(timer)); -} - -static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer) -{ - u32 val; - - /* Disable all interrupts */ - writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG); - - /* Acknowledge all spurious pending interrupts */ - writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG); - - val = readl(evt->gpt_base + GPT_IRQ_EN_REG); - writel(val | GPT_IRQ_ENABLE(timer), - evt->gpt_base + GPT_IRQ_EN_REG); -} - -static int __init mtk_timer_init(struct device_node *node) -{ - struct mtk_clock_event_device *evt; - struct resource res; - unsigned long rate = 0; - struct clk *clk; - - evt = kzalloc(sizeof(*evt), GFP_KERNEL); - if (!evt) - return -ENOMEM; - - evt->dev.name = "mtk_tick"; - evt->dev.rating = 300; - evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; - evt->dev.set_state_shutdown = mtk_clkevt_shutdown; - evt->dev.set_state_periodic = mtk_clkevt_set_periodic; - evt->dev.set_state_oneshot = mtk_clkevt_shutdown; - evt->dev.tick_resume = mtk_clkevt_shutdown; - evt->dev.set_next_event = mtk_clkevt_next_event; - evt->dev.cpumask = cpu_possible_mask; - - evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer"); - if (IS_ERR(evt->gpt_base)) { - pr_err("Can't get resource\n"); - goto err_kzalloc; - } - - evt->dev.irq = irq_of_parse_and_map(node, 0); - if (evt->dev.irq <= 0) { - pr_err("Can't parse IRQ\n"); - goto err_mem; - } - - clk = of_clk_get(node, 0); - if (IS_ERR(clk)) { - pr_err("Can't get timer clock\n"); - goto err_irq; - } - - if (clk_prepare_enable(clk)) { - pr_err("Can't prepare clock\n"); - goto err_clk_put; - } - rate = clk_get_rate(clk); - - if (request_irq(evt->dev.irq, mtk_timer_interrupt, - IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) { - pr_err("failed to setup irq %d\n", evt->dev.irq); - goto err_clk_disable; - } - - evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); - - /* Configure clock source */ - mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN); - clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC), - node->name, rate, 300, 32, clocksource_mmio_readl_up); - gpt_sched_reg = evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC); - sched_clock_register(mtk_read_sched_clock, 32, rate); - - /* Configure clock event */ - mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT); - clockevents_config_and_register(&evt->dev, rate, 0x3, - 0xffffffff); - - mtk_timer_enable_irq(evt, GPT_CLK_EVT); - - return 0; - -err_clk_disable: - clk_disable_unprepare(clk); -err_clk_put: - clk_put(clk); -err_irq: - irq_dispose_mapping(evt->dev.irq); -err_mem: - iounmap(evt->gpt_base); - of_address_to_resource(node, 0, &res); - release_mem_region(res.start, resource_size(&res)); -err_kzalloc: - kfree(evt); - - return -EINVAL; -} -TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init); diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c index c337a8100a7b..aa624885e0e2 100644 --- a/drivers/clocksource/tegra20_timer.c +++ b/drivers/clocksource/tegra20_timer.c @@ -230,7 +230,7 @@ static int __init tegra20_init_timer(struct device_node *np) return ret; } - 
tegra_clockevent.cpumask = cpu_all_mask; + tegra_clockevent.cpumask = cpu_possible_mask; tegra_clockevent.irq = tegra_timer_irq.irq; clockevents_config_and_register(&tegra_clockevent, 1000000, 0x1, 0x1fffffff); @@ -259,6 +259,6 @@ static int __init tegra20_init_rtc(struct device_node *np) else clk_prepare_enable(clk); - return register_persistent_clock(NULL, tegra_read_persistent_clock64); + return register_persistent_clock(tegra_read_persistent_clock64); } TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc); diff --git a/drivers/clocksource/timer-atcpit100.c b/drivers/clocksource/timer-atcpit100.c index 5e23d7b4a722..b4bd2f5b801d 100644 --- a/drivers/clocksource/timer-atcpit100.c +++ b/drivers/clocksource/timer-atcpit100.c @@ -185,7 +185,7 @@ static struct timer_of to = { .set_state_oneshot = atcpit100_clkevt_set_oneshot, .tick_resume = atcpit100_clkevt_shutdown, .set_next_event = atcpit100_clkevt_next_event, - .cpumask = cpu_all_mask, + .cpumask = cpu_possible_mask, }, .of_irq = { diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c index 0eee03250cfc..f5b2eda30bf3 100644 --- a/drivers/clocksource/timer-keystone.c +++ b/drivers/clocksource/timer-keystone.c @@ -211,7 +211,7 @@ static int __init keystone_timer_init(struct device_node *np) event_dev->set_state_shutdown = keystone_shutdown; event_dev->set_state_periodic = keystone_set_periodic; event_dev->set_state_oneshot = keystone_shutdown; - event_dev->cpumask = cpu_all_mask; + event_dev->cpumask = cpu_possible_mask; event_dev->owner = THIS_MODULE; event_dev->name = TIMER_NAME; event_dev->irq = irq; diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c new file mode 100644 index 000000000000..eb10321f8517 --- /dev/null +++ b/drivers/clocksource/timer-mediatek.c @@ -0,0 +1,328 @@ +/* + * Mediatek SoCs General-Purpose Timer handling. + * + * Copyright (C) 2014 Matthias Brugger + * + * Matthias Brugger <matthias.bgg@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/interrupt.h> +#include <linux/irqreturn.h> +#include <linux/sched_clock.h> +#include <linux/slab.h> +#include "timer-of.h" + +#define TIMER_CLK_EVT (1) +#define TIMER_CLK_SRC (2) + +#define TIMER_SYNC_TICKS (3) + +/* gpt */ +#define GPT_IRQ_EN_REG 0x00 +#define GPT_IRQ_ENABLE(val) BIT((val) - 1) +#define GPT_IRQ_ACK_REG 0x08 +#define GPT_IRQ_ACK(val) BIT((val) - 1) + +#define GPT_CTRL_REG(val) (0x10 * (val)) +#define GPT_CTRL_OP(val) (((val) & 0x3) << 4) +#define GPT_CTRL_OP_ONESHOT (0) +#define GPT_CTRL_OP_REPEAT (1) +#define GPT_CTRL_OP_FREERUN (3) +#define GPT_CTRL_CLEAR (2) +#define GPT_CTRL_ENABLE (1) +#define GPT_CTRL_DISABLE (0) + +#define GPT_CLK_REG(val) (0x04 + (0x10 * (val))) +#define GPT_CLK_SRC(val) (((val) & 0x1) << 4) +#define GPT_CLK_SRC_SYS13M (0) +#define GPT_CLK_SRC_RTC32K (1) +#define GPT_CLK_DIV1 (0x0) +#define GPT_CLK_DIV2 (0x1) + +#define GPT_CNT_REG(val) (0x08 + (0x10 * (val))) +#define GPT_CMP_REG(val) (0x0C + (0x10 * (val))) + +/* system timer */ +#define SYST_BASE (0x40) + +#define SYST_CON (SYST_BASE + 0x0) +#define SYST_VAL (SYST_BASE + 0x4) + +#define SYST_CON_REG(to) (timer_of_base(to) + SYST_CON) +#define SYST_VAL_REG(to) (timer_of_base(to) + SYST_VAL) + +/* + * SYST_CON_EN: Clock enable. Shall be set to + * - Start timer countdown. + * - Allow timeout ticks being updated. + * - Allow changing interrupt functions. + * + * SYST_CON_IRQ_EN: Set to allow interrupt. + * + * SYST_CON_IRQ_CLR: Set to clear interrupt. + */ +#define SYST_CON_EN BIT(0) +#define SYST_CON_IRQ_EN BIT(1) +#define SYST_CON_IRQ_CLR BIT(4) + +static void __iomem *gpt_sched_reg __read_mostly; + +static void mtk_syst_ack_irq(struct timer_of *to) +{ + /* Clear and disable interrupt */ + writel(SYST_CON_IRQ_CLR | SYST_CON_EN, SYST_CON_REG(to)); +} + +static irqreturn_t mtk_syst_handler(int irq, void *dev_id) +{ + struct clock_event_device *clkevt = dev_id; + struct timer_of *to = to_timer_of(clkevt); + + mtk_syst_ack_irq(to); + clkevt->event_handler(clkevt); + + return IRQ_HANDLED; +} + +static int mtk_syst_clkevt_next_event(unsigned long ticks, + struct clock_event_device *clkevt) +{ + struct timer_of *to = to_timer_of(clkevt); + + /* Enable clock to allow timeout tick update later */ + writel(SYST_CON_EN, SYST_CON_REG(to)); + + /* + * Write new timeout ticks. Timer shall start countdown + * after timeout ticks are updated. 
+ */ + writel(ticks, SYST_VAL_REG(to)); + + /* Enable interrupt */ + writel(SYST_CON_EN | SYST_CON_IRQ_EN, SYST_CON_REG(to)); + + return 0; +} + +static int mtk_syst_clkevt_shutdown(struct clock_event_device *clkevt) +{ + /* Disable timer */ + writel(0, SYST_CON_REG(to_timer_of(clkevt))); + + return 0; +} + +static int mtk_syst_clkevt_resume(struct clock_event_device *clkevt) +{ + return mtk_syst_clkevt_shutdown(clkevt); +} + +static int mtk_syst_clkevt_oneshot(struct clock_event_device *clkevt) +{ + return 0; +} + +static u64 notrace mtk_gpt_read_sched_clock(void) +{ + return readl_relaxed(gpt_sched_reg); +} + +static void mtk_gpt_clkevt_time_stop(struct timer_of *to, u8 timer) +{ + u32 val; + + val = readl(timer_of_base(to) + GPT_CTRL_REG(timer)); + writel(val & ~GPT_CTRL_ENABLE, timer_of_base(to) + + GPT_CTRL_REG(timer)); +} + +static void mtk_gpt_clkevt_time_setup(struct timer_of *to, + unsigned long delay, u8 timer) +{ + writel(delay, timer_of_base(to) + GPT_CMP_REG(timer)); +} + +static void mtk_gpt_clkevt_time_start(struct timer_of *to, + bool periodic, u8 timer) +{ + u32 val; + + /* Acknowledge interrupt */ + writel(GPT_IRQ_ACK(timer), timer_of_base(to) + GPT_IRQ_ACK_REG); + + val = readl(timer_of_base(to) + GPT_CTRL_REG(timer)); + + /* Clear 2 bit timer operation mode field */ + val &= ~GPT_CTRL_OP(0x3); + + if (periodic) + val |= GPT_CTRL_OP(GPT_CTRL_OP_REPEAT); + else + val |= GPT_CTRL_OP(GPT_CTRL_OP_ONESHOT); + + writel(val | GPT_CTRL_ENABLE | GPT_CTRL_CLEAR, + timer_of_base(to) + GPT_CTRL_REG(timer)); +} + +static int mtk_gpt_clkevt_shutdown(struct clock_event_device *clk) +{ + mtk_gpt_clkevt_time_stop(to_timer_of(clk), TIMER_CLK_EVT); + + return 0; +} + +static int mtk_gpt_clkevt_set_periodic(struct clock_event_device *clk) +{ + struct timer_of *to = to_timer_of(clk); + + mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT); + mtk_gpt_clkevt_time_setup(to, to->of_clk.period, TIMER_CLK_EVT); + mtk_gpt_clkevt_time_start(to, true, TIMER_CLK_EVT); + + return 0; +} + +static int mtk_gpt_clkevt_next_event(unsigned long event, + struct clock_event_device *clk) +{ + struct timer_of *to = to_timer_of(clk); + + mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT); + mtk_gpt_clkevt_time_setup(to, event, TIMER_CLK_EVT); + mtk_gpt_clkevt_time_start(to, false, TIMER_CLK_EVT); + + return 0; +} + +static irqreturn_t mtk_gpt_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *clkevt = (struct clock_event_device *)dev_id; + struct timer_of *to = to_timer_of(clkevt); + + /* Acknowledge timer0 irq */ + writel(GPT_IRQ_ACK(TIMER_CLK_EVT), timer_of_base(to) + GPT_IRQ_ACK_REG); + clkevt->event_handler(clkevt); + + return IRQ_HANDLED; +} + +static void +__init mtk_gpt_setup(struct timer_of *to, u8 timer, u8 option) +{ + writel(GPT_CTRL_CLEAR | GPT_CTRL_DISABLE, + timer_of_base(to) + GPT_CTRL_REG(timer)); + + writel(GPT_CLK_SRC(GPT_CLK_SRC_SYS13M) | GPT_CLK_DIV1, + timer_of_base(to) + GPT_CLK_REG(timer)); + + writel(0x0, timer_of_base(to) + GPT_CMP_REG(timer)); + + writel(GPT_CTRL_OP(option) | GPT_CTRL_ENABLE, + timer_of_base(to) + GPT_CTRL_REG(timer)); +} + +static void mtk_gpt_enable_irq(struct timer_of *to, u8 timer) +{ + u32 val; + + /* Disable all interrupts */ + writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG); + + /* Acknowledge all spurious pending interrupts */ + writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG); + + val = readl(timer_of_base(to) + GPT_IRQ_EN_REG); + writel(val | GPT_IRQ_ENABLE(timer), + timer_of_base(to) + GPT_IRQ_EN_REG); +} + +static struct timer_of to = { + .flags = 
TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK, + + .clkevt = { + .name = "mtk-clkevt", + .rating = 300, + .cpumask = cpu_possible_mask, + }, + + .of_irq = { + .flags = IRQF_TIMER | IRQF_IRQPOLL, + }, +}; + +static int __init mtk_syst_init(struct device_node *node) +{ + int ret; + + to.clkevt.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT; + to.clkevt.set_state_shutdown = mtk_syst_clkevt_shutdown; + to.clkevt.set_state_oneshot = mtk_syst_clkevt_oneshot; + to.clkevt.tick_resume = mtk_syst_clkevt_resume; + to.clkevt.set_next_event = mtk_syst_clkevt_next_event; + to.of_irq.handler = mtk_syst_handler; + + ret = timer_of_init(node, &to); + if (ret) + goto err; + + clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), + TIMER_SYNC_TICKS, 0xffffffff); + + return 0; +err: + timer_of_cleanup(&to); + return ret; +} + +static int __init mtk_gpt_init(struct device_node *node) +{ + int ret; + + to.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + to.clkevt.set_state_shutdown = mtk_gpt_clkevt_shutdown; + to.clkevt.set_state_periodic = mtk_gpt_clkevt_set_periodic; + to.clkevt.set_state_oneshot = mtk_gpt_clkevt_shutdown; + to.clkevt.tick_resume = mtk_gpt_clkevt_shutdown; + to.clkevt.set_next_event = mtk_gpt_clkevt_next_event; + to.of_irq.handler = mtk_gpt_interrupt; + + ret = timer_of_init(node, &to); + if (ret) + goto err; + + /* Configure clock source */ + mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN); + clocksource_mmio_init(timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC), + node->name, timer_of_rate(&to), 300, 32, + clocksource_mmio_readl_up); + gpt_sched_reg = timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC); + sched_clock_register(mtk_gpt_read_sched_clock, 32, timer_of_rate(&to)); + + /* Configure clock event */ + mtk_gpt_setup(&to, TIMER_CLK_EVT, GPT_CTRL_OP_REPEAT); + clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), + TIMER_SYNC_TICKS, 0xffffffff); + + mtk_gpt_enable_irq(&to, TIMER_CLK_EVT); + + return 0; +err: + timer_of_cleanup(&to); + return ret; +} +TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init); +TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init); diff --git a/drivers/clocksource/timer-sprd.c b/drivers/clocksource/timer-sprd.c index ef9ebeafb3ed..430cb99d8d79 100644 --- a/drivers/clocksource/timer-sprd.c +++ b/drivers/clocksource/timer-sprd.c @@ -156,4 +156,54 @@ static int __init sprd_timer_init(struct device_node *np) return 0; } +static struct timer_of suspend_to = { + .flags = TIMER_OF_BASE | TIMER_OF_CLOCK, +}; + +static u64 sprd_suspend_timer_read(struct clocksource *cs) +{ + return ~(u64)readl_relaxed(timer_of_base(&suspend_to) + + TIMER_VALUE_SHDW_LO) & cs->mask; +} + +static int sprd_suspend_timer_enable(struct clocksource *cs) +{ + sprd_timer_update_counter(timer_of_base(&suspend_to), + TIMER_VALUE_LO_MASK); + sprd_timer_enable(timer_of_base(&suspend_to), TIMER_CTL_PERIOD_MODE); + + return 0; +} + +static void sprd_suspend_timer_disable(struct clocksource *cs) +{ + sprd_timer_disable(timer_of_base(&suspend_to)); +} + +static struct clocksource suspend_clocksource = { + .name = "sprd_suspend_timer", + .rating = 200, + .read = sprd_suspend_timer_read, + .enable = sprd_suspend_timer_enable, + .disable = sprd_suspend_timer_disable, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP, +}; + +static int __init sprd_suspend_timer_init(struct device_node *np) +{ + int ret; + + ret = timer_of_init(np, &suspend_to); + if (ret) + return ret; + + 
clocksource_register_hz(&suspend_clocksource, + timer_of_rate(&suspend_to)); + + return 0; +} + TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init); +TIMER_OF_DECLARE(sc9860_persistent_timer, "sprd,sc9860-suspend-timer", + sprd_suspend_timer_init); diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c index 880a861ab3c8..29e2e1a78a43 100644 --- a/drivers/clocksource/timer-ti-32k.c +++ b/drivers/clocksource/timer-ti-32k.c @@ -78,8 +78,7 @@ static struct ti_32k ti_32k_timer = { .rating = 250, .read = ti_32k_read_cycles, .mask = CLOCKSOURCE_MASK(32), - .flags = CLOCK_SOURCE_IS_CONTINUOUS | - CLOCK_SOURCE_SUSPEND_NONSTOP, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, }, }; diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c index a6a0338eea77..f74689334f7c 100644 --- a/drivers/clocksource/zevio-timer.c +++ b/drivers/clocksource/zevio-timer.c @@ -162,7 +162,7 @@ static int __init zevio_timer_add(struct device_node *node) timer->clkevt.set_state_oneshot = zevio_timer_set_oneshot; timer->clkevt.tick_resume = zevio_timer_set_oneshot; timer->clkevt.rating = 200; - timer->clkevt.cpumask = cpu_all_mask; + timer->clkevt.cpumask = cpu_possible_mask; timer->clkevt.features = CLOCK_EVT_FEAT_ONESHOT; timer->clkevt.irq = irqnr; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3c3971256130..d4ed0022b0dd 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -311,12 +311,20 @@ static DEFINE_MUTEX(intel_pstate_limits_lock); #ifdef CONFIG_ACPI -static bool intel_pstate_get_ppc_enable_status(void) +static bool intel_pstate_acpi_pm_profile_server(void) { if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) return true; + return false; +} + +static bool intel_pstate_get_ppc_enable_status(void) +{ + if (intel_pstate_acpi_pm_profile_server()) + return true; + return acpi_ppc; } @@ -459,6 +467,11 @@ static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *pol static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) { } + +static inline bool intel_pstate_acpi_pm_profile_server(void) +{ + return false; +} #endif static inline void update_turbo_state(void) @@ -1841,7 +1854,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum) intel_pstate_hwp_enable(cpu); id = x86_match_cpu(intel_pstate_hwp_boost_ids); - if (id) + if (id && intel_pstate_acpi_pm_profile_server()) hwp_boost = true; } diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 1c6cbda56afe..09d823d36d3a 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, return; } + count -= initial; + if (initial) asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ : "+S"(input), "+D"(output) @@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ : "+S"(input), "+D"(output) - : "d"(control_word), "b"(key), "c"(count - initial)); + : "d"(control_word), "b"(key), "c"(count)); } static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, @@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, if (count < cbc_fetch_blocks) return cbc_crypt(input, output, key, iv, control_word, count); + count -= initial; + if (initial) 
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ : "+S" (input), "+D" (output), "+a" (iv) @@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ : "+S" (input), "+D" (output), "+a" (iv) - : "d" (control_word), "b" (key), "c" (count-initial)); + : "d" (control_word), "b" (key), "c" (count)); return iv; } diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 781a4a337557..d8e159feb573 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -87,6 +87,18 @@ config EFI_RUNTIME_WRAPPERS config EFI_ARMSTUB bool +config EFI_ARMSTUB_DTB_LOADER + bool "Enable the DTB loader" + depends on EFI_ARMSTUB + help + Select this config option to add support for the dtb= command + line parameter, allowing a device tree blob to be loaded into + memory from the EFI System Partition by the stub. + + The device tree is typically provided by the platform or by + the bootloader, so this option is mostly for development + purposes only. + config EFI_BOOTLOADER_CONTROL tristate "EFI Bootloader Control" depends on EFI_VARS diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 3bf0dca378a6..a7902fccdcfa 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -48,8 +48,21 @@ u64 cper_next_record_id(void) { static atomic64_t seq; - if (!atomic64_read(&seq)) - atomic64_set(&seq, ((u64)get_seconds()) << 32); + if (!atomic64_read(&seq)) { + time64_t time = ktime_get_real_seconds(); + + /* + * This code is unlikely to still be needed in year 2106, + * but just in case, let's use a few more bits for timestamps + * after y2038 to be sure they keep increasing monotonically + * for the next few hundred years... + */ + if (time < 0x80000000) + atomic64_set(&seq, (ktime_get_real_seconds()) << 32); + else + atomic64_set(&seq, 0x8000000000000000ull | + ktime_get_real_seconds() << 24); + } return atomic64_inc_return(&seq); } @@ -459,7 +472,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata else goto err_section_too_small; #if defined(CONFIG_ARM64) || defined(CONFIG_ARM) - } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_ARM)) { + } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata); printk("%ssection_type: ARM processor error\n", newpfx); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 232f4915223b..2a29dd9c986d 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -82,8 +82,11 @@ struct mm_struct efi_mm = { .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem), .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock), .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), + .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0}, }; +struct workqueue_struct *efi_rts_wq; + static bool disable_runtime; static int __init setup_noefi(char *arg) { @@ -337,6 +340,18 @@ static int __init efisubsys_init(void) if (!efi_enabled(EFI_BOOT)) return 0; + /* + * Since we process only one efi_runtime_service() at a time, an + * ordered workqueue (which creates only one execution context) + * should suffice all our needs. 
+ */ + efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0); + if (!efi_rts_wq) { + pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n"); + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); + return 0; + } + /* We register the efi directory at /sys/firmware/efi */ efi_kobj = kobject_create_and_add("efi", firmware_kobj); if (!efi_kobj) { @@ -388,7 +403,7 @@ subsys_initcall(efisubsys_init); * and if so, populate the supplied memory descriptor with the appropriate * data. */ -int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) +int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) { efi_memory_desc_t *md; @@ -406,12 +421,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) u64 size; u64 end; - if (!(md->attribute & EFI_MEMORY_RUNTIME) && - md->type != EFI_BOOT_SERVICES_DATA && - md->type != EFI_RUNTIME_SERVICES_DATA) { - continue; - } - size = md->num_pages << EFI_PAGE_SHIFT; end = md->phys_addr + size; if (phys_addr >= md->phys_addr && phys_addr < end) { diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index 1ab80e06e7c5..5d06bd247d07 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -250,7 +250,10 @@ void __init efi_esrt_init(void) return; rc = efi_mem_desc_lookup(efi.esrt, &md); - if (rc < 0) { + if (rc < 0 || + (!(md.attribute & EFI_MEMORY_RUNTIME) && + md.type != EFI_BOOT_SERVICES_DATA && + md.type != EFI_RUNTIME_SERVICES_DATA)) { pr_warn("ESRT header is not in the memory map.\n"); return; } @@ -326,7 +329,8 @@ void __init efi_esrt_init(void) end = esrt_data + size; pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end); - efi_mem_reserve(esrt_data, esrt_data_size); + if (md.type == EFI_BOOT_SERVICES_DATA) + efi_mem_reserve(esrt_data, esrt_data_size); pr_debug("esrt-init: loaded.\n"); } diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 01a9d78ee415..6920033de6d4 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -40,31 +40,6 @@ static u64 virtmap_base = EFI_RT_VIRTUAL_BASE; -efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, - void *__image, void **__fh) -{ - efi_file_io_interface_t *io; - efi_loaded_image_t *image = __image; - efi_file_handle_t *fh; - efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID; - efi_status_t status; - void *handle = (void *)(unsigned long)image->device_handle; - - status = sys_table_arg->boottime->handle_protocol(handle, - &fs_proto, (void **)&io); - if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, "Failed to handle fs_proto\n"); - return status; - } - - status = io->open_volume(io, &fh); - if (status != EFI_SUCCESS) - efi_printk(sys_table_arg, "Failed to open volume\n"); - - *__fh = fh; - return status; -} - void efi_char16_printk(efi_system_table_t *sys_table_arg, efi_char16_t *str) { @@ -202,9 +177,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, * 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure * boot is enabled if we can't determine its state. 
*/ - if (secure_boot != efi_secureboot_mode_disabled && - strstr(cmdline_ptr, "dtb=")) { - pr_efi(sys_table, "Ignoring DTB from command line.\n"); + if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) || + secure_boot != efi_secureboot_mode_disabled) { + if (strstr(cmdline_ptr, "dtb=")) + pr_efi(sys_table, "Ignoring DTB from command line.\n"); } else { status = handle_cmdline_files(sys_table, image, cmdline_ptr, "dtb=", diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index 50a9cab5a834..e94975f4655b 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -413,6 +413,34 @@ static efi_status_t efi_file_close(void *handle) return efi_call_proto(efi_file_handle, close, handle); } +static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, + efi_loaded_image_t *image, + efi_file_handle_t **__fh) +{ + efi_file_io_interface_t *io; + efi_file_handle_t *fh; + efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID; + efi_status_t status; + void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image, + device_handle, + image); + + status = efi_call_early(handle_protocol, handle, + &fs_proto, (void **)&io); + if (status != EFI_SUCCESS) { + efi_printk(sys_table_arg, "Failed to handle fs_proto\n"); + return status; + } + + status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh); + if (status != EFI_SUCCESS) + efi_printk(sys_table_arg, "Failed to open volume\n"); + else + *__fh = fh; + + return status; +} + /* * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi= * option, e.g. efi=nochunk. @@ -563,8 +591,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, /* Only open the volume once. */ if (!i) { - status = efi_open_volume(sys_table_arg, image, - (void **)&fh); + status = efi_open_volume(sys_table_arg, image, &fh); if (status != EFI_SUCCESS) goto free_files; } diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index f59564b72ddc..32799cf039ef 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -36,9 +36,6 @@ extern int __pure is_quiet(void); void efi_char16_printk(efi_system_table_t *, efi_char16_t *); -efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image, - void **__fh); - unsigned long get_dram_base(efi_system_table_t *sys_table_arg); efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index ae54870b2788..aa66cbf23512 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c @@ -1,6 +1,15 @@ /* * runtime-wrappers.c - Runtime Services function call wrappers * + * Implementation summary: + * ----------------------- + * 1. When user/kernel thread requests to execute efi_runtime_service(), + * enqueue work to efi_rts_wq. + * 2. Caller thread waits for completion until the work is finished + * because it's dependent on the return status and execution of + * efi_runtime_service(). + * For instance, get_variable() and get_next_variable(). + * * Copyright (C) 2014 Linaro Ltd. 
<ard.biesheuvel@linaro.org> * * Split off from arch/x86/platform/efi/efi.c @@ -22,6 +31,9 @@ #include <linux/mutex.h> #include <linux/semaphore.h> #include <linux/stringify.h> +#include <linux/workqueue.h> +#include <linux/completion.h> + #include <asm/efi.h> /* @@ -33,6 +45,76 @@ #define __efi_call_virt(f, args...) \ __efi_call_virt_pointer(efi.systab->runtime, f, args) +/* efi_runtime_service() function identifiers */ +enum efi_rts_ids { + GET_TIME, + SET_TIME, + GET_WAKEUP_TIME, + SET_WAKEUP_TIME, + GET_VARIABLE, + GET_NEXT_VARIABLE, + SET_VARIABLE, + QUERY_VARIABLE_INFO, + GET_NEXT_HIGH_MONO_COUNT, + UPDATE_CAPSULE, + QUERY_CAPSULE_CAPS, +}; + +/* + * efi_runtime_work: Details of EFI Runtime Service work + * @arg<1-5>: EFI Runtime Service function arguments + * @status: Status of executing EFI Runtime Service + * @efi_rts_id: EFI Runtime Service function identifier + * @efi_rts_comp: Struct used for handling completions + */ +struct efi_runtime_work { + void *arg1; + void *arg2; + void *arg3; + void *arg4; + void *arg5; + efi_status_t status; + struct work_struct work; + enum efi_rts_ids efi_rts_id; + struct completion efi_rts_comp; +}; + +/* + * efi_queue_work: Queue efi_runtime_service() and wait until it's done + * @rts: efi_runtime_service() function identifier + * @rts_arg<1-5>: efi_runtime_service() function arguments + * + * Accesses to efi_runtime_services() are serialized by a binary + * semaphore (efi_runtime_lock) and caller waits until the work is + * finished, hence _only_ one work is queued at a time and the caller + * thread waits for completion. + */ +#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5) \ +({ \ + struct efi_runtime_work efi_rts_work; \ + efi_rts_work.status = EFI_ABORTED; \ + \ + init_completion(&efi_rts_work.efi_rts_comp); \ + INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts); \ + efi_rts_work.arg1 = _arg1; \ + efi_rts_work.arg2 = _arg2; \ + efi_rts_work.arg3 = _arg3; \ + efi_rts_work.arg4 = _arg4; \ + efi_rts_work.arg5 = _arg5; \ + efi_rts_work.efi_rts_id = _rts; \ + \ + /* \ + * queue_work() returns 0 if work was already on queue, \ + * _ideally_ this should never happen. \ + */ \ + if (queue_work(efi_rts_wq, &efi_rts_work.work)) \ + wait_for_completion(&efi_rts_work.efi_rts_comp); \ + else \ + pr_err("Failed to queue work to efi_rts_wq.\n"); \ + \ + efi_rts_work.status; \ +}) + void efi_call_virt_check_flags(unsigned long flags, const char *call) { unsigned long cur_flags, mismatch; @@ -90,13 +172,98 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call) */ static DEFINE_SEMAPHORE(efi_runtime_lock); +/* + * Calls the appropriate efi_runtime_service() with the appropriate + * arguments. + * + * Semantics followed by efi_call_rts() to understand efi_runtime_work: + * 1. If argument was a pointer, recast it from void pointer to original + * pointer type. + * 2. If argument was a value, recast it from void pointer to original + * pointer type and dereference it. 
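The comment above describes the argument convention efi_call_rts() relies on: pointer arguments travel through the void * slots unchanged and are cast back, while by-value arguments are passed as the address of the caller's local and dereferenced in the worker. A tiny sketch of that convention with hypothetical names:

#include <linux/types.h>

/* Worker side: arg1 carries a pointer, arg2 carries a value by address. */
static void demo_dispatch(void *arg1, void *arg2)
{
        char *buf = arg1;               /* pointer argument: cast back */
        u32 attr = *(u32 *)arg2;        /* value argument: dereference */

        buf[0] = (char)attr;
}

/* Caller side: by-value arguments are passed as the address of a local. */
static void demo_caller(char *buf, u32 attr)
{
        demo_dispatch(buf, &attr);
}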
+ */ +static void efi_call_rts(struct work_struct *work) +{ + struct efi_runtime_work *efi_rts_work; + void *arg1, *arg2, *arg3, *arg4, *arg5; + efi_status_t status = EFI_NOT_FOUND; + + efi_rts_work = container_of(work, struct efi_runtime_work, work); + arg1 = efi_rts_work->arg1; + arg2 = efi_rts_work->arg2; + arg3 = efi_rts_work->arg3; + arg4 = efi_rts_work->arg4; + arg5 = efi_rts_work->arg5; + + switch (efi_rts_work->efi_rts_id) { + case GET_TIME: + status = efi_call_virt(get_time, (efi_time_t *)arg1, + (efi_time_cap_t *)arg2); + break; + case SET_TIME: + status = efi_call_virt(set_time, (efi_time_t *)arg1); + break; + case GET_WAKEUP_TIME: + status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1, + (efi_bool_t *)arg2, (efi_time_t *)arg3); + break; + case SET_WAKEUP_TIME: + status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1, + (efi_time_t *)arg2); + break; + case GET_VARIABLE: + status = efi_call_virt(get_variable, (efi_char16_t *)arg1, + (efi_guid_t *)arg2, (u32 *)arg3, + (unsigned long *)arg4, (void *)arg5); + break; + case GET_NEXT_VARIABLE: + status = efi_call_virt(get_next_variable, (unsigned long *)arg1, + (efi_char16_t *)arg2, + (efi_guid_t *)arg3); + break; + case SET_VARIABLE: + status = efi_call_virt(set_variable, (efi_char16_t *)arg1, + (efi_guid_t *)arg2, *(u32 *)arg3, + *(unsigned long *)arg4, (void *)arg5); + break; + case QUERY_VARIABLE_INFO: + status = efi_call_virt(query_variable_info, *(u32 *)arg1, + (u64 *)arg2, (u64 *)arg3, (u64 *)arg4); + break; + case GET_NEXT_HIGH_MONO_COUNT: + status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1); + break; + case UPDATE_CAPSULE: + status = efi_call_virt(update_capsule, + (efi_capsule_header_t **)arg1, + *(unsigned long *)arg2, + *(unsigned long *)arg3); + break; + case QUERY_CAPSULE_CAPS: + status = efi_call_virt(query_capsule_caps, + (efi_capsule_header_t **)arg1, + *(unsigned long *)arg2, (u64 *)arg3, + (int *)arg4); + break; + default: + /* + * Ideally, we should never reach here because a caller of this + * function should have put the right efi_runtime_service() + * function identifier into efi_rts_work->efi_rts_id + */ + pr_err("Requested executing invalid EFI Runtime Service.\n"); + } + efi_rts_work->status = status; + complete(&efi_rts_work->efi_rts_comp); +} + static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) { efi_status_t status; if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_call_virt(get_time, tm, tc); + status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL); up(&efi_runtime_lock); return status; } @@ -107,7 +274,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm) if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_call_virt(set_time, tm); + status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL); up(&efi_runtime_lock); return status; } @@ -120,7 +287,8 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled, if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_call_virt(get_wakeup_time, enabled, pending, tm); + status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL, + NULL); up(&efi_runtime_lock); return status; } @@ -131,7 +299,8 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_call_virt(set_wakeup_time, enabled, tm); + status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL, + NULL); up(&efi_runtime_lock); return status; } @@ 
-146,8 +315,8 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name, if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_call_virt(get_variable, name, vendor, attr, data_size, - data); + status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size, + data); up(&efi_runtime_lock); return status; } @@ -160,7 +329,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_call_virt(get_next_variable, name_size, name, vendor); + status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor, + NULL, NULL); up(&efi_runtime_lock); return status; } @@ -175,8 +345,8 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_call_virt(set_variable, name, vendor, attr, data_size, - data); + status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size, + data); up(&efi_runtime_lock); return status; } @@ -210,8 +380,8 @@ static efi_status_t virt_efi_query_variable_info(u32 attr, if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_call_virt(query_variable_info, attr, storage_space, - remaining_space, max_variable_size); + status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space, + remaining_space, max_variable_size, NULL); up(&efi_runtime_lock); return status; } @@ -242,7 +412,8 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count) if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_call_virt(get_next_high_mono_count, count); + status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL, + NULL, NULL); up(&efi_runtime_lock); return status; } @@ -272,7 +443,8 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules, if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_call_virt(update_capsule, capsules, count, sg_list); + status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list, + NULL, NULL); up(&efi_runtime_lock); return status; } @@ -289,8 +461,8 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules, if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_call_virt(query_capsule_caps, capsules, count, max_size, - reset_type); + status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count, + max_size, reset_type, NULL); up(&efi_runtime_lock); return status; } diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index e2232cbcec8b..addd9fecc198 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -25,6 +25,7 @@ struct acpi_gpio_event { struct list_head node; + struct list_head initial_sync_list; acpi_handle handle; unsigned int pin; unsigned int irq; @@ -50,6 +51,9 @@ struct acpi_gpio_chip { struct list_head events; }; +static LIST_HEAD(acpi_gpio_initial_sync_list); +static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock); + static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) { if (!gc->parent) @@ -85,6 +89,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin) return gpiochip_get_desc(chip, pin); } +static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event) +{ + mutex_lock(&acpi_gpio_initial_sync_list_lock); + list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list); + mutex_unlock(&acpi_gpio_initial_sync_list_lock); +} + +static void acpi_gpio_del_from_initial_sync_list(struct 
acpi_gpio_event *event) +{ + mutex_lock(&acpi_gpio_initial_sync_list_lock); + if (!list_empty(&event->initial_sync_list)) + list_del_init(&event->initial_sync_list); + mutex_unlock(&acpi_gpio_initial_sync_list_lock); +} + static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) { struct acpi_gpio_event *event = data; @@ -136,7 +155,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, irq_handler_t handler = NULL; struct gpio_desc *desc; unsigned long irqflags; - int ret, pin, irq; + int ret, pin, irq, value; if (!acpi_gpio_get_irq_resource(ares, &agpio)) return AE_OK; @@ -167,6 +186,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, gpiod_direction_input(desc); + value = gpiod_get_value(desc); + ret = gpiochip_lock_as_irq(chip, pin); if (ret) { dev_err(chip->parent, "Failed to lock GPIO as interrupt\n"); @@ -208,6 +229,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, event->irq = irq; event->pin = pin; event->desc = desc; + INIT_LIST_HEAD(&event->initial_sync_list); ret = request_threaded_irq(event->irq, NULL, handler, irqflags, "ACPI:Event", event); @@ -222,6 +244,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, enable_irq_wake(irq); list_add_tail(&event->node, &acpi_gpio->events); + + /* + * Make sure we trigger the initial state of the IRQ when using RISING + * or FALLING. Note we run the handlers on late_init, the AML code + * may refer to OperationRegions from other (builtin) drivers which + * may be probed after us. + */ + if (handler == acpi_gpio_irq_handler && + (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || + ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))) + acpi_gpio_add_to_initial_sync_list(event); + return AE_OK; fail_free_event: @@ -294,6 +328,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { struct gpio_desc *desc; + acpi_gpio_del_from_initial_sync_list(event); + if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) disable_irq_wake(event->irq); @@ -1158,3 +1194,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) return con_id == NULL; } + +/* Sync the initial state of handlers after all builtin drivers have probed */ +static int acpi_gpio_initial_sync(void) +{ + struct acpi_gpio_event *event, *ep; + + mutex_lock(&acpi_gpio_initial_sync_list_lock); + list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list, + initial_sync_list) { + acpi_evaluate_object(event->handle, NULL, NULL, NULL); + list_del_init(&event->initial_sync_list); + } + mutex_unlock(&acpi_gpio_initial_sync_list_lock); + + return 0; +} +/* We must use _sync so that this runs after the first deferred_probe run */ +late_initcall_sync(acpi_gpio_initial_sync); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 73021b388e12..dd3ff2f2cdce 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -429,6 +429,18 @@ static void adv7511_hpd_work(struct work_struct *work) else status = connector_status_disconnected; + /* + * The bridge resets its registers on unplug. So when we get a plug + * event and we're already supposed to be powered, cycle the bridge to + * restore its state. 
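The comment above notes that the ADV7511 loses its register contents on unplug, so the driver marks its regmap cache dirty and runs the power-on path again on replug. A hedged sketch of the underlying regmap idiom, assuming adv7511_power_on() ends up in regcache_sync() (the helper name below is illustrative):

#include <linux/regmap.h>

/*
 * Restore a device whose hardware registers were reset behind our back.
 * Assumes the regmap was created with a register cache (e.g. REGCACHE_RBTREE).
 */
static int demo_restore_after_reset(struct regmap *map)
{
        /* Every cached register now differs from the reset hardware... */
        regcache_mark_dirty(map);

        /* ...so a sync writes the whole cached state back to the chip. */
        return regcache_sync(map);
}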
+ */ + if (status == connector_status_connected && + adv7511->connector.status == connector_status_disconnected && + adv7511->powered) { + regcache_mark_dirty(adv7511->regmap); + adv7511_power_on(adv7511); + } + if (adv7511->connector.status != status) { adv7511->connector.status = status; if (status == connector_status_disconnected) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 130da5195f3b..81e32199d3ef 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1510,8 +1510,9 @@ int drm_atomic_helper_async_check(struct drm_device *dev, { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; - struct drm_plane *plane; - struct drm_plane_state *old_plane_state, *new_plane_state; + struct drm_plane *plane = NULL; + struct drm_plane_state *old_plane_state = NULL; + struct drm_plane_state *new_plane_state = NULL; const struct drm_plane_helper_funcs *funcs; int i, n_planes = 0; @@ -1527,7 +1528,8 @@ int drm_atomic_helper_async_check(struct drm_device *dev, if (n_planes != 1) return -EINVAL; - if (!new_plane_state->crtc) + if (!new_plane_state->crtc || + old_plane_state->crtc != new_plane_state->crtc) return -EINVAL; funcs = plane->helper_private; diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index 3c4000facb36..f973d287696a 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -372,7 +372,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data, ctx->handle = drm_legacy_ctxbitmap_next(dev); } DRM_DEBUG("%d\n", ctx->handle); - if (ctx->handle == -1) { + if (ctx->handle < 0) { DRM_DEBUG("Not enough free contexts.\n"); /* Should this return -EBUSY instead? */ return -ENOMEM; diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index d638c0fb3418..b54fb78a283c 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -553,7 +553,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, /* Clone the lessor file to create a new file for us */ DRM_DEBUG_LEASE("Allocating lease file\n"); - lessee_file = filp_clone_open(lessor_file); + lessee_file = file_clone_open(lessor_file); if (IS_ERR(lessee_file)) { ret = PTR_ERR(lessee_file); goto out_lessee; diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 1d34619eb3fe..a951ec75d01f 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -320,6 +320,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) vc4_state->x_scaling[0] = VC4_SCALING_TPZ; if (vc4_state->y_scaling[0] == VC4_SCALING_NONE) vc4_state->y_scaling[0] = VC4_SCALING_TPZ; + } else { + vc4_state->x_scaling[1] = VC4_SCALING_NONE; + vc4_state->y_scaling[1] = VC4_SCALING_NONE; } vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index f10840ad465c..ccf42663a908 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -937,6 +937,18 @@ config SENSORS_MCP3021 This driver can also be built as a module. If so, the module will be called mcp3021. +config SENSORS_MLXREG_FAN + tristate "Mellanox Mellanox FAN driver" + depends on MELLANOX_PLATFORM + imply THERMAL + select REGMAP + help + This option enables support for the FAN control on the Mellanox + Ethernet and InfiniBand switches. The driver can be activated by the + platform device add call. Say Y to enable these. 
To compile this + driver as a module, choose 'M' here: the module will be called + mlxreg-fan. + config SENSORS_TC654 tristate "Microchip TC654/TC655 and compatibles" depends on I2C @@ -1256,6 +1268,16 @@ config SENSORS_NCT7904 This driver can also be built as a module. If so, the module will be called nct7904. +config SENSORS_NPCM7XX + tristate "Nuvoton NPCM750 and compatible PWM and Fan controllers" + imply THERMAL + help + This driver provides support for Nuvoton NPCM750/730/715/705 PWM + and Fan controllers. + + This driver can also be built as a module. If so, the module + will be called npcm750-pwm-fan. + config SENSORS_NSA320 tristate "ZyXEL NSA320 and compatible fan speed and temperature sensors" depends on GPIOLIB && OF diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index e7d52a36e6c4..842c92f83ce6 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -129,11 +129,13 @@ obj-$(CONFIG_SENSORS_MAX31790) += max31790.o obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o obj-$(CONFIG_SENSORS_TC654) += tc654.o +obj-$(CONFIG_SENSORS_MLXREG_FAN) += mlxreg-fan.o obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o obj-$(CONFIG_SENSORS_NCT7802) += nct7802.o obj-$(CONFIG_SENSORS_NCT7904) += nct7904.o +obj-$(CONFIG_SENSORS_NPCM7XX) += npcm750-pwm-fan.o obj-$(CONFIG_SENSORS_NSA320) += nsa320-hwmon.o obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o obj-$(CONFIG_SENSORS_PC87360) += pc87360.o diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 9ef84998c7f3..90837f7c7d0f 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -194,8 +194,7 @@ struct adt7475_data { struct mutex lock; unsigned long measure_updated; - unsigned long limits_updated; - char valid; + bool valid; u8 config4; u8 config5; @@ -326,6 +325,9 @@ static ssize_t show_voltage(struct device *dev, struct device_attribute *attr, struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); unsigned short val; + if (IS_ERR(data)) + return PTR_ERR(data); + switch (sattr->nr) { case ALARM: return sprintf(buf, "%d\n", @@ -381,6 +383,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr, struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); int out; + if (IS_ERR(data)) + return PTR_ERR(data); + switch (sattr->nr) { case HYSTERSIS: mutex_lock(&data->lock); @@ -625,6 +630,9 @@ static ssize_t show_point2(struct device *dev, struct device_attribute *attr, struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); int out, val; + if (IS_ERR(data)) + return PTR_ERR(data); + mutex_lock(&data->lock); out = (data->range[sattr->index] >> 4) & 0x0F; val = reg2temp(data, data->temp[AUTOMIN][sattr->index]); @@ -683,6 +691,9 @@ static ssize_t show_tach(struct device *dev, struct device_attribute *attr, struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); int out; + if (IS_ERR(data)) + return PTR_ERR(data); + if (sattr->nr == ALARM) out = (data->alarms >> (sattr->index + 10)) & 1; else @@ -720,6 +731,9 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr, struct adt7475_data *data = adt7475_update_device(dev); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); + if (IS_ERR(data)) + return PTR_ERR(data); + return sprintf(buf, "%d\n", data->pwm[sattr->nr][sattr->index]); } @@ -729,6 +743,9 @@ static ssize_t show_pwmchan(struct device *dev, 
struct device_attribute *attr, struct adt7475_data *data = adt7475_update_device(dev); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); + if (IS_ERR(data)) + return PTR_ERR(data); + return sprintf(buf, "%d\n", data->pwmchan[sattr->index]); } @@ -738,6 +755,9 @@ static ssize_t show_pwmctrl(struct device *dev, struct device_attribute *attr, struct adt7475_data *data = adt7475_update_device(dev); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); + if (IS_ERR(data)) + return PTR_ERR(data); + return sprintf(buf, "%d\n", data->pwmctl[sattr->index]); } @@ -945,6 +965,9 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr, int i = clamp_val(data->range[sattr->index] & 0xf, 0, ARRAY_SIZE(pwmfreq_table) - 1); + if (IS_ERR(data)) + return PTR_ERR(data); + return sprintf(buf, "%d\n", pwmfreq_table[i]); } @@ -1035,6 +1058,10 @@ static ssize_t cpu0_vid_show(struct device *dev, struct device_attribute *devattr, char *buf) { struct adt7475_data *data = adt7475_update_device(dev); + + if (IS_ERR(data)) + return PTR_ERR(data); + return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm)); } @@ -1385,6 +1412,121 @@ static void adt7475_remove_files(struct i2c_client *client, sysfs_remove_group(&client->dev.kobj, &vid_attr_group); } +static int adt7475_update_limits(struct i2c_client *client) +{ + struct adt7475_data *data = i2c_get_clientdata(client); + int i; + int ret; + + ret = adt7475_read(REG_CONFIG4); + if (ret < 0) + return ret; + data->config4 = ret; + + ret = adt7475_read(REG_CONFIG5); + if (ret < 0) + return ret; + data->config5 = ret; + + for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) { + if (!(data->has_voltage & (1 << i))) + continue; + /* Adjust values so they match the input precision */ + ret = adt7475_read(VOLTAGE_MIN_REG(i)); + if (ret < 0) + return ret; + data->voltage[MIN][i] = ret << 2; + + ret = adt7475_read(VOLTAGE_MAX_REG(i)); + if (ret < 0) + return ret; + data->voltage[MAX][i] = ret << 2; + } + + if (data->has_voltage & (1 << 5)) { + ret = adt7475_read(REG_VTT_MIN); + if (ret < 0) + return ret; + data->voltage[MIN][5] = ret << 2; + + ret = adt7475_read(REG_VTT_MAX); + if (ret < 0) + return ret; + data->voltage[MAX][5] = ret << 2; + } + + for (i = 0; i < ADT7475_TEMP_COUNT; i++) { + /* Adjust values so they match the input precision */ + ret = adt7475_read(TEMP_MIN_REG(i)); + if (ret < 0) + return ret; + data->temp[MIN][i] = ret << 2; + + ret = adt7475_read(TEMP_MAX_REG(i)); + if (ret < 0) + return ret; + data->temp[MAX][i] = ret << 2; + + ret = adt7475_read(TEMP_TMIN_REG(i)); + if (ret < 0) + return ret; + data->temp[AUTOMIN][i] = ret << 2; + + ret = adt7475_read(TEMP_THERM_REG(i)); + if (ret < 0) + return ret; + data->temp[THERM][i] = ret << 2; + + ret = adt7475_read(TEMP_OFFSET_REG(i)); + if (ret < 0) + return ret; + data->temp[OFFSET][i] = ret; + } + adt7475_read_hystersis(client); + + for (i = 0; i < ADT7475_TACH_COUNT; i++) { + if (i == 3 && !data->has_fan4) + continue; + ret = adt7475_read_word(client, TACH_MIN_REG(i)); + if (ret < 0) + return ret; + data->tach[MIN][i] = ret; + } + + for (i = 0; i < ADT7475_PWM_COUNT; i++) { + if (i == 1 && !data->has_pwm2) + continue; + ret = adt7475_read(PWM_MAX_REG(i)); + if (ret < 0) + return ret; + data->pwm[MAX][i] = ret; + + ret = adt7475_read(PWM_MIN_REG(i)); + if (ret < 0) + return ret; + data->pwm[MIN][i] = ret; + /* Set the channel and control information */ + adt7475_read_pwm(client, i); + } + + ret = adt7475_read(TEMP_TRANGE_REG(0)); + if (ret < 0) + return ret; 
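The IS_ERR()/PTR_ERR() checks added to the adt7475 show handlers above follow the kernel's ERR_PTR error-propagation idiom, now that adt7475_update_device() can fail. The sketch below is self-contained: the three macros are re-implemented locally so it compiles outside the kernel, and every other name is illustrative.

/*
 * ERR_PTR/IS_ERR/PTR_ERR idiom: the update helper encodes an errno in the
 * returned pointer, and every caller checks it before touching the data.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct demo_data {
        int temp;
};

static struct demo_data demo = { .temp = 42 };

/* Pretend register read: fails when 'fail' is set, like an I2C error. */
static struct demo_data *demo_update_device(int fail)
{
        if (fail)
                return ERR_PTR(-EIO);
        return &demo;
}

/* Analogous to a sysfs show() callback: propagate the error, else use the data. */
static long demo_show(int fail)
{
        struct demo_data *data = demo_update_device(fail);

        if (IS_ERR(data))
                return PTR_ERR(data);
        printf("temp=%d\n", data->temp);
        return 0;
}

int main(void)
{
        printf("ok path: %ld\n", demo_show(0));         /* prints 0 */
        printf("error path: %ld\n", demo_show(1));      /* prints -5 (-EIO) */
        return 0;
}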
+ data->range[0] = ret; + + ret = adt7475_read(TEMP_TRANGE_REG(1)); + if (ret < 0) + return ret; + data->range[1] = ret; + + ret = adt7475_read(TEMP_TRANGE_REG(2)); + if (ret < 0) + return ret; + data->range[2] = ret; + + return 0; +} + static int adt7475_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -1562,6 +1704,11 @@ static int adt7475_probe(struct i2c_client *client, (data->bypass_attn & (1 << 3)) ? " in3" : "", (data->bypass_attn & (1 << 4)) ? " in4" : ""); + /* Limits and settings, should never change update more than once */ + ret = adt7475_update_limits(client); + if (ret) + goto eremove; + return 0; eremove: @@ -1658,121 +1805,122 @@ static void adt7475_read_pwm(struct i2c_client *client, int index) } } -static struct adt7475_data *adt7475_update_device(struct device *dev) +static int adt7475_update_measure(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct adt7475_data *data = i2c_get_clientdata(client); u16 ext; int i; + int ret; - mutex_lock(&data->lock); + ret = adt7475_read(REG_STATUS2); + if (ret < 0) + return ret; + data->alarms = ret << 8; - /* Measurement values update every 2 seconds */ - if (time_after(jiffies, data->measure_updated + HZ * 2) || - !data->valid) { - data->alarms = adt7475_read(REG_STATUS2) << 8; - data->alarms |= adt7475_read(REG_STATUS1); - - ext = (adt7475_read(REG_EXTEND2) << 8) | - adt7475_read(REG_EXTEND1); - for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) { - if (!(data->has_voltage & (1 << i))) - continue; - data->voltage[INPUT][i] = - (adt7475_read(VOLTAGE_REG(i)) << 2) | - ((ext >> (i * 2)) & 3); - } + ret = adt7475_read(REG_STATUS1); + if (ret < 0) + return ret; + data->alarms |= ret; - for (i = 0; i < ADT7475_TEMP_COUNT; i++) - data->temp[INPUT][i] = - (adt7475_read(TEMP_REG(i)) << 2) | - ((ext >> ((i + 5) * 2)) & 3); + ret = adt7475_read(REG_EXTEND2); + if (ret < 0) + return ret; - if (data->has_voltage & (1 << 5)) { - data->alarms |= adt7475_read(REG_STATUS4) << 24; - ext = adt7475_read(REG_EXTEND3); - data->voltage[INPUT][5] = adt7475_read(REG_VTT) << 2 | - ((ext >> 4) & 3); - } + ext = (ret << 8); - for (i = 0; i < ADT7475_TACH_COUNT; i++) { - if (i == 3 && !data->has_fan4) - continue; - data->tach[INPUT][i] = - adt7475_read_word(client, TACH_REG(i)); - } + ret = adt7475_read(REG_EXTEND1); + if (ret < 0) + return ret; - /* Updated by hw when in auto mode */ - for (i = 0; i < ADT7475_PWM_COUNT; i++) { - if (i == 1 && !data->has_pwm2) - continue; - data->pwm[INPUT][i] = adt7475_read(PWM_REG(i)); - } + ext |= ret; + + for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) { + if (!(data->has_voltage & (1 << i))) + continue; + ret = adt7475_read(VOLTAGE_REG(i)); + if (ret < 0) + return ret; + data->voltage[INPUT][i] = + (ret << 2) | + ((ext >> (i * 2)) & 3); + } - if (data->has_vid) - data->vid = adt7475_read(REG_VID) & 0x3f; + for (i = 0; i < ADT7475_TEMP_COUNT; i++) { + ret = adt7475_read(TEMP_REG(i)); + if (ret < 0) + return ret; + data->temp[INPUT][i] = + (ret << 2) | + ((ext >> ((i + 5) * 2)) & 3); + } - data->measure_updated = jiffies; + if (data->has_voltage & (1 << 5)) { + ret = adt7475_read(REG_STATUS4); + if (ret < 0) + return ret; + data->alarms |= ret << 24; + + ret = adt7475_read(REG_EXTEND3); + if (ret < 0) + return ret; + ext = ret; + + ret = adt7475_read(REG_VTT); + if (ret < 0) + return ret; + data->voltage[INPUT][5] = ret << 2 | + ((ext >> 4) & 3); } - /* Limits and settings, should never change update every 60 seconds */ - if (time_after(jiffies, data->limits_updated + HZ * 60) || - 
!data->valid) { - data->config4 = adt7475_read(REG_CONFIG4); - data->config5 = adt7475_read(REG_CONFIG5); - - for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) { - if (!(data->has_voltage & (1 << i))) - continue; - /* Adjust values so they match the input precision */ - data->voltage[MIN][i] = - adt7475_read(VOLTAGE_MIN_REG(i)) << 2; - data->voltage[MAX][i] = - adt7475_read(VOLTAGE_MAX_REG(i)) << 2; - } + for (i = 0; i < ADT7475_TACH_COUNT; i++) { + if (i == 3 && !data->has_fan4) + continue; + ret = adt7475_read_word(client, TACH_REG(i)); + if (ret < 0) + return ret; + data->tach[INPUT][i] = ret; + } - if (data->has_voltage & (1 << 5)) { - data->voltage[MIN][5] = adt7475_read(REG_VTT_MIN) << 2; - data->voltage[MAX][5] = adt7475_read(REG_VTT_MAX) << 2; - } + /* Updated by hw when in auto mode */ + for (i = 0; i < ADT7475_PWM_COUNT; i++) { + if (i == 1 && !data->has_pwm2) + continue; + ret = adt7475_read(PWM_REG(i)); + if (ret < 0) + return ret; + data->pwm[INPUT][i] = ret; + } - for (i = 0; i < ADT7475_TEMP_COUNT; i++) { - /* Adjust values so they match the input precision */ - data->temp[MIN][i] = - adt7475_read(TEMP_MIN_REG(i)) << 2; - data->temp[MAX][i] = - adt7475_read(TEMP_MAX_REG(i)) << 2; - data->temp[AUTOMIN][i] = - adt7475_read(TEMP_TMIN_REG(i)) << 2; - data->temp[THERM][i] = - adt7475_read(TEMP_THERM_REG(i)) << 2; - data->temp[OFFSET][i] = - adt7475_read(TEMP_OFFSET_REG(i)); - } - adt7475_read_hystersis(client); + if (data->has_vid) { + ret = adt7475_read(REG_VID); + if (ret < 0) + return ret; + data->vid = ret & 0x3f; + } - for (i = 0; i < ADT7475_TACH_COUNT; i++) { - if (i == 3 && !data->has_fan4) - continue; - data->tach[MIN][i] = - adt7475_read_word(client, TACH_MIN_REG(i)); - } + return 0; +} - for (i = 0; i < ADT7475_PWM_COUNT; i++) { - if (i == 1 && !data->has_pwm2) - continue; - data->pwm[MAX][i] = adt7475_read(PWM_MAX_REG(i)); - data->pwm[MIN][i] = adt7475_read(PWM_MIN_REG(i)); - /* Set the channel and control information */ - adt7475_read_pwm(client, i); - } +static struct adt7475_data *adt7475_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adt7475_data *data = i2c_get_clientdata(client); + int ret; - data->range[0] = adt7475_read(TEMP_TRANGE_REG(0)); - data->range[1] = adt7475_read(TEMP_TRANGE_REG(1)); - data->range[2] = adt7475_read(TEMP_TRANGE_REG(2)); + mutex_lock(&data->lock); - data->limits_updated = jiffies; - data->valid = 1; + /* Measurement values update every 2 seconds */ + if (time_after(jiffies, data->measure_updated + HZ * 2) || + !data->valid) { + ret = adt7475_update_measure(dev); + if (ret) { + data->valid = false; + mutex_unlock(&data->lock); + return ERR_PTR(ret); + } + data->measure_updated = jiffies; + data->valid = true; } mutex_unlock(&data->lock); diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c index 1ea7ca510f84..aaebeb726d6a 100644 --- a/drivers/hwmon/emc1403.c +++ b/drivers/hwmon/emc1403.c @@ -443,8 +443,10 @@ static int emc1403_probe(struct i2c_client *client, switch (id->driver_data) { case emc1404: data->groups[2] = &emc1404_group; + /* fall through */ case emc1403: data->groups[1] = &emc1403_group; + /* fall through */ case emc1402: data->groups[0] = &emc1402_group; } diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c index 69031a0f7ed2..2f3f875c06ac 100644 --- a/drivers/hwmon/iio_hwmon.c +++ b/drivers/hwmon/iio_hwmon.c @@ -22,7 +22,6 @@ * struct iio_hwmon_state - device instance state * @channels: filled with array of channels from iio * @num_channels: number of 
channels in channels (saves counting twice) - * @hwmon_dev: associated hwmon device * @attr_group: the group of attributes * @groups: null terminated array of attribute groups * @attrs: null terminated array of attribute pointers. @@ -30,7 +29,6 @@ struct iio_hwmon_state { struct iio_channel *channels; int num_channels; - struct device *hwmon_dev; struct attribute_group attr_group; const struct attribute_group *groups[2]; struct attribute **attrs; @@ -68,12 +66,13 @@ static int iio_hwmon_probe(struct platform_device *pdev) enum iio_chan_type type; struct iio_channel *channels; const char *name = "iio_hwmon"; + struct device *hwmon_dev; char *sname; if (dev->of_node && dev->of_node->name) name = dev->of_node->name; - channels = iio_channel_get_all(dev); + channels = devm_iio_channel_get_all(dev); if (IS_ERR(channels)) { if (PTR_ERR(channels) == -ENODEV) return -EPROBE_DEFER; @@ -81,10 +80,8 @@ static int iio_hwmon_probe(struct platform_device *pdev) } st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); - if (st == NULL) { - ret = -ENOMEM; - goto error_release_channels; - } + if (st == NULL) + return -ENOMEM; st->channels = channels; @@ -95,22 +92,18 @@ static int iio_hwmon_probe(struct platform_device *pdev) st->attrs = devm_kcalloc(dev, st->num_channels + 1, sizeof(*st->attrs), GFP_KERNEL); - if (st->attrs == NULL) { - ret = -ENOMEM; - goto error_release_channels; - } + if (st->attrs == NULL) + return -ENOMEM; for (i = 0; i < st->num_channels; i++) { a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL); - if (a == NULL) { - ret = -ENOMEM; - goto error_release_channels; - } + if (a == NULL) + return -ENOMEM; sysfs_attr_init(&a->dev_attr.attr); ret = iio_get_channel_type(&st->channels[i], &type); if (ret < 0) - goto error_release_channels; + return ret; switch (type) { case IIO_VOLTAGE: @@ -134,13 +127,11 @@ static int iio_hwmon_probe(struct platform_device *pdev) humidity_i++); break; default: - ret = -EINVAL; - goto error_release_channels; - } - if (a->dev_attr.attr.name == NULL) { - ret = -ENOMEM; - goto error_release_channels; + return -EINVAL; } + if (a->dev_attr.attr.name == NULL) + return -ENOMEM; + a->dev_attr.show = iio_hwmon_read_val; a->dev_attr.attr.mode = S_IRUGO; a->index = i; @@ -151,34 +142,13 @@ static int iio_hwmon_probe(struct platform_device *pdev) st->groups[0] = &st->attr_group; sname = devm_kstrdup(dev, name, GFP_KERNEL); - if (!sname) { - ret = -ENOMEM; - goto error_release_channels; - } + if (!sname) + return -ENOMEM; strreplace(sname, '-', '_'); - st->hwmon_dev = hwmon_device_register_with_groups(dev, sname, st, - st->groups); - if (IS_ERR(st->hwmon_dev)) { - ret = PTR_ERR(st->hwmon_dev); - goto error_release_channels; - } - platform_set_drvdata(pdev, st); - return 0; - -error_release_channels: - iio_channel_release_all(channels); - return ret; -} - -static int iio_hwmon_remove(struct platform_device *pdev) -{ - struct iio_hwmon_state *st = platform_get_drvdata(pdev); - - hwmon_device_unregister(st->hwmon_dev); - iio_channel_release_all(st->channels); - - return 0; + hwmon_dev = devm_hwmon_device_register_with_groups(dev, sname, st, + st->groups); + return PTR_ERR_OR_ZERO(hwmon_dev); } static const struct of_device_id iio_hwmon_of_match[] = { @@ -193,7 +163,6 @@ static struct platform_driver __refdata iio_hwmon_driver = { .of_match_table = iio_hwmon_of_match, }, .probe = iio_hwmon_probe, - .remove = iio_hwmon_remove, }; module_platform_driver(iio_hwmon_driver); diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 17c6460ae351..bb15d7816a29 100644 --- 
a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -99,12 +99,8 @@ static const struct tctl_offset tctl_offset_table[] = { { 0x17, "AMD Ryzen 7 1700X", 20000 }, { 0x17, "AMD Ryzen 7 1800X", 20000 }, { 0x17, "AMD Ryzen 7 2700X", 10000 }, - { 0x17, "AMD Ryzen Threadripper 1950X", 27000 }, - { 0x17, "AMD Ryzen Threadripper 1920X", 27000 }, - { 0x17, "AMD Ryzen Threadripper 1900X", 27000 }, - { 0x17, "AMD Ryzen Threadripper 1950", 10000 }, - { 0x17, "AMD Ryzen Threadripper 1920", 10000 }, - { 0x17, "AMD Ryzen Threadripper 1910", 10000 }, + { 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */ + { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */ }; static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval) diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c new file mode 100644 index 000000000000..de46577c7d5a --- /dev/null +++ b/drivers/hwmon/mlxreg-fan.c @@ -0,0 +1,489 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +// +// Copyright (c) 2018 Mellanox Technologies. All rights reserved. +// Copyright (c) 2018 Vadim Pasternak <vadimp@mellanox.com> + +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/module.h> +#include <linux/platform_data/mlxreg.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/thermal.h> + +#define MLXREG_FAN_MAX_TACHO 12 +#define MLXREG_FAN_MAX_STATE 10 +#define MLXREG_FAN_MIN_DUTY 51 /* 20% */ +#define MLXREG_FAN_MAX_DUTY 255 /* 100% */ +/* + * Minimum and maximum FAN allowed speed in percent: from 20% to 100%. Values + * MLXREG_FAN_MAX_STATE + x, where x is between 2 and 10 are used for + * setting FAN speed dynamic minimum. For example, if value is set to 14 (40%) + * cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10 to + * introduce PWM speed in percent: 40, 40, 40, 40, 40, 50, 60. 70, 80, 90, 100. + */ +#define MLXREG_FAN_SPEED_MIN (MLXREG_FAN_MAX_STATE + 2) +#define MLXREG_FAN_SPEED_MAX (MLXREG_FAN_MAX_STATE * 2) +#define MLXREG_FAN_SPEED_MIN_LEVEL 2 /* 20 percent */ +#define MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF 44 +#define MLXREG_FAN_TACHO_DIVIDER_DEF 1132 +/* + * FAN datasheet defines the formula for RPM calculations as RPM = 15/t-high. + * The logic in a programmable device measures the time t-high by sampling the + * tachometer every t-sample (with the default value 11.32 uS) and increment + * a counter (N) as long as the pulse has not change: + * RPM = 15 / (t-sample * (K + Regval)), where: + * Regval: is the value read from the programmable device register; + * - 0xff - represents tachometer fault; + * - 0xfe - represents tachometer minimum value , which is 4444 RPM; + * - 0x00 - represents tachometer maximum value , which is 300000 RPM; + * K: is 44 and it represents the minimum allowed samples per pulse; + * N: is equal K + Regval; + * In order to calculate RPM from the register value the following formula is + * used: RPM = 15 / ((Regval + K) * 11.32) * 10^(-6)), which in the + * default case is modified to: + * RPM = 15000000 * 100 / ((Regval + 44) * 1132); + * - for Regval 0x00, RPM will be 15000000 * 100 / (44 * 1132) = 30115; + * - for Regval 0xfe, RPM will be 15000000 * 100 / ((254 + 44) * 1132) = 4446; + * In common case the formula is modified to: + * RPM = 15000000 * 100 / ((Regval + samples) * divider). 
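The formula above reduces to plain integer arithmetic. The standalone sketch below reproduces the two reference points quoted in the comment (Regval 0x00 -> 30115 RPM, Regval 0xfe -> 4446 RPM) with the default samples = 44 and divider = 1132; it uses truncating division to match those quoted figures, whereas the driver's MLXREG_FAN_GET_RPM macro defined just below uses DIV_ROUND_CLOSEST and may differ by one. The helper name is made up for the example.

/* Worked example of: RPM = 15000000 * 100 / ((Regval + samples) * divider) */
#include <stdio.h>

static unsigned int regval_to_rpm(unsigned int regval,
                                  unsigned int divider,
                                  unsigned int samples)
{
        return (15000000u * 100u) / ((regval + samples) * divider);
}

int main(void)
{
        printf("0x00 -> %u RPM\n", regval_to_rpm(0x00, 1132, 44));      /* 30115 */
        printf("0xfe -> %u RPM\n", regval_to_rpm(0xfe, 1132, 44));      /* 4446 */
        return 0;
}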
+ */ +#define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \ + ((rval) + (s)) * (d))) +#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask))) +#define MLXREG_FAN_PWM_DUTY2STATE(duty) (DIV_ROUND_CLOSEST((duty) * \ + MLXREG_FAN_MAX_STATE, \ + MLXREG_FAN_MAX_DUTY)) +#define MLXREG_FAN_PWM_STATE2DUTY(stat) (DIV_ROUND_CLOSEST((stat) * \ + MLXREG_FAN_MAX_DUTY, \ + MLXREG_FAN_MAX_STATE)) + +/* + * struct mlxreg_fan_tacho - tachometer data (internal use): + * + * @connected: indicates if tachometer is connected; + * @reg: register offset; + * @mask: fault mask; + */ +struct mlxreg_fan_tacho { + bool connected; + u32 reg; + u32 mask; +}; + +/* + * struct mlxreg_fan_pwm - PWM data (internal use): + * + * @connected: indicates if PWM is connected; + * @reg: register offset; + */ +struct mlxreg_fan_pwm { + bool connected; + u32 reg; +}; + +/* + * struct mlxreg_fan - private data (internal use): + * + * @dev: basic device; + * @regmap: register map of parent device; + * @tacho: tachometer data; + * @pwm: PWM data; + * @samples: minimum allowed samples per pulse; + * @divider: divider value for tachometer RPM calculation; + * @cooling: cooling device levels; + * @cdev: cooling device; + */ +struct mlxreg_fan { + struct device *dev; + void *regmap; + struct mlxreg_core_platform_data *pdata; + struct mlxreg_fan_tacho tacho[MLXREG_FAN_MAX_TACHO]; + struct mlxreg_fan_pwm pwm; + int samples; + int divider; + u8 cooling_levels[MLXREG_FAN_MAX_STATE + 1]; + struct thermal_cooling_device *cdev; +}; + +static int +mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, long *val) +{ + struct mlxreg_fan *fan = dev_get_drvdata(dev); + struct mlxreg_fan_tacho *tacho; + u32 regval; + int err; + + switch (type) { + case hwmon_fan: + tacho = &fan->tacho[channel]; + switch (attr) { + case hwmon_fan_input: + err = regmap_read(fan->regmap, tacho->reg, ®val); + if (err) + return err; + + *val = MLXREG_FAN_GET_RPM(regval, fan->divider, + fan->samples); + break; + + case hwmon_fan_fault: + err = regmap_read(fan->regmap, tacho->reg, ®val); + if (err) + return err; + + *val = MLXREG_FAN_GET_FAULT(regval, tacho->mask); + break; + + default: + return -EOPNOTSUPP; + } + break; + + case hwmon_pwm: + switch (attr) { + case hwmon_pwm_input: + err = regmap_read(fan->regmap, fan->pwm.reg, ®val); + if (err) + return err; + + *val = regval; + break; + + default: + return -EOPNOTSUPP; + } + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int +mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, long val) +{ + struct mlxreg_fan *fan = dev_get_drvdata(dev); + + switch (type) { + case hwmon_pwm: + switch (attr) { + case hwmon_pwm_input: + if (val < MLXREG_FAN_MIN_DUTY || + val > MLXREG_FAN_MAX_DUTY) + return -EINVAL; + return regmap_write(fan->regmap, fan->pwm.reg, val); + default: + return -EOPNOTSUPP; + } + break; + + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static umode_t +mlxreg_fan_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, + int channel) +{ + switch (type) { + case hwmon_fan: + if (!(((struct mlxreg_fan *)data)->tacho[channel].connected)) + return 0; + + switch (attr) { + case hwmon_fan_input: + case hwmon_fan_fault: + return 0444; + default: + break; + } + break; + + case hwmon_pwm: + if (!(((struct mlxreg_fan *)data)->pwm.connected)) + return 0; + + switch (attr) { + case hwmon_pwm_input: + return 0644; + default: + break; + } + break; + + default: + break; + 
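The duty-cycle/cooling-state mapping used by the macros above (10 states over a 0..255 duty range, with 51 = 20% as the lowest useful duty) can be checked with a few lines of standalone C; MAX_STATE, MAX_DUTY and the function names below are local stand-ins mirroring those macros.

#include <stdio.h>

#define MAX_STATE       10
#define MAX_DUTY        255

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static unsigned int duty_to_state(unsigned int duty)
{
        return DIV_ROUND_CLOSEST(duty * MAX_STATE, MAX_DUTY);
}

static unsigned int state_to_duty(unsigned int state)
{
        return DIV_ROUND_CLOSEST(state * MAX_DUTY, MAX_STATE);
}

int main(void)
{
        printf("state 2  -> duty %u\n", state_to_duty(2));      /* 51  (20%)  */
        printf("state 10 -> duty %u\n", state_to_duty(10));     /* 255 (100%) */
        printf("duty 128 -> state %u\n", duty_to_state(128));   /* 5          */
        return 0;
}

Note that states 12..20 (MLXREG_FAN_SPEED_MIN..MLXREG_FAN_SPEED_MAX) are not direct speeds at all; mlxreg_fan_set_cur_state() further down treats them as a request to raise the floor of the cooling_levels[] table so the fan is never driven below that percentage.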
} + + return 0; +} + +static const u32 mlxreg_fan_hwmon_fan_config[] = { + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT, + 0 +}; + +static const struct hwmon_channel_info mlxreg_fan_hwmon_fan = { + .type = hwmon_fan, + .config = mlxreg_fan_hwmon_fan_config, +}; + +static const u32 mlxreg_fan_hwmon_pwm_config[] = { + HWMON_PWM_INPUT, + 0 +}; + +static const struct hwmon_channel_info mlxreg_fan_hwmon_pwm = { + .type = hwmon_pwm, + .config = mlxreg_fan_hwmon_pwm_config, +}; + +static const struct hwmon_channel_info *mlxreg_fan_hwmon_info[] = { + &mlxreg_fan_hwmon_fan, + &mlxreg_fan_hwmon_pwm, + NULL +}; + +static const struct hwmon_ops mlxreg_fan_hwmon_hwmon_ops = { + .is_visible = mlxreg_fan_is_visible, + .read = mlxreg_fan_read, + .write = mlxreg_fan_write, +}; + +static const struct hwmon_chip_info mlxreg_fan_hwmon_chip_info = { + .ops = &mlxreg_fan_hwmon_hwmon_ops, + .info = mlxreg_fan_hwmon_info, +}; + +static int mlxreg_fan_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + *state = MLXREG_FAN_MAX_STATE; + return 0; +} + +static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) + +{ + struct mlxreg_fan *fan = cdev->devdata; + u32 regval; + int err; + + err = regmap_read(fan->regmap, fan->pwm.reg, ®val); + if (err) { + dev_err(fan->dev, "Failed to query PWM duty\n"); + return err; + } + + *state = MLXREG_FAN_PWM_DUTY2STATE(regval); + + return 0; +} + +static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state) + +{ + struct mlxreg_fan *fan = cdev->devdata; + unsigned long cur_state; + u32 regval; + int i; + int err; + + /* + * Verify if this request is for changing allowed FAN dynamical + * minimum. If it is - update cooling levels accordingly and update + * state, if current state is below the newly requested minimum state. + * For example, if current state is 5, and minimal state is to be + * changed from 4 to 6, fan->cooling_levels[0 to 5] will be changed all + * from 4 to 6. And state 5 (fan->cooling_levels[4]) should be + * overwritten. + */ + if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) { + state -= MLXREG_FAN_MAX_STATE; + for (i = 0; i < state; i++) + fan->cooling_levels[i] = state; + for (i = state; i <= MLXREG_FAN_MAX_STATE; i++) + fan->cooling_levels[i] = i; + + err = regmap_read(fan->regmap, fan->pwm.reg, ®val); + if (err) { + dev_err(fan->dev, "Failed to query PWM duty\n"); + return err; + } + + cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval); + if (state < cur_state) + return 0; + + state = cur_state; + } + + if (state > MLXREG_FAN_MAX_STATE) + return -EINVAL; + + /* Normalize the state to the valid speed range. 
*/ + state = fan->cooling_levels[state]; + err = regmap_write(fan->regmap, fan->pwm.reg, + MLXREG_FAN_PWM_STATE2DUTY(state)); + if (err) { + dev_err(fan->dev, "Failed to write PWM duty\n"); + return err; + } + return 0; +} + +static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = { + .get_max_state = mlxreg_fan_get_max_state, + .get_cur_state = mlxreg_fan_get_cur_state, + .set_cur_state = mlxreg_fan_set_cur_state, +}; + +static int mlxreg_fan_config(struct mlxreg_fan *fan, + struct mlxreg_core_platform_data *pdata) +{ + struct mlxreg_core_data *data = pdata->data; + bool configured = false; + int tacho_num = 0, i; + + fan->samples = MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF; + fan->divider = MLXREG_FAN_TACHO_DIVIDER_DEF; + for (i = 0; i < pdata->counter; i++, data++) { + if (strnstr(data->label, "tacho", sizeof(data->label))) { + if (tacho_num == MLXREG_FAN_MAX_TACHO) { + dev_err(fan->dev, "too many tacho entries: %s\n", + data->label); + return -EINVAL; + } + fan->tacho[tacho_num].reg = data->reg; + fan->tacho[tacho_num].mask = data->mask; + fan->tacho[tacho_num++].connected = true; + } else if (strnstr(data->label, "pwm", sizeof(data->label))) { + if (fan->pwm.connected) { + dev_err(fan->dev, "duplicate pwm entry: %s\n", + data->label); + return -EINVAL; + } + fan->pwm.reg = data->reg; + fan->pwm.connected = true; + } else if (strnstr(data->label, "conf", sizeof(data->label))) { + if (configured) { + dev_err(fan->dev, "duplicate conf entry: %s\n", + data->label); + return -EINVAL; + } + /* Validate that conf parameters are not zeros. */ + if (!data->mask || !data->bit) { + dev_err(fan->dev, "invalid conf entry params: %s\n", + data->label); + return -EINVAL; + } + fan->samples = data->mask; + fan->divider = data->bit; + configured = true; + } else { + dev_err(fan->dev, "invalid label: %s\n", data->label); + return -EINVAL; + } + } + + /* Init cooling levels per PWM state. 
*/ + for (i = 0; i < MLXREG_FAN_SPEED_MIN_LEVEL; i++) + fan->cooling_levels[i] = MLXREG_FAN_SPEED_MIN_LEVEL; + for (i = MLXREG_FAN_SPEED_MIN_LEVEL; i <= MLXREG_FAN_MAX_STATE; i++) + fan->cooling_levels[i] = i; + + return 0; +} + +static int mlxreg_fan_probe(struct platform_device *pdev) +{ + struct mlxreg_core_platform_data *pdata; + struct mlxreg_fan *fan; + struct device *hwm; + int err; + + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) { + dev_err(&pdev->dev, "Failed to get platform data.\n"); + return -EINVAL; + } + + fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL); + if (!fan) + return -ENOMEM; + + fan->dev = &pdev->dev; + fan->regmap = pdata->regmap; + platform_set_drvdata(pdev, fan); + + err = mlxreg_fan_config(fan, pdata); + if (err) + return err; + + hwm = devm_hwmon_device_register_with_info(&pdev->dev, "mlxreg_fan", + fan, + &mlxreg_fan_hwmon_chip_info, + NULL); + if (IS_ERR(hwm)) { + dev_err(&pdev->dev, "Failed to register hwmon device\n"); + return PTR_ERR(hwm); + } + + if (IS_REACHABLE(CONFIG_THERMAL)) { + fan->cdev = thermal_cooling_device_register("mlxreg_fan", fan, + &mlxreg_fan_cooling_ops); + if (IS_ERR(fan->cdev)) { + dev_err(&pdev->dev, "Failed to register cooling device\n"); + return PTR_ERR(fan->cdev); + } + } + + return 0; +} + +static int mlxreg_fan_remove(struct platform_device *pdev) +{ + struct mlxreg_fan *fan = platform_get_drvdata(pdev); + + if (IS_REACHABLE(CONFIG_THERMAL)) + thermal_cooling_device_unregister(fan->cdev); + + return 0; +} + +static struct platform_driver mlxreg_fan_driver = { + .driver = { + .name = "mlxreg-fan", + }, + .probe = mlxreg_fan_probe, + .remove = mlxreg_fan_remove, +}; + +module_platform_driver(mlxreg_fan_driver); + +MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>"); +MODULE_DESCRIPTION("Mellanox FAN driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mlxreg-fan"); diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index f9d1349c3286..c6bd61e4695a 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -1050,8 +1050,8 @@ struct nct6775_data { u64 beeps; u8 pwm_num; /* number of pwm */ - u8 pwm_mode[NUM_FAN]; /* 1->DC variable voltage, - * 0->PWM variable duty cycle + u8 pwm_mode[NUM_FAN]; /* 0->DC variable voltage, + * 1->PWM variable duty cycle */ enum pwm_enable pwm_enable[NUM_FAN]; /* 0->off @@ -2541,7 +2541,7 @@ static void pwm_update_registers(struct nct6775_data *data, int nr) case thermal_cruise: nct6775_write_value(data, data->REG_TARGET[nr], data->target_temp[nr]); - /* intentional */ + /* fall through */ default: reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]); reg = (reg & ~data->tolerance_mask) | diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c index 95a68ab175c7..7815ddf149f6 100644 --- a/drivers/hwmon/nct7904.c +++ b/drivers/hwmon/nct7904.c @@ -77,7 +77,7 @@ struct nct7904_data { }; /* Access functions */ -static int nct7904_bank_lock(struct nct7904_data *data, unsigned bank) +static int nct7904_bank_lock(struct nct7904_data *data, unsigned int bank) { int ret; @@ -99,7 +99,7 @@ static inline void nct7904_bank_release(struct nct7904_data *data) /* Read 1-byte register. Returns unsigned reg or -ERRNO on error. */ static int nct7904_read_reg(struct nct7904_data *data, - unsigned bank, unsigned reg) + unsigned int bank, unsigned int reg) { struct i2c_client *client = data->client; int ret; @@ -117,7 +117,7 @@ static int nct7904_read_reg(struct nct7904_data *data, * -ERRNO on error. 
*/ static int nct7904_read_reg16(struct nct7904_data *data, - unsigned bank, unsigned reg) + unsigned int bank, unsigned int reg) { struct i2c_client *client = data->client; int ret, hi; @@ -139,7 +139,7 @@ static int nct7904_read_reg16(struct nct7904_data *data, /* Write 1-byte register. Returns 0 or -ERRNO on error. */ static int nct7904_write_reg(struct nct7904_data *data, - unsigned bank, unsigned reg, u8 val) + unsigned int bank, unsigned int reg, u8 val) { struct i2c_client *client = data->client; int ret; @@ -159,7 +159,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel, unsigned int cnt, rpm; int ret; - switch(attr) { + switch (attr) { case hwmon_fan_input: ret = nct7904_read_reg16(data, BANK_0, FANIN1_HV_REG + channel * 2); @@ -200,7 +200,7 @@ static int nct7904_read_in(struct device *dev, u32 attr, int channel, index = nct7904_chan_to_index[channel]; - switch(attr) { + switch (attr) { case hwmon_in_input: ret = nct7904_read_reg16(data, BANK_0, VSEN1_HV_REG + index * 2); @@ -236,7 +236,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel, struct nct7904_data *data = dev_get_drvdata(dev); int ret, temp; - switch(attr) { + switch (attr) { case hwmon_temp_input: if (channel == 0) ret = nct7904_read_reg16(data, BANK_0, LTD_HV_REG); @@ -276,7 +276,7 @@ static int nct7904_read_pwm(struct device *dev, u32 attr, int channel, struct nct7904_data *data = dev_get_drvdata(dev); int ret; - switch(attr) { + switch (attr) { case hwmon_pwm_input: ret = nct7904_read_reg(data, BANK_3, FANCTL1_OUT_REG + channel); if (ret < 0) @@ -301,7 +301,7 @@ static int nct7904_write_pwm(struct device *dev, u32 attr, int channel, struct nct7904_data *data = dev_get_drvdata(dev); int ret; - switch(attr) { + switch (attr) { case hwmon_pwm_input: if (val < 0 || val > 255) return -EINVAL; @@ -322,7 +322,7 @@ static int nct7904_write_pwm(struct device *dev, u32 attr, int channel, static umode_t nct7904_pwm_is_visible(const void *_data, u32 attr, int channel) { - switch(attr) { + switch (attr) { case hwmon_pwm_input: case hwmon_pwm_enable: return S_IRUGO | S_IWUSR; @@ -431,15 +431,15 @@ static const struct hwmon_channel_info nct7904_in = { }; static const u32 nct7904_fan_config[] = { - HWMON_F_INPUT, - HWMON_F_INPUT, - HWMON_F_INPUT, - HWMON_F_INPUT, - HWMON_F_INPUT, - HWMON_F_INPUT, - HWMON_F_INPUT, - HWMON_F_INPUT, - 0 + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + 0 }; static const struct hwmon_channel_info nct7904_fan = { @@ -448,11 +448,11 @@ static const struct hwmon_channel_info nct7904_fan = { }; static const u32 nct7904_pwm_config[] = { - HWMON_PWM_INPUT | HWMON_PWM_ENABLE, - HWMON_PWM_INPUT | HWMON_PWM_ENABLE, - HWMON_PWM_INPUT | HWMON_PWM_ENABLE, - HWMON_PWM_INPUT | HWMON_PWM_ENABLE, - 0 + HWMON_PWM_INPUT | HWMON_PWM_ENABLE, + HWMON_PWM_INPUT | HWMON_PWM_ENABLE, + HWMON_PWM_INPUT | HWMON_PWM_ENABLE, + HWMON_PWM_INPUT | HWMON_PWM_ENABLE, + 0 }; static const struct hwmon_channel_info nct7904_pwm = { @@ -461,16 +461,16 @@ static const struct hwmon_channel_info nct7904_pwm = { }; static const u32 nct7904_temp_config[] = { - HWMON_T_INPUT, - HWMON_T_INPUT, - HWMON_T_INPUT, - HWMON_T_INPUT, - HWMON_T_INPUT, - HWMON_T_INPUT, - HWMON_T_INPUT, - HWMON_T_INPUT, - HWMON_T_INPUT, - 0 + HWMON_T_INPUT, + HWMON_T_INPUT, + HWMON_T_INPUT, + HWMON_T_INPUT, + HWMON_T_INPUT, + HWMON_T_INPUT, + HWMON_T_INPUT, + HWMON_T_INPUT, + HWMON_T_INPUT, + 0 }; static const struct hwmon_channel_info 
nct7904_temp = { diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c new file mode 100644 index 000000000000..8474d601aa63 --- /dev/null +++ b/drivers/hwmon/npcm750-pwm-fan.c @@ -0,0 +1,1057 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2014-2018 Nuvoton Technology corporation. + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/sysfs.h> +#include <linux/thermal.h> + +/* NPCM7XX PWM registers */ +#define NPCM7XX_PWM_REG_BASE(base, n) ((base) + ((n) * 0x1000L)) + +#define NPCM7XX_PWM_REG_PR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x00) +#define NPCM7XX_PWM_REG_CSR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x04) +#define NPCM7XX_PWM_REG_CR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x08) +#define NPCM7XX_PWM_REG_CNRx(base, n, ch) \ + (NPCM7XX_PWM_REG_BASE(base, n) + 0x0C + (12 * (ch))) +#define NPCM7XX_PWM_REG_CMRx(base, n, ch) \ + (NPCM7XX_PWM_REG_BASE(base, n) + 0x10 + (12 * (ch))) +#define NPCM7XX_PWM_REG_PDRx(base, n, ch) \ + (NPCM7XX_PWM_REG_BASE(base, n) + 0x14 + (12 * (ch))) +#define NPCM7XX_PWM_REG_PIER(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x3C) +#define NPCM7XX_PWM_REG_PIIR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x40) + +#define NPCM7XX_PWM_CTRL_CH0_MODE_BIT BIT(3) +#define NPCM7XX_PWM_CTRL_CH1_MODE_BIT BIT(11) +#define NPCM7XX_PWM_CTRL_CH2_MODE_BIT BIT(15) +#define NPCM7XX_PWM_CTRL_CH3_MODE_BIT BIT(19) + +#define NPCM7XX_PWM_CTRL_CH0_INV_BIT BIT(2) +#define NPCM7XX_PWM_CTRL_CH1_INV_BIT BIT(10) +#define NPCM7XX_PWM_CTRL_CH2_INV_BIT BIT(14) +#define NPCM7XX_PWM_CTRL_CH3_INV_BIT BIT(18) + +#define NPCM7XX_PWM_CTRL_CH0_EN_BIT BIT(0) +#define NPCM7XX_PWM_CTRL_CH1_EN_BIT BIT(8) +#define NPCM7XX_PWM_CTRL_CH2_EN_BIT BIT(12) +#define NPCM7XX_PWM_CTRL_CH3_EN_BIT BIT(16) + +/* Define the maximum PWM channel number */ +#define NPCM7XX_PWM_MAX_CHN_NUM 8 +#define NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE 4 +#define NPCM7XX_PWM_MAX_MODULES 2 + +/* Define the Counter Register, value = 100 for match 100% */ +#define NPCM7XX_PWM_COUNTER_DEFAULT_NUM 255 +#define NPCM7XX_PWM_CMR_DEFAULT_NUM 127 +#define NPCM7XX_PWM_CMR_MAX 255 + +/* default all PWM channels PRESCALE2 = 1 */ +#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH0 0x4 +#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH1 0x40 +#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH2 0x400 +#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH3 0x4000 + +#define PWM_OUTPUT_FREQ_25KHZ 25000 +#define PWN_CNT_DEFAULT 256 +#define MIN_PRESCALE1 2 +#define NPCM7XX_PWM_PRESCALE_SHIFT_CH01 8 + +#define NPCM7XX_PWM_PRESCALE2_DEFAULT (NPCM7XX_PWM_PRESCALE2_DEFAULT_CH0 | \ + NPCM7XX_PWM_PRESCALE2_DEFAULT_CH1 | \ + NPCM7XX_PWM_PRESCALE2_DEFAULT_CH2 | \ + NPCM7XX_PWM_PRESCALE2_DEFAULT_CH3) + +#define NPCM7XX_PWM_CTRL_MODE_DEFAULT (NPCM7XX_PWM_CTRL_CH0_MODE_BIT | \ + NPCM7XX_PWM_CTRL_CH1_MODE_BIT | \ + NPCM7XX_PWM_CTRL_CH2_MODE_BIT | \ + NPCM7XX_PWM_CTRL_CH3_MODE_BIT) + +/* NPCM7XX FAN Tacho registers */ +#define NPCM7XX_FAN_REG_BASE(base, n) ((base) + ((n) * 0x1000L)) + +#define NPCM7XX_FAN_REG_TCNT1(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x00) +#define NPCM7XX_FAN_REG_TCRA(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x02) +#define NPCM7XX_FAN_REG_TCRB(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x04) +#define NPCM7XX_FAN_REG_TCNT2(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x06) 
+#define NPCM7XX_FAN_REG_TPRSC(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x08) +#define NPCM7XX_FAN_REG_TCKC(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0A) +#define NPCM7XX_FAN_REG_TMCTRL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0C) +#define NPCM7XX_FAN_REG_TICTRL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0E) +#define NPCM7XX_FAN_REG_TICLR(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x10) +#define NPCM7XX_FAN_REG_TIEN(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x12) +#define NPCM7XX_FAN_REG_TCPA(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x14) +#define NPCM7XX_FAN_REG_TCPB(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x16) +#define NPCM7XX_FAN_REG_TCPCFG(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x18) +#define NPCM7XX_FAN_REG_TINASEL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x1A) +#define NPCM7XX_FAN_REG_TINBSEL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x1C) + +#define NPCM7XX_FAN_TCKC_CLKX_NONE 0 +#define NPCM7XX_FAN_TCKC_CLK1_APB BIT(0) +#define NPCM7XX_FAN_TCKC_CLK2_APB BIT(3) + +#define NPCM7XX_FAN_TMCTRL_TBEN BIT(6) +#define NPCM7XX_FAN_TMCTRL_TAEN BIT(5) +#define NPCM7XX_FAN_TMCTRL_TBEDG BIT(4) +#define NPCM7XX_FAN_TMCTRL_TAEDG BIT(3) +#define NPCM7XX_FAN_TMCTRL_MODE_5 BIT(2) + +#define NPCM7XX_FAN_TICLR_CLEAR_ALL GENMASK(5, 0) +#define NPCM7XX_FAN_TICLR_TFCLR BIT(5) +#define NPCM7XX_FAN_TICLR_TECLR BIT(4) +#define NPCM7XX_FAN_TICLR_TDCLR BIT(3) +#define NPCM7XX_FAN_TICLR_TCCLR BIT(2) +#define NPCM7XX_FAN_TICLR_TBCLR BIT(1) +#define NPCM7XX_FAN_TICLR_TACLR BIT(0) + +#define NPCM7XX_FAN_TIEN_ENABLE_ALL GENMASK(5, 0) +#define NPCM7XX_FAN_TIEN_TFIEN BIT(5) +#define NPCM7XX_FAN_TIEN_TEIEN BIT(4) +#define NPCM7XX_FAN_TIEN_TDIEN BIT(3) +#define NPCM7XX_FAN_TIEN_TCIEN BIT(2) +#define NPCM7XX_FAN_TIEN_TBIEN BIT(1) +#define NPCM7XX_FAN_TIEN_TAIEN BIT(0) + +#define NPCM7XX_FAN_TICTRL_TFPND BIT(5) +#define NPCM7XX_FAN_TICTRL_TEPND BIT(4) +#define NPCM7XX_FAN_TICTRL_TDPND BIT(3) +#define NPCM7XX_FAN_TICTRL_TCPND BIT(2) +#define NPCM7XX_FAN_TICTRL_TBPND BIT(1) +#define NPCM7XX_FAN_TICTRL_TAPND BIT(0) + +#define NPCM7XX_FAN_TCPCFG_HIBEN BIT(7) +#define NPCM7XX_FAN_TCPCFG_EQBEN BIT(6) +#define NPCM7XX_FAN_TCPCFG_LOBEN BIT(5) +#define NPCM7XX_FAN_TCPCFG_CPBSEL BIT(4) +#define NPCM7XX_FAN_TCPCFG_HIAEN BIT(3) +#define NPCM7XX_FAN_TCPCFG_EQAEN BIT(2) +#define NPCM7XX_FAN_TCPCFG_LOAEN BIT(1) +#define NPCM7XX_FAN_TCPCFG_CPASEL BIT(0) + +/* FAN General Definition */ +/* Define the maximum FAN channel number */ +#define NPCM7XX_FAN_MAX_MODULE 8 +#define NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE 2 +#define NPCM7XX_FAN_MAX_CHN_NUM 16 + +/* + * Get Fan Tach Timeout (base on clock 214843.75Hz, 1 cnt = 4.654us) + * Timeout 94ms ~= 0x5000 + * (The minimum FAN speed could to support ~640RPM/pulse 1, + * 320RPM/pulse 2, ...-- 10.6Hz) + */ +#define NPCM7XX_FAN_TIMEOUT 0x5000 +#define NPCM7XX_FAN_TCNT 0xFFFF +#define NPCM7XX_FAN_TCPA (NPCM7XX_FAN_TCNT - NPCM7XX_FAN_TIMEOUT) +#define NPCM7XX_FAN_TCPB (NPCM7XX_FAN_TCNT - NPCM7XX_FAN_TIMEOUT) + +#define NPCM7XX_FAN_POLL_TIMER_200MS 200 +#define NPCM7XX_FAN_DEFAULT_PULSE_PER_REVOLUTION 2 +#define NPCM7XX_FAN_TINASEL_FANIN_DEFAULT 0 +#define NPCM7XX_FAN_CLK_PRESCALE 255 + +#define NPCM7XX_FAN_CMPA 0 +#define NPCM7XX_FAN_CMPB 1 + +/* Obtain the fan number */ +#define NPCM7XX_FAN_INPUT(fan, cmp) (((fan) << 1) + (cmp)) + +/* fan sample status */ +#define FAN_DISABLE 0xFF +#define FAN_INIT 0x00 +#define FAN_PREPARE_TO_GET_FIRST_CAPTURE 0x01 +#define FAN_ENOUGH_SAMPLE 0x02 + +struct npcm7xx_fan_dev { + u8 fan_st_flg; + u8 fan_pls_per_rev; + u16 fan_cnt; + u32 fan_cnt_tmp; +}; + 
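A standalone sketch of the tachometer arithmetic this driver is built around: the capture counter runs at APB / (prescale + 1), and npcm7xx_read_fan() further down converts a captured count to RPM as input_clk * 60 / (count * pulses_per_revolution). The 55 MHz APB figure is only inferred from the 214843.75 Hz value in the comment above (55 MHz / 256) and is an assumption for the example.

#include <stdio.h>

#define APB_CLK_HZ      55000000UL      /* assumed, see note above */
#define CLK_PRESCALE    255
#define PULSES_PER_REV  2
#define TACH_TIMEOUT    0x5000          /* counts; ~94 ms at this clock */

static unsigned long count_to_rpm(unsigned long count)
{
        unsigned long input_clk = APB_CLK_HZ / (CLK_PRESCALE + 1);

        if (count == 0)
                return 0;
        return input_clk * 60UL / (count * PULSES_PER_REV);
}

int main(void)
{
        /* A count at the timeout threshold is roughly the slowest measurable fan:
         * ~314 RPM with 2 pulses/rev; the driver comment quotes ~320. */
        printf("count 0x5000 -> %lu RPM\n", count_to_rpm(TACH_TIMEOUT));
        printf("count 0x0800 -> %lu RPM\n", count_to_rpm(0x0800));      /* ~3147 RPM */
        return 0;
}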
+struct npcm7xx_cooling_device { + char name[THERMAL_NAME_LENGTH]; + struct npcm7xx_pwm_fan_data *data; + struct thermal_cooling_device *tcdev; + int pwm_port; + u8 *cooling_levels; + u8 max_state; + u8 cur_state; +}; + +struct npcm7xx_pwm_fan_data { + void __iomem *pwm_base; + void __iomem *fan_base; + unsigned long pwm_clk_freq; + unsigned long fan_clk_freq; + struct clk *pwm_clk; + struct clk *fan_clk; + struct mutex pwm_lock[NPCM7XX_PWM_MAX_MODULES]; + spinlock_t fan_lock[NPCM7XX_FAN_MAX_MODULE]; + int fan_irq[NPCM7XX_FAN_MAX_MODULE]; + bool pwm_present[NPCM7XX_PWM_MAX_CHN_NUM]; + bool fan_present[NPCM7XX_FAN_MAX_CHN_NUM]; + u32 input_clk_freq; + struct timer_list fan_timer; + struct npcm7xx_fan_dev fan_dev[NPCM7XX_FAN_MAX_CHN_NUM]; + struct npcm7xx_cooling_device *cdev[NPCM7XX_PWM_MAX_CHN_NUM]; + u8 fan_select; +}; + +static int npcm7xx_pwm_config_set(struct npcm7xx_pwm_fan_data *data, + int channel, u16 val) +{ + u32 pwm_ch = (channel % NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE); + u32 module = (channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE); + u32 tmp_buf, ctrl_en_bit, env_bit; + + /* + * Config PWM Comparator register for setting duty cycle + */ + mutex_lock(&data->pwm_lock[module]); + + /* write new CMR value */ + iowrite32(val, NPCM7XX_PWM_REG_CMRx(data->pwm_base, module, pwm_ch)); + tmp_buf = ioread32(NPCM7XX_PWM_REG_CR(data->pwm_base, module)); + + switch (pwm_ch) { + case 0: + ctrl_en_bit = NPCM7XX_PWM_CTRL_CH0_EN_BIT; + env_bit = NPCM7XX_PWM_CTRL_CH0_INV_BIT; + break; + case 1: + ctrl_en_bit = NPCM7XX_PWM_CTRL_CH1_EN_BIT; + env_bit = NPCM7XX_PWM_CTRL_CH1_INV_BIT; + break; + case 2: + ctrl_en_bit = NPCM7XX_PWM_CTRL_CH2_EN_BIT; + env_bit = NPCM7XX_PWM_CTRL_CH2_INV_BIT; + break; + case 3: + ctrl_en_bit = NPCM7XX_PWM_CTRL_CH3_EN_BIT; + env_bit = NPCM7XX_PWM_CTRL_CH3_INV_BIT; + break; + default: + mutex_unlock(&data->pwm_lock[module]); + return -ENODEV; + } + + if (val == 0) { + /* Disable PWM */ + tmp_buf &= ~ctrl_en_bit; + tmp_buf |= env_bit; + } else { + /* Enable PWM */ + tmp_buf |= ctrl_en_bit; + tmp_buf &= ~env_bit; + } + + iowrite32(tmp_buf, NPCM7XX_PWM_REG_CR(data->pwm_base, module)); + mutex_unlock(&data->pwm_lock[module]); + + return 0; +} + +static inline void npcm7xx_fan_start_capture(struct npcm7xx_pwm_fan_data *data, + u8 fan, u8 cmp) +{ + u8 fan_id; + u8 reg_mode; + u8 reg_int; + unsigned long flags; + + fan_id = NPCM7XX_FAN_INPUT(fan, cmp); + + /* to check whether any fan tach is enable */ + if (data->fan_dev[fan_id].fan_st_flg != FAN_DISABLE) { + /* reset status */ + spin_lock_irqsave(&data->fan_lock[fan], flags); + + data->fan_dev[fan_id].fan_st_flg = FAN_INIT; + reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan)); + + /* + * the interrupt enable bits do not need to be cleared before + * it sets, the interrupt enable bits are cleared only on reset. + * the clock unit control register is behaving in the same + * manner that the interrupt enable register behave. 
+ */ + if (cmp == NPCM7XX_FAN_CMPA) { + /* enable interrupt */ + iowrite8(reg_int | (NPCM7XX_FAN_TIEN_TAIEN | + NPCM7XX_FAN_TIEN_TEIEN), + NPCM7XX_FAN_REG_TIEN(data->fan_base, fan)); + + reg_mode = NPCM7XX_FAN_TCKC_CLK1_APB + | ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, + fan)); + + /* start to Capture */ + iowrite8(reg_mode, NPCM7XX_FAN_REG_TCKC(data->fan_base, + fan)); + } else { + /* enable interrupt */ + iowrite8(reg_int | (NPCM7XX_FAN_TIEN_TBIEN | + NPCM7XX_FAN_TIEN_TFIEN), + NPCM7XX_FAN_REG_TIEN(data->fan_base, fan)); + + reg_mode = + NPCM7XX_FAN_TCKC_CLK2_APB + | ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, + fan)); + + /* start to Capture */ + iowrite8(reg_mode, + NPCM7XX_FAN_REG_TCKC(data->fan_base, fan)); + } + + spin_unlock_irqrestore(&data->fan_lock[fan], flags); + } +} + +/* + * Enable a background timer to poll fan tach value, (200ms * 4) + * to polling all fan + */ +static void npcm7xx_fan_polling(struct timer_list *t) +{ + struct npcm7xx_pwm_fan_data *data; + int i; + + data = from_timer(data, t, fan_timer); + + /* + * Polling two module per one round, + * FAN01 & FAN89 / FAN23 & FAN1011 / FAN45 & FAN1213 / FAN67 & FAN1415 + */ + for (i = data->fan_select; i < NPCM7XX_FAN_MAX_MODULE; + i = i + 4) { + /* clear the flag and reset the counter (TCNT) */ + iowrite8(NPCM7XX_FAN_TICLR_CLEAR_ALL, + NPCM7XX_FAN_REG_TICLR(data->fan_base, i)); + + if (data->fan_present[i * 2]) { + iowrite16(NPCM7XX_FAN_TCNT, + NPCM7XX_FAN_REG_TCNT1(data->fan_base, i)); + npcm7xx_fan_start_capture(data, i, NPCM7XX_FAN_CMPA); + } + if (data->fan_present[(i * 2) + 1]) { + iowrite16(NPCM7XX_FAN_TCNT, + NPCM7XX_FAN_REG_TCNT2(data->fan_base, i)); + npcm7xx_fan_start_capture(data, i, NPCM7XX_FAN_CMPB); + } + } + + data->fan_select++; + data->fan_select &= 0x3; + + /* reset the timer interval */ + data->fan_timer.expires = jiffies + + msecs_to_jiffies(NPCM7XX_FAN_POLL_TIMER_200MS); + add_timer(&data->fan_timer); +} + +static inline void npcm7xx_fan_compute(struct npcm7xx_pwm_fan_data *data, + u8 fan, u8 cmp, u8 fan_id, u8 flag_int, + u8 flag_mode, u8 flag_clear) +{ + u8 reg_int; + u8 reg_mode; + u16 fan_cap; + + if (cmp == NPCM7XX_FAN_CMPA) + fan_cap = ioread16(NPCM7XX_FAN_REG_TCRA(data->fan_base, fan)); + else + fan_cap = ioread16(NPCM7XX_FAN_REG_TCRB(data->fan_base, fan)); + + /* clear capature flag, H/W will auto reset the NPCM7XX_FAN_TCNTx */ + iowrite8(flag_clear, NPCM7XX_FAN_REG_TICLR(data->fan_base, fan)); + + if (data->fan_dev[fan_id].fan_st_flg == FAN_INIT) { + /* First capture, drop it */ + data->fan_dev[fan_id].fan_st_flg = + FAN_PREPARE_TO_GET_FIRST_CAPTURE; + + /* reset counter */ + data->fan_dev[fan_id].fan_cnt_tmp = 0; + } else if (data->fan_dev[fan_id].fan_st_flg < FAN_ENOUGH_SAMPLE) { + /* + * collect the enough sample, + * (ex: 2 pulse fan need to get 2 sample) + */ + data->fan_dev[fan_id].fan_cnt_tmp += + (NPCM7XX_FAN_TCNT - fan_cap); + + data->fan_dev[fan_id].fan_st_flg++; + } else { + /* get enough sample or fan disable */ + if (data->fan_dev[fan_id].fan_st_flg == FAN_ENOUGH_SAMPLE) { + data->fan_dev[fan_id].fan_cnt_tmp += + (NPCM7XX_FAN_TCNT - fan_cap); + + /* compute finial average cnt per pulse */ + data->fan_dev[fan_id].fan_cnt = + data->fan_dev[fan_id].fan_cnt_tmp / + FAN_ENOUGH_SAMPLE; + + data->fan_dev[fan_id].fan_st_flg = FAN_INIT; + } + + reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan)); + + /* disable interrupt */ + iowrite8((reg_int & ~flag_int), + NPCM7XX_FAN_REG_TIEN(data->fan_base, fan)); + reg_mode = ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, fan)); + + /* 
stop capturing */ + iowrite8((reg_mode & ~flag_mode), + NPCM7XX_FAN_REG_TCKC(data->fan_base, fan)); + } +} + +static inline void npcm7xx_check_cmp(struct npcm7xx_pwm_fan_data *data, + u8 fan, u8 cmp, u8 flag) +{ + u8 reg_int; + u8 reg_mode; + u8 flag_timeout; + u8 flag_cap; + u8 flag_clear; + u8 flag_int; + u8 flag_mode; + u8 fan_id; + + fan_id = NPCM7XX_FAN_INPUT(fan, cmp); + + if (cmp == NPCM7XX_FAN_CMPA) { + flag_cap = NPCM7XX_FAN_TICTRL_TAPND; + flag_timeout = NPCM7XX_FAN_TICTRL_TEPND; + flag_int = NPCM7XX_FAN_TIEN_TAIEN | NPCM7XX_FAN_TIEN_TEIEN; + flag_mode = NPCM7XX_FAN_TCKC_CLK1_APB; + flag_clear = NPCM7XX_FAN_TICLR_TACLR | NPCM7XX_FAN_TICLR_TECLR; + } else { + flag_cap = NPCM7XX_FAN_TICTRL_TBPND; + flag_timeout = NPCM7XX_FAN_TICTRL_TFPND; + flag_int = NPCM7XX_FAN_TIEN_TBIEN | NPCM7XX_FAN_TIEN_TFIEN; + flag_mode = NPCM7XX_FAN_TCKC_CLK2_APB; + flag_clear = NPCM7XX_FAN_TICLR_TBCLR | NPCM7XX_FAN_TICLR_TFCLR; + } + + if (flag & flag_timeout) { + reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan)); + + /* disable interrupt */ + iowrite8((reg_int & ~flag_int), + NPCM7XX_FAN_REG_TIEN(data->fan_base, fan)); + + /* clear interrupt flag */ + iowrite8(flag_clear, + NPCM7XX_FAN_REG_TICLR(data->fan_base, fan)); + + reg_mode = ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, fan)); + + /* stop capturing */ + iowrite8((reg_mode & ~flag_mode), + NPCM7XX_FAN_REG_TCKC(data->fan_base, fan)); + + /* + * If timeout occurs (NPCM7XX_FAN_TIMEOUT), the fan doesn't + * connect or speed is lower than 10.6Hz (320RPM/pulse2). + * In these situation, the RPM output should be zero. + */ + data->fan_dev[fan_id].fan_cnt = 0; + } else { + /* input capture is occurred */ + if (flag & flag_cap) + npcm7xx_fan_compute(data, fan, cmp, fan_id, flag_int, + flag_mode, flag_clear); + } +} + +static irqreturn_t npcm7xx_fan_isr(int irq, void *dev_id) +{ + struct npcm7xx_pwm_fan_data *data = dev_id; + unsigned long flags; + int module; + u8 flag; + + module = irq - data->fan_irq[0]; + spin_lock_irqsave(&data->fan_lock[module], flags); + + flag = ioread8(NPCM7XX_FAN_REG_TICTRL(data->fan_base, module)); + if (flag > 0) { + npcm7xx_check_cmp(data, module, NPCM7XX_FAN_CMPA, flag); + npcm7xx_check_cmp(data, module, NPCM7XX_FAN_CMPB, flag); + spin_unlock_irqrestore(&data->fan_lock[module], flags); + return IRQ_HANDLED; + } + + spin_unlock_irqrestore(&data->fan_lock[module], flags); + + return IRQ_NONE; +} + +static int npcm7xx_read_pwm(struct device *dev, u32 attr, int channel, + long *val) +{ + struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev); + u32 pmw_ch = (channel % NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE); + u32 module = (channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE); + + switch (attr) { + case hwmon_pwm_input: + *val = ioread32 + (NPCM7XX_PWM_REG_CMRx(data->pwm_base, module, pmw_ch)); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int npcm7xx_write_pwm(struct device *dev, u32 attr, int channel, + long val) +{ + struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev); + int err; + + switch (attr) { + case hwmon_pwm_input: + if (val < 0 || val > NPCM7XX_PWM_CMR_MAX) + return -EINVAL; + err = npcm7xx_pwm_config_set(data, channel, (u16)val); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static umode_t npcm7xx_pwm_is_visible(const void *_data, u32 attr, int channel) +{ + const struct npcm7xx_pwm_fan_data *data = _data; + + if (!data->pwm_present[channel]) + return 0; + + switch (attr) { + case hwmon_pwm_input: + return 0644; + default: + return 0; + } +} + +static int 
npcm7xx_read_fan(struct device *dev, u32 attr, int channel, + long *val) +{ + struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev); + + switch (attr) { + case hwmon_fan_input: + *val = 0; + if (data->fan_dev[channel].fan_cnt <= 0) + return data->fan_dev[channel].fan_cnt; + + /* Convert the raw reading to RPM */ + if (data->fan_dev[channel].fan_cnt > 0 && + data->fan_dev[channel].fan_pls_per_rev > 0) + *val = ((data->input_clk_freq * 60) / + (data->fan_dev[channel].fan_cnt * + data->fan_dev[channel].fan_pls_per_rev)); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static umode_t npcm7xx_fan_is_visible(const void *_data, u32 attr, int channel) +{ + const struct npcm7xx_pwm_fan_data *data = _data; + + if (!data->fan_present[channel]) + return 0; + + switch (attr) { + case hwmon_fan_input: + return 0444; + default: + return 0; + } +} + +static int npcm7xx_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + switch (type) { + case hwmon_pwm: + return npcm7xx_read_pwm(dev, attr, channel, val); + case hwmon_fan: + return npcm7xx_read_fan(dev, attr, channel, val); + default: + return -EOPNOTSUPP; + } +} + +static int npcm7xx_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + switch (type) { + case hwmon_pwm: + return npcm7xx_write_pwm(dev, attr, channel, val); + default: + return -EOPNOTSUPP; + } +} + +static umode_t npcm7xx_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_pwm: + return npcm7xx_pwm_is_visible(data, attr, channel); + case hwmon_fan: + return npcm7xx_fan_is_visible(data, attr, channel); + default: + return 0; + } +} + +static const u32 npcm7xx_pwm_config[] = { + HWMON_PWM_INPUT, + HWMON_PWM_INPUT, + HWMON_PWM_INPUT, + HWMON_PWM_INPUT, + HWMON_PWM_INPUT, + HWMON_PWM_INPUT, + HWMON_PWM_INPUT, + HWMON_PWM_INPUT, + 0 +}; + +static const struct hwmon_channel_info npcm7xx_pwm = { + .type = hwmon_pwm, + .config = npcm7xx_pwm_config, +}; + +static const u32 npcm7xx_fan_config[] = { + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + HWMON_F_INPUT, + 0 +}; + +static const struct hwmon_channel_info npcm7xx_fan = { + .type = hwmon_fan, + .config = npcm7xx_fan_config, +}; + +static const struct hwmon_channel_info *npcm7xx_info[] = { + &npcm7xx_pwm, + &npcm7xx_fan, + NULL +}; + +static const struct hwmon_ops npcm7xx_hwmon_ops = { + .is_visible = npcm7xx_is_visible, + .read = npcm7xx_read, + .write = npcm7xx_write, +}; + +static const struct hwmon_chip_info npcm7xx_chip_info = { + .ops = &npcm7xx_hwmon_ops, + .info = npcm7xx_info, +}; + +static u32 npcm7xx_pwm_init(struct npcm7xx_pwm_fan_data *data) +{ + int m, ch; + u32 prescale_val, output_freq; + + data->pwm_clk_freq = clk_get_rate(data->pwm_clk); + + /* Adjust NPCM7xx PWMs output frequency to ~25kHz */ + output_freq = data->pwm_clk_freq / PWN_CNT_DEFAULT; + prescale_val = DIV_ROUND_CLOSEST(output_freq, PWM_OUTPUT_FREQ_25KHZ); + + /* If prescale_val = 0, then the prescale output clock is stopped */ + if (prescale_val < MIN_PRESCALE1) + prescale_val = MIN_PRESCALE1; + /* + * prescale_val needs to be decremented by one because the PWM Prescale + * register increments the programmed Prescale value by one + */ + prescale_val--; + + /* Set the PWM Prescale register value for both
modules */ + prescale_val |= (prescale_val << NPCM7XX_PWM_PRESCALE_SHIFT_CH01); + + for (m = 0; m < NPCM7XX_PWM_MAX_MODULES ; m++) { + iowrite32(prescale_val, NPCM7XX_PWM_REG_PR(data->pwm_base, m)); + iowrite32(NPCM7XX_PWM_PRESCALE2_DEFAULT, + NPCM7XX_PWM_REG_CSR(data->pwm_base, m)); + iowrite32(NPCM7XX_PWM_CTRL_MODE_DEFAULT, + NPCM7XX_PWM_REG_CR(data->pwm_base, m)); + + for (ch = 0; ch < NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE; ch++) { + iowrite32(NPCM7XX_PWM_COUNTER_DEFAULT_NUM, + NPCM7XX_PWM_REG_CNRx(data->pwm_base, m, ch)); + } + } + + return output_freq / ((prescale_val & 0xf) + 1); +} + +static void npcm7xx_fan_init(struct npcm7xx_pwm_fan_data *data) +{ + int md; + int ch; + int i; + u32 apb_clk_freq; + + for (md = 0; md < NPCM7XX_FAN_MAX_MODULE; md++) { + /* stop FAN0~7 clock */ + iowrite8(NPCM7XX_FAN_TCKC_CLKX_NONE, + NPCM7XX_FAN_REG_TCKC(data->fan_base, md)); + + /* disable all interrupts */ + iowrite8(0x00, NPCM7XX_FAN_REG_TIEN(data->fan_base, md)); + + /* clear all interrupts */ + iowrite8(NPCM7XX_FAN_TICLR_CLEAR_ALL, + NPCM7XX_FAN_REG_TICLR(data->fan_base, md)); + + /* set FAN0~7 clock prescaler */ + iowrite8(NPCM7XX_FAN_CLK_PRESCALE, + NPCM7XX_FAN_REG_TPRSC(data->fan_base, md)); + + /* set FAN0~7 mode (high-to-low transition) */ + iowrite8((NPCM7XX_FAN_TMCTRL_MODE_5 | NPCM7XX_FAN_TMCTRL_TBEN | + NPCM7XX_FAN_TMCTRL_TAEN), + NPCM7XX_FAN_REG_TMCTRL(data->fan_base, md)); + + /* set FAN0~7 Initial Count/Cap */ + iowrite16(NPCM7XX_FAN_TCNT, + NPCM7XX_FAN_REG_TCNT1(data->fan_base, md)); + iowrite16(NPCM7XX_FAN_TCNT, + NPCM7XX_FAN_REG_TCNT2(data->fan_base, md)); + + /* set FAN0~7 compare (equal to count) */ + iowrite8((NPCM7XX_FAN_TCPCFG_EQAEN | NPCM7XX_FAN_TCPCFG_EQBEN), + NPCM7XX_FAN_REG_TCPCFG(data->fan_base, md)); + + /* set FAN0~7 compare value */ + iowrite16(NPCM7XX_FAN_TCPA, + NPCM7XX_FAN_REG_TCPA(data->fan_base, md)); + iowrite16(NPCM7XX_FAN_TCPB, + NPCM7XX_FAN_REG_TCPB(data->fan_base, md)); + + /* set FAN0~7 fan input FANIN 0~15 */ + iowrite8(NPCM7XX_FAN_TINASEL_FANIN_DEFAULT, + NPCM7XX_FAN_REG_TINASEL(data->fan_base, md)); + iowrite8(NPCM7XX_FAN_TINASEL_FANIN_DEFAULT, + NPCM7XX_FAN_REG_TINBSEL(data->fan_base, md)); + + for (i = 0; i < NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE; i++) { + ch = md * NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE + i; + data->fan_dev[ch].fan_st_flg = FAN_DISABLE; + data->fan_dev[ch].fan_pls_per_rev = + NPCM7XX_FAN_DEFAULT_PULSE_PER_REVOLUTION; + data->fan_dev[ch].fan_cnt = 0; + } + } + + apb_clk_freq = clk_get_rate(data->fan_clk); + + /* Fan tach input clock = APB clock / prescaler, default is 255.
*/ + data->input_clk_freq = apb_clk_freq / (NPCM7XX_FAN_CLK_PRESCALE + 1); +} + +static int +npcm7xx_pwm_cz_get_max_state(struct thermal_cooling_device *tcdev, + unsigned long *state) +{ + struct npcm7xx_cooling_device *cdev = tcdev->devdata; + + *state = cdev->max_state; + + return 0; +} + +static int +npcm7xx_pwm_cz_get_cur_state(struct thermal_cooling_device *tcdev, + unsigned long *state) +{ + struct npcm7xx_cooling_device *cdev = tcdev->devdata; + + *state = cdev->cur_state; + + return 0; +} + +static int +npcm7xx_pwm_cz_set_cur_state(struct thermal_cooling_device *tcdev, + unsigned long state) +{ + struct npcm7xx_cooling_device *cdev = tcdev->devdata; + int ret; + + if (state > cdev->max_state) + return -EINVAL; + + cdev->cur_state = state; + ret = npcm7xx_pwm_config_set(cdev->data, cdev->pwm_port, + cdev->cooling_levels[cdev->cur_state]); + + return ret; +} + +static const struct thermal_cooling_device_ops npcm7xx_pwm_cool_ops = { + .get_max_state = npcm7xx_pwm_cz_get_max_state, + .get_cur_state = npcm7xx_pwm_cz_get_cur_state, + .set_cur_state = npcm7xx_pwm_cz_set_cur_state, +}; + +static int npcm7xx_create_pwm_cooling(struct device *dev, + struct device_node *child, + struct npcm7xx_pwm_fan_data *data, + u32 pwm_port, u8 num_levels) +{ + int ret; + struct npcm7xx_cooling_device *cdev; + + cdev = devm_kzalloc(dev, sizeof(*cdev), GFP_KERNEL); + if (!cdev) + return -ENOMEM; + + cdev->cooling_levels = devm_kzalloc(dev, num_levels, GFP_KERNEL); + if (!cdev->cooling_levels) + return -ENOMEM; + + cdev->max_state = num_levels - 1; + ret = of_property_read_u8_array(child, "cooling-levels", + cdev->cooling_levels, + num_levels); + if (ret) { + dev_err(dev, "Property 'cooling-levels' cannot be read.\n"); + return ret; + } + snprintf(cdev->name, THERMAL_NAME_LENGTH, "%s%d", child->name, + pwm_port); + + cdev->tcdev = thermal_of_cooling_device_register(child, + cdev->name, + cdev, + &npcm7xx_pwm_cool_ops); + if (IS_ERR(cdev->tcdev)) + return PTR_ERR(cdev->tcdev); + + cdev->data = data; + cdev->pwm_port = pwm_port; + + data->cdev[pwm_port] = cdev; + + return 0; +} + +static int npcm7xx_en_pwm_fan(struct device *dev, + struct device_node *child, + struct npcm7xx_pwm_fan_data *data) +{ + u8 *fan_ch; + u32 pwm_port; + int ret, fan_cnt; + u8 index, ch; + + ret = of_property_read_u32(child, "reg", &pwm_port); + if (ret) + return ret; + + data->pwm_present[pwm_port] = true; + ret = npcm7xx_pwm_config_set(data, pwm_port, + NPCM7XX_PWM_CMR_DEFAULT_NUM); + + ret = of_property_count_u8_elems(child, "cooling-levels"); + if (ret > 0) { + ret = npcm7xx_create_pwm_cooling(dev, child, data, pwm_port, + ret); + if (ret) + return ret; + } + + fan_cnt = of_property_count_u8_elems(child, "fan-tach-ch"); + if (fan_cnt < 1) + return -EINVAL; + + fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL); + if (!fan_ch) + return -ENOMEM; + + ret = of_property_read_u8_array(child, "fan-tach-ch", fan_ch, fan_cnt); + if (ret) + return ret; + + for (ch = 0; ch < fan_cnt; ch++) { + index = fan_ch[ch]; + data->fan_present[index] = true; + data->fan_dev[index].fan_st_flg = FAN_INIT; + } + + return 0; +} + +static int npcm7xx_pwm_fan_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np, *child; + struct npcm7xx_pwm_fan_data *data; + struct resource *res; + struct device *hwmon; + char name[20]; + int ret, cnt; + u32 output_freq; + u32 i; + + np = dev->of_node; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"); + if (!res) { + dev_err(dev, "pwm resource not found\n"); + return -ENODEV; + } + + data->pwm_base = devm_ioremap_resource(dev, res); + dev_dbg(dev, "pwm base resource is %pR\n", res); + if (IS_ERR(data->pwm_base)) + return PTR_ERR(data->pwm_base); + + data->pwm_clk = devm_clk_get(dev, "pwm"); + if (IS_ERR(data->pwm_clk)) { + dev_err(dev, "couldn't get pwm clock\n"); + return PTR_ERR(data->pwm_clk); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fan"); + if (!res) { + dev_err(dev, "fan resource not found\n"); + return -ENODEV; + } + + data->fan_base = devm_ioremap_resource(dev, res); + dev_dbg(dev, "fan base resource is %pR\n", res); + if (IS_ERR(data->fan_base)) + return PTR_ERR(data->fan_base); + + data->fan_clk = devm_clk_get(dev, "fan"); + if (IS_ERR(data->fan_clk)) { + dev_err(dev, "couldn't get fan clock\n"); + return PTR_ERR(data->fan_clk); + } + + output_freq = npcm7xx_pwm_init(data); + npcm7xx_fan_init(data); + + for (cnt = 0; cnt < NPCM7XX_PWM_MAX_MODULES ; cnt++) + mutex_init(&data->pwm_lock[cnt]); + + for (i = 0; i < NPCM7XX_FAN_MAX_MODULE; i++) { + spin_lock_init(&data->fan_lock[i]); + + data->fan_irq[i] = platform_get_irq(pdev, i); + if (data->fan_irq[i] < 0) { + dev_err(dev, "get IRQ fan%d failed\n", i); + return data->fan_irq[i]; + } + + sprintf(name, "NPCM7XX-FAN-MD%d", i); + ret = devm_request_irq(dev, data->fan_irq[i], npcm7xx_fan_isr, + 0, name, (void *)data); + if (ret) { + dev_err(dev, "register IRQ fan%d failed\n", i); + return ret; + } + } + + for_each_child_of_node(np, child) { + ret = npcm7xx_en_pwm_fan(dev, child, data); + if (ret) { + dev_err(dev, "enable pwm and fan failed\n"); + of_node_put(child); + return ret; + } + } + + hwmon = devm_hwmon_device_register_with_info(dev, "npcm7xx_pwm_fan", + data, &npcm7xx_chip_info, + NULL); + if (IS_ERR(hwmon)) { + dev_err(dev, "unable to register hwmon device\n"); + return PTR_ERR(hwmon); + } + + for (i = 0; i < NPCM7XX_FAN_MAX_CHN_NUM; i++) { + if (data->fan_present[i]) { + /* fan timer initialization */ + data->fan_timer.expires = jiffies + + msecs_to_jiffies(NPCM7XX_FAN_POLL_TIMER_200MS); + timer_setup(&data->fan_timer, + npcm7xx_fan_polling, 0); + add_timer(&data->fan_timer); + break; + } + } + + pr_info("NPCM7XX PWM-FAN Driver probed, output Freq %dHz[PWM], input Freq %dHz[FAN]\n", + output_freq, data->input_clk_freq); + + return 0; +} + +static const struct of_device_id of_pwm_fan_match_table[] = { + { .compatible = "nuvoton,npcm750-pwm-fan", }, + {}, +}; +MODULE_DEVICE_TABLE(of, of_pwm_fan_match_table); + +static struct platform_driver npcm7xx_pwm_fan_driver = { + .probe = npcm7xx_pwm_fan_probe, + .driver = { + .name = "npcm7xx_pwm_fan", + .of_match_table = of_pwm_fan_match_table, + }, +}; + +module_platform_driver(npcm7xx_pwm_fan_driver); + +MODULE_DESCRIPTION("Nuvoton NPCM7XX PWM and Fan Tacho driver"); +MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig index e71aec69e76e..a82018aaf473 100644 --- a/drivers/hwmon/pmbus/Kconfig +++ b/drivers/hwmon/pmbus/Kconfig @@ -130,7 +130,7 @@ config SENSORS_MAX34440 default n help If you say yes here you get hardware monitoring support for Maxim - MAX34440, MAX34441, MAX34446, MAX34460, and MAX34461. + MAX34440, MAX34441, MAX34446, MAX34451, MAX34460, and MAX34461. This driver can also be built as a module. If so, the module will be called max34440. 
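Stepping back to the NPCM7xx hwmon driver added above: the fan speed reported by npcm7xx_read_fan() is simply input_clk_freq * 60 / (fan_cnt * fan_pls_per_rev), where input_clk_freq is the APB clock divided by (NPCM7XX_FAN_CLK_PRESCALE + 1). A minimal standalone sketch of that arithmetic follows; the 25 MHz APB clock and the 2 pulse-per-revolution tach are illustrative assumptions only, since the driver gets the real rate from the "fan" clock and the pulse count from NPCM7XX_FAN_DEFAULT_PULSE_PER_REVOLUTION.

/*
 * Standalone sketch of the RPM conversion used by npcm7xx_read_fan().
 * The clock rate and pulses-per-revolution below are assumed values
 * for illustration, not taken from the patch.
 */
#include <stdio.h>

#define FAN_CLK_PRESCALE 255	/* mirrors NPCM7XX_FAN_CLK_PRESCALE */

int main(void)
{
	unsigned int apb_clk_freq = 25000000;	/* assumed APB clock, in Hz */
	unsigned int input_clk_freq = apb_clk_freq / (FAN_CLK_PRESCALE + 1);
	unsigned int fan_cnt = 1465;		/* example capture count */
	unsigned int pulses_per_rev = 2;	/* assumed tach pulses per rev */

	/* Same formula as the driver: RPM = clk * 60 / (count * pulses) */
	unsigned int rpm = (input_clk_freq * 60) / (fan_cnt * pulses_per_rev);

	printf("input clock %u Hz -> %u RPM\n", input_clk_freq, rpm);
	return 0;
}

With those assumed numbers the tach input clock is 97656 Hz, and a capture count of 1465 works out to roughly 2000 RPM, which is the kind of value exposed through the hwmon fan*_input attribute.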
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c index 74a1f6f68fb3..47576c460010 100644 --- a/drivers/hwmon/pmbus/max34440.c +++ b/drivers/hwmon/pmbus/max34440.c @@ -27,7 +27,7 @@ #include <linux/i2c.h> #include "pmbus.h" -enum chips { max34440, max34441, max34446, max34460, max34461 }; +enum chips { max34440, max34441, max34446, max34451, max34460, max34461 }; #define MAX34440_MFR_VOUT_PEAK 0xd4 #define MAX34440_MFR_IOUT_PEAK 0xd5 @@ -44,6 +44,9 @@ enum chips { max34440, max34441, max34446, max34460, max34461 }; #define MAX34440_STATUS_OT_FAULT BIT(5) #define MAX34440_STATUS_OT_WARN BIT(6) +#define MAX34451_MFR_CHANNEL_CONFIG 0xe4 +#define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f + struct max34440_data { int id; struct pmbus_driver_info info; @@ -67,7 +70,7 @@ static int max34440_read_word_data(struct i2c_client *client, int page, int reg) MAX34440_MFR_VOUT_PEAK); break; case PMBUS_VIRT_READ_IOUT_AVG: - if (data->id != max34446) + if (data->id != max34446 && data->id != max34451) return -ENXIO; ret = pmbus_read_word_data(client, page, MAX34446_MFR_IOUT_AVG); @@ -143,7 +146,7 @@ static int max34440_write_word_data(struct i2c_client *client, int page, case PMBUS_VIRT_RESET_IOUT_HISTORY: ret = pmbus_write_word_data(client, page, MAX34440_MFR_IOUT_PEAK, 0); - if (!ret && data->id == max34446) + if (!ret && (data->id == max34446 || data->id == max34451)) ret = pmbus_write_word_data(client, page, MAX34446_MFR_IOUT_AVG, 0); @@ -202,6 +205,58 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg) return ret; } +static int max34451_set_supported_funcs(struct i2c_client *client, + struct max34440_data *data) +{ + /* + * Each of the channel 0-15 can be configured to monitor the following + * functions based on MFR_CHANNEL_CONFIG[5:0] + * 0x10: Sequencing + voltage monitoring (only valid for PAGES 0–11) + * 0x20: Voltage monitoring (no sequencing) + * 0x21: Voltage read only + * 0x22: Current monitoring + * 0x23: Current read only + * 0x30: General-purpose input active low + * 0x34: General-purpose input active high + * 0x00: Disabled + */ + + int page, rv; + + for (page = 0; page < 16; page++) { + rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); + if (rv < 0) + return rv; + + rv = i2c_smbus_read_word_data(client, + MAX34451_MFR_CHANNEL_CONFIG); + if (rv < 0) + return rv; + + switch (rv & MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK) { + case 0x10: + case 0x20: + data->info.func[page] = PMBUS_HAVE_VOUT | + PMBUS_HAVE_STATUS_VOUT; + break; + case 0x21: + data->info.func[page] = PMBUS_HAVE_VOUT; + break; + case 0x22: + data->info.func[page] = PMBUS_HAVE_IOUT | + PMBUS_HAVE_STATUS_IOUT; + break; + case 0x23: + data->info.func[page] = PMBUS_HAVE_IOUT; + break; + default: + break; + } + } + + return 0; +} + static struct pmbus_driver_info max34440_info[] = { [max34440] = { .pages = 14, @@ -325,6 +380,30 @@ static struct pmbus_driver_info max34440_info[] = { .read_word_data = max34440_read_word_data, .write_word_data = max34440_write_word_data, }, + [max34451] = { + .pages = 21, + .format[PSC_VOLTAGE_OUT] = direct, + .format[PSC_TEMPERATURE] = direct, + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_VOLTAGE_OUT] = 1, + .b[PSC_VOLTAGE_OUT] = 0, + .R[PSC_VOLTAGE_OUT] = 3, + .m[PSC_CURRENT_OUT] = 1, + .b[PSC_CURRENT_OUT] = 0, + .R[PSC_CURRENT_OUT] = 2, + .m[PSC_TEMPERATURE] = 1, + .b[PSC_TEMPERATURE] = 0, + .R[PSC_TEMPERATURE] = 2, + /* func 0-15 is set dynamically before probing */ + .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[17] = 
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .read_byte_data = max34440_read_byte_data, + .read_word_data = max34440_read_word_data, + .write_word_data = max34440_write_word_data, + }, [max34460] = { .pages = 18, .format[PSC_VOLTAGE_OUT] = direct, @@ -398,6 +477,7 @@ static int max34440_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct max34440_data *data; + int rv; data = devm_kzalloc(&client->dev, sizeof(struct max34440_data), GFP_KERNEL); @@ -406,6 +486,12 @@ static int max34440_probe(struct i2c_client *client, data->id = id->driver_data; data->info = max34440_info[id->driver_data]; + if (data->id == max34451) { + rv = max34451_set_supported_funcs(client, data); + if (rv) + return rv; + } + return pmbus_do_probe(client, id, &data->info); } @@ -413,6 +499,7 @@ static const struct i2c_device_id max34440_id[] = { {"max34440", max34440}, {"max34441", max34441}, {"max34446", max34446}, + {"max34451", max34451}, {"max34460", max34460}, {"max34461", max34461}, {} diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c index 1f41a4f89c08..8a873975cf12 100644 --- a/drivers/i2c/busses/i2c-xlp9xx.c +++ b/drivers/i2c/busses/i2c-xlp9xx.c @@ -191,28 +191,43 @@ static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv) if (priv->len_recv) { /* read length byte */ rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO); + + /* + * We expect at least 2 interrupts for I2C_M_RECV_LEN + * transactions. The length is updated during the first + * interrupt, and the buffer contents are only copied + * during subsequent interrupts. If the interrupts get + * merged, we would complete the transaction without copying + * out the bytes from the RX FIFO. To avoid this, we now + * drain the FIFO whenever data is available. + * The rlen byte has already been drained, so decrement the + * total length by one.
+ */ + + len--; if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) { rlen = 0; /*abort transfer */ priv->msg_buf_remaining = 0; priv->msg_len = 0; - } else { - *buf++ = rlen; - if (priv->client_pec) - ++rlen; /* account for error check byte */ - /* update remaining bytes and message length */ - priv->msg_buf_remaining = rlen; - priv->msg_len = rlen + 1; + xlp9xx_i2c_update_rlen(priv); + return; } + + *buf++ = rlen; + if (priv->client_pec) + ++rlen; /* account for error check byte */ + /* update remaining bytes and message length */ + priv->msg_buf_remaining = rlen; + priv->msg_len = rlen + 1; xlp9xx_i2c_update_rlen(priv); priv->len_recv = false; - } else { - len = min(priv->msg_buf_remaining, len); - for (i = 0; i < len; i++, buf++) - *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO); - - priv->msg_buf_remaining -= len; } + len = min(priv->msg_buf_remaining, len); + for (i = 0; i < len; i++, buf++) + *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO); + + priv->msg_buf_remaining -= len; priv->msg_buf = buf; if (priv->msg_buf_remaining) diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 301285c54603..15c95aaa484c 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -624,7 +624,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr) static void i2c_adapter_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { - rt_mutex_lock(&adapter->bus_lock); + rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter)); } /** diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index 300ab4b672e4..29646aa6132e 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c @@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags) struct i2c_mux_priv *priv = adapter->algo_data; struct i2c_adapter *parent = priv->muxc->parent; - rt_mutex_lock(&parent->mux_lock); + rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter)); if (!(flags & I2C_LOCK_ROOT_ADAPTER)) return; i2c_lock_bus(parent, flags); @@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter, struct i2c_mux_priv *priv = adapter->algo_data; struct i2c_adapter *parent = priv->muxc->parent; - rt_mutex_lock(&parent->mux_lock); + rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter)); i2c_lock_bus(parent, flags); } diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index a6e904973ba8..475910ffbcb6 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -121,7 +121,7 @@ static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive) * this lock. */ if (!exclusive) - return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ? + return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ? 
-EBUSY : 0; /* lock is either WRITE or DESTROY - should be exclusive */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index cc06e8404e9b..583d3a10b940 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1984,15 +1984,64 @@ static int modify_qp(struct ib_uverbs_file *file, goto release_qp; } - if ((cmd->base.attr_mask & IB_QP_AV) && - !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) { - ret = -EINVAL; - goto release_qp; + if ((cmd->base.attr_mask & IB_QP_AV)) { + if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) { + ret = -EINVAL; + goto release_qp; + } + + if (cmd->base.attr_mask & IB_QP_STATE && + cmd->base.qp_state == IB_QPS_RTR) { + /* We are in the INIT->RTR transition (if we are not, + * this transition will be rejected in subsequent checks). + * In the INIT->RTR transition, we cannot have IB_QP_PORT set, + * but the IB_QP_STATE flag is required. + * + * Since kernel 3.14 (commit dbf727de7440), the uverbs driver, + * when IB_QP_AV is set, has required inclusion of a valid + * port number in the primary AV. (AVs are created and handled + * differently for InfiniBand and Ethernet (RoCE) ports). + * + * Check the port number included in the primary AV against + * the port number in the qp struct, which was set (and saved) + * in the RST->INIT transition. + */ + if (cmd->base.dest.port_num != qp->real_qp->port) { + ret = -EINVAL; + goto release_qp; + } + } else { + /* We are in the SQD->SQD transition. (If we are not, this + * transition will be rejected later in the verbs layer checks). + * Check for both IB_QP_PORT and IB_QP_AV; these can be set + * together in the SQD->SQD transition. + * + * If only IB_QP_AV was set, add in IB_QP_PORT as well (the + * verbs layer driver does not track primary port changes + * resulting from path migration. Thus, in SQD, if the primary + * AV is modified, the primary port should also be modified). + * + * Note that in this transition, the IB_QP_STATE flag + * is not allowed.
+ */ + if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT)) + == (IB_QP_AV | IB_QP_PORT)) && + cmd->base.port_num != cmd->base.dest.port_num) { + ret = -EINVAL; + goto release_qp; + } + if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT)) + == IB_QP_AV) { + cmd->base.attr_mask |= IB_QP_PORT; + cmd->base.port_num = cmd->base.dest.port_num; + } + } } if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) || - !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) { + !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) || + cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) { ret = -EINVAL; goto release_qp; } diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c index a4e404aaf64b..5c7afdec192c 100644 --- a/drivers/input/keyboard/hilkbd.c +++ b/drivers/input/keyboard/hilkbd.c @@ -57,8 +57,8 @@ MODULE_LICENSE("GPL v2"); #define HIL_DATA 0x1 #define HIL_CMD 0x3 #define HIL_IRQ 2 - #define hil_readb(p) readb(p) - #define hil_writeb(v,p) writeb((v),(p)) + #define hil_readb(p) readb((const volatile void __iomem *)(p)) + #define hil_writeb(v, p) writeb((v), (volatile void __iomem *)(p)) #else #error "HIL is not supported on this platform" diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index e9233db16e03..d564d21245c5 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -8,7 +8,7 @@ config ARM_GIC bool select IRQ_DOMAIN select IRQ_DOMAIN_HIERARCHY - select MULTI_IRQ_HANDLER + select GENERIC_IRQ_MULTI_HANDLER select GENERIC_IRQ_EFFECTIVE_AFF_MASK config ARM_GIC_PM @@ -34,7 +34,7 @@ config GIC_NON_BANKED config ARM_GIC_V3 bool select IRQ_DOMAIN - select MULTI_IRQ_HANDLER + select GENERIC_IRQ_MULTI_HANDLER select IRQ_DOMAIN_HIERARCHY select PARTITION_PERCPU select GENERIC_IRQ_EFFECTIVE_AFF_MASK @@ -66,7 +66,7 @@ config ARM_NVIC config ARM_VIC bool select IRQ_DOMAIN - select MULTI_IRQ_HANDLER + select GENERIC_IRQ_MULTI_HANDLER config ARM_VIC_NR int @@ -93,14 +93,14 @@ config ATMEL_AIC_IRQ bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select MULTI_IRQ_HANDLER + select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config ATMEL_AIC5_IRQ bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select MULTI_IRQ_HANDLER + select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config I8259 @@ -137,7 +137,7 @@ config DW_APB_ICTL config FARADAY_FTINTC010 bool select IRQ_DOMAIN - select MULTI_IRQ_HANDLER + select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config HISILICON_IRQ_MBIGEN @@ -162,7 +162,7 @@ config CLPS711X_IRQCHIP bool depends on ARCH_CLPS711X select IRQ_DOMAIN - select MULTI_IRQ_HANDLER + select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ default y @@ -181,7 +181,7 @@ config OMAP_IRQCHIP config ORION_IRQCHIP bool select IRQ_DOMAIN - select MULTI_IRQ_HANDLER + select GENERIC_IRQ_MULTI_HANDLER config PIC32_EVIC bool diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c index 4eca5c763766..606efa64adff 100644 --- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c @@ -45,6 +45,9 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain, */ info->scratchpad[0].ul = mc_bus_dev->icid; msi_info = msi_get_domain_info(msi_domain->parent); + + /* Allocate at least 32 MSIs, and always as a power of 2 */ + nvec = max_t(int, 32, roundup_pow_of_two(nvec)); return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info); } diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c 
b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index 25a98de5cfb2..8d6d009d1d58 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -66,7 +66,7 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, { struct pci_dev *pdev, *alias_dev; struct msi_domain_info *msi_info; - int alias_count = 0; + int alias_count = 0, minnvec = 1; if (!dev_is_pci(dev)) return -EINVAL; @@ -86,8 +86,18 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, /* ITS specific DeviceID, as the core ITS ignores dev. */ info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev); - return msi_info->ops->msi_prepare(domain->parent, - dev, max(nvec, alias_count), info); + /* + * Always allocate a power of 2, and special case device 0 for + * broken systems where the DevID is not wired (and all devices + * appear as DevID 0). For that reason, we generously allocate a + * minimum of 32 MSIs for DevID 0. If you want more because all + * your devices are aliasing to DevID 0, consider fixing your HW. + */ + nvec = max(nvec, alias_count); + if (!info->scratchpad[0].ul) + minnvec = 32; + nvec = max_t(int, minnvec, roundup_pow_of_two(nvec)); + return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info); } static struct msi_domain_ops its_pci_msi_ops = { diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c index 8881a053c173..7b8e87b493fe 100644 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c @@ -73,6 +73,8 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, /* ITS specific DeviceID, as the core ITS ignores dev. */ info->scratchpad[0].ul = dev_id; + /* Allocate at least 32 MSIs, and always as a power of 2 */ + nvec = max_t(int, 32, roundup_pow_of_two(nvec)); return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d7842d312d3e..316a57530f6d 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -23,6 +23,8 @@ #include <linux/dma-iommu.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> +#include <linux/list.h> +#include <linux/list_sort.h> #include <linux/log2.h> #include <linux/mm.h> #include <linux/msi.h> @@ -160,7 +162,7 @@ static struct { } vpe_proxy; static LIST_HEAD(its_nodes); -static DEFINE_SPINLOCK(its_lock); +static DEFINE_RAW_SPINLOCK(its_lock); static struct rdists *gic_rdists; static struct irq_domain *its_parent; @@ -1421,112 +1423,176 @@ static struct irq_chip its_irq_chip = { .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, }; + /* * How we allocate LPIs: * - * The GIC has id_bits bits for interrupt identifiers. From there, we - * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as - * we allocate LPIs by chunks of 32, we can shift the whole thing by 5 - * bits to the right. + * lpi_range_list contains ranges of LPIs that are available to + * allocate from. To allocate LPIs, just pick the first range that + * fits the required allocation, and reduce it by the required + * amount. Once empty, remove the range from the list. + * + * To free a range of LPIs, add a free range to the list, sort it and + * merge the result if the new range happens to be adjacent to an + * already free block. * - * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
+ * The consequence of the above is that allocation cost is low, but + * freeing is expensive. We assume that freeing rarely occurs. */ -#define IRQS_PER_CHUNK_SHIFT 5 -#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT) -#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ -static unsigned long *lpi_bitmap; -static u32 lpi_chunks; -static DEFINE_SPINLOCK(lpi_lock); +static DEFINE_MUTEX(lpi_range_lock); +static LIST_HEAD(lpi_range_list); + +struct lpi_range { + struct list_head entry; + u32 base_id; + u32 span; +}; -static int its_lpi_to_chunk(int lpi) +static struct lpi_range *mk_lpi_range(u32 base, u32 span) { - return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT; + struct lpi_range *range; + + range = kzalloc(sizeof(*range), GFP_KERNEL); + if (range) { + INIT_LIST_HEAD(&range->entry); + range->base_id = base; + range->span = span; + } + + return range; } -static int its_chunk_to_lpi(int chunk) +static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b) { - return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192; + struct lpi_range *ra, *rb; + + ra = container_of(a, struct lpi_range, entry); + rb = container_of(b, struct lpi_range, entry); + + return rb->base_id - ra->base_id; } -static int __init its_lpi_init(u32 id_bits) +static void merge_lpi_ranges(void) { - lpi_chunks = its_lpi_to_chunk(1UL << id_bits); + struct lpi_range *range, *tmp; - lpi_bitmap = kcalloc(BITS_TO_LONGS(lpi_chunks), sizeof(long), - GFP_KERNEL); - if (!lpi_bitmap) { - lpi_chunks = 0; - return -ENOMEM; + list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { + if (!list_is_last(&range->entry, &lpi_range_list) && + (tmp->base_id == (range->base_id + range->span))) { + tmp->base_id = range->base_id; + tmp->span += range->span; + list_del(&range->entry); + kfree(range); + } } +} - pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks); - return 0; +static int alloc_lpi_range(u32 nr_lpis, u32 *base) +{ + struct lpi_range *range, *tmp; + int err = -ENOSPC; + + mutex_lock(&lpi_range_lock); + + list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { + if (range->span >= nr_lpis) { + *base = range->base_id; + range->base_id += nr_lpis; + range->span -= nr_lpis; + + if (range->span == 0) { + list_del(&range->entry); + kfree(range); + } + + err = 0; + break; + } + } + + mutex_unlock(&lpi_range_lock); + + pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis); + return err; } -static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) +static int free_lpi_range(u32 base, u32 nr_lpis) { - unsigned long *bitmap = NULL; - int chunk_id; - int nr_chunks; - int i; + struct lpi_range *new; + int err = 0; + + mutex_lock(&lpi_range_lock); + + new = mk_lpi_range(base, nr_lpis); + if (!new) { + err = -ENOMEM; + goto out; + } + + list_add(&new->entry, &lpi_range_list); + list_sort(NULL, &lpi_range_list, lpi_range_cmp); + merge_lpi_ranges(); +out: + mutex_unlock(&lpi_range_lock); + return err; +} + +static int __init its_lpi_init(u32 id_bits) +{ + u32 lpis = (1UL << id_bits) - 8192; + u32 numlpis; + int err; + + numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer); + + if (numlpis > 2 && !WARN_ON(numlpis > lpis)) { + lpis = numlpis; + pr_info("ITS: Using hypervisor restricted LPI range [%u]\n", + lpis); + } - nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK); + /* + * Initializing the allocator is just the same as freeing the + * full range of LPIs.
+ */ + err = free_lpi_range(8192, lpis); + pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); + return err; +} - spin_lock(&lpi_lock); +static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) +{ + unsigned long *bitmap = NULL; + int err = 0; do { - chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks, - 0, nr_chunks, 0); - if (chunk_id < lpi_chunks) + err = alloc_lpi_range(nr_irqs, base); + if (!err) break; - nr_chunks--; - } while (nr_chunks > 0); + nr_irqs /= 2; + } while (nr_irqs > 0); - if (!nr_chunks) + if (err) goto out; - bitmap = kcalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK), - sizeof(long), - GFP_ATOMIC); + bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC); if (!bitmap) goto out; - for (i = 0; i < nr_chunks; i++) - set_bit(chunk_id + i, lpi_bitmap); - - *base = its_chunk_to_lpi(chunk_id); - *nr_ids = nr_chunks * IRQS_PER_CHUNK; + *nr_ids = nr_irqs; out: - spin_unlock(&lpi_lock); - if (!bitmap) *base = *nr_ids = 0; return bitmap; } -static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids) +static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) { - int lpi; - - spin_lock(&lpi_lock); - - for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) { - int chunk = its_lpi_to_chunk(lpi); - - BUG_ON(chunk > lpi_chunks); - if (test_bit(chunk, lpi_bitmap)) { - clear_bit(chunk, lpi_bitmap); - } else { - pr_err("Bad LPI chunk %d\n", chunk); - } - } - - spin_unlock(&lpi_lock); - + WARN_ON(free_lpi_range(base, nr_ids)); kfree(bitmap); } @@ -1559,7 +1625,7 @@ static int __init its_alloc_lpi_tables(void) { phys_addr_t paddr; - lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS); + lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer); gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); if (!gic_rdists->prop_page) { pr_err("Failed to allocate PROPBASE\n"); @@ -1997,12 +2063,12 @@ static void its_cpu_init_collections(void) { struct its_node *its; - spin_lock(&its_lock); + raw_spin_lock(&its_lock); list_for_each_entry(its, &its_nodes, entry) its_cpu_init_collection(its); - spin_unlock(&its_lock); + raw_spin_unlock(&its_lock); } static struct its_device *its_find_device(struct its_node *its, u32 dev_id) @@ -2134,17 +2200,20 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, if (!its_alloc_device_table(its, dev_id)) return NULL; + if (WARN_ON(!is_power_of_2(nvecs))) + nvecs = roundup_pow_of_two(nvecs); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); /* - * We allocate at least one chunk worth of LPIs bet device, - * and thus that many ITEs. The device may require less though. + * Even if the device wants a single LPI, the ITT must be + * sized as a power of two (and you need at least one bit...). 
*/ - nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs)); + nr_ites = max(2, nvecs); sz = nr_ites * its->ite_size; sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; itt = kzalloc(sz, GFP_KERNEL); if (alloc_lpis) { - lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); + lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); if (lpi_map) col_map = kcalloc(nr_lpis, sizeof(*col_map), GFP_KERNEL); @@ -2379,9 +2448,9 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, /* If all interrupts have been freed, start mopping the floor */ if (bitmap_empty(its_dev->event_map.lpi_map, its_dev->event_map.nr_lpis)) { - its_lpi_free_chunks(its_dev->event_map.lpi_map, - its_dev->event_map.lpi_base, - its_dev->event_map.nr_lpis); + its_lpi_free(its_dev->event_map.lpi_map, + its_dev->event_map.lpi_base, + its_dev->event_map.nr_lpis); kfree(its_dev->event_map.col_map); /* Unmap device/itt */ @@ -2780,7 +2849,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, } if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { - its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); + its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); its_free_prop_table(vm->vprop_page); } } @@ -2795,18 +2864,18 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq BUG_ON(!vm); - bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids); + bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); if (!bitmap) return -ENOMEM; if (nr_ids < nr_irqs) { - its_lpi_free_chunks(bitmap, base, nr_ids); + its_lpi_free(bitmap, base, nr_ids); return -ENOMEM; } vprop_page = its_allocate_prop_table(GFP_KERNEL); if (!vprop_page) { - its_lpi_free_chunks(bitmap, base, nr_ids); + its_lpi_free(bitmap, base, nr_ids); return -ENOMEM; } @@ -2833,7 +2902,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq if (i > 0) its_vpe_irq_domain_free(domain, virq, i - 1); - its_lpi_free_chunks(bitmap, base, nr_ids); + its_lpi_free(bitmap, base, nr_ids); its_free_prop_table(vprop_page); } @@ -3070,7 +3139,7 @@ static int its_save_disable(void) struct its_node *its; int err = 0; - spin_lock(&its_lock); + raw_spin_lock(&its_lock); list_for_each_entry(its, &its_nodes, entry) { void __iomem *base; @@ -3102,7 +3171,7 @@ err: writel_relaxed(its->ctlr_save, base + GITS_CTLR); } } - spin_unlock(&its_lock); + raw_spin_unlock(&its_lock); return err; } @@ -3112,7 +3181,7 @@ static void its_restore_enable(void) struct its_node *its; int ret; - spin_lock(&its_lock); + raw_spin_lock(&its_lock); list_for_each_entry(its, &its_nodes, entry) { void __iomem *base; int i; @@ -3164,7 +3233,7 @@ static void its_restore_enable(void) GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) its_cpu_init_collection(its); } - spin_unlock(&its_lock); + raw_spin_unlock(&its_lock); } static struct syscore_ops its_syscore_ops = { @@ -3398,9 +3467,9 @@ static int __init its_probe_one(struct resource *res, if (err) goto out_free_tables; - spin_lock(&its_lock); + raw_spin_lock(&its_lock); list_add(&its->entry, &its_nodes); - spin_unlock(&its_lock); + raw_spin_unlock(&its_lock); return 0; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 76ea56d779a1..e214181b77b7 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -877,7 +877,7 @@ static struct irq_chip gic_eoimode1_chip = { .flags = IRQCHIP_SET_TYPE_MASKED, }; -#define GIC_ID_NR (1U << gic_data.rdists.id_bits) +#define GIC_ID_NR (1U << 
GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) @@ -1091,7 +1091,7 @@ static int __init gic_init_bases(void __iomem *dist_base, * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) */ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); - gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer); + gic_data.rdists.gicd_typer = typer; gic_irqs = GICD_TYPER_IRQS(typer); if (gic_irqs > 1020) gic_irqs = 1020; diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index fc5953dea509..2ff08986b536 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -165,6 +165,7 @@ static int __init intc_1chip_of_init(struct device_node *node, return ingenic_intc_of_init(node, 1); } IRQCHIP_DECLARE(jz4740_intc, "ingenic,jz4740-intc", intc_1chip_of_init); +IRQCHIP_DECLARE(jz4725b_intc, "ingenic,jz4725b-intc", intc_1chip_of_init); static int __init intc_2chip_of_init(struct device_node *node, struct device_node *parent) diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 3a7e8905a97e..3df527fcf4e1 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -159,6 +159,7 @@ static const struct stm32_exti_bank *stm32mp1_exti_banks[] = { }; static const struct stm32_desc_irq stm32mp1_desc_irq[] = { + { .exti = 0, .irq_parent = 6 }, { .exti = 1, .irq_parent = 7 }, { .exti = 2, .irq_parent = 8 }, { .exti = 3, .irq_parent = 9 }, diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index edb35a5c57ea..a99fc0ced7a7 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -728,9 +728,6 @@ EXPORT_SYMBOL_GPL(vsp1_du_setup_lif); */ void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index) { - struct vsp1_device *vsp1 = dev_get_drvdata(dev); - - mutex_lock(&vsp1->drm->lock); } EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin); @@ -846,6 +843,7 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index, drm_pipe->crc = cfg->crc; + mutex_lock(&vsp1->drm->lock); vsp1_du_pipeline_setup_inputs(vsp1, pipe); vsp1_du_pipeline_configure(pipe); mutex_unlock(&vsp1->drm->lock); diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index fcfab6635f9c..81b150e5dfdb 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -174,6 +174,7 @@ static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog) rcu_assign_pointer(raw->progs, new_array); bpf_prog_array_free(old_array); + bpf_prog_put(prog); unlock: mutex_unlock(&ir_raw_handler_lock); return ret; diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c index 2e0066b1a31c..e7948908e78c 100644 --- a/drivers/media/rc/rc-ir-raw.c +++ b/drivers/media/rc/rc-ir-raw.c @@ -30,13 +30,13 @@ static int ir_raw_event_thread(void *data) while (kfifo_out(&raw->kfifo, &ev, 1)) { if (is_timing_event(ev)) { if (ev.duration == 0) - dev_err(&dev->dev, "nonsensical timing event of duration 0"); + dev_warn_once(&dev->dev, "nonsensical timing event of duration 0"); if (is_timing_event(raw->prev_ev) && !is_transition(&ev, &raw->prev_ev)) - dev_err(&dev->dev, "two consecutive events of type %s", - TO_STR(ev.pulse)); + dev_warn_once(&dev->dev, "two consecutive events of type %s", + TO_STR(ev.pulse)); if (raw->prev_ev.reset && ev.pulse == 0) - dev_err(&dev->dev, "timing event after reset should be pulse"); + dev_warn_once(&dev->dev, "timing 
event after reset should be pulse"); } list_for_each_entry(handler, &ir_raw_handler_list, list) if (dev->enabled_protocols & diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 2e222d9ee01f..ca68e1d2b2f9 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -679,6 +679,14 @@ static void ir_timer_repeat(struct timer_list *t) spin_unlock_irqrestore(&dev->keylock, flags); } +static unsigned int repeat_period(int protocol) +{ + if (protocol >= ARRAY_SIZE(protocols)) + return 100; + + return protocols[protocol].repeat_period; +} + /** * rc_repeat() - signals that a key is still pressed * @dev: the struct rc_dev descriptor of the device @@ -691,7 +699,7 @@ void rc_repeat(struct rc_dev *dev) { unsigned long flags; unsigned int timeout = nsecs_to_jiffies(dev->timeout) + - msecs_to_jiffies(protocols[dev->last_protocol].repeat_period); + msecs_to_jiffies(repeat_period(dev->last_protocol)); struct lirc_scancode sc = { .scancode = dev->last_scancode, .rc_proto = dev->last_protocol, .keycode = dev->keypressed ? dev->last_keycode : KEY_RESERVED, @@ -803,7 +811,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode, if (dev->keypressed) { dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) + - msecs_to_jiffies(protocols[protocol].repeat_period); + msecs_to_jiffies(repeat_period(protocol)); mod_timer(&dev->timer_keyup, dev->keyup_jiffies); } spin_unlock_irqrestore(&dev->keylock, flags); diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index 6b16946f9b05..8fd5ec4d6042 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -67,10 +67,8 @@ static struct file *cxl_getfile(const char *name, const struct file_operations *fops, void *priv, int flags) { - struct qstr this; - struct path path; struct file *file; - struct inode *inode = NULL; + struct inode *inode; int rc; /* strongly inspired by anon_inode_getfile() */ @@ -91,23 +89,11 @@ static struct file *cxl_getfile(const char *name, goto err_fs; } - file = ERR_PTR(-ENOMEM); - this.name = name; - this.len = strlen(name); - this.hash = 0; - path.dentry = d_alloc_pseudo(cxl_vfs_mount->mnt_sb, &this); - if (!path.dentry) + file = alloc_file_pseudo(inode, cxl_vfs_mount, name, + flags & (O_ACCMODE | O_NONBLOCK), fops); + if (IS_ERR(file)) goto err_inode; - path.mnt = mntget(cxl_vfs_mount); - d_instantiate(path.dentry, inode); - - file = alloc_file(&path, OPEN_FMODE(flags), fops); - if (IS_ERR(file)) { - path_put(&path); - goto err_fs; - } - file->f_flags = flags & (O_ACCMODE | O_NONBLOCK); file->private_data = priv; return file; diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 75f781c11e89..de4e6e5bf304 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -293,9 +293,10 @@ static void mxcmci_swap_buffers(struct mmc_data *data) int i; for_each_sg(data->sg, sg, data->sg_len, i) { - void *buf = kmap_atomic(sg_page(sg) + sg->offset; + void *buf = kmap_atomic(sg_page(sg) + sg->offset); buffer_swap32(buf, sg->length); kunmap_atomic(buf); + } } #else static inline void mxcmci_swap_buffers(struct mmc_data *data) {} diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 63e3844c5bec..217b790d22ed 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1717,6 +1717,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, goto err_upper_unlink; } + bond->nest_level = dev_get_nest_level(bond_dev) + 1; + /* If the mode uses primary, then the 
following is handled by * bond_change_active_slave(). */ @@ -1764,7 +1766,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); - bond->nest_level = dev_get_nest_level(bond_dev); netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n", slave_dev->name, @@ -3415,6 +3416,13 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res, } } +static int bond_get_nest_level(struct net_device *bond_dev) +{ + struct bonding *bond = netdev_priv(bond_dev); + + return bond->nest_level; +} + static void bond_get_stats(struct net_device *bond_dev, struct rtnl_link_stats64 *stats) { @@ -3423,7 +3431,7 @@ static void bond_get_stats(struct net_device *bond_dev, struct list_head *iter; struct slave *slave; - spin_lock(&bond->stats_lock); + spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev)); memcpy(stats, &bond->bond_stats, sizeof(*stats)); rcu_read_lock(); @@ -4227,6 +4235,7 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_neigh_setup = bond_neigh_setup, .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, + .ndo_get_lock_subclass = bond_get_nest_level, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_netpoll_setup = bond_netpoll_setup, .ndo_netpoll_cleanup = bond_netpoll_cleanup, @@ -4725,6 +4734,7 @@ static int bond_init(struct net_device *bond_dev) if (!bond->wq) return -ENOMEM; + bond->nest_level = SINGLE_DEPTH_NESTING; netdev_lockdep_set_classes(bond_dev); list_add_tail(&bond->bond_list, &bn->dev_list); diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 12ff0020ecd6..b7dfd4109d24 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -1072,6 +1072,7 @@ static void ems_usb_disconnect(struct usb_interface *intf) usb_free_urb(dev->intr_urb); kfree(dev->intr_in_buffer); + kfree(dev->tx_msg_buffer); } } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 9ef07a06aceb..bb28c701381a 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2617,7 +2617,6 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .rmu_disable = mv88e6085_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, - .serdes_power = mv88e6341_serdes_power, }; static const struct mv88e6xxx_ops mv88e6095_ops = { @@ -2783,6 +2782,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .serdes_power = mv88e6341_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, }; @@ -2960,7 +2960,6 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, - .serdes_power = mv88e6341_serdes_power, }; static const struct mv88e6xxx_ops mv88e6176_ops = { @@ -3336,6 +3335,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .serdes_power = mv88e6341_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, }; diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c index b6d735bf8011..342ae08ec3c2 100644 --- a/drivers/net/ethernet/8390/mac8390.c +++ b/drivers/net/ethernet/8390/mac8390.c @@ -153,9 +153,6 @@ static void 
dayna_block_input(struct net_device *dev, int count, static void dayna_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page); -#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) -#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) - #define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c)) /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ @@ -239,7 +236,7 @@ static enum mac8390_access mac8390_testio(unsigned long membase) unsigned long outdata = 0xA5A0B5B0; unsigned long indata = 0x00000000; /* Try writing 32 bits */ - memcpy_toio(membase, &outdata, 4); + memcpy_toio((void __iomem *)membase, &outdata, 4); /* Now compare them */ if (memcmp_withio(&outdata, membase, 4) == 0) return ACCESS_32; @@ -711,7 +708,7 @@ static void sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { unsigned long hdr_start = (ring_page - WD_START_PG)<<8; - memcpy_fromio(hdr, dev->mem_start + hdr_start, 4); + memcpy_fromio(hdr, (void __iomem *)dev->mem_start + hdr_start, 4); /* Fix endianness */ hdr->count = swab16(hdr->count); } @@ -725,13 +722,16 @@ static void sane_block_input(struct net_device *dev, int count, if (xfer_start + count > ei_status.rmem_end) { /* We must wrap the input move. */ int semi_count = ei_status.rmem_end - xfer_start; - memcpy_fromio(skb->data, dev->mem_start + xfer_base, + memcpy_fromio(skb->data, + (void __iomem *)dev->mem_start + xfer_base, semi_count); count -= semi_count; - memcpy_fromio(skb->data + semi_count, ei_status.rmem_start, - count); + memcpy_fromio(skb->data + semi_count, + (void __iomem *)ei_status.rmem_start, count); } else { - memcpy_fromio(skb->data, dev->mem_start + xfer_base, count); + memcpy_fromio(skb->data, + (void __iomem *)dev->mem_start + xfer_base, + count); } } @@ -740,7 +740,7 @@ static void sane_block_output(struct net_device *dev, int count, { long shmem = (start_page - WD_START_PG)<<8; - memcpy_toio(dev->mem_start + shmem, buf, count); + memcpy_toio((void __iomem *)dev->mem_start + shmem, buf, count); } /* dayna block input/output */ diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 1b9d3130af4d..17f12c18d225 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -333,6 +333,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); + io_sq->dma_addr_bits = ena_dev->dma_addr_bits; io_sq->desc_entry_size = (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 
sizeof(struct ena_eth_io_tx_desc) : diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 4b5d625de8f0..8a3a60bb2688 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1111,14 +1111,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata) if (pdata->tx_pause != pdata->phy.tx_pause) { new_state = 1; - pdata->hw_if.config_tx_flow_control(pdata); pdata->tx_pause = pdata->phy.tx_pause; + pdata->hw_if.config_tx_flow_control(pdata); } if (pdata->rx_pause != pdata->phy.rx_pause) { new_state = 1; - pdata->hw_if.config_rx_flow_control(pdata); pdata->rx_pause = pdata->phy.rx_pause; + pdata->hw_if.config_rx_flow_control(pdata); } /* Speed support */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 956860a69797..3bdab972420b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -762,7 +762,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); hw_atl_rpfl2multicast_flr_en_set(self, - IS_FILTER_ENABLED(IFF_MULTICAST), 0); + IS_FILTER_ENABLED(IFF_ALLMULTI), 0); hw_atl_rpfl2_accept_all_mc_packets_set(self, IS_FILTER_ENABLED(IFF_ALLMULTI)); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 5d08d2aeb172..e337da6ba2a4 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1083,6 +1083,8 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count); lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs), GFP_KERNEL); + if (!lmac->dmacs) + return -ENOMEM; /* Enable lmac */ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 00fc5f1afb1d..7dddb9e748b8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -1038,10 +1038,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb, OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid)); req->local_port = cpu_to_be16(f->fs.val.lport); req->peer_port = cpu_to_be16(f->fs.val.fport); - req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 | - f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24; - req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 | - f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24; + memcpy(&req->local_ip, f->fs.val.lip, 4); + memcpy(&req->peer_ip, f->fs.val.fip, 4); req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE || f->fs.newvlan == VLAN_REWRITE) | DELACK_V(f->fs.hitcnts) | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index bc03c175a3cd..a8926e97935e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3072,6 +3072,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev, adapter->geneve_port = 0; t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0); + break; default: return; } @@ -3157,6 +3158,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev, t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 
GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); + break; default: return; } diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 90c645b8538e..60641e202534 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2047,28 +2047,42 @@ static int enic_stop(struct net_device *netdev) return 0; } +static int _enic_change_mtu(struct net_device *netdev, int new_mtu) +{ + bool running = netif_running(netdev); + int err = 0; + + ASSERT_RTNL(); + if (running) { + err = enic_stop(netdev); + if (err) + return err; + } + + netdev->mtu = new_mtu; + + if (running) { + err = enic_open(netdev); + if (err) + return err; + } + + return 0; +} + static int enic_change_mtu(struct net_device *netdev, int new_mtu) { struct enic *enic = netdev_priv(netdev); - int running = netif_running(netdev); if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) return -EOPNOTSUPP; - if (running) - enic_stop(netdev); - - netdev->mtu = new_mtu; - if (netdev->mtu > enic->port_mtu) netdev_warn(netdev, - "interface MTU (%d) set higher than port MTU (%d)\n", - netdev->mtu, enic->port_mtu); + "interface MTU (%d) set higher than port MTU (%d)\n", + netdev->mtu, enic->port_mtu); - if (running) - enic_open(netdev); - - return 0; + return _enic_change_mtu(netdev, new_mtu); } static void enic_change_mtu_work(struct work_struct *work) @@ -2076,47 +2090,9 @@ static void enic_change_mtu_work(struct work_struct *work) struct enic *enic = container_of(work, struct enic, change_mtu_work); struct net_device *netdev = enic->netdev; int new_mtu = vnic_dev_mtu(enic->vdev); - int err; - unsigned int i; - - new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu)); rtnl_lock(); - - /* Stop RQ */ - del_timer_sync(&enic->notify_timer); - - for (i = 0; i < enic->rq_count; i++) - napi_disable(&enic->napi[i]); - - vnic_intr_mask(&enic->intr[0]); - enic_synchronize_irqs(enic); - err = vnic_rq_disable(&enic->rq[0]); - if (err) { - rtnl_unlock(); - netdev_err(netdev, "Unable to disable RQ.\n"); - return; - } - vnic_rq_clean(&enic->rq[0], enic_free_rq_buf); - vnic_cq_clean(&enic->cq[0]); - vnic_intr_clean(&enic->intr[0]); - - /* Fill RQ with new_mtu-sized buffers */ - netdev->mtu = new_mtu; - vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); - /* Need at least one buffer on ring to get going */ - if (vnic_rq_desc_used(&enic->rq[0]) == 0) { - rtnl_unlock(); - netdev_err(netdev, "Unable to alloc receive buffers.\n"); - return; - } - - /* Start RQ */ - vnic_rq_enable(&enic->rq[0]); - napi_enable(&enic->napi[0]); - vnic_intr_unmask(&enic->intr[0]); - enic_notify_timer_start(enic); - + (void)_enic_change_mtu(netdev, new_mtu); rtnl_unlock(); netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu); @@ -2916,7 +2892,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ enic->port_mtu = enic->config.mtu; - (void)enic_change_mtu(netdev, enic->port_mtu); err = enic_set_mac_addr(netdev, enic->mac_addr); if (err) { @@ -3006,6 +2981,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* MTU range: 68 - 9000 */ netdev->min_mtu = ENIC_MIN_MTU; netdev->max_mtu = ENIC_MAX_MTU; + netdev->mtu = enic->port_mtu; err = register_netdev(netdev); if (err) { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 5b122728dcb4..09e9da10b786 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ 
b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -983,6 +983,7 @@ static int nic_dev_init(struct pci_dev *pdev) hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, nic_dev, link_status_event_handler); + SET_NETDEV_DEV(netdev, &pdev->dev); err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "Failed to register netdev\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index eb9eb7aa953a..405236cf0b04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -858,8 +858,6 @@ struct mlx5e_profile { mlx5e_fp_handle_rx_cqe handle_rx_cqe; mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; } rx_handlers; - void (*netdev_registered_init)(struct mlx5e_priv *priv); - void (*netdev_registered_remove)(struct mlx5e_priv *priv); int max_tc; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 86bc9ac99586..722998d68564 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -443,16 +443,12 @@ static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) bool is_new; int err; - if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) - return -EINVAL; - - if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) - return -EINVAL; - - if (!MLX5_DSCP_SUPPORTED(priv->mdev)) - return -EINVAL; + if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) || + !MLX5_DSCP_SUPPORTED(priv->mdev)) + return -EOPNOTSUPP; - if (app->protocol >= MLX5E_MAX_DSCP) + if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) || + (app->protocol >= MLX5E_MAX_DSCP)) return -EINVAL; /* Save the old entry info */ @@ -500,16 +496,12 @@ static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) struct mlx5e_priv *priv = netdev_priv(dev); int err; - if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) - return -EINVAL; - - if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) - return -EINVAL; - - if (!MLX5_DSCP_SUPPORTED(priv->mdev)) - return -EINVAL; + if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) || + !MLX5_DSCP_SUPPORTED(priv->mdev)) + return -EOPNOTSUPP; - if (app->protocol >= MLX5E_MAX_DSCP) + if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) || + (app->protocol >= MLX5E_MAX_DSCP)) return -EINVAL; /* Skip if no dscp app entry */ @@ -1146,7 +1138,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) { int err; - err = mlx5_set_trust_state(priv->mdev, trust_state); + err = mlx5_set_trust_state(priv->mdev, trust_state); if (err) return err; priv->dcbx_dp.trust_state = trust_state; @@ -1172,6 +1164,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv) struct mlx5_core_dev *mdev = priv->mdev; int err; + priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP; + if (!MLX5_DSCP_SUPPORTED(mdev)) return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index dae4156a710d..c592678ab5f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3712,7 +3712,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, if (!reset) { params->sw_mtu = new_mtu; - set_mtu_cb(priv); + if (set_mtu_cb) + set_mtu_cb(priv); netdev->mtu = params->sw_mtu; goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3a2c4e548226..dfbcda0d0e08 
100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1970,15 +1970,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv, static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) { struct mlx5_core_dev *fmdev, *pmdev; - u16 func_id, peer_id; + u64 fsystem_guid, psystem_guid; fmdev = priv->mdev; pmdev = peer_priv->mdev; - func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn)); - peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn)); + mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid); + mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid); - return (func_id == peer_id); + return (fsystem_guid == psystem_guid); } static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index dd01ad4c0b54..40dba9e8af92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1696,7 +1696,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) int vport_num; int err; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; esw_info(dev, @@ -1765,7 +1765,7 @@ abort: void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) { - if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) + if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev)) return; esw_info(esw->dev, "cleanup\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index af3bb2f7a504..b7c21eb21a21 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -76,6 +76,7 @@ void mlx5i_init(struct mlx5_core_dev *mdev, void *ppriv) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); + u16 max_mtu; /* priv init */ priv->mdev = mdev; @@ -84,6 +85,9 @@ void mlx5i_init(struct mlx5_core_dev *mdev, priv->ppriv = ppriv; mutex_init(&priv->state_lock); + mlx5_query_port_max_mtu(mdev, &max_mtu, 1); + netdev->mtu = max_mtu; + mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev), netdev->mtu); mlx5i_build_nic_params(mdev, &priv->channels.params); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 3c0d882ba183..f6f6a568d66a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -327,12 +327,16 @@ static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block, list_add(&resource->list, &block->resource_list); } +static void mlxsw_afa_resource_del(struct mlxsw_afa_resource *resource) +{ + list_del(&resource->list); +} + static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block) { struct mlxsw_afa_resource *resource, *tmp; list_for_each_entry_safe(resource, tmp, &block->resource_list, list) { - list_del(&resource->list); resource->destructor(block, resource); } } @@ -530,6 +534,7 @@ static void mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block, struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref) { + mlxsw_afa_resource_del(&fwd_entry_ref->resource); mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry); kfree(fwd_entry_ref); } @@ -579,6 +584,7 @@ static void mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block, struct mlxsw_afa_counter *counter) { + 
mlxsw_afa_resource_del(&counter->resource); block->afa->ops->counter_index_put(block->afa->ops_priv, counter->counter_index); kfree(counter); @@ -626,8 +632,8 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, char *oneact; char *actions; - if (WARN_ON(block->finished)) - return NULL; + if (block->finished) + return ERR_PTR(-EINVAL); if (block->cur_act_index + action_size > block->afa->max_acts_per_set) { struct mlxsw_afa_set *set; @@ -637,7 +643,7 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, */ set = mlxsw_afa_set_create(false); if (!set) - return NULL; + return ERR_PTR(-ENOBUFS); set->prev = block->cur_set; block->cur_act_index = 0; block->cur_set->next = set; @@ -724,8 +730,8 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, MLXSW_AFA_VLAN_CODE, MLXSW_AFA_VLAN_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP, MLXSW_AFA_VLAN_CMD_SET_OUTER, vid, MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp, @@ -806,8 +812,8 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block) MLXSW_AFA_TRAPDISC_CODE, MLXSW_AFA_TRAPDISC_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0); return 0; @@ -820,8 +826,8 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id) MLXSW_AFA_TRAPDISC_CODE, MLXSW_AFA_TRAPDISC_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, trap_id); @@ -836,8 +842,8 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, MLXSW_AFA_TRAPDISC_CODE, MLXSW_AFA_TRAPDISC_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, trap_id); @@ -856,6 +862,7 @@ static void mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block, struct mlxsw_afa_mirror *mirror) { + mlxsw_afa_resource_del(&mirror->resource); block->afa->ops->mirror_del(block->afa->ops_priv, mirror->local_in_port, mirror->span_id, @@ -908,8 +915,8 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block, char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAPDISC_CODE, MLXSW_AFA_TRAPDISC_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0); mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent); @@ -996,8 +1003,8 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE, MLXSW_AFA_FORWARD_SIZE); - if (!act) { - err = -ENOBUFS; + if (IS_ERR(act)) { + err = PTR_ERR(act); goto err_append_action; } mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS, @@ -1052,8 +1059,8 @@ int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block, { char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE, MLXSW_AFA_POLCNT_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES, counter_index); return 0; @@ -1123,8 +1130,8 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 
fid) char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_VIRFWD_CODE, MLXSW_AFA_VIRFWD_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid); return 0; } @@ -1193,8 +1200,8 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block, char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_MCROUTER_CODE, MLXSW_AFA_MCROUTER_SIZE); - if (!act) - return -ENOBUFS; + if (IS_ERR(act)) + return PTR_ERR(act); mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP, expected_irif, min_mtu, rmid_valid, kvdl_index); return 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 1decf3a1cad3..e57d23746585 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -80,7 +80,7 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port) return NFP_REPR_TYPE_VF; } - return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC; + return __NFP_REPR_TYPE_MAX; } static struct net_device * @@ -91,6 +91,8 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id) u8 port = 0; repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port); + if (repr_type > NFP_REPR_TYPE_MAX) + return NULL; reprs = rcu_dereference(app->reprs[repr_type]); if (!reprs) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 60f59abab009..ef6a8d39db2f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -53,7 +53,7 @@ #include "dwmac1000.h" #include "hwif.h" -#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) +#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) /* Module parameters */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 8d375e51a526..6a393b16a1fc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -257,7 +257,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev, return -ENOMEM; /* Enable pci device */ - ret = pcim_enable_device(pdev); + ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__); @@ -300,9 +300,45 @@ static int stmmac_pci_probe(struct pci_dev *pdev, static void stmmac_pci_remove(struct pci_dev *pdev) { stmmac_dvr_remove(&pdev->dev); + pci_disable_device(pdev); } -static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume); +static int stmmac_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + ret = stmmac_suspend(dev); + if (ret) + return ret; + + ret = pci_save_state(pdev); + if (ret) + return ret; + + pci_disable_device(pdev); + pci_wake_from_d3(pdev, true); + return 0; +} + +static int stmmac_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + pci_restore_state(pdev); + pci_set_power_state(pdev, PCI_D0); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + return stmmac_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); /* synthetic ID, no official vendor */ #define PCI_VENDOR_ID_STMMAC 0x700 diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 358edab9e72e..3e34cb8ac1d3 100644 --- 
a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2086,14 +2086,16 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, int i; for (i = 0; i < cpsw->data.slaves; i++) { - if (vid == cpsw->slaves[i].port_vlan) - return -EINVAL; + if (vid == cpsw->slaves[i].port_vlan) { + ret = -EINVAL; + goto err; + } } } dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); ret = cpsw_add_vlan_ale_entry(priv, vid); - +err: pm_runtime_put(cpsw->dev); return ret; } @@ -2119,22 +2121,17 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, for (i = 0; i < cpsw->data.slaves; i++) { if (vid == cpsw->slaves[i].port_vlan) - return -EINVAL; + goto err; } } dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0); - if (ret != 0) - return ret; - - ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, - HOST_PORT_NUM, ALE_VLAN, vid); - if (ret != 0) - return ret; - - ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, - 0, ALE_VLAN, vid); + ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, vid); + ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, + 0, ALE_VLAN, vid); +err: pm_runtime_put(cpsw->dev); return ret; } diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 93dc05c194d3..5766225a4ce1 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -394,7 +394,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx < 0) - return -EINVAL; + return -ENOENT; cpsw_ale_read(ale, idx, ale_entry); diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c index ba663e5af168..5135fc371f01 100644 --- a/drivers/net/netdevsim/devlink.c +++ b/drivers/net/netdevsim/devlink.c @@ -207,6 +207,7 @@ void nsim_devlink_teardown(struct netdevsim *ns) struct net *net = nsim_to_net(ns); bool *reg_devlink = net_generic(net, nsim_devlink_id); + devlink_resources_unregister(ns->devlink, NULL); devlink_unregister(ns->devlink); devlink_free(ns->devlink); ns->devlink = NULL; diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 0831b7142df7..0c5b68e7da51 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -218,7 +218,7 @@ out: static int mdio_mux_iproc_remove(struct platform_device *pdev) { - struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev); + struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev); mdio_mux_uninit(md->mux_handle); mdiobus_unregister(md->mii_bus); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index ed10d49eb5e0..aeca484a75b8 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1242,6 +1242,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) mod_timer(&dev->stat_monitor, jiffies + STAT_UPDATE_TIMER); } + + tasklet_schedule(&dev->bh); } return ret; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 53085c63277b..2b6ec927809e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -586,7 +586,8 @@ static struct sk_buff *receive_small(struct net_device *dev, struct receive_queue *rq, void *buf, void *ctx, unsigned int len, - unsigned int *xdp_xmit) + unsigned int *xdp_xmit, + unsigned int *rbytes) { struct sk_buff *skb; struct bpf_prog *xdp_prog; @@ -601,6 +602,7 @@ static struct sk_buff *receive_small(struct 
net_device *dev, int err; len -= vi->hdr_len; + *rbytes += len; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -705,11 +707,13 @@ static struct sk_buff *receive_big(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, - unsigned int len) + unsigned int len, + unsigned int *rbytes) { struct page *page = buf; struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); + *rbytes += len - vi->hdr_len; if (unlikely(!skb)) goto err; @@ -727,7 +731,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *buf, void *ctx, unsigned int len, - unsigned int *xdp_xmit) + unsigned int *xdp_xmit, + unsigned int *rbytes) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); @@ -740,6 +745,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, int err; head_skb = NULL; + *rbytes += len - vi->hdr_len; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -877,6 +883,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_buf; } + *rbytes += len; page = virt_to_head_page(buf); truesize = mergeable_ctx_to_truesize(ctx); @@ -932,6 +939,7 @@ err_skb: dev->stats.rx_length_errors++; break; } + *rbytes += len; page = virt_to_head_page(buf); put_page(page); } @@ -942,14 +950,13 @@ xdp_xmit: return NULL; } -static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, - void *buf, unsigned int len, void **ctx, - unsigned int *xdp_xmit) +static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, + void *buf, unsigned int len, void **ctx, + unsigned int *xdp_xmit, unsigned int *rbytes) { struct net_device *dev = vi->dev; struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; - int ret; if (unlikely(len < vi->hdr_len + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); @@ -961,23 +968,22 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, } else { put_page(virt_to_head_page(buf)); } - return 0; + return; } if (vi->mergeable_rx_bufs) - skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit); + skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, + rbytes); else if (vi->big_packets) - skb = receive_big(dev, vi, rq, buf, len); + skb = receive_big(dev, vi, rq, buf, len, rbytes); else - skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit); + skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes); if (unlikely(!skb)) - return 0; + return; hdr = skb_vnet_hdr(skb); - ret = skb->len; - if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -994,12 +1000,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, ntohs(skb->protocol), skb->len, skb->pkt_type); napi_gro_receive(&rq->napi, skb); - return ret; + return; frame_err: dev->stats.rx_frame_errors++; dev_kfree_skb(skb); - return 0; } /* Unlike mergeable buffers, all buffers are allocated to the @@ -1249,13 +1254,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget, while (received < budget && (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { - bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit); + receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes); received++; } } else { while (received < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { - bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit); + receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &bytes); received++; } } diff --git a/drivers/net/wan/lmc/lmc_main.c 
b/drivers/net/wan/lmc/lmc_main.c index 90a4ad9a2d08..b3a1b6f5c406 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1362,7 +1362,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ case 0x001: printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name); break; - case 0x010: + case 0x002: printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name); break; default: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 45928b5b8d97..4fffa6988087 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1785,7 +1785,8 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo) fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY; fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM; fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL; - fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus); + /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */ + fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1; fwreq->bus_nr = devinfo->pdev->bus->number; return fwreq; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index e20c30b29c03..c8ea63d02619 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -178,6 +178,17 @@ const struct iwl_cfg iwl9260_2ac_cfg = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; +const struct iwl_cfg iwl9260_killer_2ac_cfg = { + .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)", + .fw_name_pre = IWL9260A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + const struct iwl_cfg iwl9270_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9270", .fw_name_pre = IWL9260A_FW_PRE, @@ -267,6 +278,34 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = { .soc_latency = 5000, }; +const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = { + .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = { + .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; + const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9460", .fw_name_pre = IWL9000A_FW_PRE, @@ -327,6 +366,36 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = { .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; +const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = { + .name = "Killer 
(R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + +const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = { + .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index c503b26793f6..84a816809723 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -551,6 +551,7 @@ extern const struct iwl_cfg iwl8275_2ac_cfg; extern const struct iwl_cfg iwl4165_2ac_cfg; extern const struct iwl_cfg iwl9160_2ac_cfg; extern const struct iwl_cfg iwl9260_2ac_cfg; +extern const struct iwl_cfg iwl9260_killer_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg; @@ -558,10 +559,14 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc; extern const struct iwl_cfg iwl9461_2ac_cfg_soc; extern const struct iwl_cfg iwl9462_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_2ac_cfg_soc; +extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc; +extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc; extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl22000_2ac_cfg_hr; extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwl22000_2ac_cfg_jf; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 38234bda9017..8520523b91b4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -545,6 +545,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)}, @@ 
-554,6 +557,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, @@ -578,6 +582,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)}, @@ -604,6 +610,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -630,6 +638,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)}, @@ -656,6 +666,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -682,6 +694,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -708,6 +722,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)}, 
{IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -743,6 +759,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)}, @@ -771,6 +789,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)}, @@ -797,6 +817,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)}, diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index a57daecf1d57..9dd2ca62d84a 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -87,6 +87,7 @@ struct netfront_cb { /* IRQ name is queue name with "-tx" or "-rx" appended */ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) +static DECLARE_WAIT_QUEUE_HEAD(module_load_q); static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); struct netfront_stats { @@ -893,7 +894,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, struct sk_buff *skb, struct sk_buff_head *list) { - struct skb_shared_info *shinfo = skb_shinfo(skb); RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *nskb; @@ -902,15 +902,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, RING_GET_RESPONSE(&queue->rx, ++cons); skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; - if (shinfo->nr_frags == MAX_SKB_FRAGS) { + if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; BUG_ON(pull_to <= skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } - BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); + BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); - skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag), + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + skb_frag_page(nfrag), rx->offset, rx->status, PAGE_SIZE); skb_shinfo(nskb)->nr_frags = 0; @@ -1330,6 +1331,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); xenbus_switch_state(dev, XenbusStateInitialising); + wait_event(module_load_q, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown); return netdev; exit: diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c index a59b6c4bb5b8..ad3d17c42e23 100644 --- 
a/drivers/nubus/bus.c +++ b/drivers/nubus/bus.c @@ -5,6 +5,7 @@ // Copyright (C) 2017 Finn Thain #include <linux/device.h> +#include <linux/dma-mapping.h> #include <linux/list.h> #include <linux/nubus.h> #include <linux/seq_file.h> @@ -93,6 +94,8 @@ int nubus_device_register(struct nubus_board *board) board->dev.release = nubus_device_release; board->dev.bus = &nubus_bus_type; dev_set_name(&board->dev, "slot.%X", board->slot); + board->dev.dma_mask = &board->dev.coherent_dma_mask; + dma_set_mask(&board->dev, DMA_BIT_MASK(32)); return device_register(&board->dev); } diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 35b7fc87eac5..5cb40b2518f9 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -330,7 +330,7 @@ void pci_bus_add_device(struct pci_dev *dev) return; } - dev->is_added = 1; + pci_dev_assign_added(dev, true); } EXPORT_SYMBOL_GPL(pci_bus_add_device); @@ -347,14 +347,14 @@ void pci_bus_add_devices(const struct pci_bus *bus) list_for_each_entry(dev, &bus->devices, bus_list) { /* Skip already-added devices */ - if (dev->is_added) + if (pci_dev_is_added(dev)) continue; pci_bus_add_device(dev); } list_for_each_entry(dev, &bus->devices, bus_list) { /* Skip if device attach failed */ - if (!dev->is_added) + if (!pci_dev_is_added(dev)) continue; child = dev->subordinate; if (child) diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c index 4d6c20e47bed..cf0aa7cee5b0 100644 --- a/drivers/pci/controller/pcie-mobiveil.c +++ b/drivers/pci/controller/pcie-mobiveil.c @@ -107,7 +107,7 @@ #define CFG_WINDOW_TYPE 0 #define IO_WINDOW_TYPE 1 #define MEM_WINDOW_TYPE 2 -#define IB_WIN_SIZE (256 * 1024 * 1024 * 1024) +#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024) #define MAX_PIO_WINDOWS 8 /* Parameters for the waiting for link up routine */ diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 3a17b290df5d..ef0b1b6ba86f 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -509,7 +509,7 @@ static void enable_slot(struct acpiphp_slot *slot) list_for_each_entry(dev, &bus->devices, bus_list) { /* Assume that newly added devices are powered on already. 
*/ - if (!dev->is_added) + if (!pci_dev_is_added(dev)) dev->current_state = PCI_D0; } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 882f1f9596df..08817253c8a2 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -288,6 +288,7 @@ struct pci_sriov { /* pci_dev priv_flags */ #define PCI_DEV_DISCONNECTED 0 +#define PCI_DEV_ADDED 1 static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) { @@ -300,6 +301,16 @@ static inline bool pci_dev_is_disconnected(const struct pci_dev *dev) return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags); } +static inline void pci_dev_assign_added(struct pci_dev *dev, bool added) +{ + assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added); +} + +static inline bool pci_dev_is_added(const struct pci_dev *dev) +{ + return test_bit(PCI_DEV_ADDED, &dev->priv_flags); +} + #ifdef CONFIG_PCI_ATS void pci_restore_ats_state(struct pci_dev *dev); #else diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ac876e32de4b..611adcd9c169 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -2433,13 +2433,13 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) dev = pci_scan_single_device(bus, devfn); if (!dev) return 0; - if (!dev->is_added) + if (!pci_dev_is_added(dev)) nr++; for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) { dev = pci_scan_single_device(bus, devfn + fn); if (dev) { - if (!dev->is_added) + if (!pci_dev_is_added(dev)) nr++; dev->multifunction = 1; } diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 6f072eae4f7a..5e3d0dced2b8 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -19,11 +19,12 @@ static void pci_stop_dev(struct pci_dev *dev) { pci_pme_active(dev, false); - if (dev->is_added) { + if (pci_dev_is_added(dev)) { device_release_driver(&dev->dev); pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); - dev->is_added = 0; + + pci_dev_assign_added(dev, false); } if (dev->bus->self) diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c index 322de58eebaf..f66521c7f846 100644 --- a/drivers/platform/mips/cpu_hwmon.c +++ b/drivers/platform/mips/cpu_hwmon.c @@ -30,7 +30,8 @@ int loongson3_cpu_temp(int cpu) case PRID_REV_LOONGSON3B_R2: reg = ((reg >> 8) & 0xff) - 100; break; - case PRID_REV_LOONGSON3A_R3: + case PRID_REV_LOONGSON3A_R3_0: + case PRID_REV_LOONGSON3A_R3_1: reg = (reg & 0xffff)*731/0x4000 - 273; break; } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index a9f60d0ee02e..a23e7d394a0a 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -73,8 +73,8 @@ static int dasd_alloc_queue(struct dasd_block *); static void dasd_setup_queue(struct dasd_block *); static void dasd_free_queue(struct dasd_block *); static int dasd_flush_block_queue(struct dasd_block *); -static void dasd_device_tasklet(struct dasd_device *); -static void dasd_block_tasklet(struct dasd_block *); +static void dasd_device_tasklet(unsigned long); +static void dasd_block_tasklet(unsigned long); static void do_kick_device(struct work_struct *); static void do_restore_device(struct work_struct *); static void do_reload_device(struct work_struct *); @@ -125,8 +125,7 @@ struct dasd_device *dasd_alloc_device(void) dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); spin_lock_init(&device->mem_lock); atomic_set(&device->tasklet_scheduled, 0); - tasklet_init(&device->tasklet, - (void (*)(unsigned long)) dasd_device_tasklet, + tasklet_init(&device->tasklet, dasd_device_tasklet, (unsigned long) device); 
INIT_LIST_HEAD(&device->ccw_queue); timer_setup(&device->timer, dasd_device_timeout, 0); @@ -166,8 +165,7 @@ struct dasd_block *dasd_alloc_block(void) atomic_set(&block->open_count, -1); atomic_set(&block->tasklet_scheduled, 0); - tasklet_init(&block->tasklet, - (void (*)(unsigned long)) dasd_block_tasklet, + tasklet_init(&block->tasklet, dasd_block_tasklet, (unsigned long) block); INIT_LIST_HEAD(&block->ccw_queue); spin_lock_init(&block->queue_lock); @@ -2064,8 +2062,9 @@ EXPORT_SYMBOL_GPL(dasd_flush_device_queue); /* * Acquire the device lock and process queues for the device. */ -static void dasd_device_tasklet(struct dasd_device *device) +static void dasd_device_tasklet(unsigned long data) { + struct dasd_device *device = (struct dasd_device *) data; struct list_head final_queue; atomic_set (&device->tasklet_scheduled, 0); @@ -2783,8 +2782,9 @@ static void __dasd_block_start_head(struct dasd_block *block) * block layer request queue, creates ccw requests, enqueues them on * a dasd_device and processes ccw requests that have been returned. */ -static void dasd_block_tasklet(struct dasd_block *block) +static void dasd_block_tasklet(unsigned long data) { + struct dasd_block *block = (struct dasd_block *) data; struct list_head final_queue; struct list_head *l, *n; struct dasd_ccw_req *cqr; @@ -3127,6 +3127,7 @@ static int dasd_alloc_queue(struct dasd_block *block) block->tag_set.nr_hw_queues = nr_hw_queues; block->tag_set.queue_depth = queue_depth; block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + block->tag_set.numa_node = NUMA_NO_NODE; rc = blk_mq_alloc_tag_set(&block->tag_set); if (rc) diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index e36a114354fc..b9ce93e9df89 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -708,7 +708,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu, struct ccw1 *ccw; cqr = lcu->rsu_cqr; - strncpy((char *) &cqr->magic, "ECKD", 4); + memcpy((char *) &cqr->magic, "ECKD", 4); ASCEBC((char *) &cqr->magic, 4); ccw = cqr->cpaddr; ccw->cmd_code = DASD_ECKD_CCW_RSCK; diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index b9ebb565ee2c..fab35c6170cc 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -426,7 +426,7 @@ dasd_add_busid(const char *bus_id, int features) if (!devmap) { /* This bus_id is new. 
*/ new->devindex = dasd_max_devindex++; - strncpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE); + strlcpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE); new->features = features; new->device = NULL; list_add(&new->list, &dasd_hashlists[hash]); diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index bbf95b78ef5d..4e7b55a14b1a 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1780,6 +1780,9 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device) struct dasd_eckd_private *private = device->private; int i; + if (!private) + return; + dasd_alias_disconnect_device_from_lcu(device); private->ned = NULL; private->sneq = NULL; @@ -2035,8 +2038,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device) static int dasd_eckd_online_to_ready(struct dasd_device *device) { - cancel_work_sync(&device->reload_device); - cancel_work_sync(&device->kick_validate); + if (cancel_work_sync(&device->reload_device)) + dasd_put_device(device); + if (cancel_work_sync(&device->kick_validate)) + dasd_put_device(device); + return 0; }; @@ -3535,7 +3541,7 @@ static int prepare_itcw(struct itcw *itcw, dcw = itcw_add_dcw(itcw, pfx_cmd, 0, &pfxdata, sizeof(pfxdata), total_data_size); - return PTR_RET(dcw); + return PTR_ERR_OR_ZERO(dcw); } static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 6ef8714dc693..93bb09da7fdc 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -313,7 +313,7 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device, ktime_get_real_ts64(&ts); header.tv_sec = ts.tv_sec; header.tv_usec = ts.tv_nsec / NSEC_PER_USEC; - strncpy(header.busid, dev_name(&device->cdev->dev), + strlcpy(header.busid, dev_name(&device->cdev->dev), DASD_EER_BUSID_SIZE); spin_lock_irqsave(&bufferlock, flags); @@ -356,7 +356,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device, ktime_get_real_ts64(&ts); header.tv_sec = ts.tv_sec; header.tv_usec = ts.tv_nsec / NSEC_PER_USEC; - strncpy(header.busid, dev_name(&device->cdev->dev), + strlcpy(header.busid, dev_name(&device->cdev->dev), DASD_EER_BUSID_SIZE); spin_lock_irqsave(&bufferlock, flags); diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index b1fcb76dd272..98f66b7b6794 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -455,6 +455,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) bdev->tag_set.nr_hw_queues = nr_requests; bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + bdev->tag_set.numa_node = NUMA_NO_NODE; ret = blk_mq_alloc_tag_set(&bdev->tag_set); if (ret) diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 0a4c13e1e76e..c6ab34f94b1b 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -12,11 +12,6 @@ GCOV_PROFILE_sclp_early_core.o := n KCOV_INSTRUMENT_sclp_early_core.o := n UBSAN_SANITIZE_sclp_early_core.o := n -ifneq ($(CC_FLAGS_MARCH),-march=z900) -CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH) -CFLAGS_sclp_early_core.o += -march=z900 -endif - CFLAGS_sclp_early_core.o += -D__NO_FORTIFY CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE) diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 79eb60958015..bbb3001b0961 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -334,37 +334,41 @@ do_kdsk_ioctl(struct 
kbd_data *kbd, struct kbentry __user *user_kbe, int cmd, int perm) { struct kbentry tmp; + unsigned long kb_index, kb_table; ushort *key_map, val, ov; if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry))) return -EFAULT; + kb_index = (unsigned long) tmp.kb_index; #if NR_KEYS < 256 - if (tmp.kb_index >= NR_KEYS) + if (kb_index >= NR_KEYS) return -EINVAL; #endif + kb_table = (unsigned long) tmp.kb_table; #if MAX_NR_KEYMAPS < 256 - if (tmp.kb_table >= MAX_NR_KEYMAPS) + if (kb_table >= MAX_NR_KEYMAPS) return -EINVAL; + kb_table = array_index_nospec(kb_table , MAX_NR_KEYMAPS); #endif switch (cmd) { case KDGKBENT: - key_map = kbd->key_maps[tmp.kb_table]; + key_map = kbd->key_maps[kb_table]; if (key_map) { - val = U(key_map[tmp.kb_index]); + val = U(key_map[kb_index]); if (KTYP(val) >= KBD_NR_TYPES) val = K_HOLE; } else - val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP); + val = (kb_index ? K_HOLE : K_NOSUCHMAP); return put_user(val, &user_kbe->kb_value); case KDSKBENT: if (!perm) return -EPERM; - if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) { + if (!kb_index && tmp.kb_value == K_NOSUCHMAP) { /* disallocate map */ - key_map = kbd->key_maps[tmp.kb_table]; + key_map = kbd->key_maps[kb_table]; if (key_map) { - kbd->key_maps[tmp.kb_table] = NULL; + kbd->key_maps[kb_table] = NULL; kfree(key_map); } break; @@ -375,18 +379,18 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe, if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)]) return -EINVAL; - if (!(key_map = kbd->key_maps[tmp.kb_table])) { + if (!(key_map = kbd->key_maps[kb_table])) { int j; key_map = kmalloc(sizeof(plain_map), GFP_KERNEL); if (!key_map) return -ENOMEM; - kbd->key_maps[tmp.kb_table] = key_map; + kbd->key_maps[kb_table] = key_map; for (j = 0; j < NR_KEYS; j++) key_map[j] = U(K_HOLE); } - ov = U(key_map[tmp.kb_index]); + ov = U(key_map[kb_index]); if (tmp.kb_value == ov) break; /* nothing to do */ /* @@ -395,7 +399,7 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe, if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) && !capable(CAP_SYS_ADMIN)) return -EPERM; - key_map[tmp.kb_index] = U(tmp.kb_value); + key_map[kb_index] = U(tmp.kb_value); break; } return 0; diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 76c158c41510..4f1a69c9d81d 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -61,7 +61,7 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) struct appldata_product_id id; int rc; - strncpy(id.prod_nr, "LNXAPPL", 7); + memcpy(id.prod_nr, "LNXAPPL", 7); id.prod_fn = myhdr->applid; id.record_nr = myhdr->record_num; id.version_nr = myhdr->version; diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c index ee6f3b563728..e69b12a40636 100644 --- a/drivers/s390/char/sclp_async.c +++ b/drivers/s390/char/sclp_async.c @@ -64,42 +64,18 @@ static struct notifier_block call_home_panic_nb = { .priority = INT_MAX, }; -static int proc_handler_callhome(struct ctl_table *ctl, int write, - void __user *buffer, size_t *count, - loff_t *ppos) -{ - unsigned long val; - int len, rc; - char buf[3]; - - if (!*count || (*ppos && !write)) { - *count = 0; - return 0; - } - if (!write) { - len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled); - rc = copy_to_user(buffer, buf, sizeof(buf)); - if (rc != 0) - return -EFAULT; - } else { - len = *count; - rc = kstrtoul_from_user(buffer, len, 0, &val); - if (rc) - return rc; - if (val != 0 && val != 1) - return -EINVAL; - callhome_enabled = val; - } - 
*count = len; - *ppos += len; - return 0; -} +static int zero; +static int one = 1; static struct ctl_table callhome_table[] = { { .procname = "callhome", + .data = &callhome_enabled, + .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_handler_callhome, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, }, {} }; diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 37e65a05517f..cdcde18e7220 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c @@ -113,16 +113,16 @@ static int crypt_enabled(struct tape_device *device) static void ext_to_int_kekl(struct tape390_kekl *in, struct tape3592_kekl *out) { - int i; + int len; memset(out, 0, sizeof(*out)); if (in->type == TAPE390_KEKL_TYPE_HASH) out->flags |= 0x40; if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH) out->flags |= 0x80; - strncpy(out->label, in->label, 64); - for (i = strlen(in->label); i < sizeof(out->label); i++) - out->label[i] = ' '; + len = min(sizeof(out->label), strlen(in->label)); + memcpy(out->label, in->label, len); + memset(out->label + len, ' ', sizeof(out->label) - len); ASCEBC(out->label, sizeof(out->label)); } diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c index a07102472ce9..b58df0dd0039 100644 --- a/drivers/s390/char/tape_class.c +++ b/drivers/s390/char/tape_class.c @@ -54,10 +54,10 @@ struct tape_class_device *register_tape_dev( if (!tcd) return ERR_PTR(-ENOMEM); - strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN); + strlcpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN); for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/')) *s = '!'; - strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN); + strlcpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN); for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/')) *s = '!'; @@ -77,7 +77,7 @@ struct tape_class_device *register_tape_dev( tcd->class_device = device_create(tape_class, device, tcd->char_device->dev, NULL, "%s", tcd->device_name); - rc = PTR_RET(tcd->class_device); + rc = PTR_ERR_OR_ZERO(tcd->class_device); if (rc) goto fail_with_cdev; rc = sysfs_create_link( diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index afbdee74147d..51038ec309c1 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -471,14 +471,17 @@ int chp_new(struct chp_id chpid) { struct channel_subsystem *css = css_by_id(chpid.cssid); struct channel_path *chp; - int ret; + int ret = 0; + mutex_lock(&css->mutex); if (chp_is_registered(chpid)) - return 0; - chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); - if (!chp) - return -ENOMEM; + goto out; + chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); + if (!chp) { + ret = -ENOMEM; + goto out; + } /* fill in status, etc. */ chp->chpid = chpid; chp->state = 1; @@ -505,21 +508,20 @@ int chp_new(struct chp_id chpid) put_device(&chp->dev); goto out; } - mutex_lock(&css->mutex); + if (css->cm_enabled) { ret = chp_add_cmg_attr(chp); if (ret) { device_unregister(&chp->dev); - mutex_unlock(&css->mutex); goto out; } } css->chps[chpid.id] = chp; - mutex_unlock(&css->mutex); goto out; out_free: kfree(chp); out: + mutex_unlock(&css->mutex); return ret; } @@ -585,8 +587,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1, switch (crw0->erc) { case CRW_ERC_IPARM: /* Path has come. */ case CRW_ERC_INIT: - if (!chp_is_registered(chpid)) - chp_new(chpid); + chp_new(chpid); chsc_chp_online(chpid); break; case CRW_ERC_PERRI: /* Path has gone. 
*/ diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 9029804dcd22..a0baee25134c 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -91,7 +91,7 @@ struct chsc_ssd_area { u16 sch; /* subchannel */ u8 chpid[8]; /* chpids 0-7 */ u16 fla[8]; /* full link addresses 0-7 */ -} __attribute__ ((packed)); +} __packed __aligned(PAGE_SIZE); int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) { @@ -319,7 +319,7 @@ struct chsc_sei { struct chsc_sei_nt2_area nt2_area; u8 nt_area[PAGE_SIZE - 24]; } u; -} __packed; +} __packed __aligned(PAGE_SIZE); /* * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands" @@ -841,7 +841,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable) u32 : 4; u32 fmt : 4; u32 : 16; - } __attribute__ ((packed)) *secm_area; + } *secm_area; unsigned long flags; int ret, ccode; @@ -1014,7 +1014,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) u32 cmg : 8; u32 zeroes3; u32 data[NR_MEASUREMENT_CHARS]; - } __attribute__ ((packed)) *scmc_area; + } *scmc_area; chp->shared = -1; chp->cmg = -1; @@ -1142,7 +1142,7 @@ int __init chsc_get_cssid(int idx) u8 cssid; u32 : 24; } list[0]; - } __packed *sdcal_area; + } *sdcal_area; int ret; spin_lock_irq(&chsc_page_lock); @@ -1192,7 +1192,7 @@ chsc_determine_css_characteristics(void) u32 reserved4; u32 general_char[510]; u32 chsc_char[508]; - } __attribute__ ((packed)) *scsc_area; + } *scsc_area; spin_lock_irqsave(&chsc_page_lock, flags); memset(chsc_page, 0, PAGE_SIZE); @@ -1236,7 +1236,7 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta) unsigned int rsvd3[3]; u64 clock_delta; unsigned int rsvd4[2]; - } __attribute__ ((packed)) *rr; + } *rr; int rc; memset(page, 0, PAGE_SIZE); @@ -1261,7 +1261,7 @@ int chsc_sstpi(void *page, void *result, size_t size) unsigned int rsvd0[3]; struct chsc_header response; char data[]; - } __attribute__ ((packed)) *rr; + } *rr; int rc; memset(page, 0, PAGE_SIZE); @@ -1284,7 +1284,7 @@ int chsc_siosl(struct subchannel_id schid) u32 word3; struct chsc_header response; u32 word[11]; - } __attribute__ ((packed)) *siosl_area; + } *siosl_area; unsigned long flags; int ccode; int rc; diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 5c9f0dd33f4e..78aba8d94eec 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -15,12 +15,12 @@ #define NR_MEASUREMENT_CHARS 5 struct cmg_chars { u32 values[NR_MEASUREMENT_CHARS]; -} __attribute__ ((packed)); +}; #define NR_MEASUREMENT_ENTRIES 8 struct cmg_entry { u32 values[NR_MEASUREMENT_ENTRIES]; -} __attribute__ ((packed)); +}; struct channel_path_desc_fmt1 { u8 flags; @@ -38,7 +38,7 @@ struct channel_path_desc_fmt1 { u8 s:1; u8 f:1; u32 zeros[2]; -} __attribute__ ((packed)); +}; struct channel_path_desc_fmt3 { struct channel_path_desc_fmt1 fmt1_desc; @@ -59,7 +59,7 @@ struct css_chsc_char { u32:7; u32 pnso:1; /* bit 116 */ u32:11; -}__attribute__((packed)); +} __packed; extern struct css_chsc_char css_chsc_characteristics; @@ -82,7 +82,7 @@ struct chsc_ssqd_area { struct chsc_header response; u32:32; struct qdio_ssqd_desc qdio_ssqd; -} __packed; +} __packed __aligned(PAGE_SIZE); struct chsc_scssc_area { struct chsc_header request; @@ -102,7 +102,7 @@ struct chsc_scssc_area { u32 reserved[1004]; struct chsc_header response; u32:32; -} __packed; +} __packed __aligned(PAGE_SIZE); struct chsc_scpd { struct chsc_header request; @@ -120,7 +120,7 @@ struct chsc_scpd { struct chsc_header response; u32:32; u8 data[0]; -} __packed; +} 
__packed __aligned(PAGE_SIZE); struct chsc_sda_area { struct chsc_header request; @@ -199,7 +199,7 @@ struct chsc_scm_info { u32 reserved2[10]; u64 restok; struct sale scmal[248]; -} __packed; +} __packed __aligned(PAGE_SIZE); int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token); @@ -243,7 +243,7 @@ struct chsc_pnso_area { struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0]; struct qdio_brinfo_entry_l2 l2[0]; } entries; -} __packed; +} __packed __aligned(PAGE_SIZE); int chsc_pnso_brinfo(struct subchannel_id schid, struct chsc_pnso_area *brinfo_area, diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 5130d7c67239..de744ca158fd 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -526,76 +526,6 @@ int cio_disable_subchannel(struct subchannel *sch) } EXPORT_SYMBOL_GPL(cio_disable_subchannel); -static int cio_check_devno_blacklisted(struct subchannel *sch) -{ - if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { - /* - * This device must not be known to Linux. So we simply - * say that there is no device and return ENODEV. - */ - CIO_MSG_EVENT(6, "Blacklisted device detected " - "at devno %04X, subchannel set %x\n", - sch->schib.pmcw.dev, sch->schid.ssid); - return -ENODEV; - } - return 0; -} - -/** - * cio_validate_subchannel - basic validation of subchannel - * @sch: subchannel structure to be filled out - * @schid: subchannel id - * - * Find out subchannel type and initialize struct subchannel. - * Return codes: - * 0 on success - * -ENXIO for non-defined subchannels - * -ENODEV for invalid subchannels or blacklisted devices - * -EIO for subchannels in an invalid subchannel set - */ -int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) -{ - char dbf_txt[15]; - int ccode; - int err; - - sprintf(dbf_txt, "valsch%x", schid.sch_no); - CIO_TRACE_EVENT(4, dbf_txt); - - /* - * The first subchannel that is not-operational (ccode==3) - * indicates that there aren't any more devices available. - * If stsch gets an exception, it means the current subchannel set - * is not valid. - */ - ccode = stsch(schid, &sch->schib); - if (ccode) { - err = (ccode == 3) ? 
-ENXIO : ccode; - goto out; - } - sch->st = sch->schib.pmcw.st; - sch->schid = schid; - - switch (sch->st) { - case SUBCHANNEL_TYPE_IO: - case SUBCHANNEL_TYPE_MSG: - if (!css_sch_is_valid(&sch->schib)) - err = -ENODEV; - else - err = cio_check_devno_blacklisted(sch); - break; - default: - err = 0; - } - if (err) - goto out; - - CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", - sch->schid.ssid, sch->schid.sch_no, sch->st); -out: - return err; -} - /* * do_cio_interrupt() handles all normal I/O device IRQ's */ @@ -719,6 +649,7 @@ struct subchannel *cio_probe_console(void) { struct subchannel_id schid; struct subchannel *sch; + struct schib schib; int sch_no, ret; sch_no = cio_get_console_sch_no(); @@ -728,7 +659,11 @@ struct subchannel *cio_probe_console(void) } init_subchannel_id(&schid); schid.sch_no = sch_no; - sch = css_alloc_subchannel(schid); + ret = stsch(schid, &schib); + if (ret) + return ERR_PTR(-ENODEV); + + sch = css_alloc_subchannel(schid, &schib); if (IS_ERR(sch)) return sch; diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 94cd813bdcfe..9811fd8a0c73 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -119,7 +119,6 @@ DECLARE_PER_CPU(struct irb, cio_irb); #define to_subchannel(n) container_of(n, struct subchannel, dev) -extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id); extern int cio_enable_subchannel(struct subchannel *, u32); extern int cio_disable_subchannel (struct subchannel *); extern int cio_cancel (struct subchannel *); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 9263a0fb3858..aea502922646 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -25,6 +25,7 @@ #include "css.h" #include "cio.h" +#include "blacklist.h" #include "cio_debug.h" #include "ioasm.h" #include "chsc.h" @@ -168,18 +169,53 @@ static void css_subchannel_release(struct device *dev) kfree(sch); } -struct subchannel *css_alloc_subchannel(struct subchannel_id schid) +static int css_validate_subchannel(struct subchannel_id schid, + struct schib *schib) +{ + int err; + + switch (schib->pmcw.st) { + case SUBCHANNEL_TYPE_IO: + case SUBCHANNEL_TYPE_MSG: + if (!css_sch_is_valid(schib)) + err = -ENODEV; + else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) { + CIO_MSG_EVENT(6, "Blacklisted device detected " + "at devno %04X, subchannel set %x\n", + schib->pmcw.dev, schid.ssid); + err = -ENODEV; + } else + err = 0; + break; + default: + err = 0; + } + if (err) + goto out; + + CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", + schid.ssid, schid.sch_no, schib->pmcw.st); +out: + return err; +} + +struct subchannel *css_alloc_subchannel(struct subchannel_id schid, + struct schib *schib) { struct subchannel *sch; int ret; + ret = css_validate_subchannel(schid, schib); + if (ret < 0) + return ERR_PTR(ret); + sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA); if (!sch) return ERR_PTR(-ENOMEM); - ret = cio_validate_subchannel(sch, schid); - if (ret < 0) - goto err; + sch->schid = schid; + sch->schib = *schib; + sch->st = schib->pmcw.st; ret = css_sch_create_locks(sch); if (ret) @@ -244,8 +280,7 @@ static void ssd_register_chpids(struct chsc_ssd_info *ssd) for (i = 0; i < 8; i++) { mask = 0x80 >> i; if (ssd->path_mask & mask) - if (!chp_is_registered(ssd->chpid[i])) - chp_new(ssd->chpid[i]); + chp_new(ssd->chpid[i]); } } @@ -382,12 +417,12 @@ int css_register_subchannel(struct subchannel *sch) return ret; } -static int css_probe_device(struct subchannel_id schid) +static int 
css_probe_device(struct subchannel_id schid, struct schib *schib) { struct subchannel *sch; int ret; - sch = css_alloc_subchannel(schid); + sch = css_alloc_subchannel(schid, schib); if (IS_ERR(sch)) return PTR_ERR(sch); @@ -436,23 +471,23 @@ EXPORT_SYMBOL_GPL(css_sch_is_valid); static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) { struct schib schib; + int ccode; if (!slow) { /* Will be done on the slow path. */ return -EAGAIN; } - if (stsch(schid, &schib)) { - /* Subchannel is not provided. */ - return -ENXIO; - } - if (!css_sch_is_valid(&schib)) { - /* Unusable - ignore. */ - return 0; - } - CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid, - schid.sch_no); + /* + * The first subchannel that is not-operational (ccode==3) + * indicates that there aren't any more devices available. + * If stsch gets an exception, it means the current subchannel set + * is not valid. + */ + ccode = stsch(schid, &schib); + if (ccode) + return (ccode == 3) ? -ENXIO : ccode; - return css_probe_device(schid); + return css_probe_device(schid, &schib); } static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) @@ -1081,6 +1116,11 @@ static int __init channel_subsystem_init(void) if (ret) goto out_wq; + /* Register subchannels which are already in use. */ + cio_register_early_subchannels(); + /* Start initial subchannel evaluation. */ + css_schedule_eval_all(); + return ret; out_wq: destroy_workqueue(cio_work_q); @@ -1120,10 +1160,6 @@ int css_complete_work(void) */ static int __init channel_subsystem_init_sync(void) { - /* Register subchannels which are already in use. */ - cio_register_early_subchannels(); - /* Start initial subchannel evaluation. */ - css_schedule_eval_all(); css_complete_work(); return 0; } diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 30357cbf350a..8d832900a63d 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -103,7 +103,8 @@ extern void css_driver_unregister(struct css_driver *); extern void css_sch_device_unregister(struct subchannel *); extern int css_register_subchannel(struct subchannel *); -extern struct subchannel *css_alloc_subchannel(struct subchannel_id); +extern struct subchannel *css_alloc_subchannel(struct subchannel_id, + struct schib *schib); extern struct subchannel *get_subchannel_by_schid(struct subchannel_id); extern int css_init_done; extern int max_ssid; diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index f4ca72dd862f..9c7d9da42ba0 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -631,21 +631,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, unsigned long phys_aob = 0; if (!q->use_cq) - goto out; + return 0; if (!q->aobs[bufnr]) { struct qaob *aob = qdio_allocate_aob(); q->aobs[bufnr] = aob; } if (q->aobs[bufnr]) { - q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE; q->sbal_state[bufnr].aob = q->aobs[bufnr]; q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user; phys_aob = virt_to_phys(q->aobs[bufnr]); WARN_ON_ONCE(phys_aob & 0xFF); } -out: + q->sbal_state[bufnr].flags = 0; return phys_aob; } diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h index 1f8d1c1e566d..0ebb29b6fd6d 100644 --- a/drivers/s390/cio/trace.h +++ b/drivers/s390/cio/trace.h @@ -30,6 +30,17 @@ DECLARE_EVENT_CLASS(s390_class_schib, __field(u16, schno) __field(u16, devno) __field_struct(struct schib, schib) + __field(u8, pmcw_ena) + __field(u8, pmcw_st) + __field(u8, pmcw_dnv) + __field(u16, pmcw_dev) + 
__field(u8, pmcw_lpm) + __field(u8, pmcw_pnom) + __field(u8, pmcw_lpum) + __field(u8, pmcw_pim) + __field(u8, pmcw_pam) + __field(u8, pmcw_pom) + __field(u64, pmcw_chpid) __field(int, cc) ), TP_fast_assign( @@ -38,18 +49,29 @@ DECLARE_EVENT_CLASS(s390_class_schib, __entry->schno = schid.sch_no; __entry->devno = schib->pmcw.dev; __entry->schib = *schib; + __entry->pmcw_ena = schib->pmcw.ena; + __entry->pmcw_st = schib->pmcw.st; + __entry->pmcw_dnv = schib->pmcw.dnv; + __entry->pmcw_dev = schib->pmcw.dev; + __entry->pmcw_lpm = schib->pmcw.lpm; + __entry->pmcw_pnom = schib->pmcw.pnom; + __entry->pmcw_lpum = schib->pmcw.lpum; + __entry->pmcw_pim = schib->pmcw.pim; + __entry->pmcw_pam = schib->pmcw.pam; + __entry->pmcw_pom = schib->pmcw.pom; + memcpy(&__entry->pmcw_chpid, &schib->pmcw.chpid, 8); __entry->cc = cc; ), TP_printk("schid=%x.%x.%04x cc=%d ena=%d st=%d dnv=%d dev=%04x " "lpm=0x%02x pnom=0x%02x lpum=0x%02x pim=0x%02x pam=0x%02x " "pom=0x%02x chpids=%016llx", __entry->cssid, __entry->ssid, __entry->schno, __entry->cc, - __entry->schib.pmcw.ena, __entry->schib.pmcw.st, - __entry->schib.pmcw.dnv, __entry->schib.pmcw.dev, - __entry->schib.pmcw.lpm, __entry->schib.pmcw.pnom, - __entry->schib.pmcw.lpum, __entry->schib.pmcw.pim, - __entry->schib.pmcw.pam, __entry->schib.pmcw.pom, - *((u64 *) __entry->schib.pmcw.chpid) + __entry->pmcw_ena, __entry->pmcw_st, + __entry->pmcw_dnv, __entry->pmcw_dev, + __entry->pmcw_lpm, __entry->pmcw_pnom, + __entry->pmcw_lpum, __entry->pmcw_pim, + __entry->pmcw_pam, __entry->pmcw_pom, + __entry->pmcw_chpid ) ); @@ -89,6 +111,13 @@ TRACE_EVENT(s390_cio_tsch, __field(u8, ssid) __field(u16, schno) __field_struct(struct irb, irb) + __field(u8, scsw_dcc) + __field(u8, scsw_pno) + __field(u8, scsw_fctl) + __field(u8, scsw_actl) + __field(u8, scsw_stctl) + __field(u8, scsw_dstat) + __field(u8, scsw_cstat) __field(int, cc) ), TP_fast_assign( @@ -96,15 +125,22 @@ TRACE_EVENT(s390_cio_tsch, __entry->ssid = schid.ssid; __entry->schno = schid.sch_no; __entry->irb = *irb; + __entry->scsw_dcc = scsw_cc(&irb->scsw); + __entry->scsw_pno = scsw_pno(&irb->scsw); + __entry->scsw_fctl = scsw_fctl(&irb->scsw); + __entry->scsw_actl = scsw_actl(&irb->scsw); + __entry->scsw_stctl = scsw_stctl(&irb->scsw); + __entry->scsw_dstat = scsw_dstat(&irb->scsw); + __entry->scsw_cstat = scsw_cstat(&irb->scsw); __entry->cc = cc; ), TP_printk("schid=%x.%x.%04x cc=%d dcc=%d pno=%d fctl=0x%x actl=0x%x " "stctl=0x%x dstat=0x%x cstat=0x%x", __entry->cssid, __entry->ssid, __entry->schno, __entry->cc, - scsw_cc(&__entry->irb.scsw), scsw_pno(&__entry->irb.scsw), - scsw_fctl(&__entry->irb.scsw), scsw_actl(&__entry->irb.scsw), - scsw_stctl(&__entry->irb.scsw), - scsw_dstat(&__entry->irb.scsw), scsw_cstat(&__entry->irb.scsw) + __entry->scsw_dcc, __entry->scsw_pno, + __entry->scsw_fctl, __entry->scsw_actl, + __entry->scsw_stctl, + __entry->scsw_dstat, __entry->scsw_cstat ) ); @@ -122,6 +158,9 @@ TRACE_EVENT(s390_cio_tpi, __field(u8, cssid) __field(u8, ssid) __field(u16, schno) + __field(u8, adapter_IO) + __field(u8, isc) + __field(u8, type) ), TP_fast_assign( __entry->cc = cc; @@ -136,11 +175,14 @@ TRACE_EVENT(s390_cio_tpi, __entry->cssid = __entry->tpi_info.schid.cssid; __entry->ssid = __entry->tpi_info.schid.ssid; __entry->schno = __entry->tpi_info.schid.sch_no; + __entry->adapter_IO = __entry->tpi_info.adapter_IO; + __entry->isc = __entry->tpi_info.isc; + __entry->type = __entry->tpi_info.type; ), TP_printk("schid=%x.%x.%04x cc=%d a=%d isc=%d type=%d", __entry->cssid, __entry->ssid, __entry->schno, 
__entry->cc, - __entry->tpi_info.adapter_IO, __entry->tpi_info.isc, - __entry->tpi_info.type + __entry->adapter_IO, __entry->isc, + __entry->type ) ); @@ -299,16 +341,20 @@ TRACE_EVENT(s390_cio_interrupt, __field(u8, cssid) __field(u8, ssid) __field(u16, schno) + __field(u8, isc) + __field(u8, type) ), TP_fast_assign( __entry->tpi_info = *tpi_info; - __entry->cssid = __entry->tpi_info.schid.cssid; - __entry->ssid = __entry->tpi_info.schid.ssid; - __entry->schno = __entry->tpi_info.schid.sch_no; + __entry->cssid = tpi_info->schid.cssid; + __entry->ssid = tpi_info->schid.ssid; + __entry->schno = tpi_info->schid.sch_no; + __entry->isc = tpi_info->isc; + __entry->type = tpi_info->type; ), TP_printk("schid=%x.%x.%04x isc=%d type=%d", __entry->cssid, __entry->ssid, __entry->schno, - __entry->tpi_info.isc, __entry->tpi_info.type + __entry->isc, __entry->type ) ); @@ -321,11 +367,13 @@ TRACE_EVENT(s390_cio_adapter_int, TP_ARGS(tpi_info), TP_STRUCT__entry( __field_struct(struct tpi_info, tpi_info) + __field(u8, isc) ), TP_fast_assign( __entry->tpi_info = *tpi_info; + __entry->isc = tpi_info->isc; ), - TP_printk("isc=%d", __entry->tpi_info.isc) + TP_printk("isc=%d", __entry->isc) ); /** @@ -339,16 +387,30 @@ TRACE_EVENT(s390_cio_stcrw, TP_STRUCT__entry( __field_struct(struct crw, crw) __field(int, cc) + __field(u8, slct) + __field(u8, oflw) + __field(u8, chn) + __field(u8, rsc) + __field(u8, anc) + __field(u8, erc) + __field(u16, rsid) ), TP_fast_assign( __entry->crw = *crw; __entry->cc = cc; + __entry->slct = crw->slct; + __entry->oflw = crw->oflw; + __entry->chn = crw->chn; + __entry->rsc = crw->rsc; + __entry->anc = crw->anc; + __entry->erc = crw->erc; + __entry->rsid = crw->rsid; ), TP_printk("cc=%d slct=%d oflw=%d chn=%d rsc=%d anc=%d erc=0x%x " "rsid=0x%x", - __entry->cc, __entry->crw.slct, __entry->crw.oflw, - __entry->crw.chn, __entry->crw.rsc, __entry->crw.anc, - __entry->crw.erc, __entry->crw.rsid + __entry->cc, __entry->slct, __entry->oflw, + __entry->chn, __entry->rsc, __entry->anc, + __entry->erc, __entry->rsid ) ); diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h deleted file mode 100644 index 16b59ce5e01d..000000000000 --- a/drivers/s390/crypto/ap_asm.h +++ /dev/null @@ -1,236 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright IBM Corp. 2016 - * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> - * - * Adjunct processor bus inline assemblies. - */ - -#ifndef _AP_ASM_H_ -#define _AP_ASM_H_ - -#include <asm/isc.h> - -/** - * ap_intructions_available() - Test if AP instructions are available. - * - * Returns 0 if the AP instructions are installed. - */ -static inline int ap_instructions_available(void) -{ - register unsigned long reg0 asm ("0") = AP_MKQID(0, 0); - register unsigned long reg1 asm ("1") = -ENODEV; - register unsigned long reg2 asm ("2") = 0UL; - - asm volatile( - " .long 0xb2af0000\n" /* PQAP(TAPQ) */ - "0: la %1,0\n" - "1:\n" - EX_TABLE(0b, 1b) - : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc"); - return reg1; -} - -/** - * ap_tapq(): Test adjunct processor queue. - * @qid: The AP queue number - * @info: Pointer to queue descriptor - * - * Returns AP queue status structure. 
- */ -static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info) -{ - register unsigned long reg0 asm ("0") = qid; - register struct ap_queue_status reg1 asm ("1"); - register unsigned long reg2 asm ("2") = 0UL; - - asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ - : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); - if (info) - *info = reg2; - return reg1; -} - -/** - * ap_pqap_rapq(): Reset adjunct processor queue. - * @qid: The AP queue number - * - * Returns AP queue status structure. - */ -static inline struct ap_queue_status ap_rapq(ap_qid_t qid) -{ - register unsigned long reg0 asm ("0") = qid | 0x01000000UL; - register struct ap_queue_status reg1 asm ("1"); - register unsigned long reg2 asm ("2") = 0UL; - - asm volatile( - ".long 0xb2af0000" /* PQAP(RAPQ) */ - : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); - return reg1; -} - -/** - * ap_aqic(): Control interruption for a specific AP. - * @qid: The AP queue number - * @qirqctrl: struct ap_qirq_ctrl (64 bit value) - * @ind: The notification indicator byte - * - * Returns AP queue status. - */ -static inline struct ap_queue_status ap_aqic(ap_qid_t qid, - struct ap_qirq_ctrl qirqctrl, - void *ind) -{ - register unsigned long reg0 asm ("0") = qid | (3UL << 24); - register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl; - register struct ap_queue_status reg1_out asm ("1"); - register void *reg2 asm ("2") = ind; - - asm volatile( - ".long 0xb2af0000" /* PQAP(AQIC) */ - : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) - : - : "cc"); - return reg1_out; -} - -/** - * ap_qci(): Get AP configuration data - * - * Returns 0 on success, or -EOPNOTSUPP. - */ -static inline int ap_qci(void *config) -{ - register unsigned long reg0 asm ("0") = 0x04000000UL; - register unsigned long reg1 asm ("1") = -EINVAL; - register void *reg2 asm ("2") = (void *) config; - - asm volatile( - ".long 0xb2af0000\n" /* PQAP(QCI) */ - "0: la %1,0\n" - "1:\n" - EX_TABLE(0b, 1b) - : "+d" (reg0), "+d" (reg1), "+d" (reg2) - : - : "cc", "memory"); - - return reg1; -} - -/* - * union ap_qact_ap_info - used together with the - * ap_aqic() function to provide a convenient way - * to handle the ap info needed by the qact function. - */ -union ap_qact_ap_info { - unsigned long val; - struct { - unsigned int : 3; - unsigned int mode : 3; - unsigned int : 26; - unsigned int cat : 8; - unsigned int : 8; - unsigned char ver[2]; - }; -}; - -/** - * ap_qact(): Query AP combatibility type. - * @qid: The AP queue number - * @apinfo: On input the info about the AP queue. On output the - * alternate AP queue info provided by the qact function - * in GR2 is stored in. - * - * Returns AP queue status. Check response_code field for failures. - */ -static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit, - union ap_qact_ap_info *apinfo) -{ - register unsigned long reg0 asm ("0") = qid | (5UL << 24) - | ((ifbit & 0x01) << 22); - register unsigned long reg1_in asm ("1") = apinfo->val; - register struct ap_queue_status reg1_out asm ("1"); - register unsigned long reg2 asm ("2") = 0; - - asm volatile( - ".long 0xb2af0000" /* PQAP(QACT) */ - : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) - : : "cc"); - apinfo->val = reg2; - return reg1_out; -} - -/** - * ap_nqap(): Send message to adjunct processor queue. - * @qid: The AP queue number - * @psmid: The program supplied message identifier - * @msg: The message text - * @length: The message length - * - * Returns AP queue status structure. 
- * Condition code 1 on NQAP can't happen because the L bit is 1. - * Condition code 2 on NQAP also means the send is incomplete, - * because a segment boundary was reached. The NQAP is repeated. - */ -static inline struct ap_queue_status ap_nqap(ap_qid_t qid, - unsigned long long psmid, - void *msg, size_t length) -{ - register unsigned long reg0 asm ("0") = qid | 0x40000000UL; - register struct ap_queue_status reg1 asm ("1"); - register unsigned long reg2 asm ("2") = (unsigned long) msg; - register unsigned long reg3 asm ("3") = (unsigned long) length; - register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32); - register unsigned long reg5 asm ("5") = psmid & 0xffffffff; - - asm volatile ( - "0: .long 0xb2ad0042\n" /* NQAP */ - " brc 2,0b" - : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3) - : "d" (reg4), "d" (reg5) - : "cc", "memory"); - return reg1; -} - -/** - * ap_dqap(): Receive message from adjunct processor queue. - * @qid: The AP queue number - * @psmid: Pointer to program supplied message identifier - * @msg: The message text - * @length: The message length - * - * Returns AP queue status structure. - * Condition code 1 on DQAP means the receive has taken place - * but only partially. The response is incomplete, hence the - * DQAP is repeated. - * Condition code 2 on DQAP also means the receive is incomplete, - * this time because a segment boundary was reached. Again, the - * DQAP is repeated. - * Note that gpr2 is used by the DQAP instruction to keep track of - * any 'residual' length, in case the instruction gets interrupted. - * Hence it gets zeroed before the instruction. - */ -static inline struct ap_queue_status ap_dqap(ap_qid_t qid, - unsigned long long *psmid, - void *msg, size_t length) -{ - register unsigned long reg0 asm("0") = qid | 0x80000000UL; - register struct ap_queue_status reg1 asm ("1"); - register unsigned long reg2 asm("2") = 0UL; - register unsigned long reg4 asm("4") = (unsigned long) msg; - register unsigned long reg5 asm("5") = (unsigned long) length; - register unsigned long reg6 asm("6") = 0UL; - register unsigned long reg7 asm("7") = 0UL; - - - asm volatile( - "0: .long 0xb2ae0064\n" /* DQAP */ - " brc 6,0b\n" - : "+d" (reg0), "=d" (reg1), "+d" (reg2), - "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7) - : : "cc", "memory"); - *psmid = (((unsigned long long) reg6) << 32) + reg7; - return reg1; -} - -#endif /* _AP_ASM_H_ */ diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 35a0c2b52f82..bf27fc4d1335 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -36,7 +36,6 @@ #include <linux/debugfs.h> #include "ap_bus.h" -#include "ap_asm.h" #include "ap_debug.h" /* @@ -174,24 +173,6 @@ static inline int ap_qact_available(void) return 0; } -/** - * ap_test_queue(): Test adjunct processor queue. - * @qid: The AP queue number - * @tbit: Test facilities bit - * @info: Pointer to queue descriptor - * - * Returns AP queue status structure. - */ -struct ap_queue_status ap_test_queue(ap_qid_t qid, - int tbit, - unsigned long *info) -{ - if (tbit) - qid |= 1UL << 23; /* set T bit*/ - return ap_tapq(qid, info); -} -EXPORT_SYMBOL(ap_test_queue); - /* * ap_query_configuration(): Fetch cryptographic config info * @@ -200,7 +181,7 @@ EXPORT_SYMBOL(ap_test_queue); * is returned, e.g. if the PQAP(QCI) instruction is not * available, the return value will be -EOPNOTSUPP. 
*/ -int ap_query_configuration(struct ap_config_info *info) +static inline int ap_query_configuration(struct ap_config_info *info) { if (!ap_configuration_available()) return -EOPNOTSUPP; @@ -493,7 +474,7 @@ static int ap_poll_thread_start(void) return 0; mutex_lock(&ap_poll_thread_mutex); ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); - rc = PTR_RET(ap_poll_kthread); + rc = PTR_ERR_OR_ZERO(ap_poll_kthread); if (rc) ap_poll_kthread = NULL; mutex_unlock(&ap_poll_thread_mutex); @@ -1261,7 +1242,7 @@ static int __init ap_module_init(void) /* Create /sys/devices/ap. */ ap_root_device = root_device_register("ap"); - rc = PTR_RET(ap_root_device); + rc = PTR_ERR_OR_ZERO(ap_root_device); if (rc) goto out_bus; diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 6a273c5ebca5..936541937e15 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -15,6 +15,7 @@ #include <linux/device.h> #include <linux/types.h> +#include <asm/isc.h> #include <asm/ap.h> #define AP_DEVICES 256 /* Number of AP devices. */ diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c index 2c726df210f6..c13e43292cb7 100644 --- a/drivers/s390/crypto/ap_card.c +++ b/drivers/s390/crypto/ap_card.c @@ -14,7 +14,6 @@ #include <asm/facility.h> #include "ap_bus.h" -#include "ap_asm.h" /* * AP card related attributes. diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index ba3a2e13b0eb..e365171fe28f 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -14,26 +14,6 @@ #include <asm/facility.h> #include "ap_bus.h" -#include "ap_asm.h" - -/** - * ap_queue_irq_ctrl(): Control interruption on a AP queue. - * @qirqctrl: struct ap_qirq_ctrl (64 bit value) - * @ind: The notification indicator byte - * - * Returns AP queue status. - * - * Control interruption on the given AP queue. - * Just a simple wrapper function for the low level PQAP(AQIC) - * instruction available for other kernel modules. - */ -struct ap_queue_status ap_queue_irq_ctrl(ap_qid_t qid, - struct ap_qirq_ctrl qirqctrl, - void *ind) -{ - return ap_aqic(qid, qirqctrl, ind); -} -EXPORT_SYMBOL(ap_queue_irq_ctrl); /** * ap_queue_enable_interruption(): Enable interruption on an AP queue. 
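For reference, PTR_ERR_OR_ZERO() — substituted for the deprecated PTR_RET() alias in the ap_bus hunks above — collapses an ERR_PTR-encoded pointer into zero on success or a negative errno on failure. A minimal sketch of the idiom, using illustrative names rather than the driver's own:

#include <linux/device.h>
#include <linux/err.h>

/*
 * Illustrative only: root_device_register() returns either a valid
 * struct device pointer or an ERR_PTR-encoded errno, so the usual
 * "IS_ERR() ? PTR_ERR() : 0" dance collapses into one helper call.
 */
static struct device *example_root;

static int example_register_root(void)
{
	example_root = root_device_register("example");

	return PTR_ERR_OR_ZERO(example_root);
}
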
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 3929c8be8098..e663432395c1 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -699,7 +699,7 @@ static int query_crypto_facility(u16 cardnr, u16 domain, /* fill request cprb param block with FQ request */ preqparm = (struct fqreqparm *) preqcblk->req_parmb; memcpy(preqparm->subfunc_code, "FQ", 2); - strncpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array)); + memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array)); preqparm->rule_array_len = sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array); preqparm->lv1.len = sizeof(preqparm->lv1); diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c index 233e1e695208..da2c8dfd4d74 100644 --- a/drivers/s390/crypto/zcrypt_card.c +++ b/drivers/s390/crypto/zcrypt_card.c @@ -83,9 +83,21 @@ static ssize_t zcrypt_card_online_store(struct device *dev, static DEVICE_ATTR(online, 0644, zcrypt_card_online_show, zcrypt_card_online_store); +static ssize_t zcrypt_card_load_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct zcrypt_card *zc = to_ap_card(dev)->private; + + return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load)); +} + +static DEVICE_ATTR(load, 0444, zcrypt_card_load_show, NULL); + static struct attribute *zcrypt_card_attrs[] = { &dev_attr_type.attr, &dev_attr_online.attr, + &dev_attr_load.attr, NULL, }; diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h index 011d61d8a4ae..1752622b95f7 100644 --- a/drivers/s390/crypto/zcrypt_cca_key.h +++ b/drivers/s390/crypto/zcrypt_cca_key.h @@ -99,7 +99,7 @@ struct cca_pvt_ext_CRT_sec { * @mex: pointer to user input data * @p: pointer to memory area for the key * - * Returns the size of the key area or -EFAULT + * Returns the size of the key area or negative errno value. */ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p) { @@ -118,6 +118,15 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p) unsigned char *temp; int i; + /* + * The inputdatalength was a selection criteria in the dispatching + * function zcrypt_rsa_modexpo(). However, do a plausibility check + * here to make sure the following copy_from_user() can't be utilized + * to compromise the system. + */ + if (WARN_ON_ONCE(mex->inputdatalength > 512)) + return -EINVAL; + memset(key, 0, sizeof(*key)); key->pubHdr = static_pub_hdr; @@ -178,6 +187,15 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p) struct cca_public_sec *pub; int short_len, long_len, pad_len, key_len, size; + /* + * The inputdatalength was a selection criteria in the dispatching + * function zcrypt_rsa_crt(). However, do a plausibility check + * here to make sure the following copy_from_user() can't be utilized + * to compromise the system. + */ + if (WARN_ON_ONCE(crt->inputdatalength > 512)) + return -EINVAL; + memset(key, 0, sizeof(*key)); short_len = (crt->inputdatalength + 1) / 2; diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 97d4bacbc442..e70ae078c86b 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -246,7 +246,7 @@ int speed_idx_ep11(int req_type) * @ap_msg: pointer to AP message * @mex: pointer to user input data * - * Returns 0 on success or -EFAULT. + * Returns 0 on success or negative errno value. 
*/ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq, struct ap_message *ap_msg, @@ -272,6 +272,14 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq, } __packed * msg = ap_msg->message; int size; + /* + * The inputdatalength was a selection criteria in the dispatching + * function zcrypt_rsa_modexpo(). However, make sure the following + * copy_from_user() never exceeds the allocated buffer space. + */ + if (WARN_ON_ONCE(mex->inputdatalength > PAGE_SIZE)) + return -EINVAL; + /* VUD.ciphertext */ msg->length = mex->inputdatalength + 2; if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength)) @@ -307,7 +315,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq, * @ap_msg: pointer to AP message * @crt: pointer to user input data * - * Returns 0 on success or -EFAULT. + * Returns 0 on success or negative errno value. */ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq, struct ap_message *ap_msg, @@ -334,6 +342,14 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq, } __packed * msg = ap_msg->message; int size; + /* + * The inputdatalength was a selection criteria in the dispatching + * function zcrypt_rsa_crt(). However, make sure the following + * copy_from_user() never exceeds the allocated buffer space. + */ + if (WARN_ON_ONCE(crt->inputdatalength > PAGE_SIZE)) + return -EINVAL; + /* VUD.ciphertext */ msg->length = crt->inputdatalength + 2; if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength)) diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c index 720434e18007..91a52f268353 100644 --- a/drivers/s390/crypto/zcrypt_queue.c +++ b/drivers/s390/crypto/zcrypt_queue.c @@ -75,8 +75,20 @@ static ssize_t zcrypt_queue_online_store(struct device *dev, static DEVICE_ATTR(online, 0644, zcrypt_queue_online_show, zcrypt_queue_online_store); +static ssize_t zcrypt_queue_load_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct zcrypt_queue *zq = to_ap_queue(dev)->private; + + return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load)); +} + +static DEVICE_ATTR(load, 0444, zcrypt_queue_load_show, NULL); + static struct attribute *zcrypt_queue_attrs[] = { &dev_attr_online.attr, + &dev_attr_load.attr, NULL, }; diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index a3a8c8d9d717..94f4d8fe85e0 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -101,7 +101,7 @@ static void __init zfcp_init_device_setup(char *devstr) token = strsep(&str, ","); if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE) goto err_out; - strncpy(busid, token, ZFCP_BUS_ID_SIZE); + strlcpy(busid, token, ZFCP_BUS_ID_SIZE); token = strsep(&str, ","); if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn)) diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c index 497a68389461..a43d44e7e7dd 100644 --- a/drivers/scsi/cxlflash/ocxl_hw.c +++ b/drivers/scsi/cxlflash/ocxl_hw.c @@ -88,10 +88,8 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name, const struct file_operations *fops, void *priv, int flags) { - struct qstr this; - struct path path; struct file *file; - struct inode *inode = NULL; + struct inode *inode; int rc; if (fops->owner && !try_module_get(fops->owner)) { @@ -116,29 +114,15 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name, goto err3; } - this.name = name; - this.len = strlen(name); - this.hash = 0; - path.dentry = 
d_alloc_pseudo(ocxlflash_vfs_mount->mnt_sb, &this); - if (!path.dentry) { - dev_err(dev, "%s: d_alloc_pseudo failed\n", __func__); - rc = -ENOMEM; - goto err4; - } - - path.mnt = mntget(ocxlflash_vfs_mount); - d_instantiate(path.dentry, inode); - - file = alloc_file(&path, OPEN_FMODE(flags), fops); + file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name, + flags & (O_ACCMODE | O_NONBLOCK), fops); if (IS_ERR(file)) { rc = PTR_ERR(file); dev_err(dev, "%s: alloc_file failed rc=%d\n", __func__, rc); - path_put(&path); - goto err3; + goto err4; } - file->f_flags = flags & (O_ACCMODE | O_NONBLOCK); file->private_data = priv; out: return file; diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index ea23c8dffc25..ffec695e0bfb 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -754,9 +754,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, case ELS_LOGO: if (fip->mode == FIP_MODE_VN2VN) { if (fip->state != FIP_ST_VNMP_UP) - return -EINVAL; + goto drop; if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI) - return -EINVAL; + goto drop; } else { if (fip->state != FIP_ST_ENABLED) return 0; @@ -799,9 +799,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, fip->send(fip, skb); return -EINPROGRESS; drop: - kfree_skb(skb); LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n", op, ntoh24(fh->fh_d_id)); + kfree_skb(skb); return -EINVAL; } EXPORT_SYMBOL(fcoe_ctlr_els_send); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 31d31aad3de1..89b1f1af2fd4 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -2164,6 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", fc_rport_state(rdata)); + rdata->flags &= ~FC_RP_STARTED; fc_rport_enter_delete(rdata, RPORT_EV_STOP); mutex_unlock(&rdata->rp_mutex); kref_put(&rdata->kref, fc_rport_destroy); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index d6093838f5f2..c972cc2b3d5b 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -284,11 +284,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) */ if (opcode != ISCSI_OP_SCSI_DATA_OUT) { iscsi_conn_printk(KERN_INFO, conn, - "task [op %x/%x itt " + "task [op %x itt " "0x%x/0x%x] " "rejected.\n", - task->hdr->opcode, opcode, - task->itt, task->hdr_itt); + opcode, task->itt, + task->hdr_itt); return -EACCES; } /* @@ -297,10 +297,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) */ if (conn->session->fast_abort) { iscsi_conn_printk(KERN_INFO, conn, - "task [op %x/%x itt " + "task [op %x itt " "0x%x/0x%x] fast abort.\n", - task->hdr->opcode, opcode, - task->itt, task->hdr_itt); + opcode, task->itt, + task->hdr_itt); return -EACCES; } break; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 569392d0d4c9..e44c91edf92d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -3343,11 +3343,10 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) { unsigned long flags; - __u64 data_out = b; spin_lock_irqsave(writeq_lock, flags); - writel((u32)(data_out), addr); - writel((u32)(data_out >> 32), (addr + 4)); + __raw_writel((u32)(b), addr); + __raw_writel((u32)(b >> 32), (addr + 4)); mmiowb(); spin_unlock_irqrestore(writeq_lock, flags); } @@ -3367,7 +3366,8 @@ 
_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr, static inline void _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) { - writeq(b, addr); + __raw_writeq(b, addr); + mmiowb(); } #else static inline void @@ -5268,7 +5268,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, /* send message 32-bits at a time */ for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { - writel((u32)(request[i]), &ioc->chip->Doorbell); + writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); if ((_base_wait_for_doorbell_ack(ioc, 5))) failed = 1; } @@ -5289,7 +5289,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, } /* read the first two 16-bits, it gives the total length of the reply */ - reply[0] = (u16)(readl(&ioc->chip->Doorbell) + reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_DATA_MASK); writel(0, &ioc->chip->HostInterruptStatus); if ((_base_wait_for_doorbell_int(ioc, 5))) { @@ -5298,7 +5298,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, ioc->name, __LINE__); return -EFAULT; } - reply[1] = (u16)(readl(&ioc->chip->Doorbell) + reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_DATA_MASK); writel(0, &ioc->chip->HostInterruptStatus); @@ -5312,7 +5312,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, if (i >= reply_bytes/2) /* overflow case */ readl(&ioc->chip->Doorbell); else - reply[i] = (u16)(readl(&ioc->chip->Doorbell) + reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_DATA_MASK); writel(0, &ioc->chip->HostInterruptStatus); } diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 091ec1207bea..cff83b9457f7 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -888,7 +888,7 @@ static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block, ipv6_en = !!(block->generic.ctrl_flags & NVM_ISCSI_CFG_GEN_IPV6_ENABLED); - snprintf(tgt->iscsi_name, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", + snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n", block->target[index].target_name.byte); tgt->ipv6_en = ipv6_en; diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 89a4999fa631..c8731568f9c4 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2141,6 +2141,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) msleep(1000); qla24xx_disable_vp(vha); + qla2x00_wait_for_sess_deletion(vha); vha->flags.delete_progress = 1; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f68eb6096559..2660a48d918a 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -214,6 +214,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_async_abort_cmd(srb_t *); int qla24xx_post_relogin_work(struct scsi_qla_host *vha); +void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *); /* * Global Functions in qla_mid.c source file. 
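The _base_mpi_ep_writeq()/_base_writeq() hunks above switch to __raw_writel()/__raw_writeq(), which do not perform the byte swapping or barriers implied by writel()/writeq(), and pair the raw accessors with mmiowb() so the posted writes stay ordered with the lock release. A rough sketch of that 64-bit doorbell-write pattern; the names, register layout, and CONFIG_64BIT split are assumptions for illustration, not the driver's code:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_doorbell_lock);

/* Write a 64-bit doorbell value; names and layout are illustrative. */
static inline void example_write_doorbell(u64 val, void __iomem *addr)
{
#ifdef CONFIG_64BIT
	__raw_writeq(val, addr);
	mmiowb();
#else
	unsigned long flags;

	/* No atomic 64-bit MMIO store: emit two 32-bit halves under a lock. */
	spin_lock_irqsave(&example_doorbell_lock, flags);
	__raw_writel(lower_32_bits(val), addr);
	__raw_writel(upper_32_bits(val), addr + 4);
	mmiowb();
	spin_unlock_irqrestore(&example_doorbell_lock, flags);
#endif
}
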
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 2c35b0b2baa0..7a3744006419 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3708,6 +3708,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) return rval; done_free_sp: + spin_lock_irqsave(&vha->hw->vport_slock, flags); + list_del(&sp->elem); + spin_unlock_irqrestore(&vha->hw->vport_slock, flags); + if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index db0e3279e07a..1b19b954bbae 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1489,11 +1489,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, wait_for_completion(&tm_iocb->u.tmf.comp); - rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ? - QLA_SUCCESS : QLA_FUNCTION_FAILED; + rval = tm_iocb->u.tmf.data; - if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) { - ql_dbg(ql_dbg_taskm, vha, 0x8030, + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8030, "TM IOCB failed (%x).\n", rval); } diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 37ae0f6d8ae5..59fd5a9dfeb8 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -222,6 +222,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag) sp->fcport = fcport; sp->iocbs = 1; sp->vha = qpair->vha; + INIT_LIST_HEAD(&sp->elem); + done: if (!sp) QLA_QPAIR_MARK_NOT_BUSY(qpair); diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index a91cca52b5d5..dd93a22fe843 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2130,34 +2130,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) req_cnt = 1; handle = 0; - if (!sp) - goto skip_cmd_array; - - /* Check for room in outstanding command list. */ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - if (index == req->num_outstanding_cmds) { - ql_log(ql_log_warn, vha, 0x700b, - "No room on outstanding cmd array.\n"); - goto queuing_error; - } - - /* Prep command array. */ - req->current_outstanding_cmd = handle; - req->outstanding_cmds[handle] = sp; - sp->handle = handle; - - /* Adjust entry-counts as needed. */ - if (sp->type != SRB_SCSI_CMD) + if (sp && (sp->type != SRB_SCSI_CMD)) { + /* Adjust entry-counts as needed. */ req_cnt = sp->iocbs; + } -skip_cmd_array: /* Check for room on request queue. */ if (req->cnt < req_cnt + 2) { if (qpair->use_shadow_reg) @@ -2183,6 +2160,28 @@ skip_cmd_array: if (req->cnt < req_cnt + 2) goto queuing_error; + if (sp) { + /* Check for room in outstanding command list. */ + handle = req->current_outstanding_cmd; + for (index = 1; index < req->num_outstanding_cmds; index++) { + handle++; + if (handle == req->num_outstanding_cmds) + handle = 1; + if (!req->outstanding_cmds[handle]) + break; + } + if (index == req->num_outstanding_cmds) { + ql_log(ql_log_warn, vha, 0x700b, + "No room on outstanding cmd array.\n"); + goto queuing_error; + } + + /* Prep command array. 
*/ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + } + /* Prep packet */ req->cnt -= req_cnt; pkt = req->ring_ptr; @@ -2195,6 +2194,8 @@ skip_cmd_array: pkt->handle = handle; } + return pkt; + queuing_error: qpair->tgt_counters.num_alloc_iocb_failed++; return pkt; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 9fa5a2557f2c..7756106d4555 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -631,6 +631,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) unsigned long flags; fc_port_t *fcport = NULL; + if (!vha->hw->flags.fw_started) + return; + /* Setup to process RIO completion. */ handle_cnt = 0; if (IS_CNA_CAPABLE(ha)) diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 7e875f575229..f0ec13d48bf3 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -4220,6 +4220,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; + if (!ha->flags.fw_started) + return QLA_SUCCESS; + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, "Entered %s.\n", __func__); @@ -4289,6 +4292,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; + if (!ha->flags.fw_started) + return QLA_SUCCESS; + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, "Entered %s.\n", __func__); diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index f6f0a759a7c2..aa727d07b702 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -152,11 +152,18 @@ int qla24xx_disable_vp(scsi_qla_host_t *vha) { unsigned long flags; - int ret; + int ret = QLA_SUCCESS; + fc_port_t *fcport; + + if (vha->hw->flags.fw_started) + ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); - ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + list_for_each_entry(fcport, &vha->vp_fcports, list) + fcport->logout_on_delete = 0; + + qla2x00_mark_all_devices_lost(vha, 0); /* Remove port id from vp target map */ spin_lock_irqsave(&vha->hw->hardware_lock, flags); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 9f309e572be4..1fbd16c8c9a7 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -303,6 +303,7 @@ static void qla2x00_free_device(scsi_qla_host_t *); static int qla2xxx_map_queues(struct Scsi_Host *shost); static void qla2x00_destroy_deferred_work(struct qla_hw_data *); + struct scsi_host_template qla2xxx_driver_template = { .module = THIS_MODULE, .name = QLA2XXX_DRIVER_NAME, @@ -1147,7 +1148,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha) * qla2x00_wait_for_sess_deletion can only be called from remove_one. * it has dependency on UNLOADING flag to stop device discovery */ -static void +void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) { qla2x00_mark_all_devices_lost(vha, 0); @@ -3603,6 +3604,8 @@ qla2x00_remove_one(struct pci_dev *pdev) base_vha = pci_get_drvdata(pdev); ha = base_vha->hw; + ql_log(ql_log_info, base_vha, 0xb079, + "Removing driver\n"); /* Indicate device removal to prevent future board_disable and wait * until any pending board_disable has completed. 
*/ @@ -3625,6 +3628,21 @@ qla2x00_remove_one(struct pci_dev *pdev) } qla2x00_wait_for_hba_ready(base_vha); + if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) { + if (ha->flags.fw_started) + qla2x00_abort_isp_cleanup(base_vha); + } else if (!IS_QLAFX00(ha)) { + if (IS_QLA8031(ha)) { + ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, + "Clearing fcoe driver presence.\n"); + if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) + ql_dbg(ql_dbg_p3p, base_vha, 0xb079, + "Error while clearing DRV-Presence.\n"); + } + + qla2x00_try_to_stop_firmware(base_vha); + } + qla2x00_wait_for_sess_deletion(base_vha); /* @@ -3648,14 +3666,6 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_delete_all_vps(ha, base_vha); - if (IS_QLA8031(ha)) { - ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, - "Clearing fcoe driver presence.\n"); - if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) - ql_dbg(ql_dbg_p3p, base_vha, 0xb079, - "Error while clearing DRV-Presence.\n"); - } - qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); qla2x00_dfs_remove(base_vha); @@ -3715,24 +3725,6 @@ qla2x00_free_device(scsi_qla_host_t *vha) qla2x00_stop_timer(vha); qla25xx_delete_queues(vha); - - if (ha->flags.fce_enabled) - qla2x00_disable_fce_trace(vha, NULL, NULL); - - if (ha->eft) - qla2x00_disable_eft_trace(vha); - - if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) { - if (ha->flags.fw_started) - qla2x00_abort_isp_cleanup(vha); - } else { - if (ha->flags.fw_started) { - /* Stop currently executing firmware. */ - qla2x00_try_to_stop_firmware(vha); - ha->flags.fw_started = 0; - } - } - vha->flags.online = 0; /* turn-off interrupts on the card */ @@ -6028,8 +6020,9 @@ qla2x00_do_dpc(void *data) set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); } - if (test_and_clear_bit(ISP_ABORT_NEEDED, - &base_vha->dpc_flags)) { + if (test_and_clear_bit + (ISP_ABORT_NEEDED, &base_vha->dpc_flags) && + !test_bit(UNLOADING, &base_vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, base_vha, 0x4007, "ISP abort scheduled.\n"); diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 04458eb19d38..4499c787165f 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -1880,6 +1880,9 @@ qla24xx_beacon_off(struct scsi_qla_host *vha) if (IS_P3P_TYPE(ha)) return QLA_SUCCESS; + if (!ha->flags.fw_started) + return QLA_SUCCESS; + ha->beacon_blink_led = 0; if (IS_QLA2031(ha) || IS_QLA27XX(ha)) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index cd2fdac000c9..ba9ba0e04f42 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1741,15 +1741,11 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) * * With scsi-mq enabled, there are a fixed number of preallocated * requests equal in number to shost->can_queue. If all of the - * preallocated requests are already in use, then using GFP_ATOMIC with - * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL - * will cause blk_get_request() to sleep until an active command - * completes, freeing up a request. Neither option is ideal, but - * GFP_KERNEL is the better choice to prevent userspace from getting an - * unexpected EWOULDBLOCK. - * - * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually - * does not sleep except under memory pressure. + * preallocated requests are already in use, then blk_get_request() + * will sleep until an active command completes, freeing up a request. 
+ * Although waiting in an asynchronous interface is less than ideal, we + * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might + * not expect an EWOULDBLOCK from this condition. */ rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0); @@ -2185,6 +2181,7 @@ sg_add_sfp(Sg_device * sdp) write_lock_irqsave(&sdp->sfd_lock, iflags); if (atomic_read(&sdp->detaching)) { write_unlock_irqrestore(&sdp->sfd_lock, iflags); + kfree(sfp); return ERR_PTR(-ENODEV); } list_add_tail(&sfp->sfd_siblings, &sdp->sfds); diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 3f3cb72e0c0c..d0389b20574d 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -523,18 +523,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt) static int sr_block_open(struct block_device *bdev, fmode_t mode) { struct scsi_cd *cd; + struct scsi_device *sdev; int ret = -ENXIO; + cd = scsi_cd_get(bdev->bd_disk); + if (!cd) + goto out; + + sdev = cd->device; + scsi_autopm_get_device(sdev); check_disk_change(bdev); mutex_lock(&sr_mutex); - cd = scsi_cd_get(bdev->bd_disk); - if (cd) { - ret = cdrom_open(&cd->cdi, bdev, mode); - if (ret) - scsi_cd_put(cd); - } + ret = cdrom_open(&cd->cdi, bdev, mode); mutex_unlock(&sr_mutex); + + scsi_autopm_put_device(sdev); + if (ret) + scsi_cd_put(cd); + +out: return ret; } @@ -562,6 +570,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, if (ret) goto out; + scsi_autopm_get_device(sdev); + /* * Send SCSI addressing ioctls directly to mid level, send other * ioctls to cdrom/block level. @@ -570,15 +580,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: ret = scsi_ioctl(sdev, cmd, argp); - goto out; + goto put; } ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); if (ret != -ENOSYS) - goto out; + goto put; ret = scsi_ioctl(sdev, cmd, argp); +put: + scsi_autopm_put_device(sdev); + out: mutex_unlock(&sr_mutex); return ret; diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 777e5f1e52d1..0cd947f78b5b 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -561,9 +561,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, (btstat == BTSTAT_SUCCESS || btstat == BTSTAT_LINKED_COMMAND_COMPLETED || btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) { - cmd->result = (DID_OK << 16) | sdstat; - if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer) - cmd->result |= (DRIVER_SENSE << 24); + if (sdstat == SAM_STAT_COMMAND_TERMINATED) { + cmd->result = (DID_RESET << 16); + } else { + cmd->result = (DID_OK << 16) | sdstat; + if (sdstat == SAM_STAT_CHECK_CONDITION && + cmd->sense_buffer) + cmd->result |= (DRIVER_SENSE << 24); + } } else switch (btstat) { case BTSTAT_SUCCESS: diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index a1a0025b59e0..d5d33e12e952 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -402,6 +402,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) fput(asma->file); goto out; } + } else { + vma_set_anonymous(vma); } if (vma->vm_file) diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index 514986b57c2d..25eb3891e34b 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -652,6 +652,7 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) 
struct iscsi_param *param; u32 mrdsl, mbl; u32 max_npdu, max_iso_npdu; + u32 max_iso_payload; if (conn->login->leading_connection) { param = iscsi_find_param_from_key(MAXBURSTLENGTH, @@ -670,8 +671,10 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) mrdsl = conn_ops->MaxRecvDataSegmentLength; max_npdu = mbl / mrdsl; - max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD / - (ISCSI_HDR_LEN + mrdsl + + max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss); + + max_iso_npdu = max_iso_payload / + (ISCSI_HDR_LEN + mrdsl + cxgbit_digest_len[csk->submode]); csk->max_iso_npdu = min(max_npdu, max_iso_npdu); @@ -741,6 +744,9 @@ static int cxgbit_set_params(struct iscsi_conn *conn) if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl) conn_ops->MaxRecvDataSegmentLength = cdev->mdsl; + if (cxgbit_set_digest(csk)) + return -1; + if (conn->login->leading_connection) { param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL, conn->param_list); @@ -764,7 +770,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn) if (is_t5(cdev->lldi.adapter_type)) goto enable_ddp; else - goto enable_digest; + return 0; } if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) { @@ -781,10 +787,6 @@ enable_ddp: } } -enable_digest: - if (cxgbit_set_digest(csk)) - return -1; - return 0; } diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index a502f1af4a21..ed3114556fda 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1560,9 +1560,12 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled) d->iotlb = niotlb; for (i = 0; i < d->nvqs; ++i) { - mutex_lock(&d->vqs[i]->mutex); - d->vqs[i]->iotlb = niotlb; - mutex_unlock(&d->vqs[i]->mutex); + struct vhost_virtqueue *vq = d->vqs[i]; + + mutex_lock(&vq->mutex); + vq->iotlb = niotlb; + __vhost_vq_meta_reset(vq); + mutex_unlock(&vq->mutex); } vhost_umem_clean(oiotlb); diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index 46a4484e3da7..c6f78d27947b 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -20,7 +20,7 @@ #include <drm/drm_connector.h> /* For DRM_MODE_PANEL_ORIENTATION_* */ static bool request_mem_succeeded = false; -static bool nowc = false; +static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC; static struct fb_var_screeninfo efifb_defined = { .activate = FB_ACTIVATE_NOW, @@ -68,8 +68,12 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, static void efifb_destroy(struct fb_info *info) { - if (info->screen_base) - iounmap(info->screen_base); + if (info->screen_base) { + if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC)) + iounmap(info->screen_base); + else + memunmap(info->screen_base); + } if (request_mem_succeeded) release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); @@ -104,7 +108,7 @@ static int efifb_setup(char *options) else if (!strncmp(this_opt, "width:", 6)) screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0); else if (!strcmp(this_opt, "nowc")) - nowc = true; + mem_flags &= ~EFI_MEMORY_WC; } } @@ -164,6 +168,7 @@ static int efifb_probe(struct platform_device *dev) unsigned int size_remap; unsigned int size_total; char *option = NULL; + efi_memory_desc_t md; if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled) return -ENODEV; @@ -272,12 +277,35 @@ static int efifb_probe(struct platform_device *dev) info->apertures->ranges[0].base = efifb_fix.smem_start; info->apertures->ranges[0].size = size_remap; - if (nowc) - info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); - else - info->screen_base 
= ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len); + if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) { + if ((efifb_fix.smem_start + efifb_fix.smem_len) > + (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) { + pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n", + efifb_fix.smem_start); + err = -EIO; + goto err_release_fb; + } + /* + * If the UEFI memory map covers the efifb region, we may only + * remap it using the attributes the memory map prescribes. + */ + mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB; + mem_flags &= md.attribute; + } + if (mem_flags & EFI_MEMORY_WC) + info->screen_base = ioremap_wc(efifb_fix.smem_start, + efifb_fix.smem_len); + else if (mem_flags & EFI_MEMORY_UC) + info->screen_base = ioremap(efifb_fix.smem_start, + efifb_fix.smem_len); + else if (mem_flags & EFI_MEMORY_WT) + info->screen_base = memremap(efifb_fix.smem_start, + efifb_fix.smem_len, MEMREMAP_WT); + else if (mem_flags & EFI_MEMORY_WB) + info->screen_base = memremap(efifb_fix.smem_start, + efifb_fix.smem_len, MEMREMAP_WB); if (!info->screen_base) { - pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n", + pr_err("efifb: abort, cannot remap video memory 0x%x @ 0x%lx\n", efifb_fix.smem_len, efifb_fix.smem_start); err = -EIO; goto err_release_fb; @@ -371,7 +399,10 @@ err_fb_dealoc: err_groups: sysfs_remove_groups(&dev->dev.kobj, efifb_groups); err_unmap: - iounmap(info->screen_base); + if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC)) + iounmap(info->screen_base); + else + memunmap(info->screen_base); err_release_fb: framebuffer_release(info); err_release_mem: diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 6b237e3f4983..3988c0914322 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -513,7 +513,9 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, tell_host(vb, vb->inflate_vq); /* balloon's page migration 2nd step -- deflate "page" */ + spin_lock_irqsave(&vb_dev_info->pages_lock, flags); balloon_page_delete(page); + spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; set_page_pfns(vb, vb->pfns, page); tell_host(vb, vb->deflate_vq); |
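The efifb hunk above only remaps the framebuffer with attributes the UEFI memory map permits: the driver's preferred modes are widened with WT/WB once a covering memory descriptor is found, masked against that descriptor's attribute bits, and the strongest surviving mode decides between ioremap_wc(), ioremap(), and memremap(). A condensed sketch of that selection with a hypothetical helper name:

#include <linux/efi.h>
#include <linux/types.h>

/* Pick the strongest caching mode both the driver and the firmware allow. */
static u64 example_pick_fb_attr(u64 wanted, const efi_memory_desc_t *md)
{
	u64 allowed = wanted;

	if (md) {
		/* The memory map prescribes which attributes are legal here. */
		allowed |= EFI_MEMORY_WT | EFI_MEMORY_WB;
		allowed &= md->attribute;
	}

	if (allowed & EFI_MEMORY_WC)
		return EFI_MEMORY_WC;	/* ioremap_wc() */
	if (allowed & EFI_MEMORY_UC)
		return EFI_MEMORY_UC;	/* ioremap() */
	if (allowed & EFI_MEMORY_WT)
		return EFI_MEMORY_WT;	/* memremap(..., MEMREMAP_WT) */
	return allowed & EFI_MEMORY_WB;	/* memremap(..., MEMREMAP_WB), or 0 */
}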