author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-03-30 17:35:14 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-03-30 17:35:14 -0700
commit | 2d385336afcc43732aef1d51528c03f177ecd54e (patch)
tree | fff0780aea481a225c4d3460aad286c8e95aa1e7
parent | 673b41e04a035d760bc0aff83fa9ee24fd9c2779 (diff)
parent | 8a13b02a010a743ea0725e9a5454f42cddb65cf0 (diff)
Merge tag 'irq-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
"Updates for the interrupt subsystem:
Treewide:
- Cleanup of setup_irq(), which is no longer required because the
memory allocator is available early; a condensed before/after
sketch of the conversion pattern follows this message.
Most cleanup changes come through the various maintainer trees, so
the final removal of setup_irq() is postponed towards the end of
the merge window.
Core:
- Protection against unsafe invocation of interrupt handlers and
unsafe interrupt injection, including a fixup of the offending
PCI/AER error injection mechanism; a hedged injection sketch
also follows this message.
Invoking interrupt handlers from arbitrary contexts, i.e. outside
of an actual interrupt, can cause inconsistent state on the
fragile x86 interrupt-affinity-changing hardware trainwreck.
Drivers:
- Second wave of support for the new ARM GICv4.1
- Multi-instance support for Xilinx and PLIC interrupt controllers
- CPU-Hotplug support for PLIC (see the condensed threshold sketch
after this message)
- The obligatory new driver for X1000 TCU
- Enhancements, cleanups and fixes all over the place"
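The setup_irq() conversions in this series all follow one mechanical
pattern, visible in every arch hunk of the diff below: drop the static
struct irqaction and register the handler with request_irq(), reporting
the (previously ignored) failure. A minimal before/after sketch,
condensed from the i8259 cascade hunk:

	/* Before: a static irqaction wired up via setup_irq(), needed
	 * back when request_irq() could not allocate memory this early
	 * in boot. */
	static struct irqaction cascade = {
		.handler = no_action,
		.name    = "cascade",
	};

	setup_irq(2, &cascade);

	/* After: the memory allocator is available early now, so plain
	 * request_irq() works; the irqaction is allocated internally and
	 * a registration failure is at least logged. */
	if (request_irq(2, no_action, 0, "cascade", NULL))
		pr_err("Failed to request irq 2 (cascade)\n");

The same shape repeats across the alpha, c6x, hexagon, sh and unicore32
hunks; only the irq number, handler, flags and dev_id change.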
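For the injection item: the core now refuses to run a handler outside
interrupt context once irqd_set_handle_enforce_irqctx() is set on it
(see the x86 vector.c hunk below), so injection has to go through the
irq core rather than calling generic_handle_irq() directly. A hedged
sketch, assuming the irq_inject_interrupt() helper added in this cycle;
that entry point is an assumption here, as it does not appear in the
truncated diff shown:

	/* Assumed interface: irq_inject_interrupt(unsigned int irq) from
	 * this cycle's core changes (CONFIG_GENERIC_IRQ_INJECTION). It
	 * marks the interrupt pending and lets the core resend it from a
	 * safe context, instead of invoking the handler from task
	 * context, which the irqctx enforcement would reject. */
	static int inject_for_error_testing(unsigned int irq)
	{
		int err = irq_inject_interrupt(irq);

		if (err)
			pr_err("irq %u: injection failed (%d)\n", irq, err);
		return err;
	}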
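For the PLIC CPU-hotplug item: each hart gates its external interrupts
behind a per-context priority threshold, so the hotplug callbacks only
have to flip that threshold. Condensed from the irq-sifive-plic.c hunks
below:

	#define PLIC_DISABLE_THRESHOLD	0xf
	#define PLIC_ENABLE_THRESHOLD	0

	static void plic_set_threshold(struct plic_handler *handler,
				       u32 threshold)
	{
		/* priority must be > threshold to trigger an interrupt */
		writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
	}

	static int plic_starting_cpu(unsigned int cpu)
	{
		/* CPU coming online: accept external interrupts again */
		csr_set(CSR_IE, IE_EIE);
		plic_set_threshold(this_cpu_ptr(&plic_handlers),
				   PLIC_ENABLE_THRESHOLD);
		return 0;
	}

	static int plic_dying_cpu(unsigned int cpu)
	{
		/* CPU going offline: mask everything at this hart */
		csr_clear(CSR_IE, IE_EIE);
		plic_set_threshold(this_cpu_ptr(&plic_handlers),
				   PLIC_DISABLE_THRESHOLD);
		return 0;
	}

	/* in plic_init(): runs plic_starting_cpu() on every CPU that
	 * comes online, including those already up at registration */
	cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
			  "irqchip/sifive/plic:starting",
			  plic_starting_cpu, plic_dying_cpu);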
* tag 'irq-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (58 commits)
unicore32: Replace setup_irq() by request_irq()
sh: Replace setup_irq() by request_irq()
hexagon: Replace setup_irq() by request_irq()
c6x: Replace setup_irq() by request_irq()
alpha: Replace setup_irq() by request_irq()
irqchip/gic-v4.1: Eagerly vmap vPEs
irqchip/gic-v4.1: Add VSGI property setup
irqchip/gic-v4.1: Add VSGI allocation/teardown
irqchip/gic-v4.1: Move doorbell management to the GICv4 abstraction layer
irqchip/gic-v4.1: Plumb set_vcpu_affinity SGI callbacks
irqchip/gic-v4.1: Plumb get/set_irqchip_state SGI callbacks
irqchip/gic-v4.1: Plumb mask/unmask SGI callbacks
irqchip/gic-v4.1: Add initial SGI configuration
irqchip/gic-v4.1: Plumb skeletal VSGI irqchip
irqchip/stm32: Retrigger both in eoi and unmask callbacks
irqchip/gic-v3: Move irq_domain_update_bus_token to after checking for NULL domain
irqchip/xilinx: Do not call irq_set_default_host()
irqchip/xilinx: Enable generic irq multi handler
irqchip/xilinx: Fill error code when irq domain registration fails
irqchip/xilinx: Add support for multiple instances
...
64 files changed, 1185 insertions, 420 deletions
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index da3e10d5f7fe..d17e44c99df9 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -213,32 +213,13 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr, * The special RTC interrupt type. The interrupt itself was * processed by PALcode, and comes in via entInt vector 1. */ - -struct irqaction timer_irqaction = { - .handler = rtc_timer_interrupt, - .name = "timer", -}; - void __init -init_rtc_irq(void) +init_rtc_irq(irq_handler_t handler) { irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip, handle_percpu_irq, "RTC"); - setup_irq(RTC_IRQ, &timer_irqaction); + if (!handler) + handler = rtc_timer_interrupt; + if (request_irq(RTC_IRQ, handler, 0, "timer", NULL)) + pr_err("Failed to register timer interrupt\n"); } - -/* Dummy irqactions. */ -struct irqaction isa_cascade_irqaction = { - .handler = no_action, - .name = "isa-cascade" -}; - -struct irqaction timer_cascade_irqaction = { - .handler = no_action, - .name = "timer-cascade" -}; - -struct irqaction halt_switch_irqaction = { - .handler = no_action, - .name = "halt-switch" -}; diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c index 5d54c076a8ae..1dcf0d9038fd 100644 --- a/arch/alpha/kernel/irq_i8259.c +++ b/arch/alpha/kernel/irq_i8259.c @@ -82,11 +82,6 @@ struct irq_chip i8259a_irq_type = { void __init init_i8259a_irqs(void) { - static struct irqaction cascade = { - .handler = no_action, - .name = "cascade", - }; - long i; outb(0xff, 0x21); /* mask all of 8259A-1 */ @@ -96,7 +91,8 @@ init_i8259a_irqs(void) irq_set_chip_and_handler(i, &i8259a_irq_type, handle_level_irq); } - setup_irq(2, &cascade); + if (request_irq(2, no_action, 0, "cascade", NULL)) + pr_err("Failed to request irq 2 (cascade)\n"); } diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h index 16f2b0276f3a..fbf21892e66d 100644 --- a/arch/alpha/kernel/irq_impl.h +++ b/arch/alpha/kernel/irq_impl.h @@ -21,14 +21,9 @@ extern void isa_no_iack_sc_device_interrupt(unsigned long); extern void srm_device_interrupt(unsigned long); extern void pyxis_device_interrupt(unsigned long); -extern struct irqaction timer_irqaction; -extern struct irqaction isa_cascade_irqaction; -extern struct irqaction timer_cascade_irqaction; -extern struct irqaction halt_switch_irqaction; - extern void init_srm_irqs(long, unsigned long); extern void init_pyxis_irqs(unsigned long); -extern void init_rtc_irq(void); +extern void init_rtc_irq(irq_handler_t handler); extern void common_init_isa_dma(void); diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c index a968b10e687d..27070b5bd33e 100644 --- a/arch/alpha/kernel/irq_pyxis.c +++ b/arch/alpha/kernel/irq_pyxis.c @@ -107,5 +107,6 @@ init_pyxis_irqs(unsigned long ignore_mask) irq_set_status_flags(i, IRQ_LEVEL); } - setup_irq(16+7, &isa_cascade_irqaction); + if (request_irq(16 + 7, no_action, 0, "isa-cascade", NULL)) + pr_err("Failed to register isa-cascade interrupt\n"); } diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c index e56efd5b855f..ce5430056f65 100644 --- a/arch/alpha/kernel/sys_alcor.c +++ b/arch/alpha/kernel/sys_alcor.c @@ -133,7 +133,8 @@ alcor_init_irq(void) init_i8259a_irqs(); common_init_isa_dma(); - setup_irq(16+31, &isa_cascade_irqaction); + if (request_irq(16 + 31, no_action, 0, "isa-cascade", NULL)) + pr_err("Failed to register isa-cascade interrupt\n"); } diff --git a/arch/alpha/kernel/sys_cabriolet.c 
b/arch/alpha/kernel/sys_cabriolet.c index 10bc46a4ec40..0aa6a27d0e2f 100644 --- a/arch/alpha/kernel/sys_cabriolet.c +++ b/arch/alpha/kernel/sys_cabriolet.c @@ -112,7 +112,8 @@ common_init_irq(void (*srm_dev_int)(unsigned long v)) } common_init_isa_dma(); - setup_irq(16+4, &isa_cascade_irqaction); + if (request_irq(16 + 4, no_action, 0, "isa-cascade", NULL)) + pr_err("Failed to register isa-cascade interrupt\n"); } #ifndef CONFIG_ALPHA_PC164 diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c index 5251937ec1b4..1cdfe55fb987 100644 --- a/arch/alpha/kernel/sys_eb64p.c +++ b/arch/alpha/kernel/sys_eb64p.c @@ -123,7 +123,8 @@ eb64p_init_irq(void) } common_init_isa_dma(); - setup_irq(16+5, &isa_cascade_irqaction); + if (request_irq(16 + 5, no_action, 0, "isa-cascade", NULL)) + pr_err("Failed to register isa-cascade interrupt\n"); } /* diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index 8d34cf6e002a..533899a4a1a1 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c @@ -397,7 +397,7 @@ marvel_init_pci(void) static void __init marvel_init_rtc(void) { - init_rtc_irq(); + init_rtc_irq(NULL); } static void diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c index 6fa07dc5339d..702292af2225 100644 --- a/arch/alpha/kernel/sys_miata.c +++ b/arch/alpha/kernel/sys_miata.c @@ -81,8 +81,10 @@ miata_init_irq(void) init_pyxis_irqs(0x63b0000); common_init_isa_dma(); - setup_irq(16+2, &halt_switch_irqaction); /* SRM only? */ - setup_irq(16+6, &timer_cascade_irqaction); + if (request_irq(16 + 2, no_action, 0, "halt-switch", NULL)) + pr_err("Failed to register halt-switch interrupt\n"); + if (request_irq(16 + 6, no_action, 0, "timer-cascade", NULL)) + pr_err("Failed to register timer-cascade interrupt\n"); } diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c index 07830cccabf9..d33074011960 100644 --- a/arch/alpha/kernel/sys_ruffian.c +++ b/arch/alpha/kernel/sys_ruffian.c @@ -82,7 +82,8 @@ ruffian_init_rtc(void) outb(0x31, 0x42); outb(0x13, 0x42); - setup_irq(0, &timer_irqaction); + if (request_irq(0, rtc_timer_interrupt, 0, "timer", NULL)) + pr_err("Failed to request irq 0 (timer)\n"); } static void diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c index a3db719d3c38..4d85eaeb44aa 100644 --- a/arch/alpha/kernel/sys_rx164.c +++ b/arch/alpha/kernel/sys_rx164.c @@ -106,7 +106,8 @@ rx164_init_irq(void) init_i8259a_irqs(); common_init_isa_dma(); - setup_irq(16+20, &isa_cascade_irqaction); + if (request_irq(16 + 20, no_action, 0, "isa-cascade", NULL)) + pr_err("Failed to register isa-cascade interrupt\n"); } diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c index 1ec638a2746a..17cc203176c8 100644 --- a/arch/alpha/kernel/sys_sx164.c +++ b/arch/alpha/kernel/sys_sx164.c @@ -54,7 +54,8 @@ sx164_init_irq(void) else init_pyxis_irqs(0xff00003f0000UL); - setup_irq(16+6, &timer_cascade_irqaction); + if (request_irq(16 + 6, no_action, 0, "timer-cascade", NULL)) + pr_err("Failed to register timer-cascade interrupt\n"); } /* diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index 8e64052811ab..2191bde161fd 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c @@ -156,10 +156,6 @@ static void __init wildfire_init_irq_per_pca(int qbbno, int pcano) { int i, irq_bias; - static struct irqaction isa_enable = { - .handler = no_action, - .name = "isa_enable", - }; irq_bias = qbbno * 
(WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA) + pcano * WILDFIRE_IRQ_PER_PCA; @@ -198,7 +194,8 @@ wildfire_init_irq_per_pca(int qbbno, int pcano) irq_set_status_flags(i + irq_bias, IRQ_LEVEL); } - setup_irq(32+irq_bias, &isa_enable); + if (request_irq(32 + irq_bias, no_action, 0, "isa_enable", NULL)) + pr_err("Failed to register isa_enable interrupt\n"); } static void __init diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 0069360697ee..4d01c392ab14 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -242,7 +242,7 @@ common_init_rtc(void) outb(0x31, 0x42); outb(0x13, 0x42); - init_rtc_irq(); + init_rtc_irq(NULL); } @@ -396,9 +396,7 @@ time_init(void) if (alpha_using_qemu) { clocksource_register_hz(&qemu_cs, NSEC_PER_SEC); init_qemu_clockevent(); - - timer_irqaction.handler = qemu_timer_interrupt; - init_rtc_irq(); + init_rtc_irq(qemu_timer_interrupt); return; } diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index 947ef7981d92..c98ebae1aeac 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -302,10 +302,13 @@ static int sa1111_retrigger_irq(struct irq_data *d) break; } - if (i == 8) + if (i == 8) { pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n", d->irq); - return i == 8 ? -1 : 0; + return 0; + } + + return 1; } static int sa1111_type_irq(struct irq_data *d, unsigned int flags) diff --git a/arch/c6x/platforms/timer64.c b/arch/c6x/platforms/timer64.c index d98d94303498..661f4c7c6ef6 100644 --- a/arch/c6x/platforms/timer64.c +++ b/arch/c6x/platforms/timer64.c @@ -165,13 +165,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static struct irqaction timer_iact = { - .name = "timer", - .flags = IRQF_TIMER, - .handler = timer_interrupt, - .dev_id = &t64_clockevent_device, -}; - void __init timer64_init(void) { struct clock_event_device *cd = &t64_clockevent_device; @@ -238,7 +231,9 @@ void __init timer64_init(void) cd->cpumask = cpumask_of(smp_processor_id()); clockevents_register_device(cd); - setup_irq(cd->irq, &timer_iact); + if (request_irq(cd->irq, timer_interrupt, IRQF_TIMER, "timer", + &t64_clockevent_device)) + pr_err("Failed to request irq %d (timer)\n", cd->irq); out: of_node_put(np); diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c index 0bbbe652a513..619c56420aa0 100644 --- a/arch/hexagon/kernel/smp.c +++ b/arch/hexagon/kernel/smp.c @@ -114,12 +114,6 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg) local_irq_restore(flags); } -static struct irqaction ipi_intdesc = { - .handler = handle_ipi, - .flags = IRQF_TRIGGER_RISING, - .name = "ipi_handler" -}; - void __init smp_prepare_boot_cpu(void) { } @@ -132,8 +126,8 @@ void __init smp_prepare_boot_cpu(void) void start_secondary(void) { - unsigned int cpu; unsigned long thread_ptr; + unsigned int cpu, irq; /* Calculate thread_info pointer from stack pointer */ __asm__ __volatile__( @@ -155,7 +149,10 @@ void start_secondary(void) cpu = smp_processor_id(); - setup_irq(BASE_IPI_IRQ + cpu, &ipi_intdesc); + irq = BASE_IPI_IRQ + cpu; + if (request_irq(irq, handle_ipi, IRQF_TRIGGER_RISING, "ipi_handler", + NULL)) + pr_err("Failed to request irq %u (ipi_handler)\n", irq); /* Register the clock_event dummy */ setup_percpu_clockdev(); @@ -201,7 +198,7 @@ void __init smp_cpus_done(unsigned int max_cpus) void __init smp_prepare_cpus(unsigned int max_cpus) { - int i; + int i, irq = BASE_IPI_IRQ; /* * should eventually have some sort of machine @@ -213,8 +210,11 @@ void __init 
smp_prepare_cpus(unsigned int max_cpus) set_cpu_present(i, true); /* Also need to register the interrupts for IPI */ - if (max_cpus > 1) - setup_irq(BASE_IPI_IRQ, &ipi_intdesc); + if (max_cpus > 1) { + if (request_irq(irq, handle_ipi, IRQF_TRIGGER_RISING, + "ipi_handler", NULL)) + pr_err("Failed to request irq %d (ipi_handler)\n", irq); + } } void smp_send_reschedule(int cpu) diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c index f99e9257bed4..feffe527ac92 100644 --- a/arch/hexagon/kernel/time.c +++ b/arch/hexagon/kernel/time.c @@ -143,13 +143,6 @@ static irqreturn_t timer_interrupt(int irq, void *devid) return IRQ_HANDLED; } -/* This should also be pulled from devtree */ -static struct irqaction rtos_timer_intdesc = { - .handler = timer_interrupt, - .flags = IRQF_TIMER | IRQF_TRIGGER_RISING, - .name = "rtos_timer" -}; - /* * time_init_deferred - called by start_kernel to set up timer/clock source * @@ -163,6 +156,7 @@ void __init time_init_deferred(void) { struct resource *resource = NULL; struct clock_event_device *ce_dev = &hexagon_clockevent_dev; + unsigned long flag = IRQF_TIMER | IRQF_TRIGGER_RISING; ce_dev->cpumask = cpu_all_mask; @@ -195,7 +189,8 @@ void __init time_init_deferred(void) #endif clockevents_register_device(ce_dev); - setup_irq(ce_dev->irq, &rtos_timer_intdesc); + if (request_irq(ce_dev->irq, timer_interrupt, flag, "rtos_timer", NULL)) + pr_err("Failed to register rtos_timer interrupt\n"); } void __init time_init(void) diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 6a331bd57ea8..242f58ec086b 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -47,6 +47,8 @@ config MICROBLAZE select CPU_NO_EFFICIENT_FFS select MMU_GATHER_NO_RANGE if MMU select SPARSE_IRQ + select GENERIC_IRQ_MULTI_HANDLER + select HANDLE_DOMAIN_IRQ # Endianness selection choice diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h index eac2fb4b3fb9..5166f0893e2b 100644 --- a/arch/microblaze/include/asm/irq.h +++ b/arch/microblaze/include/asm/irq.h @@ -14,7 +14,4 @@ struct pt_regs; extern void do_IRQ(struct pt_regs *regs); -/* should be defined in each interrupt controller driver */ -extern unsigned int xintc_get_irq(void); - #endif /* _ASM_MICROBLAZE_IRQ_H */ diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index 903dad822fad..0b37dde60a1e 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c @@ -20,29 +20,10 @@ #include <linux/irqchip.h> #include <linux/of_irq.h> -static u32 concurrent_irq; - void __irq_entry do_IRQ(struct pt_regs *regs) { - unsigned int irq; - struct pt_regs *old_regs = set_irq_regs(regs); trace_hardirqs_off(); - - irq_enter(); - irq = xintc_get_irq(); -next_irq: - BUG_ON(!irq); - generic_handle_irq(irq); - - irq = xintc_get_irq(); - if (irq != -1U) { - pr_debug("next irq: %d\n", irq); - ++concurrent_irq; - goto next_irq; - } - - irq_exit(); - set_irq_regs(old_regs); + handle_arch_irq(regs); trace_hardirqs_on(); } diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index ffb3d94bf0cc..55ea614d89bf 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -157,5 +157,5 @@ void __init trap_init(void) /* Set the exception vector address */ csr_write(CSR_TVEC, &handle_exception); /* Enable interrupts */ - csr_write(CSR_IE, IE_SIE | IE_EIE); + csr_write(CSR_IE, IE_SIE); } diff --git a/arch/sh/boards/mach-cayman/irq.c b/arch/sh/boards/mach-cayman/irq.c index 3b6ea2d99013..0305d0b51730 100644 --- 
a/arch/sh/boards/mach-cayman/irq.c +++ b/arch/sh/boards/mach-cayman/irq.c @@ -40,16 +40,6 @@ static irqreturn_t cayman_interrupt_pci2(int irq, void *dev_id) return IRQ_NONE; } -static struct irqaction cayman_action_smsc = { - .name = "Cayman SMSC Mux", - .handler = cayman_interrupt_smsc, -}; - -static struct irqaction cayman_action_pci2 = { - .name = "Cayman PCI2 Mux", - .handler = cayman_interrupt_pci2, -}; - static void enable_cayman_irq(struct irq_data *data) { unsigned int irq = data->irq; @@ -149,6 +139,10 @@ void init_cayman_irq(void) } /* Setup the SMSC interrupt */ - setup_irq(SMSC_IRQ, &cayman_action_smsc); - setup_irq(PCI2_IRQ, &cayman_action_pci2); + if (request_irq(SMSC_IRQ, cayman_interrupt_smsc, 0, "Cayman SMSC Mux", + NULL)) + pr_err("Failed to register Cayman SMSC Mux interrupt\n"); + if (request_irq(PCI2_IRQ, cayman_interrupt_pci2, 0, "Cayman PCI2 Mux", + NULL)) + pr_err("Failed to register Cayman PCI2 Mux interrupt\n"); } diff --git a/arch/sh/drivers/dma/dma-pvr2.c b/arch/sh/drivers/dma/dma-pvr2.c index b5dbd1f75768..21c347543e19 100644 --- a/arch/sh/drivers/dma/dma-pvr2.c +++ b/arch/sh/drivers/dma/dma-pvr2.c @@ -64,11 +64,6 @@ static int pvr2_xfer_dma(struct dma_channel *chan) return 0; } -static struct irqaction pvr2_dma_irq = { - .name = "pvr2 DMA handler", - .handler = pvr2_dma_interrupt, -}; - static struct dma_ops pvr2_dma_ops = { .request = pvr2_request_dma, .get_residue = pvr2_get_dma_residue, @@ -84,7 +79,9 @@ static struct dma_info pvr2_dma_info = { static int __init pvr2_dma_init(void) { - setup_irq(HW_EVENT_PVR2_DMA, &pvr2_dma_irq); + if (request_irq(HW_EVENT_PVR2_DMA, pvr2_dma_interrupt, 0, + "pvr2 DMA handler", NULL)) + pr_err("Failed to register pvr2 DMA handler interrupt\n"); request_dma(PVR2_CASCADE_CHAN, "pvr2 cascade"); return register_dmac(&pvr2_dma_info); diff --git a/arch/unicore32/kernel/time.c b/arch/unicore32/kernel/time.c index 8b217a761bf0..c3a37edf4d40 100644 --- a/arch/unicore32/kernel/time.c +++ b/arch/unicore32/kernel/time.c @@ -72,13 +72,6 @@ static struct clocksource cksrc_puv3_oscr = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static struct irqaction puv3_timer_irq = { - .name = "ost0", - .flags = IRQF_TIMER | IRQF_IRQPOLL, - .handler = puv3_ost0_interrupt, - .dev_id = &ckevt_puv3_osmr0, -}; - void __init time_init(void) { writel(0, OST_OIER); /* disable any timer interrupts */ @@ -94,7 +87,9 @@ void __init time_init(void) ckevt_puv3_osmr0.min_delta_ticks = MIN_OSCR_DELTA * 2; ckevt_puv3_osmr0.cpumask = cpumask_of(0); - setup_irq(IRQ_TIMER0, &puv3_timer_irq); + if (request_irq(IRQ_TIMER0, puv3_ost0_interrupt, + IRQF_TIMER | IRQF_IRQPOLL, "ost0", &ckevt_puv3_osmr0)) + pr_err("Failed to register ost0 interrupt\n"); clocksource_register_hz(&cksrc_puv3_oscr, CLOCK_TICK_RATE); clockevents_register_device(&ckevt_puv3_osmr0); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cb3633d243cb..51da546e0745 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -128,6 +128,7 @@ config X86 select GENERIC_GETTIMEOFDAY select GENERIC_VDSO_TIME_NS select GUP_GET_PTE_LOW_HIGH if X86_PAE + select HARDIRQS_SW_RESEND select HARDLOCKUP_CHECK_TIMESTAMP if X86_64 select HAVE_ACPI_APEI if ACPI select HAVE_ACPI_APEI_NMI if ACPI diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 48293d15f1e1..67768e54438b 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -557,6 +557,12 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, irqd->hwirq = virq + i; 
irqd_set_single_target(irqd); /* + * Prevent that any of these interrupts is invoked in + * non interrupt context via e.g. generic_handle_irq() + * as that can corrupt the affinity move state. + */ + irqd_set_handle_enforce_irqctx(irqd); + /* * Legacy vectors are already assigned when the IOAPIC * takes them over. They stay on the same vector. This is * required for check_timer() to work correctly as it might diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 6d397732138d..24fe08702ef7 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -458,7 +458,7 @@ config IMX_IRQSTEER Support for the i.MX IRQSTEER interrupt multiplexer/remapper. config IMX_INTMUX - def_bool y if ARCH_MXC + def_bool y if ARCH_MXC || COMPILE_TEST select IRQ_DOMAIN help Support for the i.MX INTMUX interrupt multiplexer. diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c index bb1ad451392f..2c999dc310c1 100644 --- a/drivers/irqchip/irq-atmel-aic.c +++ b/drivers/irqchip/irq-atmel-aic.c @@ -83,7 +83,7 @@ static int aic_retrigger(struct irq_data *d) irq_reg_writel(gc, d->mask, AT91_AIC_ISCR); irq_gc_unlock(gc); - return 0; + return 1; } static int aic_set_type(struct irq_data *d, unsigned type) diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index 29333497ba10..fc1b3a9cdafc 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -128,7 +128,7 @@ static int aic5_retrigger(struct irq_data *d) irq_reg_writel(bgc, 1, AT91_AIC5_ISCR); irq_gc_unlock(bgc); - return 0; + return 1; } static int aic5_set_type(struct irq_data *d, unsigned type) diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c index 418245d31921..a1e004af23e7 100644 --- a/drivers/irqchip/irq-bcm2835.c +++ b/drivers/irqchip/irq-bcm2835.c @@ -61,6 +61,7 @@ | SHORTCUT1_MASK | SHORTCUT2_MASK) #define REG_FIQ_CONTROL 0x0c +#define FIQ_CONTROL_ENABLE BIT(7) #define NR_BANKS 3 #define IRQS_PER_BANK 32 @@ -135,6 +136,7 @@ static int __init armctrl_of_init(struct device_node *node, { void __iomem *base; int irq, b, i; + u32 reg; base = of_iomap(node, 0); if (!base) @@ -157,6 +159,19 @@ static int __init armctrl_of_init(struct device_node *node, handle_level_irq); irq_set_probe(irq); } + + reg = readl_relaxed(intc.enable[b]); + if (reg) { + writel_relaxed(reg, intc.disable[b]); + pr_err(FW_BUG "Bootloader left irq enabled: " + "bank %d irq %*pbl\n", b, IRQS_PER_BANK, ®); + } + } + + reg = readl_relaxed(base + REG_FIQ_CONTROL); + if (reg & FIQ_CONTROL_ENABLE) { + writel_relaxed(0, base + REG_FIQ_CONTROL); + pr_err(FW_BUG "Bootloader left fiq enabled\n"); } if (is_2836) { diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index cbf01afcd2a6..eb9bce93cd05 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -50,7 +50,7 @@ struct bcm7038_l1_chip { struct bcm7038_l1_cpu { void __iomem *map_base; - u32 mask_cache[0]; + u32 mask_cache[]; }; /* diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 83b1186ffcad..54d142ccc63a 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -96,6 +96,7 @@ struct its_node { struct mutex dev_alloc_lock; struct list_head entry; void __iomem *base; + void __iomem *sgir_base; phys_addr_t phys_base; struct its_cmd_block *cmd_base; struct its_cmd_block *cmd_write; @@ -188,6 +189,15 @@ static DEFINE_IDA(its_vpeid_ida); #define gic_data_rdist_rd_base() 
(gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) +/* + * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we + * always have vSGIs mapped. + */ +static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its) +{ + return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]); +} + static u16 get_its_list(struct its_vm *vm) { struct its_node *its; @@ -197,7 +207,7 @@ static u16 get_its_list(struct its_vm *vm) if (!is_v4(its)) continue; - if (vm->vlpi_count[its->list_nr]) + if (require_its_list_vmovp(vm, its)) __set_bit(its->list_nr, &its_list); } @@ -239,15 +249,41 @@ static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) return NULL; } -static int irq_to_cpuid(struct irq_data *d) +static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags) +{ + raw_spin_lock_irqsave(&vpe->vpe_lock, *flags); + return vpe->col_idx; +} + +static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) +{ + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); +} + +static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) { - struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_vlpi_map *map = get_vlpi_map(d); + int cpu; - if (map) - return map->vpe->col_idx; + if (map) { + cpu = vpe_to_cpuid_lock(map->vpe, flags); + } else { + /* Physical LPIs are already locked via the irq_desc lock */ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + cpu = its_dev->event_map.col_map[its_get_event_id(d)]; + /* Keep GCC quiet... */ + *flags = 0; + } + + return cpu; +} + +static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags) +{ + struct its_vlpi_map *map = get_vlpi_map(d); - return its_dev->event_map.col_map[its_get_event_id(d)]; + if (map) + vpe_to_cpuid_unlock(map->vpe, flags); } static struct its_collection *valid_col(struct its_collection *col) @@ -353,6 +389,15 @@ struct its_cmd_desc { struct { struct its_vpe *vpe; } its_invdb_cmd; + + struct { + struct its_vpe *vpe; + u8 sgi; + u8 priority; + bool enable; + bool group; + bool clear; + } its_vsgi_cmd; }; }; @@ -501,6 +546,31 @@ static void its_encode_db(struct its_cmd_block *cmd, bool db) its_mask_encode(&cmd->raw_cmd[2], db, 63, 63); } +static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi) +{ + its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32); +} + +static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio) +{ + its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20); +} + +static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp) +{ + its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10); +} + +static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr) +{ + its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9); +} + +static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en) +{ + its_mask_encode(&cmd->raw_cmd[0], en, 8, 8); +} + static inline void its_fixup_cmd(struct its_cmd_block *cmd) { /* Let's fixup BE commands */ @@ -866,6 +936,26 @@ static struct its_vpe *its_build_invdb_cmd(struct its_node *its, return valid_vpe(its, desc->its_invdb_cmd.vpe); } +static struct its_vpe *its_build_vsgi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_VSGI); + its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id); + its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi); + its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority); + its_encode_sgi_group(cmd, 
desc->its_vsgi_cmd.group); + its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear); + its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vsgi_cmd.vpe); +} + static u64 its_cmd_ptr_to_offset(struct its_node *its, struct its_cmd_block *ptr) { @@ -1214,7 +1304,7 @@ static void its_send_vmovp(struct its_vpe *vpe) if (!is_v4(its)) continue; - if (!vpe->its_vm->vlpi_count[its->list_nr]) + if (!require_its_list_vmovp(vpe->its_vm, its)) continue; desc.its_vmovp_cmd.col = &its->collections[col_id]; @@ -1321,7 +1411,7 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) static void wait_for_syncr(void __iomem *rdbase) { - while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) + while (readl_relaxed(rdbase + GICR_SYNCR) & 1) cpu_relax(); } @@ -1329,7 +1419,9 @@ static void direct_lpi_inv(struct irq_data *d) { struct its_vlpi_map *map = get_vlpi_map(d); void __iomem *rdbase; + unsigned long flags; u64 val; + int cpu; if (map) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); @@ -1344,10 +1436,14 @@ static void direct_lpi_inv(struct irq_data *d) } /* Target the redistributor this LPI is currently routed to */ - rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base; + cpu = irq_to_cpuid_lock(d, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; gic_write_lpir(val, rdbase + GICR_INVLPIR); wait_for_syncr(rdbase); + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + irq_to_cpuid_unlock(d, flags); } static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) @@ -1499,12 +1595,31 @@ static int its_irq_set_irqchip_state(struct irq_data *d, return 0; } +/* + * Two favourable cases: + * + * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times + * for vSGI delivery + * + * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough + * and we're better off mapping all VPEs always + * + * If neither (a) nor (b) is true, then we map vPEs on demand. + * + */ +static bool gic_requires_eager_mapping(void) +{ + if (!its_list_map || gic_rdists->has_rvpeid) + return true; + + return false; +} + static void its_map_vm(struct its_node *its, struct its_vm *vm) { unsigned long flags; - /* Not using the ITS list? Everything is always mapped. */ - if (!its_list_map) + if (gic_requires_eager_mapping()) return; raw_spin_lock_irqsave(&vmovp_lock, flags); @@ -1538,7 +1653,7 @@ static void its_unmap_vm(struct its_node *its, struct its_vm *vm) unsigned long flags; /* Not using the ITS list? Everything is always mapped. */ - if (!its_list_map) + if (gic_requires_eager_mapping()) return; raw_spin_lock_irqsave(&vmovp_lock, flags); @@ -2036,18 +2151,17 @@ static void its_write_baser(struct its_node *its, struct its_baser *baser, } static int its_setup_baser(struct its_node *its, struct its_baser *baser, - u64 cache, u64 shr, u32 psz, u32 order, - bool indirect) + u64 cache, u64 shr, u32 order, bool indirect) { u64 val = its_read_baser(its, baser); u64 esz = GITS_BASER_ENTRY_SIZE(val); u64 type = GITS_BASER_TYPE(val); u64 baser_phys, tmp; - u32 alloc_pages; + u32 alloc_pages, psz; struct page *page; void *base; -retry_alloc_baser: + psz = baser->psz; alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); if (alloc_pages > GITS_BASER_PAGES_MAX) { pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", @@ -2120,25 +2234,6 @@ retry_baser: goto retry_baser; } - if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { - /* - * Page size didn't stick. 
Let's try a smaller - * size and retry. If we reach 4K, then - * something is horribly wrong... - */ - free_pages((unsigned long)base, order); - baser->base = NULL; - - switch (psz) { - case SZ_16K: - psz = SZ_4K; - goto retry_alloc_baser; - case SZ_64K: - psz = SZ_16K; - goto retry_alloc_baser; - } - } - if (val != tmp) { pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", &its->phys_base, its_base_type_string[type], @@ -2164,13 +2259,14 @@ retry_baser: static bool its_parse_indirect_baser(struct its_node *its, struct its_baser *baser, - u32 psz, u32 *order, u32 ids) + u32 *order, u32 ids) { u64 tmp = its_read_baser(its, baser); u64 type = GITS_BASER_TYPE(tmp); u64 esz = GITS_BASER_ENTRY_SIZE(tmp); u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; u32 new_order = *order; + u32 psz = baser->psz; bool indirect = false; /* No need to enable Indirection if memory requirement < (psz*2)bytes */ @@ -2288,11 +2384,58 @@ static void its_free_tables(struct its_node *its) } } +static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser) +{ + u64 psz = SZ_64K; + + while (psz) { + u64 val, gpsz; + + val = its_read_baser(its, baser); + val &= ~GITS_BASER_PAGE_SIZE_MASK; + + switch (psz) { + case SZ_64K: + gpsz = GITS_BASER_PAGE_SIZE_64K; + break; + case SZ_16K: + gpsz = GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_4K: + default: + gpsz = GITS_BASER_PAGE_SIZE_4K; + break; + } + + gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT; + + val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz); + its_write_baser(its, baser, val); + + if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz) + break; + + switch (psz) { + case SZ_64K: + psz = SZ_16K; + break; + case SZ_16K: + psz = SZ_4K; + break; + case SZ_4K: + default: + return -1; + } + } + + baser->psz = psz; + return 0; +} + static int its_alloc_tables(struct its_node *its) { u64 shr = GITS_BASER_InnerShareable; u64 cache = GITS_BASER_RaWaWb; - u32 psz = SZ_64K; int err, i; if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) @@ -2303,16 +2446,22 @@ static int its_alloc_tables(struct its_node *its) struct its_baser *baser = its->tables + i; u64 val = its_read_baser(its, baser); u64 type = GITS_BASER_TYPE(val); - u32 order = get_order(psz); bool indirect = false; + u32 order; - switch (type) { - case GITS_BASER_TYPE_NONE: + if (type == GITS_BASER_TYPE_NONE) continue; + if (its_probe_baser_psz(its, baser)) { + its_free_tables(its); + return -ENXIO; + } + + order = get_order(baser->psz); + + switch (type) { case GITS_BASER_TYPE_DEVICE: - indirect = its_parse_indirect_baser(its, baser, - psz, &order, + indirect = its_parse_indirect_baser(its, baser, &order, device_ids(its)); break; @@ -2328,20 +2477,18 @@ static int its_alloc_tables(struct its_node *its) } } - indirect = its_parse_indirect_baser(its, baser, - psz, &order, + indirect = its_parse_indirect_baser(its, baser, &order, ITS_MAX_VPEID_BITS); break; } - err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); + err = its_setup_baser(its, baser, cache, shr, order, indirect); if (err < 0) { its_free_tables(its); return err; } /* Update settings which will be used for next BASERn */ - psz = baser->psz; cache = baser->val & GITS_BASER_CACHEABILITY_MASK; shr = baser->val & GITS_BASER_SHAREABILITY_MASK; } @@ -2452,6 +2599,10 @@ static bool allocate_vpe_l2_table(int cpu, u32 id) if (!gic_rdists->has_rvpeid) return true; + /* Skip non-present CPUs */ + if (!base) + return true; + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) 
+ 1; @@ -3482,17 +3633,25 @@ static int its_vpe_set_affinity(struct irq_data *d, { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); int from, cpu = cpumask_first(mask_val); + unsigned long flags; /* * Changing affinity is mega expensive, so let's be as lazy as * we can and only do it if we really have to. Also, if mapped * into the proxy device, we need to move the doorbell * interrupt to its new location. + * + * Another thing is that changing the affinity of a vPE affects + * *other interrupts* such as all the vLPIs that are routed to + * this vPE. This means that the irq_desc lock is not enough to + * protect us, and that we must ensure nobody samples vpe->col_idx + * during the update, hence the lock below which must also be + * taken on any vLPI handling path that evaluates vpe->col_idx. */ - if (vpe->col_idx == cpu) + from = vpe_to_cpuid_lock(vpe, &flags); + if (from == cpu) goto out; - from = vpe->col_idx; vpe->col_idx = cpu; /* @@ -3508,6 +3667,7 @@ static int its_vpe_set_affinity(struct irq_data *d, out: irq_data_update_effective_affinity(d, cpumask_of(cpu)); + vpe_to_cpuid_unlock(vpe, flags); return IRQ_SET_MASK_OK_DONE; } @@ -3528,7 +3688,7 @@ static void its_vpe_schedule(struct its_vpe *vpe) val = virt_to_phys(page_address(vpe->vpt_page)) & GENMASK_ULL(51, 16); val |= GICR_VPENDBASER_RaWaWb; - val |= GICR_VPENDBASER_NonShareable; + val |= GICR_VPENDBASER_InnerShareable; /* * There is no good way of finding out if the pending table is * empty as we can race against the doorbell interrupt very @@ -3619,9 +3779,11 @@ static void its_vpe_send_inv(struct irq_data *d) void __iomem *rdbase; /* Target the redistributor this VPE is currently known on */ + raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR); wait_for_syncr(rdbase); + raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); } else { its_vpe_send_cmd(vpe, its_send_inv); } @@ -3675,12 +3837,18 @@ static int its_vpe_set_irqchip_state(struct irq_data *d, return 0; } +static int its_vpe_retrigger(struct irq_data *d) +{ + return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + static struct irq_chip its_vpe_irq_chip = { .name = "GICv4-vpe", .irq_mask = its_vpe_mask_irq, .irq_unmask = its_vpe_unmask_irq, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = its_vpe_set_affinity, + .irq_retrigger = its_vpe_retrigger, .irq_set_irqchip_state = its_vpe_set_irqchip_state, .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, }; @@ -3782,8 +3950,12 @@ static void its_vpe_4_1_invall(struct its_vpe *vpe) val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); /* Target the redistributor this vPE is currently known on */ + raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; gic_write_lpir(val, rdbase + GICR_INVALLR); + + wait_for_syncr(rdbase); + raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); } static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) @@ -3818,6 +3990,221 @@ static struct irq_chip its_vpe_4_1_irq_chip = { .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, }; +static void its_configure_sgi(struct irq_data *d, bool clear) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_desc desc; + + desc.its_vsgi_cmd.vpe = vpe; + desc.its_vsgi_cmd.sgi = d->hwirq; + desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; + desc.its_vsgi_cmd.enable = 
vpe->sgi_config[d->hwirq].enabled; + desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group; + desc.its_vsgi_cmd.clear = clear; + + /* + * GICv4.1 allows us to send VSGI commands to any ITS as long as the + * destination VPE is mapped there. Since we map them eagerly at + * activation time, we're pretty sure the first GICv4.1 ITS will do. + */ + its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc); +} + +static void its_sgi_mask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); +} + +static void its_sgi_unmask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = true; + its_configure_sgi(d, false); +} + +static int its_sgi_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + /* + * There is no notion of affinity for virtual SGIs, at least + * not on the host (since they can only be targetting a vPE). + * Tell the kernel we've done whatever it asked for. + */ + return IRQ_SET_MASK_OK; +} + +static int its_sgi_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (state) { + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its = find_4_1_its(); + u64 val; + + val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id); + val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq); + writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K); + } else { + its_configure_sgi(d, true); + } + + return 0; +} + +static int its_sgi_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + void __iomem *base; + unsigned long flags; + u32 count = 1000000; /* 1s! */ + u32 status; + int cpu; + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + /* + * Locking galore! We can race against two different events: + * + * - Concurent vPE affinity change: we must make sure it cannot + * happen, or we'll talk to the wrong redistributor. This is + * identical to what happens with vLPIs. + * + * - Concurrent VSGIPENDR access: As it involves accessing two + * MMIO registers, this must be made atomic one way or another. 
+ */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K; + writel_relaxed(vpe->vpe_id, base + GICR_VSGIR); + do { + status = readl_relaxed(base + GICR_VSGIPENDR); + if (!(status & GICR_VSGIPENDR_BUSY)) + goto out; + + count--; + if (!count) { + pr_err_ratelimited("Unable to get SGI status\n"); + goto out; + } + cpu_relax(); + udelay(1); + } while (count); + +out: + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + vpe_to_cpuid_unlock(vpe, flags); + + if (!count) + return -ENXIO; + + *val = !!(status & (1 << d->hwirq)); + + return 0; +} + +static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case PROP_UPDATE_VSGI: + vpe->sgi_config[d->hwirq].priority = info->priority; + vpe->sgi_config[d->hwirq].group = info->group; + its_configure_sgi(d, false); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_sgi_irq_chip = { + .name = "GICv4.1-sgi", + .irq_mask = its_sgi_mask_irq, + .irq_unmask = its_sgi_unmask_irq, + .irq_set_affinity = its_sgi_set_affinity, + .irq_set_irqchip_state = its_sgi_set_irqchip_state, + .irq_get_irqchip_state = its_sgi_get_irqchip_state, + .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity, +}; + +static int its_sgi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + struct its_vpe *vpe = args; + int i; + + /* Yes, we do want 16 SGIs */ + WARN_ON(nr_irqs != 16); + + for (i = 0; i < 16; i++) { + vpe->sgi_config[i].priority = 0; + vpe->sgi_config[i].enabled = false; + vpe->sgi_config[i].group = false; + + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + &its_sgi_irq_chip, vpe); + irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY); + } + + return 0; +} + +static void its_sgi_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + /* Nothing to do */ +} + +static int its_sgi_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + /* Write out the initial SGI configuration */ + its_configure_sgi(d, false); + return 0; +} + +static void its_sgi_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + /* + * The VSGI command is awkward: + * + * - To change the configuration, CLEAR must be set to false, + * leaving the pending bit unchanged. + * - To clear the pending bit, CLEAR must be set to true, leaving + * the configuration unchanged. + * + * You just can't do both at once, hence the two commands below. 
+ */ + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); + its_configure_sgi(d, true); +} + +static const struct irq_domain_ops its_sgi_domain_ops = { + .alloc = its_sgi_irq_domain_alloc, + .free = its_sgi_irq_domain_free, + .activate = its_sgi_irq_domain_activate, + .deactivate = its_sgi_irq_domain_deactivate, +}; + static int its_vpe_id_alloc(void) { return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); @@ -3851,6 +4238,7 @@ static int its_vpe_init(struct its_vpe *vpe) return -ENOMEM; } + raw_spin_lock_init(&vpe->vpe_lock); vpe->vpe_id = vpe_id; vpe->vpt_page = vpt_page; if (gic_rdists->has_rvpeid) @@ -3960,8 +4348,12 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain, struct its_vpe *vpe = irq_data_get_irq_chip_data(d); struct its_node *its; - /* If we use the list map, we issue VMAPP on demand... */ - if (its_list_map) + /* + * If we use the list map, we issue VMAPP on demand... Unless + * we're on a GICv4.1 and we eagerly map the VPE on all ITSs + * so that VSGIs can work. + */ + if (!gic_requires_eager_mapping()) return 0; /* Map the VPE to the first possible CPU */ @@ -3987,10 +4379,10 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, struct its_node *its; /* - * If we use the list map, we unmap the VPE once no VLPIs are - * associated with the VM. + * If we use the list map on GICv4.0, we unmap the VPE once no + * VLPIs are associated with the VM. */ - if (its_list_map) + if (!gic_requires_eager_mapping()) return; list_for_each_entry(its, &its_nodes, entry) { @@ -4404,7 +4796,7 @@ static int __init its_probe_one(struct resource *res, struct page *page; int err; - its_base = ioremap(res->start, resource_size(res)); + its_base = ioremap(res->start, SZ_64K); if (!its_base) { pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); return -ENOMEM; @@ -4455,6 +4847,13 @@ static int __init its_probe_one(struct resource *res, if (is_v4_1(its)) { u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer); + + its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K); + if (!its->sgir_base) { + err = -ENOMEM; + goto out_free_its; + } + its->mpidr = readl_relaxed(its_base + GITS_MPIDR); pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", @@ -4468,7 +4867,7 @@ static int __init its_probe_one(struct resource *res, get_order(ITS_CMD_QUEUE_SZ)); if (!page) { err = -ENOMEM; - goto out_free_its; + goto out_unmap_sgir; } its->cmd_base = (void *)page_address(page); its->cmd_write = its->cmd_base; @@ -4535,6 +4934,9 @@ out_free_tables: its_free_tables(its); out_free_cmd: free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); +out_unmap_sgir: + if (its->sgir_base) + iounmap(its->sgir_base); out_free_its: kfree(its); out_unmap: @@ -4818,6 +5220,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, struct device_node *of_node; struct its_node *its; bool has_v4 = false; + bool has_v4_1 = false; int err; gic_rdists = rdists; @@ -4838,12 +5241,25 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, if (err) return err; - list_for_each_entry(its, &its_nodes, entry) + list_for_each_entry(its, &its_nodes, entry) { has_v4 |= is_v4(its); + has_v4_1 |= is_v4_1(its); + } + + /* Don't bother with inconsistent systems */ + if (WARN_ON(!has_v4_1 && rdists->has_rvpeid)) + rdists->has_rvpeid = false; if (has_v4 & rdists->has_vlpis) { + const struct irq_domain_ops *sgi_ops; + + if (has_v4_1) + sgi_ops = &its_sgi_domain_ops; + else + sgi_ops = NULL; + if (its_init_vpe_domain() || - 
its_init_v4(parent_domain, &its_vpe_domain_ops)) { + its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) { rdists->has_vlpis = false; pr_err("ITS: Disabling GICv4 support\n"); } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 1eec9d4649d5..9dbc81b6f62e 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -724,6 +724,7 @@ static void __init gic_dist_init(void) unsigned int i; u64 affinity; void __iomem *base = gic_data.dist_base; + u32 val; /* Disable the distributor */ writel_relaxed(0, base + GICD_CTLR); @@ -756,9 +757,14 @@ static void __init gic_dist_init(void) /* Now do the common stuff, and wait for the distributor to drain */ gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp); + val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1; + if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) { + pr_info("Enabling SGIs without active state\n"); + val |= GICD_CTLR_nASSGIreq; + } + /* Enable distributor with ARE, Group1 */ - writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, - base + GICD_CTLR); + writel_relaxed(val, base + GICD_CTLR); /* * Set all global interrupts to the boot CPU only. ARE must be @@ -829,6 +835,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) typer = gic_read_typer(ptr + GICR_TYPER); if ((typer >> 32) == aff) { u64 offset = ptr - region->redist_base; + raw_spin_lock_init(&gic_data_rdist()->rd_lock); gic_data_rdist_rd_base() = ptr; gic_data_rdist()->phys_base = region->phys_base + offset; @@ -1609,7 +1616,6 @@ static int __init gic_init_bases(void __iomem *dist_base, gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, &gic_data); - irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); gic_data.rdists.has_rvpeid = true; gic_data.rdists.has_vlpis = true; @@ -1620,6 +1626,8 @@ static int __init gic_init_bases(void __iomem *dist_base, goto out_free; } + irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); + gic_data.has_rss = !!(typer & GICD_TYPER_RSS); pr_info("Distributor has %sRange Selector support\n", gic_data.has_rss ? 
"" : "no "); @@ -1785,6 +1793,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node) gic_v3_kvm_info.vcpu = r; gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; gic_set_kvm_info(&gic_v3_kvm_info); } @@ -2100,6 +2109,7 @@ static void __init gic_acpi_setup_kvm_info(void) } gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; gic_set_kvm_info(&gic_v3_kvm_info); } diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index 45969927cc81..0c18714ae13e 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -85,6 +85,53 @@ static struct irq_domain *gic_domain; static const struct irq_domain_ops *vpe_domain_ops; +static const struct irq_domain_ops *sgi_domain_ops; + +static bool has_v4_1(void) +{ + return !!sgi_domain_ops; +} + +static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx) +{ + char *name; + int sgi_base; + + if (!has_v4_1()) + return 0; + + name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current)); + if (!name) + goto err; + + vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx); + if (!vpe->fwnode) + goto err; + + kfree(name); + name = NULL; + + vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16, + sgi_domain_ops, vpe); + if (!vpe->sgi_domain) + goto err; + + sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16, + NUMA_NO_NODE, vpe, + false, NULL); + if (sgi_base <= 0) + goto err; + + return 0; + +err: + if (vpe->sgi_domain) + irq_domain_remove(vpe->sgi_domain); + if (vpe->fwnode) + irq_domain_free_fwnode(vpe->fwnode); + kfree(name); + return -ENOMEM; +} int its_alloc_vcpu_irqs(struct its_vm *vm) { @@ -112,8 +159,13 @@ int its_alloc_vcpu_irqs(struct its_vm *vm) if (vpe_base_irq <= 0) goto err; - for (i = 0; i < vm->nr_vpes; i++) + for (i = 0; i < vm->nr_vpes; i++) { + int ret; vm->vpes[i]->irq = vpe_base_irq + i; + ret = its_alloc_vcpu_sgis(vm->vpes[i], i); + if (ret) + goto err; + } return 0; @@ -126,8 +178,28 @@ err: return -ENOMEM; } +static void its_free_sgi_irqs(struct its_vm *vm) +{ + int i; + + if (!has_v4_1()) + return; + + for (i = 0; i < vm->nr_vpes; i++) { + unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0); + + if (WARN_ON(!irq)) + continue; + + irq_domain_free_irqs(irq, 16); + irq_domain_remove(vm->vpes[i]->sgi_domain); + irq_domain_free_fwnode(vm->vpes[i]->fwnode); + } +} + void its_free_vcpu_irqs(struct its_vm *vm) { + its_free_sgi_irqs(vm); irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes); irq_domain_remove(vm->domain); irq_domain_free_fwnode(vm->fwnode); @@ -138,18 +210,50 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info) return irq_set_vcpu_affinity(vpe->irq, info); } -int its_schedule_vpe(struct its_vpe *vpe, bool on) +int its_make_vpe_non_resident(struct its_vpe *vpe, bool db) +{ + struct irq_desc *desc = irq_to_desc(vpe->irq); + struct its_cmd_info info = { }; + int ret; + + WARN_ON(preemptible()); + + info.cmd_type = DESCHEDULE_VPE; + if (has_v4_1()) { + /* GICv4.1 can directly deal with doorbells */ + info.req_db = db; + } else { + /* Undo the nested disable_irq() calls... 
*/ + while (db && irqd_irq_disabled(&desc->irq_data)) + enable_irq(vpe->irq); + } + + ret = its_send_vpe_cmd(vpe, &info); + if (!ret) + vpe->resident = false; + + return ret; +} + +int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en) { - struct its_cmd_info info; + struct its_cmd_info info = { }; int ret; WARN_ON(preemptible()); - info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE; + info.cmd_type = SCHEDULE_VPE; + if (has_v4_1()) { + info.g0en = g0en; + info.g1en = g1en; + } else { + /* Disabled the doorbell, as we're about to enter the guest */ + disable_irq_nosync(vpe->irq); + } ret = its_send_vpe_cmd(vpe, &info); if (!ret) - vpe->resident = on; + vpe->resident = true; return ret; } @@ -216,12 +320,28 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv) return irq_set_vcpu_affinity(irq, &info); } -int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops) +int its_prop_update_vsgi(int irq, u8 priority, bool group) +{ + struct its_cmd_info info = { + .cmd_type = PROP_UPDATE_VSGI, + { + .priority = priority, + .group = group, + }, + }; + + return irq_set_vcpu_affinity(irq, &info); +} + +int its_init_v4(struct irq_domain *domain, + const struct irq_domain_ops *vpe_ops, + const struct irq_domain_ops *sgi_ops) { if (domain) { pr_info("ITS: Enabling GICv4 support\n"); gic_domain = domain; - vpe_domain_ops = ops; + vpe_domain_ops = vpe_ops; + sgi_domain_ops = sgi_ops; return 0; } diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index d000870d9b6b..b6f6aa7b2862 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c @@ -268,15 +268,6 @@ static void init_8259A(int auto_eoi) raw_spin_unlock_irqrestore(&i8259A_lock, flags); } -/* - * IRQ2 is cascade interrupt to second interrupt controller - */ -static struct irqaction irq2 = { - .handler = no_action, - .name = "cascade", - .flags = IRQF_NO_THREAD, -}; - static struct resource pic1_io_resource = { .name = "pic1", .start = PIC_MASTER_CMD, @@ -311,6 +302,10 @@ static const struct irq_domain_ops i8259A_ops = { */ struct irq_domain * __init __init_i8259_irqs(struct device_node *node) { + /* + * PIC_CASCADE_IR is cascade interrupt to second interrupt controller + */ + int irq = I8259A_IRQ_BASE + PIC_CASCADE_IR; struct irq_domain *domain; insert_resource(&ioport_resource, &pic1_io_resource); @@ -323,7 +318,8 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node) if (!domain) panic("Failed to add i8259 IRQ domain"); - setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); + if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL)) + pr_err("Failed to register cascade interrupt\n"); register_syscore_ops(&i8259_syscore_ops); return domain; } diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c index 6d05cefe9d79..7a7222d4c19c 100644 --- a/drivers/irqchip/irq-ingenic-tcu.c +++ b/drivers/irqchip/irq-ingenic-tcu.c @@ -180,3 +180,4 @@ err_free_tcu: IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init); +IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init); diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index c5589ee0dfb3..9f3da4260ca6 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -58,11 +58,6 @@ static irqreturn_t intc_cascade(int irq, void *data) return IRQ_HANDLED; } 
-static struct irqaction intc_cascade_action = { - .handler = intc_cascade, - .name = "SoC intc cascade interrupt", -}; - static int __init ingenic_intc_of_init(struct device_node *node, unsigned num_chips) { @@ -130,7 +125,9 @@ static int __init ingenic_intc_of_init(struct device_node *node, irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK); } - setup_irq(parent_irq, &intc_cascade_action); + if (request_irq(parent_irq, intc_cascade, 0, + "SoC intc cascade interrupt", NULL)) + pr_err("Failed to register SoC intc cascade interrupt\n"); return 0; out_domain_remove: diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 6e5e3172796b..3819185bfd02 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -461,7 +461,7 @@ static int intc_irqpin_probe(struct platform_device *pdev) } i->iomem = devm_ioremap(dev, io[k]->start, - resource_size(io[k])); + resource_size(io[k])); if (!i->iomem) { dev_err(dev, "failed to remap IOMEM\n"); ret = -ENXIO; diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index aa4af886e43a..c34fb3ae0ff8 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -4,6 +4,7 @@ * Copyright (C) 2018 Christoph Hellwig */ #define pr_fmt(fmt) "plic: " fmt +#include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> @@ -55,7 +56,14 @@ #define CONTEXT_THRESHOLD 0x00 #define CONTEXT_CLAIM 0x04 -static void __iomem *plic_regs; +#define PLIC_DISABLE_THRESHOLD 0xf +#define PLIC_ENABLE_THRESHOLD 0 + +struct plic_priv { + struct cpumask lmask; + struct irq_domain *irqdomain; + void __iomem *regs; +}; struct plic_handler { bool present; @@ -66,6 +74,7 @@ struct plic_handler { */ raw_spinlock_t enable_lock; void __iomem *enable_base; + struct plic_priv *priv; }; static DEFINE_PER_CPU(struct plic_handler, plic_handlers); @@ -84,31 +93,40 @@ static inline void plic_toggle(struct plic_handler *handler, } static inline void plic_irq_toggle(const struct cpumask *mask, - int hwirq, int enable) + struct irq_data *d, int enable) { int cpu; + struct plic_priv *priv = irq_get_chip_data(d->irq); - writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID); + writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); for_each_cpu(cpu, mask) { struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); - if (handler->present) - plic_toggle(handler, hwirq, enable); + if (handler->present && + cpumask_test_cpu(cpu, &handler->priv->lmask)) + plic_toggle(handler, d->hwirq, enable); } } static void plic_irq_unmask(struct irq_data *d) { - unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d), - cpu_online_mask); + struct cpumask amask; + unsigned int cpu; + struct plic_priv *priv = irq_get_chip_data(d->irq); + + cpumask_and(&amask, &priv->lmask, cpu_online_mask); + cpu = cpumask_any_and(irq_data_get_affinity_mask(d), + &amask); if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) return; - plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1); + plic_irq_toggle(cpumask_of(cpu), d, 1); } static void plic_irq_mask(struct irq_data *d) { - plic_irq_toggle(cpu_possible_mask, d->hwirq, 0); + struct plic_priv *priv = irq_get_chip_data(d->irq); + + plic_irq_toggle(&priv->lmask, d, 0); } #ifdef CONFIG_SMP @@ -116,17 +134,21 @@ static int plic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { unsigned int cpu; + struct cpumask amask; + struct plic_priv *priv = 
@@ -116,17 +134,21 @@ static int plic_set_affinity(struct irq_data *d,
 			     const struct cpumask *mask_val, bool force)
 {
 	unsigned int cpu;
+	struct cpumask amask;
+	struct plic_priv *priv = irq_get_chip_data(d->irq);
+
+	cpumask_and(&amask, &priv->lmask, mask_val);
 
 	if (force)
-		cpu = cpumask_first(mask_val);
+		cpu = cpumask_first(&amask);
 	else
-		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+		cpu = cpumask_any_and(&amask, cpu_online_mask);
 
 	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
-	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
-	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+	plic_irq_toggle(&priv->lmask, d, 0);
+	plic_irq_toggle(cpumask_of(cpu), d, 1);
 
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -187,8 +209,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
 	.free		= irq_domain_free_irqs_top,
 };
 
-static struct irq_domain *plic_irqdomain;
-
 /*
  * Handling an interrupt is a two-step process: first you claim the interrupt
  * by reading the claim register, then you complete the interrupt by writing
@@ -205,7 +225,7 @@ static void plic_handle_irq(struct pt_regs *regs)
 	csr_clear(CSR_IE, IE_EIE);
 	while ((hwirq = readl(claim))) {
-		int irq = irq_find_mapping(plic_irqdomain, hwirq);
+		int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);
 
 		if (unlikely(irq <= 0))
 			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
@@ -230,20 +250,48 @@ static int plic_find_hart_id(struct device_node *node)
 	return -1;
 }
 
+static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
+{
+	/* priority must be > threshold to trigger an interrupt */
+	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
+}
+
+static int plic_dying_cpu(unsigned int cpu)
+{
+	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+	csr_clear(CSR_IE, IE_EIE);
+	plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
+
+	return 0;
+}
+
+static int plic_starting_cpu(unsigned int cpu)
+{
+	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+	csr_set(CSR_IE, IE_EIE);
+	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
+
+	return 0;
+}
+
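The PLIC delivers an interrupt to a context only when its priority exceeds the context's threshold, so the maximum threshold (0xf on this IP) blocks everything and 0 lets everything through: a per-hart mask with no per-source bookkeeping. The bring-up/teardown pair is then wired into the common hotplug state machine; a sketch of that registration, mirroring the cpuhp_setup_state() call made in plic_init() below:

static int __init demo_register_hotplug(void)
{
	/* STARTING-section callbacks run on the affected CPU with
	 * interrupts disabled, which is why plic_starting_cpu() and
	 * plic_dying_cpu() may use this_cpu_ptr() and raw CSR accesses. */
	return cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
				 "irqchip/sifive/plic:starting",
				 plic_starting_cpu, plic_dying_cpu);
}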
 static int __init plic_init(struct device_node *node,
 		struct device_node *parent)
 {
 	int error = 0, nr_contexts, nr_handlers = 0, i;
 	u32 nr_irqs;
+	struct plic_priv *priv;
 
-	if (plic_regs) {
-		pr_warn("PLIC already present.\n");
-		return -ENXIO;
-	}
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
 
-	plic_regs = of_iomap(node, 0);
-	if (WARN_ON(!plic_regs))
-		return -EIO;
+	priv->regs = of_iomap(node, 0);
+	if (WARN_ON(!priv->regs)) {
+		error = -EIO;
+		goto out_free_priv;
+	}
 
 	error = -EINVAL;
 	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
@@ -257,9 +305,9 @@ static int __init plic_init(struct device_node *node,
 		goto out_iounmap;
 
 	error = -ENOMEM;
-	plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
-			&plic_irqdomain_ops, NULL);
-	if (WARN_ON(!plic_irqdomain))
+	priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
+			&plic_irqdomain_ops, priv);
+	if (WARN_ON(!priv->irqdomain))
 		goto out_iounmap;
 
 	for (i = 0; i < nr_contexts; i++) {
@@ -267,7 +315,6 @@ static int __init plic_init(struct device_node *node,
 		struct plic_handler *handler;
 		irq_hw_number_t hwirq;
 		int cpu, hartid;
-		u32 threshold = 0;
 
 		if (of_irq_parse_one(node, i, &parent)) {
 			pr_err("failed to parse parent for context %d.\n", i);
@@ -301,32 +348,36 @@ static int __init plic_init(struct device_node *node,
 		handler = per_cpu_ptr(&plic_handlers, cpu);
 		if (handler->present) {
 			pr_warn("handler already present for context %d.\n", i);
-			threshold = 0xffffffff;
+			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
 			goto done;
 		}
 
+		cpumask_set_cpu(cpu, &priv->lmask);
 		handler->present = true;
 		handler->hart_base =
-			plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+			priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
 		raw_spin_lock_init(&handler->enable_lock);
 		handler->enable_base =
-			plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
-
+			priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
+		handler->priv = priv;
 done:
-		/* priority must be > threshold to trigger an interrupt */
-		writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
 		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
 			plic_toggle(handler, hwirq, 0);
 		nr_handlers++;
 	}
 
+	cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+			  "irqchip/sifive/plic:starting",
+			  plic_starting_cpu, plic_dying_cpu);
 	pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
 		nr_irqs, nr_handlers, nr_contexts);
 	set_handle_irq(plic_handle_irq);
 	return 0;
 
 out_iounmap:
-	iounmap(plic_regs);
+	iounmap(priv->regs);
+out_free_priv:
+	kfree(priv);
 	return error;
 }
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index e00f2fa27f00..faa8482c8246 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -604,12 +604,24 @@ static void stm32_exti_h_syscore_deinit(void)
 	unregister_syscore_ops(&stm32_exti_h_syscore_ops);
 }
 
+static int stm32_exti_h_retrigger(struct irq_data *d)
+{
+	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+	void __iomem *base = chip_data->host_data->base;
+	u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+	writel_relaxed(mask, base + stm32_bank->swier_ofst);
+
+	return 0;
+}
+
 static struct irq_chip stm32_exti_h_chip = {
 	.name			= "stm32-exti-h",
 	.irq_eoi		= stm32_exti_h_eoi,
 	.irq_mask		= stm32_exti_h_mask,
 	.irq_unmask		= stm32_exti_h_unmask,
-	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_retrigger		= stm32_exti_h_retrigger,
 	.irq_set_type		= stm32_exti_h_set_type,
 	.irq_set_wake		= stm32_exti_h_set_wake,
 	.flags			= IRQCHIP_MASK_ON_SUSPEND,
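Writing the bank's software interrupt event register (SWIER) makes the EXTI hardware latch the pending bit again, so a lost edge can be replayed without going up the domain hierarchy. For context, the irq core's contract for this callback (visible in the reworked kernel/irq/resend.c later in this diff) is that a non-zero return means the hardware handled the retrigger and no software resend is needed. A condensed sketch of the pattern with hypothetical names:

/* Hypothetical controller with a "software set-pending" register. */
struct demo_chip {
	void __iomem *base;
};
#define DEMO_SWIER	0x10	/* assumed offset */

static int demo_retrigger(struct irq_data *d)
{
	struct demo_chip *chip = irq_data_get_irq_chip_data(d);

	writel_relaxed(BIT(d->hwirq % 32), chip->base + DEMO_SWIER);
	return 1;	/* hardware retriggered; skip software resend */
}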
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 928858dada75..f1386733d3bc 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -6,6 +6,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/irqchip/versatile-fpga.h>
 #include <linux/irqdomain.h>
 #include <linux/module.h>
@@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d)
 
 static void fpga_irq_handle(struct irq_desc *desc)
 {
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
-	u32 status = readl(f->base + IRQ_STATUS);
+	u32 status;
+
+	chained_irq_enter(chip, desc);
 
+	status = readl(f->base + IRQ_STATUS);
 	if (status == 0) {
 		do_bad_IRQ(desc);
-		return;
+		goto out;
 	}
 
 	do {
@@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc)
 		status &= ~(1 << irq);
 		generic_handle_irq(irq_find_mapping(f->domain, irq));
 	} while (status);
+
+out:
+	chained_irq_exit(chip, desc);
 }
 
 /*
@@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node,
 	if (of_property_read_u32(node, "valid-mask", &valid_mask))
 		valid_mask = 0;
 
+	writel(clear_mask, base + IRQ_ENABLE_CLEAR);
+	writel(clear_mask, base + FIQ_ENABLE_CLEAR);
+
 	/* Some chips are cascaded from a parent IRQ */
 	parent_irq = irq_of_parse_and_map(node, 0);
 	if (!parent_irq) {
@@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node,
 
 	fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
 
-	writel(clear_mask, base + IRQ_ENABLE_CLEAR);
-	writel(clear_mask, base + FIQ_ENABLE_CLEAR);
-
 	/*
 	 * On Versatile AB/PB, some secondary interrupts have a direct
 	 * pass-thru to the primary controller for IRQs 20 and 22-31 which need
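chained_irq_enter()/chained_irq_exit() bracket a cascaded demux handler so the parent chip sees the proper ack/mask/eoi sequence even when the loop bails out early; forgetting the exit on the error branch is exactly the bug the fpga change above avoids with its goto out. The canonical skeleton, with illustrative demo_* names:

/* Canonical chained (demux) flow handler skeleton. */
struct demo_data {
	void __iomem *base;
	struct irq_domain *domain;
};
#define DEMO_STATUS	0x00	/* assumed status register offset */

static void demo_cascade_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct demo_data *data = irq_desc_get_handler_data(desc);
	u32 status;

	chained_irq_enter(chip, desc);		/* ack/mask the parent */

	status = readl(data->base + DEMO_STATUS);
	while (status) {
		int bit = __ffs(status);

		status &= ~BIT(bit);
		generic_handle_irq(irq_find_mapping(data->domain, bit));
	}

	chained_irq_exit(chip, desc);		/* unmask/eoi the parent */
}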
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index f3f20a3cff50..3c87d925f74c 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -509,9 +509,7 @@ static int __init vic_of_init(struct device_node *node,
 	void __iomem *regs;
 	u32 interrupt_mask = ~0;
 	u32 wakeup_mask = ~0;
-
-	if (WARN(parent, "non-root VICs are not supported"))
-		return -EINVAL;
+	int parent_irq;
 
 	regs = of_iomap(node, 0);
 	if (WARN_ON(!regs))
@@ -519,11 +517,14 @@ static int __init vic_of_init(struct device_node *node,
 
 	of_property_read_u32(node, "valid-mask", &interrupt_mask);
 	of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);
+	parent_irq = of_irq_get(node, 0);
+	if (parent_irq < 0)
+		parent_irq = 0;
 
 	/*
 	 * Passing 0 as first IRQ makes the simple domain allocate descriptors
 	 */
-	__vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node);
+	__vic_init(regs, parent_irq, 0, interrupt_mask, wakeup_mask, node);
 
 	return 0;
 }
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index e3043ded8973..7f811fe5bf69 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -38,29 +38,31 @@ struct xintc_irq_chip {
 	void __iomem *base;
 	struct irq_domain *root_domain;
 	u32 intr_mask;
+	u32 nr_irq;
 };
 
-static struct xintc_irq_chip *xintc_irqc;
+static struct xintc_irq_chip *primary_intc;
 
-static void xintc_write(int reg, u32 data)
+static void xintc_write(struct xintc_irq_chip *irqc, int reg, u32 data)
 {
 	if (static_branch_unlikely(&xintc_is_be))
-		iowrite32be(data, xintc_irqc->base + reg);
+		iowrite32be(data, irqc->base + reg);
 	else
-		iowrite32(data, xintc_irqc->base + reg);
+		iowrite32(data, irqc->base + reg);
 }
 
-static unsigned int xintc_read(int reg)
+static u32 xintc_read(struct xintc_irq_chip *irqc, int reg)
 {
 	if (static_branch_unlikely(&xintc_is_be))
-		return ioread32be(xintc_irqc->base + reg);
+		return ioread32be(irqc->base + reg);
 	else
-		return ioread32(xintc_irqc->base + reg);
+		return ioread32(irqc->base + reg);
 }
 
 static void intc_enable_or_unmask(struct irq_data *d)
 {
-	unsigned long mask = 1 << d->hwirq;
+	struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+	unsigned long mask = BIT(d->hwirq);
 
 	pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
 
@@ -69,30 +71,35 @@ static void intc_enable_or_unmask(struct irq_data *d)
 	 * acks the irq before calling the interrupt handler
 	 */
 	if (irqd_is_level_type(d))
-		xintc_write(IAR, mask);
+		xintc_write(irqc, IAR, mask);
 
-	xintc_write(SIE, mask);
+	xintc_write(irqc, SIE, mask);
 }
 
 static void intc_disable_or_mask(struct irq_data *d)
 {
+	struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
 	pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
 
-	xintc_write(CIE, 1 << d->hwirq);
+	xintc_write(irqc, CIE, BIT(d->hwirq));
 }
 
 static void intc_ack(struct irq_data *d)
 {
+	struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
 	pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
 
-	xintc_write(IAR, 1 << d->hwirq);
+	xintc_write(irqc, IAR, BIT(d->hwirq));
 }
 
 static void intc_mask_ack(struct irq_data *d)
 {
-	unsigned long mask = 1 << d->hwirq;
+	struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+	unsigned long mask = BIT(d->hwirq);
 
 	pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
 
-	xintc_write(CIE, mask);
-	xintc_write(IAR, mask);
+	xintc_write(irqc, CIE, mask);
+	xintc_write(irqc, IAR, mask);
 }
 
 static struct irq_chip intc_dev = {
@@ -103,13 +110,14 @@ static struct irq_chip intc_dev = {
 	.irq_mask_ack = intc_mask_ack,
 };
 
-unsigned int xintc_get_irq(void)
+static unsigned int xintc_get_irq_local(struct xintc_irq_chip *irqc)
 {
-	unsigned int hwirq, irq = -1;
+	unsigned int irq = 0;
+	u32 hwirq;
 
-	hwirq = xintc_read(IVR);
+	hwirq = xintc_read(irqc, IVR);
 	if (hwirq != -1U)
-		irq = irq_find_mapping(xintc_irqc->root_domain, hwirq);
+		irq = irq_find_mapping(irqc->root_domain, hwirq);
 
 	pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
 
@@ -118,15 +126,18 @@ unsigned int xintc_get_irq(void)
 static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
 {
-	if (xintc_irqc->intr_mask & (1 << hw)) {
+	struct xintc_irq_chip *irqc = d->host_data;
+
+	if (irqc->intr_mask & BIT(hw)) {
 		irq_set_chip_and_handler_name(irq, &intc_dev,
-						handle_edge_irq, "edge");
+					      handle_edge_irq, "edge");
 		irq_clear_status_flags(irq, IRQ_LEVEL);
 	} else {
 		irq_set_chip_and_handler_name(irq, &intc_dev,
-						handle_level_irq, "level");
+					      handle_level_irq, "level");
 		irq_set_status_flags(irq, IRQ_LEVEL);
 	}
+	irq_set_chip_data(irq, irqc);
 	return 0;
 }
 
@@ -138,43 +149,55 @@ static const struct irq_domain_ops xintc_irq_domain_ops = {
 static void xil_intc_irq_handler(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct xintc_irq_chip *irqc;
 	u32 pending;
 
+	irqc = irq_data_get_irq_handler_data(&desc->irq_data);
 	chained_irq_enter(chip, desc);
 	do {
-		pending = xintc_get_irq();
-		if (pending == -1U)
+		pending = xintc_get_irq_local(irqc);
+		if (pending == 0)
 			break;
 		generic_handle_irq(pending);
 	} while (true);
 	chained_irq_exit(chip, desc);
 }
 
+static void xil_intc_handle_irq(struct pt_regs *regs)
+{
+	u32 hwirq;
+	struct xintc_irq_chip *irqc = primary_intc;
+
+	do {
+		hwirq = xintc_read(irqc, IVR);
+		if (likely(hwirq != -1U)) {
+			int ret;
+
+			ret = handle_domain_irq(irqc->root_domain, hwirq, regs);
+			WARN_ONCE(ret, "Unhandled HWIRQ %d\n", hwirq);
+			continue;
+		}
+
+		break;
+	} while (1);
+}
+
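The reworked xilinx driver now supports both ways a controller can be wired up: as a cascaded child, demuxed in the chained handler xil_intc_irq_handler() (the parent-side registration sits in an elided hunk), or as the system's primary controller, claiming the architecture-level entry point with set_handle_irq(). A sketch of the root-handler shape; demo_domain and demo_read_pending() are assumed helpers, only set_handle_irq() and handle_domain_irq() are the real core APIs:

static struct irq_domain *demo_domain;	/* set up at probe time */

static void demo_handle_irq(struct pt_regs *regs)
{
	u32 hwirq = demo_read_pending();	/* hypothetical helper */

	if (hwirq != -1U)
		handle_domain_irq(demo_domain, hwirq, regs);
}

static int __init demo_init_root(void)
{
	/* Only the one primary controller may claim the arch entry point */
	set_handle_irq(demo_handle_irq);
	return 0;
}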
 static int __init xilinx_intc_of_init(struct device_node *intc,
 					     struct device_node *parent)
 {
-	u32 nr_irq;
-	int ret, irq;
 	struct xintc_irq_chip *irqc;
-
-	if (xintc_irqc) {
-		pr_err("irq-xilinx: Multiple instances aren't supported\n");
-		return -EINVAL;
-	}
+	int ret, irq;
 
 	irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
 	if (!irqc)
 		return -ENOMEM;
-
-	xintc_irqc = irqc;
-
 	irqc->base = of_iomap(intc, 0);
 	BUG_ON(!irqc->base);
 
-	ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
+	ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &irqc->nr_irq);
 	if (ret < 0) {
 		pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
-		goto err_alloc;
+		goto error;
 	}
 
 	ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
@@ -183,34 +206,35 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
 		irqc->intr_mask = 0;
 	}
 
-	if (irqc->intr_mask >> nr_irq)
+	if (irqc->intr_mask >> irqc->nr_irq)
 		pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
 
 	pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
-		intc, nr_irq, irqc->intr_mask);
+		intc, irqc->nr_irq, irqc->intr_mask);
 
 	/*
 	 * Disable all external interrupts until they are
 	 * explicitly requested.
 	 */
-	xintc_write(IER, 0);
+	xintc_write(irqc, IER, 0);
 
 	/* Acknowledge any pending interrupts just in case. */
-	xintc_write(IAR, 0xffffffff);
+	xintc_write(irqc, IAR, 0xffffffff);
 
 	/* Turn on the Master Enable. */
-	xintc_write(MER, MER_HIE | MER_ME);
-	if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
+	xintc_write(irqc, MER, MER_HIE | MER_ME);
+	if (xintc_read(irqc, MER) != (MER_HIE | MER_ME)) {
 		static_branch_enable(&xintc_is_be);
-		xintc_write(MER, MER_HIE | MER_ME);
+		xintc_write(irqc, MER, MER_HIE | MER_ME);
 	}
 
-	irqc->root_domain = irq_domain_add_linear(intc, nr_irq,
+	irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq,
 						  &xintc_irq_domain_ops, irqc);
 	if (!irqc->root_domain) {
 		pr_err("irq-xilinx: Unable to create IRQ domain\n");
-		goto err_alloc;
+		ret = -EINVAL;
+		goto error;
 	}
 
 	if (parent) {
@@ -222,16 +246,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
 		} else {
 			pr_err("irq-xilinx: interrupts property not in DT\n");
 			ret = -EINVAL;
-			goto err_alloc;
+			goto error;
 		}
 	} else {
-		irq_set_default_host(irqc->root_domain);
+		primary_intc = irqc;
+		set_handle_irq(xil_intc_handle_irq);
 	}
 
 	return 0;
 
-err_alloc:
-	xintc_irqc = NULL;
+error:
+	iounmap(irqc->base);
 	kfree(irqc);
 
 	return ret;
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index abfe59284ff2..aa54bfcb0433 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -33,7 +33,7 @@ struct combiner {
 	int			parent_irq;
 	u32			nirqs;
 	u32			nregs;
-	struct combiner_reg	regs[0];
+	struct combiner_reg	regs[];
 };
 
 static inline int irq_nr(u32 reg, u32 bit)
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 6e3c04b46fb1..7876dc4b28f8 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -34,6 +34,7 @@ config PCIEAER
 config PCIEAER_INJECT
 	tristate "PCI Express error injection support"
 	depends on PCIEAER
+	select GENERIC_IRQ_INJECTION
 	help
 	  This enables PCI Express Root Port Advanced Error Reporting
 	  (AER) software error injector.
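The qcom-irq-combiner hunk above is part of the treewide move from the pre-C99 regs[0] idiom to C99 flexible array members, which lets the compiler and fortify checks see the real array bounds. Allocation is unchanged in size; a hedged sketch with an illustrative type:

struct combiner_demo {
	u32 nregs;
	struct combiner_reg regs[];	/* C99 flexible array member */
};

static struct combiner_demo *demo_alloc(u32 nregs)
{
	struct combiner_demo *c;

	/* struct_size() (linux/overflow.h) computes
	 * sizeof(*c) + nregs * sizeof(c->regs[0]) with overflow checking. */
	c = kzalloc(struct_size(c, regs, nregs), GFP_KERNEL);
	if (c)
		c->nregs = nregs;
	return c;
}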
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 6988fe7389b9..21cc3d3387f7 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -16,7 +16,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/miscdevice.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
@@ -468,9 +468,7 @@ static int aer_inject(struct aer_error_inj *einj)
 		}
 		pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n",
 			 einj->cor_status, einj->uncor_status, pci_name(dev));
-		local_irq_disable();
-		generic_handle_irq(edev->irq);
-		local_irq_enable();
+		ret = irq_inject_interrupt(edev->irq);
 	} else {
 		pci_err(rpdev, "AER device not found\n");
 		ret = -ENODEV;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 2d5e0435af0a..af3b24f26ff2 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -92,6 +92,7 @@ struct stm32_gpio_bank {
 	u32 bank_nr;
 	u32 bank_ioport_nr;
 	u32 pin_backup[STM32_GPIO_PINS_PER_BANK];
+	u8 irq_type[STM32_GPIO_PINS_PER_BANK];
 };
 
 struct stm32_pinctrl {
@@ -303,6 +304,50 @@ static const struct gpio_chip stm32_gpio_template = {
 	.get_direction		= stm32_gpio_get_direction,
 };
 
+static void stm32_gpio_irq_trigger(struct irq_data *d)
+{
+	struct stm32_gpio_bank *bank = d->domain->host_data;
+	int level;
+
+	/* If level interrupt type then retrig */
+	level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
+	if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
+	    (level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
+		irq_chip_retrigger_hierarchy(d);
+}
+
+static void stm32_gpio_irq_eoi(struct irq_data *d)
+{
+	irq_chip_eoi_parent(d);
+	stm32_gpio_irq_trigger(d);
+};
+
+static int stm32_gpio_set_type(struct irq_data *d, unsigned int type)
+{
+	struct stm32_gpio_bank *bank = d->domain->host_data;
+	u32 parent_type;
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+	case IRQ_TYPE_EDGE_FALLING:
+	case IRQ_TYPE_EDGE_BOTH:
+		parent_type = type;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		parent_type = IRQ_TYPE_EDGE_RISING;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		parent_type = IRQ_TYPE_EDGE_FALLING;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	bank->irq_type[d->hwirq] = type;
+
+	return irq_chip_set_type_parent(d, parent_type);
+};
+
 static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
 {
 	struct stm32_gpio_bank *bank = irq_data->domain->host_data;
@@ -330,13 +375,19 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
 	gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
 }
 
+static void stm32_gpio_irq_unmask(struct irq_data *d)
+{
+	irq_chip_unmask_parent(d);
+	stm32_gpio_irq_trigger(d);
+}
+
 static struct irq_chip stm32_gpio_irq_chip = {
 	.name = "stm32gpio",
-	.irq_eoi = irq_chip_eoi_parent,
+	.irq_eoi = stm32_gpio_irq_eoi,
 	.irq_ack = irq_chip_ack_parent,
 	.irq_mask = irq_chip_mask_parent,
-	.irq_unmask = irq_chip_unmask_parent,
-	.irq_set_type = irq_chip_set_type_parent,
+	.irq_unmask = stm32_gpio_irq_unmask,
+	.irq_set_type = stm32_gpio_set_type,
 	.irq_set_wake = irq_chip_set_wake_parent,
 	.irq_request_resources = stm32_gpio_irq_request_resources,
 	.irq_release_resources = stm32_gpio_irq_release_resources,
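The stm32 pinctrl change teaches the GPIO bank to offer level-triggered semantics on top of an EXTI parent that only latches edges: the requested level type is remembered, the parent is programmed for the corresponding edge, and on every eoi/unmask the pin is sampled again, with a retrigger if the line is still asserted (otherwise a level that stayed asserted would be lost). Condensed, the emulation logic amounts to the following; demo_* names and the 16-pin bank are illustrative:

struct demo_bank {
	u8 irq_type[16];	/* requested trigger type per pin */
	/* ... gpio chip, registers ... */
};

/* Retrigger while the line is still at its active level, so an
 * edge-only parent behaves like a level-triggered input. */
static void demo_check_level(struct irq_data *d)
{
	struct demo_bank *bank = d->domain->host_data;
	int level = demo_gpio_get(bank, d->hwirq);	/* assumed sampler */

	if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
	    (level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
		irq_chip_retrigger_hierarchy(d);	/* still asserted */
}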
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 9d53f545a3d5..63457908c9c4 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -70,6 +70,7 @@ struct vgic_global {
 	/* Hardware has GICv4? */
 	bool			has_gicv4;
+	bool			has_gicv4_1;
 
 	/* GIC system register CPU interface */
 	struct static_key_false gicv3_cpuif;
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index d37c17e68268..77d70b633531 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -102,6 +102,7 @@ enum cpuhp_state {
 	CPUHP_AP_IRQ_ARMADA_XP_STARTING,
 	CPUHP_AP_IRQ_BCM2836_STARTING,
 	CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+	CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
 	CPUHP_AP_ARM_MVEBU_COHERENCY,
 	CPUHP_AP_MICROCODE_LOADER,
 	CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c5fe60ec6b84..80f637c3a6f3 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -248,6 +248,8 @@ extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
 extern int prepare_percpu_nmi(unsigned int irq);
 extern void teardown_percpu_nmi(unsigned int irq);
 
+extern int irq_inject_interrupt(unsigned int irq);
+
 /* The following three functions are for the core kernel use only. */
 extern void suspend_device_irqs(void);
 extern void resume_device_irqs(void);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3ed5a055b5f4..9315fbb87db3 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -211,6 +211,8 @@ struct irq_data {
  * IRQD_CAN_RESERVE		- Can use reservation mode
  * IRQD_MSI_NOMASK_QUIRK	- Non-maskable MSI quirk for affinity change
  *				  required
+ * IRQD_HANDLE_ENFORCE_IRQCTX	- Enforce that handle_irq_*() is only invoked
+ *				  from actual interrupt context.
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
@@ -234,6 +236,7 @@ enum {
 	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25),
 	IRQD_CAN_RESERVE		= (1 << 26),
 	IRQD_MSI_NOMASK_QUIRK		= (1 << 27),
+	IRQD_HANDLE_ENFORCE_IRQCTX	= (1 << 28),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -303,6 +306,16 @@ static inline bool irqd_is_single_target(struct irq_data *d)
 	return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
 }
 
+static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d)
+{
+	__irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX;
+}
+
+static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX;
+}
+
 static inline bool irqd_is_wakeup_set(struct irq_data *d)
 {
 	return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
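IRQD_HANDLE_ENFORCE_IRQCTX lets a driver declare that an interrupt's handler must never be invoked outside hard interrupt context (the fragile x86 affinity-change machinery is the motivating case). With the flag set, generic_handle_irq() (changed in kernel/irq/irqdesc.c later in this diff) refuses task-context calls with -EPERM and the software resend path refuses injection. Opting in is a one-liner at setup time; a hedged sketch:

/* Sketch: opting an interrupt into strict IRQ-context handling,
 * typically where its irq_data is first configured. */
static void demo_setup_irqdata(struct irq_data *d)
{
	/* Reject generic_handle_irq()/injection from task context */
	irqd_set_handle_enforce_irqctx(d);
}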
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index b9850f5f1906..fa8c0455c352 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -32,6 +32,8 @@ struct gic_kvm_info {
 	struct resource vctrl;
 	/* vlpi support */
 	bool		has_v4;
+	/* rvpeid support */
+	bool		has_v4_1;
 };
 
 const struct gic_kvm_info *gic_get_kvm_info(void);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 83439bfb6c5b..765d9b769b69 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -57,6 +57,7 @@
 #define GICD_SPENDSGIR			0x0F20
 
 #define GICD_CTLR_RWP			(1U << 31)
+#define GICD_CTLR_nASSGIreq		(1U << 8)
 #define GICD_CTLR_DS			(1U << 6)
 #define GICD_CTLR_ARE_NS		(1U << 4)
 #define GICD_CTLR_ENABLE_G1A		(1U << 1)
@@ -90,6 +91,7 @@
 #define GICD_TYPER_ESPIS(typer)						\
 	(((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)
 
+#define GICD_TYPER2_nASSGIcap		(1U << 8)
 #define GICD_TYPER2_VIL			(1U << 7)
 #define GICD_TYPER2_VID			GENMASK(4, 0)
 
@@ -320,6 +322,9 @@
 #define GICR_VPENDBASER_NonShareable					\
 	GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
 
+#define GICR_VPENDBASER_InnerShareable					\
+	GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable)
+
 #define GICR_VPENDBASER_nCnB	GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
 #define GICR_VPENDBASER_nC	GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
 #define GICR_VPENDBASER_RaWt	GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
@@ -343,6 +348,15 @@
 #define GICR_VPENDBASER_4_1_VGRP1EN	(1ULL << 58)
 #define GICR_VPENDBASER_4_1_VPEID	GENMASK_ULL(15, 0)
 
+#define GICR_VSGIR			0x0080
+
+#define GICR_VSGIR_VPEID		GENMASK(15, 0)
+
+#define GICR_VSGIPENDR			0x0088
+
+#define GICR_VSGIPENDR_BUSY		(1U << 31)
+#define GICR_VSGIPENDR_PENDING		GENMASK(15, 0)
+
 /*
  * ITS registers, offsets from ITS_base
  */
@@ -366,6 +380,11 @@
 
 #define GITS_TRANSLATER			0x10040
 
+#define GITS_SGIR			0x20020
+
+#define GITS_SGIR_VPEID			GENMASK_ULL(47, 32)
+#define GITS_SGIR_VINTID		GENMASK_ULL(3, 0)
+
 #define GITS_CTLR_ENABLE		(1U << 0)
 #define GITS_CTLR_ImDe			(1U << 1)
 #define GITS_CTLR_ITS_NUMBER_SHIFT	4
@@ -500,8 +519,9 @@
 #define GITS_CMD_VMAPTI			GITS_CMD_GICv4(GITS_CMD_MAPTI)
 #define GITS_CMD_VMOVI			GITS_CMD_GICv4(GITS_CMD_MOVI)
 #define GITS_CMD_VSYNC			GITS_CMD_GICv4(GITS_CMD_SYNC)
-/* VMOVP and INVDB are the odd ones, as they dont have a physical counterpart */
+/* VMOVP, VSGI and INVDB are the odd ones, as they don't have a physical counterpart */
 #define GITS_CMD_VMOVP			GITS_CMD_GICv4(2)
+#define GITS_CMD_VSGI			GITS_CMD_GICv4(3)
 #define GITS_CMD_INVDB			GITS_CMD_GICv4(0xe)
 
 /*
@@ -650,6 +670,7 @@ struct rdists {
 	struct {
+		raw_spinlock_t	rd_lock;
 		void __iomem	*rd_base;
 		struct page	*pend_page;
 		phys_addr_t	phys_base;
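GITS_SGIR is a 64-bit doorbell register: writing it asks the ITS to generate vSGI 'vintid' for vPE 'vpeid'. Given the field masks above, a write is composed with FIELD_PREP from linux/bitfield.h; the wrapper below is illustrative, and sgir would be the mapped GITS_SGIR on a 64-bit build:

#include <linux/bitfield.h>
#include <linux/io.h>

/* Illustrative composition of a vSGI generation request using the
 * GITS_SGIR field definitions introduced above. */
static void demo_send_vsgi(void __iomem *sgir, u16 vpeid, u8 vintid)
{
	u64 val = FIELD_PREP(GITS_SGIR_VPEID, vpeid) |
		  FIELD_PREP(GITS_SGIR_VINTID, vintid);

	writeq_relaxed(val, sgir);
}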
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index d9c34968467a..6976b8331b60 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -49,11 +49,23 @@ struct its_vpe {
 		};
 		/* GICv4.1 implementations */
 		struct {
+			struct fwnode_handle	*fwnode;
+			struct irq_domain	*sgi_domain;
+			struct {
+				u8	priority;
+				bool	enabled;
+				bool	group;
+			}			sgi_config[16];
 			atomic_t vmapp_count;
 		};
 	};
 
 	/*
+	 * Ensures mutual exclusion between affinity setting of the
+	 * vPE and vLPI operations using vpe->col_idx.
+	 */
+	raw_spinlock_t vpe_lock;
+	/*
 	 * This collection ID is used to indirect the target
 	 * redistributor for this VPE. The ID itself isn't involved in
 	 * programming of the ITS.
@@ -93,6 +105,7 @@ enum its_vcpu_info_cmd_type {
 	SCHEDULE_VPE,
 	DESCHEDULE_VPE,
 	INVALL_VPE,
+	PROP_UPDATE_VSGI,
 };
 
 struct its_cmd_info {
@@ -105,19 +118,27 @@ struct its_cmd_info {
 			bool		g0en;
 			bool		g1en;
 		};
+		struct {
+			u8		priority;
+			bool		group;
+		};
 	};
 };
 
 int its_alloc_vcpu_irqs(struct its_vm *vm);
 void its_free_vcpu_irqs(struct its_vm *vm);
-int its_schedule_vpe(struct its_vpe *vpe, bool on);
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
 int its_invall_vpe(struct its_vpe *vpe);
 int its_map_vlpi(int irq, struct its_vlpi_map *map);
 int its_get_vlpi(int irq, struct its_vlpi_map *map);
 int its_unmap_vlpi(int irq);
 int its_prop_update_vlpi(int irq, u8 config, bool inv);
+int its_prop_update_vsgi(int irq, u8 priority, bool group);
 
 struct irq_domain_ops;
-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
+int its_init_v4(struct irq_domain *domain,
+		const struct irq_domain_ops *vpe_ops,
+		const struct irq_domain_ops *sgi_ops);
 
 #endif
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index f92d9a687372..20d501af4f2e 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -43,6 +43,10 @@ config GENERIC_IRQ_MIGRATION
 config AUTO_IRQ_AFFINITY
 	bool
 
+# Interrupt injection mechanism
+config GENERIC_IRQ_INJECTION
+	bool
+
 # Tasklet based software resend for pending interrupts on enable_irq()
 config HARDIRQS_SW_RESEND
 	bool
@@ -127,6 +131,7 @@ config SPARSE_IRQ
 config GENERIC_IRQ_DEBUGFS
 	bool "Expose irq internals in debugfs"
 	depends on DEBUG_FS
+	select GENERIC_IRQ_INJECTION
 	default n
 	---help---
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b3fa2d87d2f3..41e7e37a0928 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -278,7 +278,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
 		}
 	}
 	if (resend)
-		check_irq_resend(desc);
+		check_irq_resend(desc, false);
 
 	return ret;
 }
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index a949bd39e343..4f9f844074db 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -190,33 +190,7 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
 		return -EFAULT;
 
 	if (!strncmp(buf, "trigger", size)) {
-		unsigned long flags;
-		int err;
-
-		/* Try the HW interface first */
-		err = irq_set_irqchip_state(irq_desc_get_irq(desc),
-					    IRQCHIP_STATE_PENDING, true);
-		if (!err)
-			return count;
-
-		/*
-		 * Otherwise, try to inject via the resend interface,
-		 * which may or may not succeed.
-		 */
-		chip_bus_lock(desc);
-		raw_spin_lock_irqsave(&desc->lock, flags);
-
-		if (irq_settings_is_level(desc) || desc->istate & IRQS_NMI) {
-			/* Can't do level nor NMIs, sorry */
-			err = -EINVAL;
-		} else {
-			desc->istate |= IRQS_PENDING;
-			check_irq_resend(desc);
-			err = 0;
-		}
-
-		raw_spin_unlock_irqrestore(&desc->lock, flags);
-		chip_bus_sync_unlock(desc);
+		int err = irq_inject_interrupt(irq_desc_get_irq(desc));
 
 		return err ? err : count;
 	}
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index c9d8eb7f5c02..7db284b10ac9 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -108,7 +108,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
 irqreturn_t handle_irq_event(struct irq_desc *desc);
 
 /* Resending of interrupts :*/
-void check_irq_resend(struct irq_desc *desc);
+int check_irq_resend(struct irq_desc *desc, bool inject);
 bool irq_wait_for_poll(struct irq_desc *desc);
 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
 
@@ -425,6 +425,10 @@ static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
 {
 	return desc->pending_mask;
 }
+static inline bool handle_enforce_irqctx(struct irq_data *data)
+{
+	return irqd_is_handle_enforce_irqctx(data);
+}
 bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
 #else /* CONFIG_GENERIC_PENDING_IRQ */
 static inline bool irq_can_move_pcntxt(struct irq_data *data)
@@ -451,6 +455,10 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
 {
 	return false;
 }
+static inline bool handle_enforce_irqctx(struct irq_data *data)
+{
+	return false;
+}
 #endif /* !CONFIG_GENERIC_PENDING_IRQ */
 
 #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 98a5f10d1900..1a7723604399 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -638,9 +638,15 @@ void irq_init_desc(unsigned int irq)
 int generic_handle_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_data *data;
 
 	if (!desc)
 		return -EINVAL;
+
+	data = irq_desc_get_irq_data(desc);
+	if (WARN_ON_ONCE(!in_irq() && handle_enforce_irqctx(data)))
+		return -EPERM;
+
 	generic_handle_irq_desc(desc);
 	return 0;
 }
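The new guard makes misuse visible: calling generic_handle_irq() from task context on an enforcing interrupt now warns once and returns -EPERM instead of running the handler in the wrong context. Callers that legitimately need to fake an interrupt from process context should go through irq_inject_interrupt() instead, as the AER injector above now does. A sketch of the failure mode a converted caller would have hit:

/* Sketch: what the guard rejects. In a driver's task-context path
 * (workqueue, ioctl, ...) for an IRQD_HANDLE_ENFORCE_IRQCTX line: */
static void demo_fake_irq(unsigned int irq)
{
	int ret = generic_handle_irq(irq);

	if (ret == -EPERM)	/* one-time WARN already fired */
		pr_warn("use irq_inject_interrupt() instead\n");
}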
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 7527e5ef6fe5..35b8d97c3a1d 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -46,11 +46,11 @@ const struct fwnode_operations irqchip_fwnode_ops;
 EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
 
 /**
- * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
+ * __irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
  *                           identifying an irq domain
  * @type:	Type of irqchip_fwnode. See linux/irqdomain.h
- * @name:	Optional user provided domain name
  * @id:		Optional user provided id if name != NULL
+ * @name:	Optional user provided domain name
  * @pa:		Optional user-provided physical address
  *
  * Allocate a struct irqchip_fwid, and return a pointer to the embedded
@@ -1310,6 +1310,11 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
 				    unsigned int irq_base,
 				    unsigned int nr_irqs, void *arg)
 {
+	if (!domain->ops->alloc) {
+		pr_debug("domain->ops->alloc() is NULL\n");
+		return -ENOSYS;
+	}
+
 	return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
 }
 
@@ -1347,11 +1352,6 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
 		return -EINVAL;
 	}
 
-	if (!domain->ops->alloc) {
-		pr_debug("domain->ops->alloc() is NULL\n");
-		return -ENOSYS;
-	}
-
 	if (realloc && irq_base >= 0) {
 		virq = irq_base;
 	} else {
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 98c04ca5fa43..27634f4022d0 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -47,6 +47,43 @@ static void resend_irqs(unsigned long arg)
 /* Tasklet to handle resend: */
 static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
 
+static int irq_sw_resend(struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+
+	/*
+	 * Validate whether this interrupt can be safely injected from
+	 * non interrupt context
+	 */
+	if (handle_enforce_irqctx(&desc->irq_data))
+		return -EINVAL;
+
+	/*
+	 * If the interrupt is running in the thread context of the parent
+	 * irq we need to be careful, because we cannot trigger it
+	 * directly.
+	 */
+	if (irq_settings_is_nested_thread(desc)) {
+		/*
+		 * If the parent_irq is valid, we retrigger the parent,
+		 * otherwise we do nothing.
+		 */
+		if (!desc->parent_irq)
+			return -EINVAL;
+		irq = desc->parent_irq;
+	}
+
+	/* Set it pending and activate the softirq: */
+	set_bit(irq, irqs_resend);
+	tasklet_schedule(&resend_tasklet);
+	return 0;
+}
+
+#else
+static int irq_sw_resend(struct irq_desc *desc)
+{
+	return -EINVAL;
+}
 #endif
 
 /*
@@ -54,49 +91,83 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
  *
  * Is called with interrupts disabled and desc->lock held.
  */
-void check_irq_resend(struct irq_desc *desc)
+int check_irq_resend(struct irq_desc *desc, bool inject)
 {
+	int err = 0;
+
 	/*
-	 * We do not resend level type interrupts. Level type
-	 * interrupts are resent by hardware when they are still
-	 * active. Clear the pending bit so suspend/resume does not
-	 * get confused.
+	 * We do not resend level type interrupts. Level type interrupts
+	 * are resent by hardware when they are still active. Clear the
+	 * pending bit so suspend/resume does not get confused.
 	 */
 	if (irq_settings_is_level(desc)) {
 		desc->istate &= ~IRQS_PENDING;
-		return;
+		return -EINVAL;
 	}
+
 	if (desc->istate & IRQS_REPLAY)
-		return;
-	if (desc->istate & IRQS_PENDING) {
-		desc->istate &= ~IRQS_PENDING;
+		return -EBUSY;
+
+	if (!(desc->istate & IRQS_PENDING) && !inject)
+		return 0;
+
+	desc->istate &= ~IRQS_PENDING;
+
+	if (!desc->irq_data.chip->irq_retrigger ||
+	    !desc->irq_data.chip->irq_retrigger(&desc->irq_data))
+		err = irq_sw_resend(desc);
+
+	/* If the retrigger was successful, mark it with the REPLAY bit */
+	if (!err)
 		desc->istate |= IRQS_REPLAY;
+	return err;
+}
 
-		if (!desc->irq_data.chip->irq_retrigger ||
-		    !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
-#ifdef CONFIG_HARDIRQS_SW_RESEND
-			unsigned int irq = irq_desc_get_irq(desc);
-
-			/*
-			 * If the interrupt is running in the thread
-			 * context of the parent irq we need to be
-			 * careful, because we cannot trigger it
-			 * directly.
-			 */
-			if (irq_settings_is_nested_thread(desc)) {
-				/*
-				 * If the parent_irq is valid, we
-				 * retrigger the parent, otherwise we
-				 * do nothing.
-				 */
-				if (!desc->parent_irq)
-					return;
-				irq = desc->parent_irq;
-			}
-			/* Set it pending and activate the softirq: */
-			set_bit(irq, irqs_resend);
-			tasklet_schedule(&resend_tasklet);
-#endif
-		}
-	}
+#ifdef CONFIG_GENERIC_IRQ_INJECTION
+/**
+ * irq_inject_interrupt - Inject an interrupt for testing/error injection
+ * @irq:	The interrupt number
+ *
+ * This function must only be used for debug and testing purposes!
+ *
+ * Especially on x86 this can cause a premature completion of an interrupt
+ * affinity change causing the interrupt line to become stale. Very
+ * unlikely, but possible.
+ *
+ * The injection can fail for various reasons:
+ * - Interrupt is not activated
+ * - Interrupt is NMI type or currently replaying
+ * - Interrupt is level type
+ * - Interrupt does not support hardware retrigger and software resend is
+ *   either not enabled or not possible for the interrupt.
+ */
+int irq_inject_interrupt(unsigned int irq)
+{
+	struct irq_desc *desc;
+	unsigned long flags;
+	int err;
+
+	/* Try the state injection hardware interface first */
+	if (!irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true))
+		return 0;
+
+	/* That failed, try via the resend mechanism */
+	desc = irq_get_desc_buslock(irq, &flags, 0);
+	if (!desc)
+		return -EINVAL;
+
+	/*
+	 * Only try to inject when the interrupt is:
+	 *  - not NMI type
+	 *  - activated
+	 */
+	if ((desc->istate & IRQS_NMI) || !irqd_is_activated(&desc->irq_data))
+		err = -EINVAL;
+	else
+		err = check_irq_resend(desc, true);
+
+	irq_put_desc_busunlock(desc, flags);
+	return err;
+}
+EXPORT_SYMBOL_GPL(irq_inject_interrupt);
+#endif
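With the mechanism centralized, a debug facility needs a single call: the hardware set-pending interface is tried first and the tasklet-based software resend is the fallback. A sketch of a test-module shaped user; the function is exported GPL, so a module works, and the demo wrapper is illustrative:

/* Minimal user of the new interface; only meaningful on kernels
 * built with CONFIG_GENERIC_IRQ_INJECTION=y. */
static int demo_inject(unsigned int irq)
{
	int err = irq_inject_interrupt(irq);

	if (err)
		pr_warn("injection of irq %u failed: %d\n", irq, err);
	return err;
}

With CONFIG_GENERIC_IRQ_DEBUGFS enabled, the same path is reachable from userspace by writing "trigger" to the per-interrupt debugfs file, per the kernel/irq/debugfs.c hunk above.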
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index f45635a6f0ec..1bc09b523486 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -595,7 +595,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 	/* GICv4 support? */
 	if (info->has_v4) {
 		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
-		kvm_info("GICv4 support %sabled\n",
+		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
+		kvm_info("GICv4%s support %sabled\n",
+			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
 			 gicv4_enable ? "en" : "dis");
 	}
 
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 46f875589c47..1eb0f8c76219 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -67,10 +67,10 @@
  * it. And if we've migrated our vcpu from one CPU to another, we must
  * tell the ITS (so that the messages reach the right redistributor).
  * This is done in two steps: first issue a irq_set_affinity() on the
- * irq corresponding to the vcpu, then call its_schedule_vpe(). You
- * must be in a non-preemptible context. On exit, another call to
- * its_schedule_vpe() tells the redistributor that we're done with the
- * vcpu.
+ * irq corresponding to the vcpu, then call its_make_vpe_resident().
+ * You must be in a non-preemptible context. On exit, a call to
+ * its_make_vpe_non_resident() tells the redistributor that we're done
+ * with the vcpu.
  *
  * Finally, the doorbell handling: Each vcpu is allocated an interrupt
  * which will fire each time a VLPI is made pending whilst the vcpu is
@@ -86,7 +86,8 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
 	struct kvm_vcpu *vcpu = info;
 
 	/* We got the message, no need to fire again */
-	if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
+	if (!kvm_vgic_global_state.has_gicv4_1 &&
+	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
 		disable_irq_nosync(irq);
 
 	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
@@ -199,19 +200,11 @@ void vgic_v4_teardown(struct kvm *kvm)
 int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
 {
 	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
-	struct irq_desc *desc = irq_to_desc(vpe->irq);
 
 	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
 		return 0;
 
-	/*
-	 * If blocking, a doorbell is required. Undo the nested
-	 * disable_irq() calls...
-	 */
-	while (need_db && irqd_irq_disabled(&desc->irq_data))
-		enable_irq(vpe->irq);
-
-	return its_schedule_vpe(vpe, false);
+	return its_make_vpe_non_resident(vpe, need_db);
 }
 
 int vgic_v4_load(struct kvm_vcpu *vcpu)
@@ -232,18 +225,19 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
 	if (err)
 		return err;
 
-	/* Disabled the doorbell, as we're about to enter the guest */
-	disable_irq_nosync(vpe->irq);
-
-	err = its_schedule_vpe(vpe, true);
+	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
 	if (err)
 		return err;
 
 	/*
 	 * Now that the VPE is resident, let's get rid of a potential
-	 * doorbell interrupt that would still be pending.
+	 * doorbell interrupt that would still be pending. This is a
+	 * GICv4.0 only "feature"...
 	 */
-	return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
+	if (!kvm_vgic_global_state.has_gicv4_1)
+		err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
+
+	return err;
 }
 
 static struct vgic_its *vgic_get_its(struct kvm *kvm,
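Putting the vgic pieces together, the per-vcpu flow described in the comment block above pairs load and put around each run of the guest. In sketch form, with the surrounding run-loop wiring assumed:

/* Sketch of the run-loop pairing described in the comment above. */
static void demo_vcpu_run_cycle(struct kvm_vcpu *vcpu, bool will_block)
{
	/* Affinity update plus its_make_vpe_resident(); on GICv4.0 this
	 * also clears a stale doorbell, a v4.0-only quirk. */
	vgic_v4_load(vcpu);

	/* ... enter the guest, take exits ... */

	/* its_make_vpe_non_resident(); a doorbell is requested only if
	 * the vcpu is about to block and must be woken by a VLPI. */
	vgic_v4_put(vcpu, will_block);
}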