author		Marc Zyngier <maz@kernel.org>		2024-10-27 10:22:20 +0000
committer	Thomas Gleixner <tglx@linutronix.de>	2024-10-27 17:30:16 +0100
commit		e6c24e2d05bb05de96ffb9bdb0ee62d20ad526f8 (patch)
tree		36e92d418bf236b4fd00e2c889532bff8861eb5e /drivers/irqchip
parent		5f994f534120f47432092fb36f5cb0c7a80ed2bf (diff)
irqchip/gic-v4: Correctly deal with set_affinity on lazily-mapped VPEs
Zenghui points out that a recent change to the way set_affinity is handled for VPEs has the potential to return an error if the VPE hasn't been mapped yet (because the guest hasn't emitted a MAPTI command yet), affecting GICv4.0 implementations that rely on the ITSList feature.

Fix this by making set_affinity succeed in this case and return early, without trying to touch the HW.

Fixes: 1442ee0011983 ("irqchip/gic-v4: Don't allow a VMOVP on a dying VPE")
Reported-by: Zenghui Yu <yuzenghui@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
Link: https://lore.kernel.org/all/20241027102220.1858558-1-maz@kernel.org
Link: https://lore.kernel.org/r/aab45cd3-e5ca-58cf-e081-e32a17f5b4e7@huawei.com
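For context, here is a minimal, self-contained sketch (not the driver's code) of the pattern the fix relies on: a set_affinity callback can record the effective affinity itself and return IRQ_SET_MASK_OK_DONE, which tells the generic irq core that no further processing (and no hardware access) is required. The vpe_is_mapped() predicate below is hypothetical and stands in for the vmapp_count / gic_requires_eager_mapping() checks in the real driver.

#include <linux/irq.h>
#include <linux/cpumask.h>

/* Hypothetical predicate; the real driver checks vpe->vmapp_count. */
static bool vpe_is_mapped(struct irq_data *d);

static int example_vpe_set_affinity(struct irq_data *d,
				    const struct cpumask *mask_val,
				    bool force)
{
	unsigned int cpu = cpumask_first(mask_val);

	if (!vpe_is_mapped(d)) {
		/* Not mapped yet: record the affinity and skip the HW. */
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
		return IRQ_SET_MASK_OK_DONE;
	}

	/* Mapped: the (expensive) hardware move would happen here ... */
	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	return IRQ_SET_MASK_OK_DONE;
}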
Diffstat (limited to 'drivers/irqchip')
-rw-r--r--	drivers/irqchip/irq-gic-v3-its.c	14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ab597e74ba08..52f625e07658 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3810,8 +3810,18 @@ static int its_vpe_set_affinity(struct irq_data *d,
 	 * Check if we're racing against a VPE being destroyed, for
 	 * which we don't want to allow a VMOVP.
 	 */
-	if (!atomic_read(&vpe->vmapp_count))
-		return -EINVAL;
+	if (!atomic_read(&vpe->vmapp_count)) {
+		if (gic_requires_eager_mapping())
+			return -EINVAL;
+
+		/*
+		 * If we lazily map the VPEs, this isn't an error and
+		 * we can exit cleanly.
+		 */
+		cpu = cpumask_first(mask_val);
+		irq_data_update_effective_affinity(d, cpumask_of(cpu));
+		return IRQ_SET_MASK_OK_DONE;
+	}
 
 	/*
 	 * Changing affinity is mega expensive, so let's be as lazy as