Diffstat (limited to 'drivers/irqchip/irq-gic-v3-its.c')
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c  |  52  ++++++++++++++++++++++++++--------------------------
1 file changed, 26 insertions(+), 26 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 42e63272154e..105d116b4ea0 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1317,7 +1317,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
{
struct its_cmd_desc desc = {};
struct its_node *its;
- unsigned long flags;
int col_id = vpe->col_idx;
desc.its_vmovp_cmd.vpe = vpe;
@@ -1330,6 +1329,12 @@ static void its_send_vmovp(struct its_vpe *vpe)
}
/*
+ * Protect against concurrent updates of the mapping state on
+ * individual VMs.
+ */
+ guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
+
+ /*
* Yet another marvel of the architecture. If using the
* its_list "feature", we need to make sure that all ITSs
* receive all VMOVP commands in the same order. The only way
@@ -1337,8 +1342,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
*
* Wall <-- Head.
*/
- raw_spin_lock_irqsave(&vmovp_lock, flags);
-
+ guard(raw_spinlock)(&vmovp_lock);
desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
@@ -1353,8 +1357,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
desc.its_vmovp_cmd.col = &its->collections[col_id];
its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
}
-
- raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
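The hunks above replace explicit raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs with the scope-based cleanup helpers from <linux/cleanup.h>. As a minimal sketch of the idiom (the lock and functions below are hypothetical, not part of this driver): guard() acquires the lock where it is declared and releases it automatically on every exit path, while scoped_guard() confines the critical section to a braced block.

/*
 * Minimal sketch of the guard()/scoped_guard() idiom, assuming the
 * scope-based cleanup helpers from <linux/cleanup.h>. demo_lock and
 * the demo_*() functions are hypothetical.
 */
#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_guard(void)
{
	/* Lock taken here, dropped automatically on any return path. */
	guard(raw_spinlock_irqsave)(&demo_lock);

	/* ... critical section; no explicit unlock needed ... */
}

static void demo_scoped_guard(void)
{
	/* Critical section limited to the braced block. */
	scoped_guard(raw_spinlock, &demo_lock) {
		/* ... critical section ... */
	}
	/* demo_lock is already released here. */
}

Note also why the nested vmovp_lock guard in its_send_vmovp() can use the plain raw_spinlock flavour: interrupts are already disabled by the irqsave guard taken on vmapp_lock earlier in the function.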
@@ -1791,12 +1793,10 @@ static bool gic_requires_eager_mapping(void)
static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
- unsigned long flags;
-
if (gic_requires_eager_mapping())
return;
- raw_spin_lock_irqsave(&vmovp_lock, flags);
+ guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
/*
* If the VM wasn't mapped yet, iterate over the vpes and get
@@ -1809,37 +1809,31 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
for (i = 0; i < vm->nr_vpes; i++) {
struct its_vpe *vpe = vm->vpes[i];
- struct irq_data *d = irq_get_irq_data(vpe->irq);
- /* Map the VPE to the first possible CPU */
- vpe->col_idx = cpumask_first(cpu_online_mask);
- its_send_vmapp(its, vpe, true);
+ scoped_guard(raw_spinlock, &vpe->vpe_lock)
+ its_send_vmapp(its, vpe, true);
+
its_send_vinvall(its, vpe);
- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
}
}
-
- raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
- unsigned long flags;
-
/* Not using the ITS list? Everything is always mapped. */
if (gic_requires_eager_mapping())
return;
- raw_spin_lock_irqsave(&vmovp_lock, flags);
+ guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
if (!--vm->vlpi_count[its->list_nr]) {
int i;
- for (i = 0; i < vm->nr_vpes; i++)
+ for (i = 0; i < vm->nr_vpes; i++) {
+ guard(raw_spinlock)(&vm->vpes[i]->vpe_lock);
its_send_vmapp(its, vm->vpes[i], false);
+ }
}
-
- raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
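its_map_vm() and its_unmap_vm() above establish a consistent nesting order: the per-VM vmapp_lock is taken first, and each VPE's vpe_lock is nested inside it only around the per-VPE VMAPP command. A rough sketch of that nesting, using hypothetical stand-in types rather than the driver's real structures:

/*
 * Sketch of the vmapp_lock -> vpe_lock nesting used above.
 * struct demo_vm and struct demo_vpe are hypothetical stand-ins.
 */
#include <linux/cleanup.h>
#include <linux/spinlock.h>

struct demo_vpe {
	raw_spinlock_t	vpe_lock;
};

struct demo_vm {
	raw_spinlock_t	vmapp_lock;
	int		nr_vpes;
	struct demo_vpe	**vpes;
};

static void demo_map_all(struct demo_vm *vm)
{
	int i;

	/* VM-wide lock held across the whole walk. */
	guard(raw_spinlock_irqsave)(&vm->vmapp_lock);

	for (i = 0; i < vm->nr_vpes; i++) {
		/* Per-VPE lock only around the per-VPE operation. */
		scoped_guard(raw_spinlock, &vm->vpes[i]->vpe_lock) {
			/* issue the per-VPE command here */
		}
	}
}

A single, fixed acquisition order (vmapp_lock outermost, vpe_lock innermost) is what keeps this nesting free of lock-order inversions.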
@@ -3926,6 +3920,8 @@ static void its_vpe_invall(struct its_vpe *vpe)
{
struct its_node *its;
+ guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
+
list_for_each_entry(its, &its_nodes, entry) {
if (!is_v4(its))
continue;
@@ -4531,6 +4527,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
vm->db_lpi_base = base;
vm->nr_db_lpis = nr_ids;
vm->vprop_page = vprop_page;
+ raw_spin_lock_init(&vm->vmapp_lock);
if (gic_rdists->has_rvpeid)
irqchip = &its_vpe_4_1_irq_chip;
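The raw_spin_lock_init() call above implies that the series also adds a vmapp_lock member to struct its_vm (in the real tree that structure lives in include/linux/irqchip/arm-gic-v4.h). A minimal sketch of the assumed field, with placement purely illustrative:

/*
 * Sketch of the assumed struct change: a per-VM lock serializing
 * VMAPP/VMOVP mapping-state updates. Existing fields elided.
 */
struct its_vm {
	/* ... existing fields (vprop_page, vpes, nr_vpes, ...) ... */
	raw_spinlock_t	vmapp_lock;
};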
@@ -4562,6 +4559,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_node *its;
+ /* Map the VPE to the first possible CPU */
+ vpe->col_idx = cpumask_first(cpu_online_mask);
+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+
/*
* If we use the list map, we issue VMAPP on demand... Unless
* we're on a GICv4.1 and we eagerly map the VPE on all ITSs
@@ -4570,9 +4571,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
if (!gic_requires_eager_mapping())
return 0;
- /* Map the VPE to the first possible CPU */
- vpe->col_idx = cpumask_first(cpu_online_mask);
-
list_for_each_entry(its, &its_nodes, entry) {
if (!is_v4(its))
continue;
@@ -4581,8 +4579,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
its_send_vinvall(its, vpe);
}
- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-
return 0;
}
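The activate-path hunks above hoist the default target selection so it runs before the !gic_requires_eager_mapping() early return; that way a VPE gets a valid col_idx and effective affinity even when VMAPP is deferred to its_map_vm(). A hedged sketch of that step in isolation (demo_pick_target() is hypothetical; the helpers it calls are the real kernel APIs used in the hunk):

#include <linux/cpumask.h>
#include <linux/irq.h>

/*
 * Sketch of the "pick an initial target CPU at activation" step.
 * demo_pick_target() and the col_idx pointer are hypothetical.
 */
static void demo_pick_target(struct irq_data *d, u16 *col_idx)
{
	/* First online CPU becomes the initial doorbell target. */
	*col_idx = cpumask_first(cpu_online_mask);
	irq_data_update_effective_affinity(d, cpumask_of(*col_idx));
}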
@@ -5580,6 +5576,10 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
goto node_err;
}
+ if (acpi_get_madt_revision() >= 7 &&
+ (its_entry->flags & ACPI_MADT_ITS_NON_COHERENT))
+ its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
+
err = its_probe_one(its);
if (!err)
return 0;
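Finally, the ACPI hunk only honours the ITS non-coherent flag when the MADT is recent enough to define it (revision 7 or later); on older tables the bit must be treated as reserved. A sketch of the same gating pattern, assuming the helpers named in the hunk (demo_its_non_coherent() itself is hypothetical):

#include <linux/acpi.h>

/*
 * Sketch of gating a MADT flag on the table revision; older MADT
 * revisions predate ACPI_MADT_ITS_NON_COHERENT, so ignore it there.
 */
static bool demo_its_non_coherent(struct acpi_madt_generic_translator *its_entry)
{
	return acpi_get_madt_revision() >= 7 &&
	       (its_entry->flags & ACPI_MADT_ITS_NON_COHERENT);
}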