Diffstat (limited to 'drivers/perf')
 drivers/perf/arm_pmu.c          | 22
 drivers/perf/arm_pmu_acpi.c     | 19
 drivers/perf/arm_pmu_platform.c | 15
 3 files changed, 20 insertions, 36 deletions
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 2b2af35db1b6..0c2ed11c0603 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -534,7 +534,7 @@ static int armpmu_count_irq_users(const int irq)
 	return count;
 }
 
-void armpmu_free_cpu_irq(int irq, int cpu)
+void armpmu_free_irq(int irq, int cpu)
 {
 	if (per_cpu(cpu_irq, cpu) == 0)
 		return;
@@ -549,15 +549,7 @@ void armpmu_free_cpu_irq(int irq, int cpu)
 	per_cpu(cpu_irq, cpu) = 0;
 }
 
-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
-{
-	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
-	int irq = per_cpu(hw_events->irq, cpu);
-
-	armpmu_free_cpu_irq(irq, cpu);
-}
-
-int armpmu_request_cpu_irq(int irq, int cpu)
+int armpmu_request_irq(int irq, int cpu)
 {
 	int err = 0;
 	const irq_handler_t handler = armpmu_dispatch_irq;
@@ -598,16 +590,6 @@ err_out:
 	return err;
 }
 
-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
-{
-	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
-	int irq = per_cpu(hw_events->irq, cpu);
-	if (!irq)
-		return 0;
-
-	return armpmu_request_cpu_irq(irq, cpu);
-}
-
 static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
 {
 	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
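
With the hunks above, the request/free helpers take a plain (irq, cpu) pair instead of a struct arm_pmu, so an IRQ can be requested before any PMU structure exists, and armpmu_free_irq() bails out early when no IRQ was ever recorded for the CPU. A minimal sketch of the resulting calling contract; probe_cpu_pmu_irq() and release_cpu_pmu_irq() are hypothetical wrappers for illustration, not part of this patch:

	/* Illustrative only: manage a per-CPU PMU interrupt via the new helpers. */
	static int probe_cpu_pmu_irq(int irq, int cpu)
	{
		if (!irq)	/* no PMU interrupt wired up for this CPU */
			return 0;

		return armpmu_request_irq(irq, cpu);
	}

	static void release_cpu_pmu_irq(int irq, int cpu)
	{
		/*
		 * No-op when nothing was recorded for this CPU, per the
		 * per_cpu(cpu_irq, cpu) == 0 check in armpmu_free_irq().
		 */
		armpmu_free_irq(irq, cpu);
	}
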
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 09a1a36cff57..0f197516d708 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -89,7 +89,13 @@ static int arm_pmu_acpi_parse_irqs(void)
 			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
 		}
 
+		/*
+		 * Log and request the IRQ so the core arm_pmu code can manage
+		 * it. We'll have to sanity-check IRQs later when we associate
+		 * them with their PMUs.
+		 */
 		per_cpu(pmu_irqs, cpu) = irq;
+		armpmu_request_irq(irq, cpu);
 	}
 
 	return 0;
@@ -205,14 +211,6 @@ static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
 	cpumask_set_cpu(cpu, &pmu->supported_cpus);
 
 	/*
-	 * Log and request the IRQ so the core arm_pmu code can manage it. In
-	 * some situations (e.g. mismatched PPIs), we may fail to request the
-	 * IRQ. However, it may be too late for us to do anything about it.
-	 * The common ARM PMU code will log a warning in this case.
-	 */
-	armpmu_request_irq(pmu, cpu);
-
-	/*
 	 * Ideally, we'd probe the PMU here when we find the first matching
 	 * CPU. We can't do that for several reasons; see the comment in
 	 * arm_pmu_acpi_init().
@@ -281,11 +279,6 @@ static int arm_pmu_acpi_init(void)
 	if (acpi_disabled)
 		return 0;
 
-	/*
-	 * We can't request IRQs yet, since we don't know the cookie value
-	 * until we know which CPUs share the same logical PMU. We'll handle
-	 * that in arm_pmu_acpi_cpu_starting().
-	 */
 	ret = arm_pmu_acpi_parse_irqs();
 	if (ret)
 		return ret;
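
On the ACPI side, each IRQ is now requested as soon as it is parsed, rather than from the CPU-starting hotplug callback, where (per the comment removed above) a request failure would come too late to act on. A rough sketch of the resulting shape of the arm_pmu_acpi_parse_irqs() loop, abridged from the hunk above; the ACPI lookup of the per-CPU interrupt is elided:

	for_each_possible_cpu(cpu) {
		int irq = ...;	/* per-CPU PMU interrupt parsed from ACPI (elided) */

		/*
		 * Stash and request the IRQ immediately; it is sanity-checked
		 * later, when IRQs are associated with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		armpmu_request_irq(irq, cpu);
	}
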
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 1dc3c1f574e0..7729eda5909d 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -159,10 +159,15 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
 
 static int armpmu_request_irqs(struct arm_pmu *armpmu)
 {
+	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
 	int cpu, err;
 
 	for_each_cpu(cpu, &armpmu->supported_cpus) {
-		err = armpmu_request_irq(armpmu, cpu);
+		int irq = per_cpu(hw_events->irq, cpu);
+		if (!irq)
+			continue;
+
+		err = armpmu_request_irq(irq, cpu);
 		if (err)
 			break;
 	}
@@ -173,9 +178,13 @@ static int armpmu_request_irqs(struct arm_pmu *armpmu)
 static void armpmu_free_irqs(struct arm_pmu *armpmu)
 {
 	int cpu;
+	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
 
-	for_each_cpu(cpu, &armpmu->supported_cpus)
-		armpmu_free_irq(armpmu, cpu);
+	for_each_cpu(cpu, &armpmu->supported_cpus) {
+		int irq = per_cpu(hw_events->irq, cpu);
+
+		armpmu_free_irq(irq, cpu);
+	}
 }
 
 int arm_pmu_device_probe(struct platform_device *pdev,
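
Note the asymmetry between the two platform loops above: the request path skips CPUs whose IRQ is zero, while the free path hands per_cpu(hw_events->irq, cpu) straight to armpmu_free_irq() and relies on that helper's early return (the per_cpu(cpu_irq, cpu) == 0 check in arm_pmu.c) to make freeing a never-requested IRQ harmless. A tiny illustration of that contract, for a hypothetical caller:

	int irq = per_cpu(hw_events->irq, cpu);	/* may legitimately be 0 */

	armpmu_free_irq(irq, cpu);	/* returns immediately if no IRQ was recorded */
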