path: root/arch/arm64/kvm/pmu-emul.c
author		Marc Zyngier <maz@kernel.org>	2022-11-13 16:38:24 +0000
committer	Marc Zyngier <maz@kernel.org>	2022-11-17 15:39:51 +0000
commit		9917264d74d9063341968a8e071266358496777b (patch)
tree		d8aa61e39ce9c7eafce537f5f6570681984518ae /arch/arm64/kvm/pmu-emul.c
parent		0cb9c3c87a9d3287eaf353936e6846d885102439 (diff)
KVM: arm64: PMU: Simplify setting a counter to a specific value
kvm_pmu_set_counter_value() is pretty odd, as it tries to update the counter value while taking into account the value currently held by the running perf counter.

This is not only complicated, it is quite wrong: nowhere does the architecture say that the counter should be offset by whatever is still pending in the perf backend. The counter should simply be updated with the value set by SW, and start counting from there if required.

Remove the odd computation and just assign the provided value after having released the perf event (which is then restarted).

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221113163832.3154370-9-maz@kernel.org
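To make the arithmetic concrete, here is a small stand-alone sketch (plain user-space C, not kernel code) contrasting the removed computation with the new direct assignment. The variables sysreg and pending are illustrative stand-ins for the vCPU system register and the count accumulated by the still-running perf event; they are not real KVM symbols.

#include <stdio.h>
#include <stdint.h>

static uint64_t sysreg;		/* architectural counter value held by KVM */
static uint64_t pending = 37;	/* perf count not yet folded into sysreg   */

/* Old behaviour: the write is offset by whatever the perf event has pending. */
static void set_counter_old(uint64_t val)
{
	/* Models kvm_pmu_get_counter_value(), which folds in the running count. */
	uint64_t observed = sysreg + pending;

	/* Mirrors: __vcpu_sys_reg(vcpu, reg) += (s64)val - observed */
	sysreg += (int64_t)val - observed;
}

/* New behaviour: release the event (dropping the pending count), then assign. */
static void set_counter_new(uint64_t val)
{
	pending = 0;	/* models kvm_pmu_release_perf_event() */
	sysreg = val;	/* the register holds exactly what SW wrote */
}

int main(void)
{
	sysreg = 100;
	set_counter_old(500);
	printf("old rule stores %llu (val minus the pending count)\n",
	       (unsigned long long)sysreg);

	sysreg = 100;
	pending = 37;
	set_counter_new(500);
	printf("new rule stores %llu (exactly what software wrote)\n",
	       (unsigned long long)sysreg);
	return 0;
}

Running this prints 463 for the old rule and 500 for the new one: the old computation leaves the architectural register offset by the in-flight perf count, which is what the patch removes.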
Diffstat (limited to 'arch/arm64/kvm/pmu-emul.c')
-rw-r--r--	arch/arm64/kvm/pmu-emul.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index faab0f57a45d..ea0c8411641f 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -23,6 +23,7 @@ static LIST_HEAD(arm_pmus);
 static DEFINE_MUTEX(arm_pmus_lock);
 
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
+static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
 
 static u32 kvm_pmu_event_mask(struct kvm *kvm)
 {
@@ -131,8 +132,10 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
+	kvm_pmu_release_perf_event(&vcpu->arch.pmu.pmc[select_idx]);
+
 	reg = counter_index_to_reg(select_idx);
-	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
+	__vcpu_sys_reg(vcpu, reg) = val;
 
 	/* Recreate the perf event to reflect the updated sample_period */
 	kvm_pmu_create_perf_event(vcpu, select_idx);
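For reference, this is roughly how kvm_pmu_set_counter_value() reads once the patch is applied, reassembled from the hunk above; the u64 reg local and the closing brace are not visible in the hunk and are assumed from context.

void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	/* Stop the perf event first so no in-flight count gets folded in. */
	kvm_pmu_release_perf_event(&vcpu->arch.pmu.pmc[select_idx]);

	reg = counter_index_to_reg(select_idx);
	__vcpu_sys_reg(vcpu, reg) = val;

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(vcpu, select_idx);
}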