| author | Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> | 2020-05-05 12:47:15 +0530 |
|---|---|---|
| committer | Michael Ellerman <mpe@ellerman.id.au> | 2020-05-05 21:20:14 +1000 |
| commit | 87013f9c602cfbbc0734fb2f703df9fc884d05d9 (patch) | |
| tree | d9dbe3fd72c5b09e31b6f906d1e47ddd4dfe62a9 /arch/powerpc/kvm/book3s_hv_rm_mmu.c | |
| parent | 15759cb054efdd45e6db8433a829a5734e6d50f6 (diff) | |
powerpc/kvm/book3s: switch from raw_spin_*lock to arch_spin_lock.
These functions can be called in real mode, so use the low-level
arch_spin_lock(), which is safe to call in real mode.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200505071729.54912-9-aneesh.kumar@linux.ibm.com
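
The pattern the patch applies is small enough to show in isolation. Below is a minimal sketch, not code from the patch itself: the helper names `realmode_spin_lock()`/`realmode_spin_unlock()` are hypothetical and only illustrate how real-mode code reaches past the `spinlock_t` wrapper, whose `raw_spin_lock()` path can run lockdep/debug hooks that are not safe when translation is off, and spins on the embedded `arch_spinlock_t` directly.

```c
/*
 * Illustrative sketch only; realmode_spin_lock()/realmode_spin_unlock()
 * are hypothetical helpers, not part of this patch. In real mode the
 * raw_spin_lock() wrapper may invoke lockdep/debug code that touches
 * memory not accessible with the MMU off, so callers take the
 * architecture-level lock directly instead.
 */
#include <linux/spinlock.h>

static inline void realmode_spin_lock(spinlock_t *lock)
{
	/* spinlock_t -> raw_spinlock_t (.rlock) -> arch_spinlock_t (.raw_lock) */
	arch_spin_lock(&lock->rlock.raw_lock);
}

static inline void realmode_spin_unlock(spinlock_t *lock)
{
	arch_spin_unlock(&lock->rlock.raw_lock);
}
```

The diff below does exactly this for `kvm->mmu_lock`, which is a `spinlock_t` in `struct kvm`, at the two call sites that can execute in real mode.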
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rm_mmu.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_rm_mmu.c | 8 |
1 file changed, 4 insertions(+), 4 deletions(-)
```diff
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 220305454c23..03f8347de48b 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -948,7 +948,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
 		return ret;
 
 	/* Check if we've been invalidated */
-	raw_spin_lock(&kvm->mmu_lock.rlock);
+	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
 	if (mmu_notifier_retry(kvm, mmu_seq)) {
 		ret = H_TOO_HARD;
 		goto out_unlock;
@@ -960,7 +960,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
 	kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
 
 out_unlock:
-	raw_spin_unlock(&kvm->mmu_lock.rlock);
+	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
 	return ret;
 }
 
@@ -984,7 +984,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
 		return ret;
 
 	/* Check if we've been invalidated */
-	raw_spin_lock(&kvm->mmu_lock.rlock);
+	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
 	if (mmu_notifier_retry(kvm, mmu_seq)) {
 		ret = H_TOO_HARD;
 		goto out_unlock;
@@ -996,7 +996,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
 	kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
 
 out_unlock:
-	raw_spin_unlock(&kvm->mmu_lock.rlock);
+	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
 	return ret;
 }
```