 virt/kvm/pfncache.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 8c4db3dcaf6d..b4295474519f 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -98,7 +98,7 @@ bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_check);
 
-static void gpc_unmap_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva)
+static void gpc_unmap_khva(kvm_pfn_t pfn, void *khva)
 {
 	/* Unmap the old pfn/page if it was mapped before. */
 	if (!is_error_noslot_pfn(pfn) && khva) {
@@ -177,7 +177,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 		 * the existing mapping and didn't create a new one.
 		 */
 		if (new_khva != old_khva)
-			gpc_unmap_khva(kvm, new_pfn, new_khva);
+			gpc_unmap_khva(new_pfn, new_khva);
 
 		kvm_release_pfn_clean(new_pfn);
 
@@ -329,7 +329,7 @@ out_unlock:
 	mutex_unlock(&gpc->refresh_lock);
 
 	if (unmap_old)
-		gpc_unmap_khva(kvm, old_pfn, old_khva);
+		gpc_unmap_khva(old_pfn, old_khva);
 
 	return ret;
 }
@@ -358,7 +358,7 @@ void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 	write_unlock_irq(&gpc->lock);
 	mutex_unlock(&gpc->refresh_lock);
 
-	gpc_unmap_khva(kvm, old_pfn, old_khva);
+	gpc_unmap_khva(old_pfn, old_khva);
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_unmap);
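
For context, a sketch of the helper after this change: the unmap path is decided purely by the pfn/khva pair, which is why the unused struct kvm argument can be dropped. The function body below is an illustrative reconstruction based on the context lines visible in the first hunk, not part of the patch itself.

/*
 * Illustrative sketch (not from the patch): roughly how gpc_unmap_khva()
 * looks once the unused @kvm parameter is removed.  Nothing here needs
 * struct kvm; only the pfn/khva pair determines how to unmap.
 */
static void gpc_unmap_khva(kvm_pfn_t pfn, void *khva)
{
	/* Unmap the old pfn/page if it was mapped before. */
	if (!is_error_noslot_pfn(pfn) && khva) {
		if (pfn_valid(pfn))
			kunmap(pfn_to_page(pfn));
#ifdef CONFIG_HAS_IOMEM
		else
			memunmap(khva);
#endif
	}
}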