author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-09-05 07:59:28 +0200
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-09-05 07:59:28 +0200
commit	35f2e3c267f07a42bc1bf08081e963b3a33e6d7c (patch)
tree	cab274350db930656d6ea9a0c9989f210acf4722 /virt/kvm/pfncache.c
parent	e4cdd25cafac3f61c74c146db5de7a5c9bd7b6d0 (diff)
parent	7e18e42e4b280c85b76967a9106a13ca61c16179 (diff)
Merge 6.0-rc4 into tty-next
We need the tty/serial fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'virt/kvm/pfncache.c')
-rw-r--r--	virt/kvm/pfncache.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index ab519f72f2cd..68ff41d39545 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -112,27 +112,28 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s
 {
 	/*
 	 * mn_active_invalidate_count acts for all intents and purposes
-	 * like mmu_notifier_count here; but the latter cannot be used
-	 * here because the invalidation of caches in the mmu_notifier
-	 * event occurs _before_ mmu_notifier_count is elevated.
+	 * like mmu_invalidate_in_progress here; but the latter cannot
+	 * be used here because the invalidation of caches in the
+	 * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
+	 * is elevated.
 	 *
 	 * Note, it does not matter that mn_active_invalidate_count
 	 * is not protected by gpc->lock.  It is guaranteed to
 	 * be elevated before the mmu_notifier acquires gpc->lock, and
-	 * isn't dropped until after mmu_notifier_seq is updated.
+	 * isn't dropped until after mmu_invalidate_seq is updated.
 	 */
 	if (kvm->mn_active_invalidate_count)
 		return true;
 
 	/*
 	 * Ensure mn_active_invalidate_count is read before
-	 * mmu_notifier_seq.  This pairs with the smp_wmb() in
+	 * mmu_invalidate_seq.  This pairs with the smp_wmb() in
 	 * mmu_notifier_invalidate_range_end() to guarantee either the
 	 * old (non-zero) value of mn_active_invalidate_count or the
-	 * new (incremented) value of mmu_notifier_seq is observed.
+	 * new (incremented) value of mmu_invalidate_seq is observed.
 	 */
 	smp_rmb();
-	return kvm->mmu_notifier_seq != mmu_seq;
+	return kvm->mmu_invalidate_seq != mmu_seq;
 }
 
 static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
@@ -155,7 +156,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 	gpc->valid = false;
 
 	do {
-		mmu_seq = kvm->mmu_notifier_seq;
+		mmu_seq = kvm->mmu_invalidate_seq;
 		smp_rmb();
 
 		write_unlock_irq(&gpc->lock);
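
For context, the identifiers being renamed above implement a sequence-count style retry protocol: hva_to_pfn_retry() snapshots mmu_invalidate_seq, performs its lookup, and retries if an invalidation was in flight or the sequence has moved. The following is a minimal userspace sketch of that pattern, not KVM's code: fake_kvm, begin_invalidate(), end_invalidate(), retry_cache() and refresh_cache() are hypothetical names, and C11 acquire/release atomics stand in for the kernel's smp_rmb()/smp_wmb() pairing.

/*
 * Sketch of the invalidation/retry protocol referenced in the diff.
 * All names here are illustrative stand-ins, not KVM APIs.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_kvm {
	/* in-flight invalidations (plays the role of mn_active_invalidate_count) */
	atomic_ulong active_invalidate_count;
	/* bumped when an invalidation completes (plays the role of mmu_invalidate_seq) */
	atomic_ulong invalidate_seq;
};

/* Invalidation side: mirrors the notifier's range_start()/range_end() shape. */
static void begin_invalidate(struct fake_kvm *kvm)
{
	atomic_fetch_add(&kvm->active_invalidate_count, 1);
}

static void end_invalidate(struct fake_kvm *kvm)
{
	/*
	 * Bump the sequence, then drop the count with release semantics, so a
	 * reader that sees the count reach zero also sees the new sequence.
	 */
	atomic_fetch_add(&kvm->invalidate_seq, 1);
	atomic_fetch_sub_explicit(&kvm->active_invalidate_count, 1,
				  memory_order_release);
}

/* Reader side: mirrors the shape of mmu_notifier_retry_cache(). */
static bool retry_cache(struct fake_kvm *kvm, unsigned long mmu_seq)
{
	/* Any in-progress invalidation forces a retry. */
	if (atomic_load_explicit(&kvm->active_invalidate_count,
				 memory_order_acquire))
		return true;

	/* The acquire load above orders the count read before the sequence read. */
	return atomic_load(&kvm->invalidate_seq) != mmu_seq;
}

/* Mirrors the hva_to_pfn_retry() loop: snapshot, do the lookup, recheck. */
static void refresh_cache(struct fake_kvm *kvm)
{
	unsigned long mmu_seq;

	do {
		mmu_seq = atomic_load(&kvm->invalidate_seq);
		/* ... resolve the pfn / remap the cache here ... */
	} while (retry_cache(kvm, mmu_seq));
}

int main(void)
{
	struct fake_kvm kvm = {0};

	begin_invalidate(&kvm);
	printf("retry while invalidating: %d\n", retry_cache(&kvm, 0));
	end_invalidate(&kvm);
	refresh_cache(&kvm);
	printf("seq after invalidation: %lu\n",
	       (unsigned long)atomic_load(&kvm.invalidate_seq));
	return 0;
}

The acquire load of the in-flight count paired with the release decrement plays the role of the smp_rmb()/smp_wmb() pair described in the comment: a refresher either observes a non-zero count or the already-incremented sequence, so a completed invalidation can never be missed.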