author    Sean Christopherson <seanjc@google.com>    2024-10-10 11:23:51 -0700
committer Paolo Bonzini <pbonzini@redhat.com>        2024-10-25 13:00:47 -0400
commit    dc06193532af4ba88ed20daeef88f22b053ebb91 (patch)
tree      175e34eddbc1fc07b7eb2b6a06aad8d234de4f3c /include/linux
parent    8eaa98004b23e02adb3e11120132e0e2fecc6e0e (diff)
KVM: Move x86's API to release a faultin page to common KVM
Move KVM x86's helper that "finishes" the faultin process to common KVM
so that the logic can be shared across all architectures.

Note, not all architectures implement a fast page fault path, but the
gist of the comment applies to all architectures.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-50-seanjc@google.com>
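For context, a minimal sketch (not part of this patch) of the call-site shape the
commit message describes: an architecture "finishes" a fault by handing the
refcounted page back through the now-common helper. The function name, the
"r != 0" retry convention, and the parameters are illustrative assumptions, not
code from this series.

/*
 * Illustrative sketch only: arch_finish_page_fault() is hypothetical;
 * each architecture supplies its own equivalent call site.  Assumes
 * "r" is nonzero when the fault must be retried (no SPTE was installed)
 * and "writable" tracks whether a writable SPTE was, or locklessly
 * could have been, created.
 */
static void arch_finish_page_fault(struct kvm_vcpu *vcpu, struct page *page,
				   int r, bool writable)
{
	/* Per the lockdep assertion, hold mmu_lock unless the page went unused. */
	kvm_release_faultin_page(vcpu->kvm, page, r != 0, writable);
}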
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/kvm_host.h	26
1 file changed, 26 insertions(+), 0 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6efdc00b4254..3e06393e5f1e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1231,6 +1231,32 @@ static inline void kvm_release_page_unused(struct page *page)
 void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
 
+static inline void kvm_release_faultin_page(struct kvm *kvm, struct page *page,
+					    bool unused, bool dirty)
+{
+	lockdep_assert_once(lockdep_is_held(&kvm->mmu_lock) || unused);
+
+	if (!page)
+		return;
+
+	/*
+	 * If the page that KVM got from the *primary MMU* is writable, and KVM
+	 * installed or reused a SPTE, mark the page/folio dirty.  Note, this
+	 * may mark a folio dirty even if KVM created a read-only SPTE, e.g. if
+	 * the GFN is write-protected.  Folios can't be safely marked dirty
+	 * outside of mmu_lock as doing so could race with writeback on the
+	 * folio.  As a result, KVM can't mark folios dirty in the fast page
+	 * fault handler, and so KVM must (somewhat) speculatively mark the
+	 * folio dirty if KVM could locklessly make the SPTE writable.
+	 */
+	if (unused)
+		kvm_release_page_unused(page);
+	else if (dirty)
+		kvm_release_page_dirty(page);
+	else
+		kvm_release_page_clean(page);
+}
+
 kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
 			    unsigned int foll, bool *writable,
 			    struct page **refcounted_page);
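Taken together with __kvm_faultin_pfn() above, a typical faultin flow could look
like the following. This is a hypothetical end-to-end sketch, not code from the
patch: fault_in_and_map_example() and map_gfn_example() are stand-ins for an
architecture's own fault handler and mapping logic.

/*
 * Hypothetical flow: fault in a pfn from the primary MMU, install a
 * mapping under mmu_lock, then release the refcounted page via the
 * common helper.  Note, mmu_lock is an rwlock on x86; architectures
 * that use a spinlock would take spin_lock() instead of write_lock().
 */
static int fault_in_and_map_example(struct kvm_vcpu *vcpu,
				    struct kvm_memory_slot *slot, gfn_t gfn)
{
	struct page *page;
	bool writable;
	kvm_pfn_t pfn;
	int r;

	pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &page);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	write_lock(&vcpu->kvm->mmu_lock);
	r = map_gfn_example(vcpu, gfn, pfn, writable);	/* hypothetical */

	/*
	 * Release while mmu_lock is held: "unused" if the mapping attempt
	 * failed, "dirty" if a writable mapping was (or could have been)
	 * created.
	 */
	kvm_release_faultin_page(vcpu->kvm, page, r != 0, writable);
	write_unlock(&vcpu->kvm->mmu_lock);

	return r;
}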