author     Sean Christopherson <seanjc@google.com>    2024-10-10 11:23:37 -0700
committer  Paolo Bonzini <pbonzini@redhat.com>        2024-10-25 12:59:08 -0400
commit     68e51d0a437b09dcef621b4cc8e4fe02605bf5c4 (patch)
tree       6561b1dee6b8ef7281677328b73874604fc0943c /virt
parent     2e5fdf60a9a69971b5e642d32e09bd6b0223f47c (diff)
KVM: Disallow direct access (w/o mmu_notifier) to unpinned pfn by default
Add an off-by-default module param to control whether or not KVM is allowed
to map memory that isn't pinned, i.e. that KVM can't guarantee won't be freed
while it is mapped into KVM and/or the guest.

Don't remove the functionality entirely, as there are use cases where mapping
unpinned memory is safe (as defined by the platform owner), e.g. when memory
is hidden from the kernel and managed by userspace, in which case userspace
is already fully trusted to not muck with guest memory mappings.

But for more typical setups, mapping unpinned memory is wildly unsafe, and
unnecessary.  The APIs are used exclusively by x86's nested virtualization
support, and there is no known (or sane) use case for mapping PFN-mapped
memory into a KVM guest _and_ letting the guest use it for virtualization
structures.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-36-seanjc@google.com>
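For reference, a minimal usage sketch (assuming the common case where
kvm_main.c is built into the "kvm" module; module names and boot setup vary
by architecture and distribution): because the parameter is registered with
0444 permissions it is read-only at runtime, so a platform owner opts in at
load time, e.g. "modprobe kvm allow_unsafe_mappings=1", or with
"kvm.allow_unsafe_mappings=1" on the kernel command line when KVM is built
in.  The current value is then visible under
/sys/module/kvm/parameters/allow_unsafe_mappings.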
Diffstat (limited to 'virt')
-rw-r--r--   virt/kvm/kvm_main.c   18
1 file changed, 18 insertions, 0 deletions
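For readers unfamiliar with the mapping APIs involved, below is a hedged,
caller-side sketch of what the new default means.  kvm_vcpu_map() is the
real API used by x86's nested virtualization; the helper name, GPA, and
error handling are hypothetical and simplified.

    #include <linux/kvm_host.h>

    /*
     * Hypothetical caller: map a guest page that KVM itself will access,
     * e.g. a nested virtualization control structure.  kvm_vcpu_map() pins
     * the page, so if the GPA is backed by a VM_PFNMAP/VM_IO VMA and
     * allow_unsafe_mappings is left off (the default), the map now fails
     * instead of handing back a pfn that could be freed underneath KVM.
     */
    static int map_nested_page(struct kvm_vcpu *vcpu, gpa_t gpa,
                               struct kvm_host_map *map)
    {
            if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), map))
                    return -EINVAL;  /* fail the nested operation */

            /* ... access map->hva ..., then kvm_vcpu_unmap() when done */
            return 0;
    }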
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 49ac13872e09..becf640e369c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -95,6 +95,13 @@ module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
/*
+ * Allow direct access (from KVM or the CPU) without MMU notifier protection
+ * to unpinned pages.
+ */
+static bool allow_unsafe_mappings;
+module_param(allow_unsafe_mappings, bool, 0444);
+
+/*
* Ordering of locks:
*
* kvm->lock --> kvm->slots_lock --> kvm->irq_lock
@@ -2811,6 +2818,9 @@ static kvm_pfn_t kvm_resolve_pfn(struct kvm_follow_pfn *kfp, struct page *page,
* reference to such pages would cause KVM to prematurely free a page
* it doesn't own (KVM gets and puts the one and only reference).
* Don't allow those pages until the FIXME is resolved.
+ *
+	 * Don't grab a reference for pins; callers that pin pages are required
+ * to check refcounted_page, i.e. must not blindly release the pfn.
*/
if (map) {
pfn = map->pfn;
@@ -2929,6 +2939,14 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
bool write_fault = kfp->flags & FOLL_WRITE;
int r;
+ /*
+ * Remapped memory cannot be pinned in any meaningful sense. Bail if
+ * the caller wants to pin the page, i.e. access the page outside of
+	 * MMU notifier protection, and unsafe mappings are disallowed.
+ */
+ if (kfp->pin && !allow_unsafe_mappings)
+ return -EINVAL;
+
r = follow_pfnmap_start(&args);
if (r) {
/*