author    | Linus Torvalds <torvalds@linux-foundation.org> | 2021-06-25 10:15:35 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-06-25 10:15:35 -0700
commit    | 616a99dd146a799d0cac43f884a3a46571bd2796 (patch)
tree      | a359ed8eebce395d55e9f199674c1680c3e35298
parent    | 94ca94bbbb5f50180ecaac31566dfe5ed44c7994 (diff)
parent    | f8be156be163a052a067306417cd0ff679068c97 (diff)
Merge tag 'for-linus-urgent' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
"A selftests fix for ARM, and the fix for page reference count
underflow. This is a very small fix that was provided by Nick Piggin
and tested by myself"
* tag 'for-linus-urgent' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: do not allow mapping valid but non-reference-counted pages
KVM: selftests: Fix mapping length truncation in m{,un}map()
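
For background on the selftests fix: the mapping length passed to mmap()/munmap() is the product of two 32-bit values, so the multiply is performed in 32-bit arithmetic and can wrap before the result is widened to the size_t parameter; casting one operand to size_t first forces a full-width multiply, which is what the patch does. A minimal stand-alone sketch of the effect (the constants below are illustrative, not the test's actual values):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values only: 2 MiB regions, 32768 slots. */
	uint32_t region_size = 2u << 20;
	uint32_t max_slots = 32768;

	/* Both operands are 32 bits wide, so the product is computed in
	 * 32-bit arithmetic and wraps before it is widened to size_t. */
	size_t truncated = region_size * max_slots;

	/* Widening one operand first makes the multiply full-width. */
	size_t correct = (size_t)max_slots * region_size;

	printf("truncated length: %zu\n", truncated);
	printf("correct length:   %zu\n", correct);
	return 0;
}
```

On a 64-bit host the first length wraps to 0 while the second is the intended 64 GiB.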
-rw-r--r-- | tools/testing/selftests/kvm/set_memory_region_test.c |  4
-rw-r--r-- | virt/kvm/kvm_main.c                                   | 19
2 files changed, 19 insertions, 4 deletions
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index 978f5b5f4dc0..d8812f27648c 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -376,7 +376,7 @@ static void test_add_max_memory_regions(void)
 	pr_info("Adding slots 0..%i, each memory region with %dK size\n",
 		(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
 
-	mem = mmap(NULL, MEM_REGION_SIZE * max_mem_slots + alignment,
+	mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
 		   PROT_READ | PROT_WRITE,
 		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 	TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
 	mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
@@ -401,7 +401,7 @@ static void test_add_max_memory_regions(void)
 	TEST_ASSERT(ret == -1 && errno == EINVAL,
 		    "Adding one more memory slot should fail with EINVAL");
 
-	munmap(mem, MEM_REGION_SIZE * max_mem_slots + alignment);
+	munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
 	munmap(mem_extra, MEM_REGION_SIZE);
 	kvm_vm_free(vm);
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6a6bc7af0e28..46fb042837d2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2055,6 +2055,13 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
 	return true;
 }
 
+static int kvm_try_get_pfn(kvm_pfn_t pfn)
+{
+	if (kvm_is_reserved_pfn(pfn))
+		return 1;
+	return get_page_unless_zero(pfn_to_page(pfn));
+}
+
 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 			       unsigned long addr, bool *async,
 			       bool write_fault, bool *writable,
@@ -2104,13 +2111,21 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 	 * Whoever called remap_pfn_range is also going to call e.g.
 	 * unmap_mapping_range before the underlying pages are freed,
 	 * causing a call to our MMU notifier.
+	 *
+	 * Certain IO or PFNMAP mappings can be backed with valid
+	 * struct pages, but be allocated without refcounting e.g.,
+	 * tail pages of non-compound higher order allocations, which
+	 * would then underflow the refcount when the caller does the
+	 * required put_page. Don't allow those pages here.
 	 */
-	kvm_get_pfn(pfn);
+	if (!kvm_try_get_pfn(pfn))
+		r = -EFAULT;
 
 out:
 	pte_unmap_unlock(ptep, ptl);
 	*p_pfn = pfn;
-	return 0;
+
+	return r;
 }
 
 /*
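
The kvm_main.c side replaces the old unconditional kvm_get_pfn() with kvm_try_get_pfn(), which uses get_page_unless_zero(): a reference is taken only if the page's refcount is already non-zero, so a valid but non-refcounted page (e.g. a tail page of a non-compound higher-order allocation) now makes hva_to_pfn_remapped() fail with -EFAULT instead of gaining a spurious reference that the caller's later put_page() would underflow. A rough user-space model of why the conditional grab matters; the struct and function names here are invented for illustration and are not kernel API:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy refcounted object; a count of 0 means "no references are held". */
struct obj {
	atomic_int refcount;
};

/* Unconditional grab, analogous to the old behaviour: always increments. */
static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);
}

/* Conditional grab, analogous to get_page_unless_zero(): refuse to take a
 * reference on an object whose count is already 0. */
static bool obj_get_unless_zero(struct obj *o)
{
	int c = atomic_load(&o->refcount);

	while (c != 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
			return true;
	}
	return false;
}

int main(void)
{
	struct obj page = { .refcount = 0 };	/* valid but not refcounted */

	/* Old pattern: the count goes 0 -> 1 here, and the caller's matching
	 * "put" later drops it back to 0, releasing a reference that was
	 * never legitimately owned (the underflow scenario the fix avoids). */
	obj_get(&page);
	printf("after unconditional get: count = %d\n",
	       atomic_load(&page.refcount));

	/* New pattern: the grab fails and the mapping attempt is rejected. */
	atomic_store(&page.refcount, 0);
	printf("conditional get on zero count: %s\n",
	       obj_get_unless_zero(&page) ? "took a reference" : "refused");

	return 0;
}
```

Reserved PFNs keep their old behaviour: kvm_try_get_pfn() returns 1 for them without touching any refcount, matching the existing kvm_is_reserved_pfn() checks elsewhere in the file.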