Diffstat (limited to 'arch/arm64/kvm/mmu.c')
-rw-r--r--  arch/arm64/kvm/mmu.c  |  34 ++++++++++++++++++++++++++++++----
1 file changed, 30 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0625bf2353c2..183c107c06b2 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -433,6 +433,32 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 	return 0;
 }
 
+static struct kvm_pgtable_mm_ops kvm_user_mm_ops = {
+	/* We shouldn't need any other callback to walk the PT */
+	.phys_to_virt		= kvm_host_va,
+};
+
+static int get_user_mapping_size(struct kvm *kvm, u64 addr)
+{
+	struct kvm_pgtable pgt = {
+		.pgd		= (kvm_pte_t *)kvm->mm->pgd,
+		.ia_bits	= VA_BITS,
+		.start_level	= (KVM_PGTABLE_MAX_LEVELS -
+				   CONFIG_PGTABLE_LEVELS),
+		.mm_ops		= &kvm_user_mm_ops,
+	};
+	kvm_pte_t pte = 0;	/* Keep GCC quiet... */
+	u32 level = ~0;
+	int ret;
+
+	ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
+	VM_BUG_ON(ret);
+	VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
+	VM_BUG_ON(!(pte & PTE_VALID));
+
+	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
+}
+
 static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
 	.zalloc_page		= stage2_memcache_zalloc_page,
 	.zalloc_pages_exact	= kvm_host_zalloc_pages_exact,
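
The new get_user_mapping_size() helper reuses KVM's generic page-table walker on the faulting task's own stage-1 tables: kvm->mm->pgd is wrapped in a throwaway struct kvm_pgtable with a minimal mm_ops (only phys_to_virt is needed to follow table pointers), and the level of the leaf entry covering addr is turned into a mapping size. To make that last step concrete, here is a minimal userspace sketch of the level-to-size arithmetic, assuming 4K pages (PAGE_SHIFT = 12); HW_LEVEL_SHIFT mirrors the kernel's ARM64_HW_PGTABLE_LEVEL_SHIFT() macro, everything else is illustrative:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    /* Mirrors ARM64_HW_PGTABLE_LEVEL_SHIFT(): each level resolves PAGE_SHIFT - 3 bits. */
    #define HW_LEVEL_SHIFT(n) ((PAGE_SHIFT - 3) * (4 - (n)) + 3)

    int main(void)
    {
            /* leaf at level 3 -> 4K page, level 2 -> 2M block, level 1 -> 1G block */
            for (int level = 1; level <= 3; level++)
                    printf("leaf at level %d -> %lu bytes\n",
                           level, 1UL << HW_LEVEL_SHIFT(level));
            return 0;
    }

With these definitions the helper's return value, BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level)), is 4K for a level-3 leaf and 2M for a level-2 (PMD) block, which is what the THP check below compares against PMD_SIZE.
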
@@ -780,7 +806,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
  * Returns the size of the mapping.
  */
 static unsigned long
-transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
+transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			    unsigned long hva, kvm_pfn_t *pfnp,
 			    phys_addr_t *ipap)
 {
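
The only change to transparent_hugepage_adjust() itself is the extra struct kvm parameter, threaded through so the helper can reach kvm->mm and walk the userspace page tables. The block-mapping decision still also depends on fault_supports_stage2_huge_mapping(); one of its requirements is that the HVA and the IPA sit at the same offset within the candidate block. A hedged userspace sketch of that particular check (illustrative names, 4K pages assumed):

    #include <stdbool.h>
    #include <stdint.h>

    #define BLOCK_SIZE (1ULL << 21)	/* PMD_SIZE with 4K pages */

    /*
     * A 2M stage-2 block maps 2M of IPA space to 2M of physical space in
     * one go, so it is only usable when the userspace VA and the IPA
     * agree in their low 21 bits, i.e. share the same in-block offset.
     */
    static bool same_block_offset(uint64_t ipa, uint64_t hva)
    {
            return ((ipa ^ hva) & (BLOCK_SIZE - 1)) == 0;
    }

For example, same_block_offset(0x40200000, 0x7f8a00200000) is true: both addresses are 2M-aligned, so a single block entry can cover them.
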
@@ -791,8 +817,8 @@ transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
 	 * sure that the HVA and IPA are sufficiently aligned and that the
 	 * block map is contained within the memslot.
 	 */
-	if (kvm_is_transparent_hugepage(pfn) &&
-	    fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
+	if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
+	    get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
 		/*
 		 * The address we faulted on is backed by a transparent huge
 		 * page. However, because we map the compound huge page and
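
The hunk's trailing context stops mid-comment; the point of the branch is that once fault_supports_stage2_huge_mapping() passes and the userspace mapping is known to be at least PMD_SIZE, the fault can be served with a 2M stage-2 block. That means backing up from the faulting page to the head of the block, i.e. aligning the IPA and the pfn down to a 2M boundary. A minimal sketch of that alignment arithmetic (illustrative values, 4K pages assumed):

    #include <stdio.h>
    #include <stdint.h>

    #define PMD_SHIFT    21
    #define PMD_SIZE     (1ULL << PMD_SHIFT)
    #define PMD_MASK     (~(PMD_SIZE - 1))
    #define PTRS_PER_PMD 512		/* 4K pages: 512 PTEs per PMD */

    int main(void)
    {
            uint64_t ipa = 0x40123000;	/* faulting IPA (4K-aligned) */
            uint64_t pfn = 0x89abc;		/* pfn backing that IPA */

            ipa &= PMD_MASK;			/* IPA of the enclosing 2M block */
            pfn &= ~(uint64_t)(PTRS_PER_PMD - 1);	/* head page of the block */

            /* prints ipa=0x40000000 pfn=0x89a00 */
            printf("map 2M block: ipa=%#llx pfn=%#llx\n",
                   (unsigned long long)ipa, (unsigned long long)pfn);
            return 0;
    }
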
@@ -1051,7 +1077,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * backed by a THP and thus use block mapping if possible.
 	 */
 	if (vma_pagesize == PAGE_SIZE && !(force_pte || device))
-		vma_pagesize = transparent_hugepage_adjust(memslot, hva,
+		vma_pagesize = transparent_hugepage_adjust(kvm, memslot, hva,
 							   &pfn, &fault_ipa);
 
 	if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
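
At the call site in user_mem_abort(), only the argument list changes; the gating is untouched: the THP upgrade is only attempted for a plain 4K userspace mapping that is neither forced to PTE granularity nor device memory. A compact sketch of that decision (hypothetical helper name, illustrative only):

    #include <stdbool.h>

    #define PAGE_SIZE 4096UL

    /* Illustrative mirror of the call-site gating: keep vma_pagesize
     * unless the THP check is allowed to run and upgrades the size. */
    static unsigned long pick_mapping_size(unsigned long vma_pagesize,
                                           bool force_pte, bool device,
                                           unsigned long thp_adjusted)
    {
            if (vma_pagesize == PAGE_SIZE && !(force_pte || device))
                    return thp_adjusted;	/* PMD_SIZE if the walk found a 2M block */
            return vma_pagesize;
    }
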