author     Paolo Bonzini <pbonzini@redhat.com>    2024-07-11 18:27:54 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>    2024-07-26 14:46:15 -0400
commit     4b5f67120a88c713b82907d55a767693382e9e9d
tree       66ccc638b7d2b86d86580eb501769beaa65ea984
parent     e300614f10bd2f33252c8ba40b34d6c3fbf95d72
KVM: extend kvm_range_has_memory_attributes() to check subset of attributes
While KVM_MEMORY_ATTRIBUTE_PRIVATE is currently the only memory attribute,
KVM code such as kvm_mem_is_private() is written to expect the existence
of others. Allow using kvm_range_has_memory_attributes() as a multi-page
version of kvm_mem_is_private(), without it breaking later when more
attributes are introduced.
Reviewed-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
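
The new mask argument makes it possible to test a single attribute across a
whole range. As a purely illustrative sketch (this helper is hypothetical and
not part of the patch), a multi-page counterpart of kvm_mem_is_private()
could be written as:

	/* Hypothetical helper, not in this patch: true if every gfn in
	 * [start, end) has KVM_MEMORY_ATTRIBUTE_PRIVATE set, regardless of
	 * any other attribute bits that may exist in the future.
	 */
	static bool kvm_range_is_private(struct kvm *kvm, gfn_t start, gfn_t end)
	{
		return kvm_range_has_memory_attributes(kvm, start, end,
						       KVM_MEMORY_ATTRIBUTE_PRIVATE,
						       KVM_MEMORY_ATTRIBUTE_PRIVATE);
	}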
-rw-r--r-- | arch/x86/kvm/mmu/mmu.c   |  2
-rw-r--r-- | include/linux/kvm_host.h |  2
-rw-r--r-- | virt/kvm/kvm_main.c      | 13
3 files changed, 9 insertions, 8 deletions
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 26ef5b6ac3c1..43a02891babb 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7513,7 +7513,7 @@ static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
 	const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
 
 	if (level == PG_LEVEL_2M)
-		return kvm_range_has_memory_attributes(kvm, start, end, attrs);
+		return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs);
 
 	for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
 		if (hugepage_test_mixed(slot, gfn, level - 1) ||
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 45373d42f314..c223b97df03e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2414,7 +2414,7 @@ static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn
 }
 
 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
-				     unsigned long attrs);
+				     unsigned long mask, unsigned long attrs);
 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 					struct kvm_gfn_range *range);
 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 30328ff2d840..92901656a0d4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2408,21 +2408,21 @@ static u64 kvm_supported_mem_attributes(struct kvm *kvm)
 
 /*
  * Returns true if _all_ gfns in the range [@start, @end) have attributes
- * matching @attrs.
+ * such that the bits in @mask match @attrs.
  */
 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
-				     unsigned long attrs)
+				     unsigned long mask, unsigned long attrs)
 {
 	XA_STATE(xas, &kvm->mem_attr_array, start);
-	unsigned long mask = kvm_supported_mem_attributes(kvm);
 	unsigned long index;
 	void *entry;
 
+	mask &= kvm_supported_mem_attributes(kvm);
 	if (attrs & ~mask)
 		return false;
 
 	if (end == start + 1)
-		return kvm_get_memory_attributes(kvm, start) == attrs;
+		return (kvm_get_memory_attributes(kvm, start) & mask) == attrs;
 
 	guard(rcu)();
 	if (!attrs)
@@ -2433,7 +2433,8 @@ bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
 			entry = xas_next(&xas);
 		} while (xas_retry(&xas, entry));
 
-		if (xas.xa_index != index || xa_to_value(entry) != attrs)
+		if (xas.xa_index != index ||
+		    (xa_to_value(entry) & mask) != attrs)
 			return false;
 	}
 
@@ -2532,7 +2533,7 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
 	mutex_lock(&kvm->slots_lock);
 
 	/* Nothing to do if the entire range as the desired attributes. */
-	if (kvm_range_has_memory_attributes(kvm, start, end, attributes))
+	if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes))
 		goto out_unlock;
 
 	/*
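
After this change the matching rule is: the range satisfies the check when,
for every gfn in [start, end), the attribute bits selected by the mask equal
attrs; passing ~0 for the mask preserves the old exact-match behaviour,
because the mask is first clamped to kvm_supported_mem_attributes(). A
standalone restatement of the per-gfn predicate (illustrative only, not
kernel code):

	/* Sketch of the per-gfn predicate implied by the new signature. */
	static bool gfn_attrs_match(unsigned long gfn_attrs, unsigned long mask,
				    unsigned long attrs)
	{
		/* Only the bits selected by the mask take part in the comparison. */
		return (gfn_attrs & mask) == attrs;
	}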