author    Vitaly Kuznetsov <vkuznets@redhat.com>    2023-12-05 11:36:21 +0100
committer Sean Christopherson <seanjc@google.com>  2023-12-07 09:34:23 -0800
commit    af9d544a452114eb54638015544b884e1befd0fb (patch)
tree      40a015e47d14904558536bc001aefcfbdfc2ccbf
parent    e7ad84db4d718e18c7a133e941ba4c7d4c6d4cbf (diff)
KVM: x86: Introduce helper to handle Hyper-V paravirt TLB flush requests
As a preparation to making Hyper-V emulation optional, introduce a helper
to handle pending KVM_REQ_HV_TLB_FLUSH requests.

No functional change intended.

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Tested-by: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Link: https://lore.kernel.org/r/20231205103630.1391318-8-vkuznets@redhat.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
 arch/x86/kvm/hyperv.h     | 13 +++++++++++++
 arch/x86/kvm/svm/nested.c | 10 ++--------
 arch/x86/kvm/vmx/nested.c | 10 ++--------
 3 files changed, 17 insertions(+), 16 deletions(-)
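
For context, the change below moves the duplicated "has Hyper-V vCPU context and
TDP is enabled, so pend a TLB flush" check out of the SVM and VMX nested
transition paths and behind one shared helper, with each vendor passing its own
enable flag (npt_enabled or enable_ept). A minimal user-space sketch of that
pattern follows; the struct, field names, and functions here are simplified
stand-ins for illustration only, not the kernel's real kvm_vcpu or request
machinery:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stub of per-vCPU state; the real struct kvm_vcpu is far richer. */
    struct vcpu {
            bool has_hv_context;      /* stands in for to_hv_vcpu(vcpu) != NULL */
            bool tlb_flush_requested; /* stands in for a pended KVM_REQ_HV_TLB_FLUSH */
    };

    /* Shared helper: only pend the flush when the vCPU has Hyper-V context
     * and two-dimensional paging (NPT/EPT) is in use. */
    static void hv_nested_transition_tlb_flush(struct vcpu *vcpu, bool tdp_enabled)
    {
            if (vcpu->has_hv_context && tdp_enabled)
                    vcpu->tlb_flush_requested = true;
    }

    /* Each vendor path passes its own enable flag, mirroring npt_enabled on
     * SVM and enable_ept on VMX in the actual patch. */
    static void svm_nested_transition(struct vcpu *vcpu, bool npt_enabled)
    {
            hv_nested_transition_tlb_flush(vcpu, npt_enabled);
    }

    static void vmx_nested_transition(struct vcpu *vcpu, bool enable_ept)
    {
            hv_nested_transition_tlb_flush(vcpu, enable_ept);
    }

    int main(void)
    {
            struct vcpu v = { .has_hv_context = true };

            svm_nested_transition(&v, true);
            printf("flush requested: %d\n", v.tlb_flush_requested);
            return 0;
    }
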
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 9d8fa6ba6341..77f6549aa5de 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -247,6 +247,19 @@ static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
return kvm_hv_get_assist_page(vcpu);
}
+static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu,
+ bool tdp_enabled)
+{
+ /*
+ * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
+ * L2's VP_ID upon request from the guest. Make sure we check for
+ * pending entries in the right FIFO upon L1/L2 transition as these
+ * requests are put by other vCPUs asynchronously.
+ */
+ if (to_hv_vcpu(vcpu) && tdp_enabled)
+ kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
+}
+
int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
#endif
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 3fea8c47679e..74c04102ef01 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -487,14 +487,8 @@ static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
- /*
- * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
- * L2's VP_ID upon request from the guest. Make sure we check for
- * pending entries in the right FIFO upon L1/L2 transition as these
- * requests are put by other vCPUs asynchronously.
- */
- if (to_hv_vcpu(vcpu) && npt_enabled)
- kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
+ /* Handle pending Hyper-V TLB flush requests */
+ kvm_hv_nested_transtion_tlb_flush(vcpu, npt_enabled);
/*
* TODO: optimize unconditional TLB flush/MMU sync. A partial list of
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index c5ec0ef51ff7..382c0746d069 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -1139,14 +1139,8 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- /*
- * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
- * L2's VP_ID upon request from the guest. Make sure we check for
- * pending entries in the right FIFO upon L1/L2 transition as these
- * requests are put by other vCPUs asynchronously.
- */
- if (to_hv_vcpu(vcpu) && enable_ept)
- kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
+ /* Handle pending Hyper-V TLB flush requests */
+ kvm_hv_nested_transtion_tlb_flush(vcpu, enable_ept);
/*
* If vmcs12 doesn't use VPID, L1 expects linear and combined mappings