author | Takashi Iwai <tiwai@suse.de> | 2022-08-24 08:03:46 +0200
---|---|---
committer | Takashi Iwai <tiwai@suse.de> | 2022-08-24 08:04:56 +0200
commit | 384c687fb4ad0774aaae58ed7f43cc738aa57a73 (patch) |
tree | da35994e3876cdcb9f7d144c6e372b0a942db02d /arch/arm64/kvm/mmu.c |
parent | 18afcf90d8807fef66d1fd428eeb2b407df90fa8 (diff) |
parent | 0db78532ff144a52757d636a26803099d78f431e (diff) |
Merge branch 'topic/memalloc-cleanup' into for-next
ALSA: Drop hackish GFP giveaway for CONTINUOUS pages
This is a series of cleanup patches for dropping the current hackish
way of passing the GFP_* flags for CONTINUOUS and VMALLOC memory
allocations. There are only three users of this legacy feature, and
all of them seem superfluous. And, if any driver requires the memory
restriction in the future, it can now pass the proper device pointer
for specifying the DMA mask.
Link: https://lore.kernel.org/r/20220823115740.14123-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
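As an editor's illustration (not part of the series): a minimal, hypothetical sketch of the approach the commit message describes. A driver that still needs an addressing restriction for SNDRV_DMA_TYPE_CONTINUOUS pages would express it through the device's DMA mask and pass the real device pointer, instead of smuggling GFP_* flags through the device argument. The function name example_alloc_buffer and the 32-bit mask are assumptions made for this example.

```c
#include <linux/dma-mapping.h>
#include <sound/memalloc.h>

/* Hypothetical helper, not from the patch series: allocate a CONTINUOUS
 * buffer while expressing the addressing limit via the DMA mask. */
static int example_alloc_buffer(struct device *dev, size_t size,
				struct snd_dma_buffer *dmab)
{
	int err;

	/* Assumed restriction for illustration: keep the buffer below 4GB
	 * by setting the device's DMA mask, rather than passing GFP_DMA32
	 * through the (ab)used device pointer as before. */
	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	/* Plain contiguous kernel pages; the real device pointer carries
	 * the restriction (or pass NULL when no restriction is needed). */
	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS, dev, size, dmab);
}
```

The buffer would later be released with snd_dma_free_pages(dmab), as usual for the ALSA memalloc API.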
Diffstat (limited to 'arch/arm64/kvm/mmu.c')
-rw-r--r-- | arch/arm64/kvm/mmu.c | 8 |
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 87f1cd0df36e..c9a13e487187 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -993,7 +993,7 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	 * THP doesn't start to split while we are adjusting the
 	 * refcounts.
 	 *
-	 * We are sure this doesn't happen, because mmu_notifier_retry
+	 * We are sure this doesn't happen, because mmu_invalidate_retry
 	 * was successful and we are holding the mmu_lock, so if this
 	 * THP is trying to split, it will be blocked in the mmu
 	 * notifier before touching any of the pages, specifically
@@ -1188,9 +1188,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return ret;
 	}
 
-	mmu_seq = vcpu->kvm->mmu_notifier_seq;
+	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
 	/*
-	 * Ensure the read of mmu_notifier_seq happens before we call
+	 * Ensure the read of mmu_invalidate_seq happens before we call
 	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
 	 * the page we just got a reference to gets unmapped before we have a
 	 * chance to grab the mmu_lock, which ensure that if the page gets
@@ -1246,7 +1246,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	else
 		write_lock(&kvm->mmu_lock);
 	pgt = vcpu->arch.hw_mmu->pgt;
-	if (mmu_notifier_retry(kvm, mmu_seq))
+	if (mmu_invalidate_retry(kvm, mmu_seq))
 		goto out_unlock;
 
 	/*
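The hunks above are a pure rename (mmu_notifier_* to mmu_invalidate_*). For readers unfamiliar with these helpers, here is a hypothetical, heavily simplified sketch (not taken from the patch) of the pattern the retained comments describe: snapshot the invalidation sequence count before resolving the pfn, then re-check it under mmu_lock so a racing invalidation forces the fault to be replayed rather than installing a stale mapping. The function example_map_fault and its error handling are illustrative assumptions; the write_lock usage mirrors the arm64 code in the hunk.

```c
#include <linux/kvm_host.h>

/* Hypothetical fault handler skeleton showing the mmu_invalidate_seq /
 * mmu_invalidate_retry() pattern; not the actual user_mem_abort(). */
static int example_map_fault(struct kvm_vcpu *vcpu, gfn_t gfn, bool write_fault)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	int ret = -EAGAIN;

	/* Snapshot the sequence count before the (sleepable) pfn lookup. */
	mmu_seq = kvm->mmu_invalidate_seq;
	/* Keep the seq read ordered before the pfn lookup below; pairs with
	 * the barriers on the invalidation side. */
	smp_rmb();

	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, NULL);
	if (is_error_pfn(pfn))
		return -EFAULT;

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq))
		goto out_unlock;	/* an invalidation raced us: replay the fault */

	/* ... install the stage-2 mapping for pfn here ... */
	ret = 0;

out_unlock:
	write_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return ret;
}
```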