author     Dave Airlie <airlied@redhat.com>    2021-05-21 15:29:34 +1000
committer  Dave Airlie <airlied@redhat.com>    2021-05-21 15:29:40 +1000
commit     c99c4d0ca57c978dcc2a2f41ab8449684ea154cc
tree       3fd20557381e99063293ae5d399a54d0108bcdde
parent     2ba047855096fff551402a87272b520fe97323f5
parent     2bb5b5f688cbbd5030629905d3ed8032ab46e79f
Merge tag 'amd-drm-next-5.14-2021-05-19' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-5.14-2021-05-19:

amdgpu:
- Aldebaran updates
- More LTTPR display work
- Vangogh updates
- SDMA 5.x GCR fixes
- RAS fixes
- PCIe ASPM support
- Modifier fixes
- Enable TMZ on Renoir
- Buffer object code cleanup
- Display overlay fixes
- Initial support for multiple eDP panels
- Initial SR-IOV support for Aldebaran
- DP link training refactor
- Misc code cleanups and bug fixes
- SMU regression fixes for variable sized arrays
- MAINTAINERS fixes for amdgpu

amdkfd:
- Initial SR-IOV support for Aldebaran
- Topology fixes
- Initial HMM SVM support
- Misc code cleanups and bug fixes

radeon:
- Misc code cleanups and bug fixes
- SMU regression fixes for variable sized arrays
- Flickering fix for Oland with multiple 4K displays

UAPI:
- amdgpu: Drop AMDGPU_GEM_CREATE_SHADOW flag. This was always a kernel
  internal flag and userspace use of it has always been blocked. It's no
  longer needed so remove it.
- amdkfd: HMM SVM support
  Overview: https://patchwork.freedesktop.org/series/85562/
  Proposed userspace: https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/tree/fxkamd/hmm-wip

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210520031258.231896-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 86
1 file changed, 86 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 828b5167ff12..2741c28ff1b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -155,3 +155,89 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
mmu_interval_notifier_remove(&bo->notifier);
bo->notifier.mm = NULL;
}
+
+int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
+ struct mm_struct *mm, struct page **pages,
+ uint64_t start, uint64_t npages,
+ struct hmm_range **phmm_range, bool readonly,
+ bool mmap_locked)
+{
+ struct hmm_range *hmm_range;
+ unsigned long timeout;
+ unsigned long i;
+ unsigned long *pfns;
+ int r = 0;
+
+ hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
+ if (unlikely(!hmm_range))
+ return -ENOMEM;
+
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (unlikely(!pfns)) {
+ r = -ENOMEM;
+ goto out_free_range;
+ }
+
+ hmm_range->notifier = notifier;
+ hmm_range->default_flags = HMM_PFN_REQ_FAULT;
+ if (!readonly)
+ hmm_range->default_flags |= HMM_PFN_REQ_WRITE;
+ hmm_range->hmm_pfns = pfns;
+ hmm_range->start = start;
+ hmm_range->end = start + npages * PAGE_SIZE;
+
+ /* Assuming 512MB takes a maximum of 1 second to fault in page addresses */
+ timeout = max(npages >> 17, 1ULL) * HMM_RANGE_DEFAULT_TIMEOUT;
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+retry:
+ hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
+
+ if (likely(!mmap_locked))
+ mmap_read_lock(mm);
+
+ r = hmm_range_fault(hmm_range);
+
+ if (likely(!mmap_locked))
+ mmap_read_unlock(mm);
+ if (unlikely(r)) {
+ /*
+ * FIXME: This timeout should encompass the retry from
+ * mmu_interval_read_retry() as well.
+ */
+ if (r == -EBUSY && !time_after(jiffies, timeout))
+ goto retry;
+ goto out_free_pfns;
+ }
+
+ /*
+ * Due to default_flags, all pages are HMM_PFN_VALID or
+ * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
+ * the notifier_lock, and mmu_interval_read_retry() must be done first.
+ */
+ for (i = 0; pages && i < npages; i++)
+ pages[i] = hmm_pfn_to_page(pfns[i]);
+
+ *phmm_range = hmm_range;
+
+ return 0;
+
+out_free_pfns:
+ kvfree(pfns);
+out_free_range:
+ kfree(hmm_range);
+
+ return r;
+}
+
+int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
+{
+ int r;
+
+ r = mmu_interval_read_retry(hmm_range->notifier,
+ hmm_range->notifier_seq);
+ kvfree(hmm_range->hmm_pfns);
+ kfree(hmm_range);
+
+ return r;
+}