From ba2472eaf7255dfba27cea0b674ffcc0ee348293 Mon Sep 17 00:00:00 2001
From: Nirmoy Das
Date: Tue, 29 Jun 2021 13:44:13 +0200
Subject: drm/amdgpu: return early for non-TTM_PL_TT type BOs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Return early for non-TTM_PL_TT BOs so that we don't pass a wrong
pointer to amdgpu_gtt_mgr_has_gart_addr(), which assumes its
ttm_resource argument belongs to a TTM_PL_TT type BO.

v3: remove extra braces.
v2: merge if-conditions.

Signed-off-by: Nirmoy Das
Reviewed-by: Christian König
Signed-off-by: Christian König
Link: https://patchwork.freedesktop.org/patch/msgid/20210629114413.3371-1-nirmoy.das@amd.com
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 80dff29f2bc7..ac35aebdf9f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -924,7 +924,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 	    bo_mem->mem_type == AMDGPU_PL_OA)
 		return -EINVAL;
 
-	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
+	if (bo_mem->mem_type != TTM_PL_TT ||
+	    !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
 		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
 		return 0;
 	}
--
cgit v1.2.3-70-g09d2
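
For illustration, below is a minimal, self-contained C sketch of the guard
pattern this patch enforces: check the resource's mem_type tag before calling
a helper that downcasts to a TT-specific node. The demo_* names are invented
for the example; this is not amdgpu code.

/*
 * Standalone sketch of the guard pattern above, with made-up types
 * (struct demo_res, struct demo_tt_node); it is not amdgpu code.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_mem_type { DEMO_PL_SYSTEM, DEMO_PL_TT, DEMO_PL_VRAM };

struct demo_res {
	enum demo_mem_type mem_type;
};

/* TT-specific wrapper; only valid when res really is a DEMO_PL_TT resource. */
struct demo_tt_node {
	struct demo_res base;
	unsigned long gart_addr;	/* 0 means "no GART address yet" */
};

/* Stands in for amdgpu_gtt_mgr_has_gart_addr(): assumes a TT resource. */
static bool demo_tt_has_gart_addr(struct demo_res *res)
{
	struct demo_tt_node *node = (struct demo_tt_node *)res;

	return node->gart_addr != 0;
}

static int demo_backend_bind(struct demo_res *res)
{
	/*
	 * Guard first: for anything that is not a TT resource the downcast
	 * in demo_tt_has_gart_addr() would read garbage, so return early.
	 * The || short-circuits, so the helper is never called for them.
	 */
	if (res->mem_type != DEMO_PL_TT || !demo_tt_has_gart_addr(res)) {
		printf("no GART binding needed/possible\n");
		return 0;
	}

	printf("binding TT resource at its GART address\n");
	return 0;
}

int main(void)
{
	struct demo_res vram = { .mem_type = DEMO_PL_VRAM };
	struct demo_tt_node tt = {
		.base = { .mem_type = DEMO_PL_TT },
		.gart_addr = 4096,
	};

	demo_backend_bind(&vram);	/* early return, no bogus downcast */
	demo_backend_bind(&tt.base);	/* TT path */
	return 0;
}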
From 8dbe43e99f0f62fc4f829b4fedc5d628a329fc38 Mon Sep 17 00:00:00 2001
From: Oak Zeng
Date: Mon, 28 Jun 2021 17:53:38 -0500
Subject: drm/amdgpu: Set ttm caching flags during bo allocation

The ttm caching flags (ttm_cached, ttm_write_combined, etc.) are used to
determine a buffer object's mapping attributes in both the CPU page table
and the GPU page table (when that buffer is also accessed by the GPU).

Currently the ttm caching flags are set in amdgpu_ttm_io_mem_reserve,
which is called during the DRM_AMDGPU_GEM_MMAP ioctl. This is a problem
because the GPU mapping of the buffer object (ioctl DRM_AMDGPU_GEM_VA)
can happen before mmap time, so the GPU page table update code cannot
pick up the right ttm caching flags to decide the right GPU page table
attributes.

This patch moves the setting of the ttm caching flags to
amdgpu_vram_mgr_new. This function is called during the first step of
buffer object creation (e.g. DRM_AMDGPU_GEM_CREATE), so both the later
CPU and GPU mapping calls will pick up this flag for CPU/GPU page table
setup.

v2: rebase (Alex)

Signed-off-by: Oak Zeng
Suggested-by: Christian Koenig
Reviewed-by: Christian Koenig
Reviewed-by: Feifei Xu
Tested-by: Po Huang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c      | 4 ----
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 5 +++++
 2 files changed, 5 insertions(+), 4 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6a214a4dfe04..a2d1ab192457 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -590,10 +590,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
 		mem->bus.offset += adev->gmc.aper_base;
 		mem->bus.is_iomem = true;
 
-		if (adev->gmc.xgmi.connected_to_cpu)
-			mem->bus.caching = ttm_cached;
-		else
-			mem->bus.caching = ttm_write_combined;
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 436ec246a7da..2fd77c36a1ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -463,6 +463,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	if (i == 1)
 		node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
 
+	if (adev->gmc.xgmi.connected_to_cpu)
+		node->base.bus.caching = ttm_cached;
+	else
+		node->base.bus.caching = ttm_write_combined;
+
 	atomic64_add(vis_usage, &mgr->vis_usage);
 	*res = &node->base;
 	return 0;
--
cgit v1.2.3-70-g09d2
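
For illustration, below is a minimal, self-contained C sketch of the idea
behind this patch: pick the caching attribute once at allocation time so that
every later consumer (GPU mapping or CPU mmap) reads the same value, no matter
which runs first. The demo_* names are invented; this is not amdgpu code.

/*
 * Standalone sketch of "decide caching once, at allocation"; not amdgpu code.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_caching { DEMO_CACHED, DEMO_WRITE_COMBINED };

struct demo_device {
	bool vram_connected_to_cpu;	/* stands in for adev->gmc.xgmi.connected_to_cpu */
};

struct demo_bo {
	enum demo_caching caching;
};

/* Allocation time (GEM_CREATE): the caching attribute is fixed here ... */
static void demo_bo_create(const struct demo_device *dev, struct demo_bo *bo)
{
	bo->caching = dev->vram_connected_to_cpu ? DEMO_CACHED : DEMO_WRITE_COMBINED;
}

/*
 * ... so both later mapping paths (GEM_VA and mmap) read the same value,
 * regardless of which one happens first.
 */
static void demo_gpu_map(const struct demo_bo *bo)
{
	printf("GPU PTEs: %s\n",
	       bo->caching == DEMO_CACHED ? "cached" : "write-combined");
}

static void demo_cpu_mmap(const struct demo_bo *bo)
{
	printf("CPU PTEs: %s\n",
	       bo->caching == DEMO_CACHED ? "cached" : "write-combined");
}

int main(void)
{
	struct demo_device dev = { .vram_connected_to_cpu = false };
	struct demo_bo bo;

	demo_bo_create(&dev, &bo);
	demo_gpu_map(&bo);	/* may run before the CPU mmap ...            */
	demo_cpu_mmap(&bo);	/* ... yet both see the same caching choice   */
	return 0;
}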
From 8c21fc49a8e637bee5c868dafbd7e3c885a926bd Mon Sep 17 00:00:00 2001
From: Alex Sierra
Date: Thu, 6 May 2021 12:23:07 -0500
Subject: drm/amdkfd: add owner ref param to get hmm pages

The parameter is used as the dev_private_owner to decide whether device
pages in the range need to be migrated back to system memory, based on
whether or not they are in the same memory domain. In this case, the
reference can come from the same memory domain when the devices are
connected to the same hive.

Signed-off-by: Alex Sierra
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c  | 3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h  | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c    | 4 ++--
 4 files changed, 6 insertions(+), 5 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index d6c54c7f7679..4b153daf283d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -160,7 +160,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
 			       struct mm_struct *mm, struct page **pages,
 			       uint64_t start, uint64_t npages,
 			       struct hmm_range **phmm_range, bool readonly,
-			       bool mmap_locked)
+			       bool mmap_locked, void *owner)
 {
 	struct hmm_range *hmm_range;
 	unsigned long timeout;
@@ -185,6 +185,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
 	hmm_range->hmm_pfns = pfns;
 	hmm_range->start = start;
 	hmm_range->end = start + npages * PAGE_SIZE;
+	hmm_range->dev_private_owner = owner;
 
 	/* Assuming 512MB takes maxmium 1 second to fault page address */
 	timeout = max(npages >> 17, 1ULL) * HMM_RANGE_DEFAULT_TIMEOUT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
index 7f7d37a457c3..14a3c1864085 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -34,7 +34,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
 			       struct mm_struct *mm, struct page **pages,
 			       uint64_t start, uint64_t npages,
 			       struct hmm_range **phmm_range, bool readonly,
-			       bool mmap_locked);
+			       bool mmap_locked, void *owner);
 int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
 
 #if defined(CONFIG_HMM_MIRROR)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a2d1ab192457..2e9ad6e0dfbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -692,7 +692,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	readonly = amdgpu_ttm_tt_is_readonly(ttm);
 	r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
 				       ttm->num_pages, &gtt->range, readonly,
-				       false);
+				       false, NULL);
 
 out_putmm:
 	mmput(mm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 34abf6460585..e64427c31373 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1416,7 +1416,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 	r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
 				       prange->start << PAGE_SHIFT,
 				       prange->npages, &hmm_range,
-				       false, true);
+				       false, true, NULL);
 	if (r) {
 		pr_debug("failed %d to get svm range pages\n", r);
 		goto unreserve_out;
@@ -2728,7 +2728,7 @@ void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm)
 	r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
 				       prange->start << PAGE_SHIFT,
 				       prange->npages, &hmm_range,
-				       false, true);
+				       false, true, NULL);
 	if (!r) {
 		amdgpu_hmm_range_get_pages_done(hmm_range);
 		prange->validated_once = true;
--
cgit v1.2.3-70-g09d2
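
For illustration, below is a rough, self-contained C paraphrase of what
hmm_range_fault() does with dev_private_owner, which is what passing the new
owner argument buys the caller: a device-private page is only handed back
as-is when its owner matches, otherwise it has to be migrated back to system
memory first. The demo_* types are invented and the real logic lives in
mm/hmm.c; treat this as a sketch of the semantics, not the kernel
implementation.

/*
 * Standalone paraphrase of the dev_private_owner decision; not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_page {
	bool is_device_private;
	void *pgmap_owner;	/* which driver/domain owns this device memory */
};

/*
 * Roughly: a device-private page is only kept as-is when its owner matches
 * the caller's dev_private_owner; otherwise the walker would fault it,
 * migrating the data back to system memory.
 */
static bool demo_keep_device_private(const struct demo_page *page,
				     void *dev_private_owner)
{
	return page->is_device_private &&
	       page->pgmap_owner == dev_private_owner;
}

int main(void)
{
	int hive_a, hive_b;	/* stand-ins for per-domain owner cookies */
	struct demo_page page = {
		.is_device_private = true,
		.pgmap_owner = &hive_a,
	};

	/* Same domain (e.g. same hive): the VRAM page is usable directly. */
	printf("same owner:  %s\n",
	       demo_keep_device_private(&page, &hive_a) ?
	       "keep in VRAM" : "migrate to sysmem");

	/*
	 * Different domain (or owner == NULL, as in the amdgpu_ttm caller
	 * above): the page would be migrated back to system memory first.
	 */
	printf("other owner: %s\n",
	       demo_keep_device_private(&page, &hive_b) ?
	       "keep in VRAM" : "migrate to sysmem");
	return 0;
}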