Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2007
1 file changed, 894 insertions, 1113 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 2616e2eafdeb..c875f1cdd2af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -32,7 +32,6 @@ #include <linux/dma-mapping.h> #include <linux/iommu.h> -#include <linux/hmm.h> #include <linux/pagemap.h> #include <linux/sched/task.h> #include <linux/sched/mm.h> @@ -41,14 +40,14 @@ #include <linux/swap.h> #include <linux/swiotlb.h> #include <linux/dma-buf.h> +#include <linux/sizes.h> +#include <linux/module.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_page_alloc.h> +#include <drm/ttm/ttm_range_manager.h> -#include <drm/drm_debugfs.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -57,79 +56,26 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_sdma.h" #include "amdgpu_ras.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_res_cursor.h" #include "bif/bif_4_1_d.h" -static int amdgpu_map_buffer(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem, unsigned num_pages, - uint64_t offset, unsigned window, - struct amdgpu_ring *ring, - uint64_t *addr); +MODULE_IMPORT_NS(DMA_BUF); -static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); -static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); +#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128 -static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) -{ - return 0; -} +static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_resource *bo_mem); +static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, + struct ttm_tt *ttm); -/** - * amdgpu_init_mem_type - Initialize a memory manager for a specific type of - * memory request. - * - * @bdev: The TTM BO device object (contains a reference to amdgpu_device) - * @type: The type of memory requested - * @man: The memory type manager for each domain - * - * This is called by ttm_bo_init_mm() when a buffer object is being - * initialized. 
- */ -static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man) +static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev, + unsigned int type, + uint64_t size_in_page) { - struct amdgpu_device *adev; - - adev = amdgpu_ttm_adev(bdev); - - switch (type) { - case TTM_PL_SYSTEM: - /* System memory */ - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; - break; - case TTM_PL_TT: - /* GTT memory */ - man->func = &amdgpu_gtt_mgr_func; - man->gpu_offset = adev->gmc.gart_start; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; - break; - case TTM_PL_VRAM: - /* "On-card" video ram */ - man->func = &amdgpu_vram_mgr_func; - man->gpu_offset = adev->gmc.vram_start; - man->flags = TTM_MEMTYPE_FLAG_FIXED | - TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; - break; - case AMDGPU_PL_GDS: - case AMDGPU_PL_GWS: - case AMDGPU_PL_OA: - /* On-chip GDS memory*/ - man->func = &ttm_bo_manager_func; - man->gpu_offset = 0; - man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA; - man->available_caching = TTM_PL_FLAG_UNCACHED; - man->default_caching = TTM_PL_FLAG_UNCACHED; - break; - default: - DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); - return -EINVAL; - } - return 0; + return ttm_range_man_init(&adev->mman.bdev, type, + false, size_in_page); } /** @@ -148,7 +94,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM + .mem_type = TTM_PL_SYSTEM, + .flags = 0 }; /* Don't handle scatter gather BOs */ @@ -168,7 +115,22 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, } abo = ttm_to_amdgpu_bo(bo); - switch (bo->mem.mem_type) { + if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) { + struct dma_fence *fence; + struct dma_resv *resv = &bo->base._resv; + + rcu_read_lock(); + fence = rcu_dereference(resv->fence_excl); + if (fence && !fence->ops->signaled) + dma_fence_enable_sw_signaling(fence); + + placement->num_placement = 0; + placement->num_busy_placement = 0; + rcu_read_unlock(); + return; + } + + switch (bo->resource->mem_type) { case AMDGPU_PL_GDS: case AMDGPU_PL_GWS: case AMDGPU_PL_OA: @@ -190,17 +152,20 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, * BOs to be evicted from VRAM */ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT); + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU); abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; abo->placements[0].lpfn = 0; abo->placement.busy_placement = &abo->placements[1]; abo->placement.num_busy_placement = 1; } else { /* Move to GTT memory */ - amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); + amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU); } break; case TTM_PL_TT: + case AMDGPU_PL_PREEMPT: default: amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); break; @@ -209,206 +174,191 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, } /** - * amdgpu_verify_access - Verify access for a mmap call + * amdgpu_ttm_map_buffer - Map memory into the GART windows + * @bo: buffer object to map + * @mem: memory object to map + * @mm_cur: range to map + * 
@num_pages: number of pages to map + * @window: which GART window to use + * @ring: DMA ring to use for the copy + * @tmz: if we should setup a TMZ enabled mapping + * @addr: resulting address inside the MC address space * - * @bo: The buffer object to map - * @filp: The file pointer from the process performing the mmap - * - * This is called by ttm_bo_mmap() to verify whether a process - * has the right to mmap a BO to their process space. + * Setup one of the GART windows to access a specific piece of memory or return + * the physical address for local memory. */ -static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) +static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, + struct ttm_resource *mem, + struct amdgpu_res_cursor *mm_cur, + unsigned num_pages, unsigned window, + struct amdgpu_ring *ring, bool tmz, + uint64_t *addr) { - struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); + struct amdgpu_device *adev = ring->adev; + struct amdgpu_job *job; + unsigned num_dw, num_bytes; + struct dma_fence *fence; + uint64_t src_addr, dst_addr; + void *cpu_addr; + uint64_t flags; + unsigned int i; + int r; - /* - * Don't verify access for KFD BOs. They don't have a GEM - * object associated with them. - */ - if (abo->kfd_bo) + BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < + AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); + BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT); + + /* Map only what can't be accessed directly */ + if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) { + *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) + + mm_cur->start; return 0; + } - if (amdgpu_ttm_tt_get_usermm(bo->ttm)) - return -EPERM; - return drm_vma_node_verify_access(&abo->tbo.base.vma_node, - filp->private_data); -} + *addr = adev->gmc.gart_start; + *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * + AMDGPU_GPU_PAGE_SIZE; + *addr += mm_cur->start & ~PAGE_MASK; -/** - * amdgpu_move_null - Register memory for a buffer object - * - * @bo: The bo to assign the memory to - * @new_mem: The memory to be assigned. - * - * Assign the memory from new_mem to the memory of the buffer object bo. - */ -static void amdgpu_move_null(struct ttm_buffer_object *bo, - struct ttm_mem_reg *new_mem) -{ - struct ttm_mem_reg *old_mem = &bo->mem; + num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); + num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE; - BUG_ON(old_mem->mm_node != NULL); - *old_mem = *new_mem; - new_mem->mm_node = NULL; -} + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, + AMDGPU_IB_POOL_DELAYED, &job); + if (r) + return r; -/** - * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer. - * - * @bo: The bo to assign the memory to. - * @mm_node: Memory manager node for drm allocator. - * @mem: The region where the bo resides. - * - */ -static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, - struct drm_mm_node *mm_node, - struct ttm_mem_reg *mem) -{ - uint64_t addr = 0; + src_addr = num_dw * 4; + src_addr += job->ibs[0].gpu_addr; - if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) { - addr = mm_node->start << PAGE_SHIFT; - addr += bo->bdev->man[mem->mem_type].gpu_offset; - } - return addr; -} + dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo); + dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8; + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, + dst_addr, num_bytes, false); -/** - * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to - * @offset. 
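[Note on the hunk above] When the memory cannot be reached directly, the address amdgpu_ttm_map_buffer() hands back is plain offset arithmetic into the GART aperture. A standalone sketch of just that computation, with the window geometry constants assumed rather than taken from the amdgpu headers:

#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration; the real AMDGPU_GTT_MAX_TRANSFER_SIZE
 * and AMDGPU_GPU_PAGE_SIZE live in the amdgpu headers. */
#define GTT_MAX_TRANSFER_SIZE	512ULL		/* pages per GART window */
#define GPU_PAGE_SIZE		4096ULL

/* Mirrors the computation in amdgpu_ttm_map_buffer(): each copy window is
 * a fixed-size slot in the GART aperture, and the sub-page offset of the
 * range is preserved so unaligned copies keep working. */
static uint64_t window_addr(uint64_t gart_start, unsigned int window,
			    uint64_t range_start)
{
	uint64_t addr = gart_start;

	addr += (uint64_t)window * GTT_MAX_TRANSFER_SIZE * GPU_PAGE_SIZE;
	addr += range_start & (GPU_PAGE_SIZE - 1);	/* offset in page */
	return addr;
}

int main(void)
{
	/* window 0 stages the source, window 1 the destination */
	printf("src 0x%llx\n",
	       (unsigned long long)window_addr(0x100000000ULL, 0, 0x1234));
	printf("dst 0x%llx\n",
	       (unsigned long long)window_addr(0x100000000ULL, 1, 0x5678));
	return 0;
}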
It also modifies the offset to be within the drm_mm_node returned - * - * @mem: The region where the bo resides. - * @offset: The offset that drm_mm_node is used for finding. - * - */ -static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem, - unsigned long *offset) -{ - struct drm_mm_node *mm_node = mem->mm_node; + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + WARN_ON(job->ibs[0].length_dw > num_dw); + + flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem); + if (tmz) + flags |= AMDGPU_PTE_TMZ; + + cpu_addr = &job->ibs[0].ptr[num_dw]; + + if (mem->mem_type == TTM_PL_TT) { + dma_addr_t *dma_addr; + + dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT]; + r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, + cpu_addr); + if (r) + goto error_free; + } else { + dma_addr_t dma_address; - while (*offset >= (mm_node->size << PAGE_SHIFT)) { - *offset -= (mm_node->size << PAGE_SHIFT); - ++mm_node; + dma_address = mm_cur->start; + dma_address += adev->vm_manager.vram_base_offset; + + for (i = 0; i < num_pages; ++i) { + r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, + &dma_address, flags, cpu_addr); + if (r) + goto error_free; + + dma_address += PAGE_SIZE; + } } - return mm_node; + + r = amdgpu_job_submit(job, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, &fence); + if (r) + goto error_free; + + dma_fence_put(fence); + + return r; + +error_free: + amdgpu_job_free(job); + return r; } /** - * amdgpu_copy_ttm_mem_to_mem - Helper function for copy + * amdgpu_ttm_copy_mem_to_mem - Helper function for copy + * @adev: amdgpu device + * @src: buffer/address where to read from + * @dst: buffer/address where to write to + * @size: number of bytes to copy + * @tmz: if a secure copy should be used + * @resv: resv object to sync to + * @f: Returns the last fence if multiple jobs are submitted. * * The function copies @size bytes from {src->mem + src->offset} to * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a * move and different for a BO to BO copy. * - * @f: Returns the last fence if multiple jobs are submitted. 
*/ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, - struct amdgpu_copy_mem *src, - struct amdgpu_copy_mem *dst, - uint64_t size, + const struct amdgpu_copy_mem *src, + const struct amdgpu_copy_mem *dst, + uint64_t size, bool tmz, struct dma_resv *resv, struct dma_fence **f) { + const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE * + AMDGPU_GPU_PAGE_SIZE); + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - struct drm_mm_node *src_mm, *dst_mm; - uint64_t src_node_start, dst_node_start, src_node_size, - dst_node_size, src_page_offset, dst_page_offset; + struct amdgpu_res_cursor src_mm, dst_mm; struct dma_fence *fence = NULL; int r = 0; - const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE * - AMDGPU_GPU_PAGE_SIZE); if (!adev->mman.buffer_funcs_enabled) { DRM_ERROR("Trying to move memory with ring turned off.\n"); return -EINVAL; } - src_mm = amdgpu_find_mm_node(src->mem, &src->offset); - src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) + - src->offset; - src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset; - src_page_offset = src_node_start & (PAGE_SIZE - 1); - - dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset); - dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) + - dst->offset; - dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset; - dst_page_offset = dst_node_start & (PAGE_SIZE - 1); + amdgpu_res_first(src->mem, src->offset, size, &src_mm); + amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm); mutex_lock(&adev->mman.gtt_window_lock); - - while (size) { - unsigned long cur_size; - uint64_t from = src_node_start, to = dst_node_start; + while (src_mm.remaining) { + uint32_t src_page_offset = src_mm.start & ~PAGE_MASK; + uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK; struct dma_fence *next; + uint32_t cur_size; + uint64_t from, to; /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst * begins at an offset, then adjust the size accordingly */ - cur_size = min3(min(src_node_size, dst_node_size), size, - GTT_MAX_BYTES); - if (cur_size + src_page_offset > GTT_MAX_BYTES || - cur_size + dst_page_offset > GTT_MAX_BYTES) - cur_size -= max(src_page_offset, dst_page_offset); - - /* Map only what needs to be accessed. Map src to window 0 and - * dst to window 1 - */ - if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) { - r = amdgpu_map_buffer(src->bo, src->mem, - PFN_UP(cur_size + src_page_offset), - src_node_start, 0, ring, - &from); - if (r) - goto error; - /* Adjust the offset because amdgpu_map_buffer returns - * start of mapped page - */ - from += src_page_offset; - } + cur_size = max(src_page_offset, dst_page_offset); + cur_size = min(min3(src_mm.size, dst_mm.size, size), + (uint64_t)(GTT_MAX_BYTES - cur_size)); + + /* Map src to window 0 and dst to window 1. 
*/ + r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm, + PFN_UP(cur_size + src_page_offset), + 0, ring, tmz, &from); + if (r) + goto error; - if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) { - r = amdgpu_map_buffer(dst->bo, dst->mem, - PFN_UP(cur_size + dst_page_offset), - dst_node_start, 1, ring, - &to); - if (r) - goto error; - to += dst_page_offset; - } + r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm, + PFN_UP(cur_size + dst_page_offset), + 1, ring, tmz, &to); + if (r) + goto error; r = amdgpu_copy_buffer(ring, from, to, cur_size, - resv, &next, false, true); + resv, &next, false, true, tmz); if (r) goto error; dma_fence_put(fence); fence = next; - size -= cur_size; - if (!size) - break; - - src_node_size -= cur_size; - if (!src_node_size) { - src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm, - src->mem); - src_node_size = (src_mm->size << PAGE_SHIFT); - src_page_offset = 0; - } else { - src_node_start += cur_size; - src_page_offset = src_node_start & (PAGE_SIZE - 1); - } - dst_node_size -= cur_size; - if (!dst_node_size) { - dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm, - dst->mem); - dst_node_size = (dst_mm->size << PAGE_SHIFT); - dst_page_offset = 0; - } else { - dst_node_start += cur_size; - dst_page_offset = dst_node_start & (PAGE_SIZE - 1); - } + amdgpu_res_next(&src_mm, cur_size); + amdgpu_res_next(&dst_mm, cur_size); } error: mutex_unlock(&adev->mman.gtt_window_lock); @@ -418,18 +368,19 @@ error: return r; } -/** +/* * amdgpu_move_blit - Copy an entire buffer to another buffer * * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to * help move buffers to and from VRAM. */ static int amdgpu_move_blit(struct ttm_buffer_object *bo, - bool evict, bool no_wait_gpu, - struct ttm_mem_reg *new_mem, - struct ttm_mem_reg *old_mem) + bool evict, + struct ttm_resource *new_mem, + struct ttm_resource *old_mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); + struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_copy_mem src, dst; struct dma_fence *fence = NULL; int r; @@ -443,14 +394,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, new_mem->num_pages << PAGE_SHIFT, + amdgpu_bo_encrypted(abo), bo->base.resv, &fence); if (r) goto error; /* clear the space being freed */ if (old_mem->mem_type == TTM_PL_VRAM && - (ttm_to_amdgpu_bo(bo)->flags & - AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) { + (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) { struct dma_fence *wipe_fence = NULL; r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON, @@ -465,9 +416,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, /* Always block for VM page tables before committing the new location */ if (bo->type == ttm_bo_type_kernel) - r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem); + r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem); else - r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); + r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem); dma_fence_put(fence); return r; @@ -478,118 +429,16 @@ error: return r; } -/** - * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer - * - * Called by amdgpu_bo_move(). 
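[Note on the copy loop above] Each iteration's chunk is sized so it still fits one GART window even when source and destination start at different offsets inside a page. A minimal standalone model of that clamp, with the window size assumed:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define GTT_MAX_BYTES	(512ULL * PAGE_SIZE)	/* assumed window size */

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

/* A chunk may not outrun what is left in the source extent, the
 * destination extent or the request as a whole, and after adding the
 * worse of the two in-page offsets it must still fit in one window. */
static uint64_t chunk_size(uint64_t src_pos, uint64_t src_left,
			   uint64_t dst_pos, uint64_t dst_left,
			   uint64_t total_left)
{
	uint64_t src_off = src_pos & (PAGE_SIZE - 1);
	uint64_t dst_off = dst_pos & (PAGE_SIZE - 1);
	uint64_t worst = src_off > dst_off ? src_off : dst_off;

	return min_u64(min_u64(src_left, dst_left),
		       min_u64(total_left, GTT_MAX_BYTES - worst));
}

int main(void)
{
	printf("%llu\n", (unsigned long long)
	       chunk_size(0x123, 1ULL << 20, 0x456, 1ULL << 20, 1ULL << 20));
	return 0;
}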
- */ -static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) -{ - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg tmp_mem; - struct ttm_place placements; - struct ttm_placement placement; - int r; - - /* create space/pages for new_mem in GTT space */ - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = 0; - placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); - if (unlikely(r)) { - pr_err("Failed to find GTT space for blit from VRAM\n"); - return r; - } - - /* set caching flags */ - r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); - if (unlikely(r)) { - goto out_cleanup; - } - - /* Bind the memory to the GTT space */ - r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx); - if (unlikely(r)) { - goto out_cleanup; - } - - /* blit VRAM to GTT */ - r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } - - /* move BO (in tmp_mem) to new_mem */ - r = ttm_bo_move_ttm(bo, ctx, new_mem); -out_cleanup: - ttm_bo_mem_put(bo, &tmp_mem); - return r; -} - -/** - * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM - * - * Called by amdgpu_bo_move(). - */ -static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) -{ - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg tmp_mem; - struct ttm_placement placement; - struct ttm_place placements; - int r; - - /* make space in GTT for old_mem buffer */ - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = 0; - placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); - if (unlikely(r)) { - pr_err("Failed to find GTT space for blit to VRAM\n"); - return r; - } - - /* move/bind old memory to GTT space */ - r = ttm_bo_move_ttm(bo, ctx, &tmp_mem); - if (unlikely(r)) { - goto out_cleanup; - } - - /* copy to VRAM */ - r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } -out_cleanup: - ttm_bo_mem_put(bo, &tmp_mem); - return r; -} - -/** +/* * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy * * Called by amdgpu_bo_move() */ static bool amdgpu_mem_visible(struct amdgpu_device *adev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { - struct drm_mm_node *nodes = mem->mm_node; + uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT; + struct amdgpu_res_cursor cursor; if (mem->mem_type == TTM_PL_SYSTEM || mem->mem_type == TTM_PL_TT) @@ -597,47 +446,67 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, if (mem->mem_type != TTM_PL_VRAM) return false; - /* ttm_mem_reg_ioremap only supports contiguous memory */ - if (nodes->size != mem->num_pages) + amdgpu_res_first(mem, 0, mem_size, &cursor); + + /* ttm_resource_ioremap only supports contiguous memory */ + if (cursor.size != mem_size) return false; - return ((nodes->start + nodes->size) << PAGE_SHIFT) - <= adev->gmc.visible_vram_size; + return cursor.start + cursor.size <= adev->gmc.visible_vram_size; } 
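[Note] Both amdgpu_ttm_copy_mem_to_mem() and amdgpu_mem_visible() now lean on the new amdgpu_res_cursor helpers from amdgpu_res_cursor.h. A toy stand-in for the idea (hypothetical types, array-backed extents instead of TTM resource nodes) shows the first/next iteration pattern:

#include <stdint.h>
#include <stdio.h>

struct extent { uint64_t start, size; };

struct cursor {
	const struct extent *ext;
	uint64_t start;		/* current position inside current extent */
	uint64_t size;		/* bytes left in the current extent */
	uint64_t remaining;	/* bytes left in the whole walk */
};

static void res_first(const struct extent *ext, uint64_t offset,
		      uint64_t size, struct cursor *cur)
{
	while (offset >= ext->size) {	/* skip extents before the offset */
		offset -= ext->size;
		++ext;
	}
	cur->ext = ext;
	cur->start = ext->start + offset;
	cur->size = ext->size - offset;
	if (cur->size > size)
		cur->size = size;
	cur->remaining = size;
}

static void res_next(struct cursor *cur, uint64_t bytes)
{
	cur->remaining -= bytes;
	if (!cur->remaining)
		return;
	cur->size -= bytes;
	if (cur->size) {
		cur->start += bytes;
		return;
	}
	++cur->ext;			/* advance to the next extent */
	cur->start = cur->ext->start;
	cur->size = cur->ext->size < cur->remaining ?
		    cur->ext->size : cur->remaining;
}

int main(void)
{
	const struct extent vram[] = { { 0x0000, 0x2000 }, { 0x8000, 0x3000 } };
	struct cursor cur;

	for (res_first(vram, 0x1000, 0x3000, &cur); cur.remaining;
	     res_next(&cur, cur.size))
		printf("chunk at 0x%llx, 0x%llx bytes\n",
		       (unsigned long long)cur.start,
		       (unsigned long long)cur.size);
	return 0;
}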
-/** +/* * amdgpu_bo_move - Move a buffer object to a new memory location * * Called by ttm_bo_handle_move_mem() */ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem, + struct ttm_place *hop) { struct amdgpu_device *adev; struct amdgpu_bo *abo; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = bo->resource; int r; + if (new_mem->mem_type == TTM_PL_TT || + new_mem->mem_type == AMDGPU_PL_PREEMPT) { + r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); + if (r) + return r; + } + /* Can't move a pinned BO */ abo = ttm_to_amdgpu_bo(bo); - if (WARN_ON_ONCE(abo->pin_count > 0)) + if (WARN_ON_ONCE(abo->tbo.pin_count > 0)) return -EINVAL; adev = amdgpu_ttm_adev(bo->bdev); if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { - amdgpu_move_null(bo, new_mem); - return 0; + ttm_bo_move_null(bo, new_mem); + goto out; } - if ((old_mem->mem_type == TTM_PL_TT && - new_mem->mem_type == TTM_PL_SYSTEM) || - (old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_TT)) { - /* bind is enough */ - amdgpu_move_null(bo, new_mem); - return 0; + if (old_mem->mem_type == TTM_PL_SYSTEM && + (new_mem->mem_type == TTM_PL_TT || + new_mem->mem_type == AMDGPU_PL_PREEMPT)) { + ttm_bo_move_null(bo, new_mem); + goto out; } + if ((old_mem->mem_type == TTM_PL_TT || + old_mem->mem_type == AMDGPU_PL_PREEMPT) && + new_mem->mem_type == TTM_PL_SYSTEM) { + r = ttm_bo_wait_ctx(bo, ctx); + if (r) + return r; + + amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->resource); + ttm_bo_assign_mem(bo, new_mem); + goto out; + } + if (old_mem->mem_type == AMDGPU_PL_GDS || old_mem->mem_type == AMDGPU_PL_GWS || old_mem->mem_type == AMDGPU_PL_OA || @@ -645,28 +514,37 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, new_mem->mem_type == AMDGPU_PL_GWS || new_mem->mem_type == AMDGPU_PL_OA) { /* Nothing to save here */ - amdgpu_move_null(bo, new_mem); - return 0; + ttm_bo_move_null(bo, new_mem); + goto out; } - if (!adev->mman.buffer_funcs_enabled) { - r = -ENODEV; - goto memcpy; + if (bo->type == ttm_bo_type_device && + new_mem->mem_type == TTM_PL_VRAM && + old_mem->mem_type != TTM_PL_VRAM) { + /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU + * accesses the BO after it's moved. 
+ */ + abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; } - if (old_mem->mem_type == TTM_PL_VRAM && - new_mem->mem_type == TTM_PL_SYSTEM) { - r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem); - } else if (old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_VRAM) { - r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem); + if (adev->mman.buffer_funcs_enabled) { + if (((old_mem->mem_type == TTM_PL_SYSTEM && + new_mem->mem_type == TTM_PL_VRAM) || + (old_mem->mem_type == TTM_PL_VRAM && + new_mem->mem_type == TTM_PL_SYSTEM))) { + hop->fpfn = 0; + hop->lpfn = 0; + hop->mem_type = TTM_PL_TT; + hop->flags = TTM_PL_FLAG_TEMPORARY; + return -EMULTIHOP; + } + + r = amdgpu_move_blit(bo, evict, new_mem, old_mem); } else { - r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, - new_mem, old_mem); + r = -ENODEV; } if (r) { -memcpy: /* Check that all memory is CPU accessible */ if (!amdgpu_mem_visible(adev, old_mem) || !amdgpu_mem_visible(adev, new_mem)) { @@ -679,59 +557,43 @@ memcpy: return r; } - if (bo->type == ttm_bo_type_device && - new_mem->mem_type == TTM_PL_VRAM && - old_mem->mem_type != TTM_PL_VRAM) { - /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU - * accesses the BO after it's moved. - */ - abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - } - +out: /* update statistics */ - atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); + atomic64_add(bo->base.size, &adev->num_bytes_moved); + amdgpu_bo_move_notify(bo, evict, new_mem); return 0; } -/** +/* * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault * * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault() */ -static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, + struct ttm_resource *mem) { - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); - struct drm_mm_node *mm_node = mem->mm_node; - - mem->bus.addr = NULL; - mem->bus.offset = 0; - mem->bus.size = mem->num_pages << PAGE_SHIFT; - mem->bus.base = 0; - mem->bus.is_iomem = false; - if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) - return -EINVAL; + size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; + switch (mem->mem_type) { case TTM_PL_SYSTEM: /* system memory */ return 0; case TTM_PL_TT: + case AMDGPU_PL_PREEMPT: break; case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ - if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size) + if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size) return -EINVAL; - /* Only physically contiguous buffers apply. In a contiguous - * buffer, size of the first mm_node would match the number of - * pages in ttm_mem_reg. 
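[Note on -EMULTIHOP above] This is the new contract with the TTM core: instead of the driver bouncing VRAM<->SYSTEM moves through GTT itself (the deleted amdgpu_move_vram_ram()/amdgpu_move_ram_vram()), it fills in a hop placement and lets TTM retry the move. A toy model of the handshake, collapsing the core's retry loop into recursion:

#include <stdio.h>

enum domain { SYSTEM, GTT, VRAM };
#define EMULTIHOP 72	/* local stand-in for the kernel's errno */

/* Driver side: no direct SYSTEM<->VRAM path, so ask for a GTT hop. */
static int driver_move(enum domain from, enum domain to, enum domain *hop)
{
	if ((from == SYSTEM && to == VRAM) || (from == VRAM && to == SYSTEM)) {
		*hop = GTT;
		return -EMULTIHOP;
	}
	printf("blit %d -> %d\n", from, to);
	return 0;
}

/* Core side: bounce through the hop placement, then finish the move. */
static int handle_move(enum domain from, enum domain to)
{
	enum domain hop;
	int r = driver_move(from, to, &hop);

	if (r == -EMULTIHOP) {
		r = handle_move(from, hop);
		if (r)
			return r;
		r = handle_move(hop, to);
	}
	return r;
}

int main(void)
{
	return handle_move(SYSTEM, VRAM);
}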
- */ + if (adev->mman.aper_base_kaddr && - (mm_node->size == mem->num_pages)) + mem->placement & TTM_PL_FLAG_CONTIGUOUS) mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr + mem->bus.offset; - mem->bus.base = adev->gmc.aper_base; + mem->bus.offset += adev->gmc.aper_base; mem->bus.is_iomem = true; break; default: @@ -740,51 +602,56 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ return 0; } -static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, + unsigned long page_offset) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); + struct amdgpu_res_cursor cursor; + + amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0, + &cursor); + return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT; } -static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, - unsigned long page_offset) +/** + * amdgpu_ttm_domain_start - Returns GPU start address + * @adev: amdgpu device object + * @type: type of the memory + * + * Returns: + * GPU start address of a memory domain + */ + +uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type) { - struct drm_mm_node *mm; - unsigned long offset = (page_offset << PAGE_SHIFT); + switch (type) { + case TTM_PL_TT: + return adev->gmc.gart_start; + case TTM_PL_VRAM: + return adev->gmc.vram_start; + } - mm = amdgpu_find_mm_node(&bo->mem, &offset); - return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + - (offset >> PAGE_SHIFT); + return 0; } /* * TTM backend functions. */ struct amdgpu_ttm_tt { - struct ttm_dma_tt ttm; + struct ttm_tt ttm; struct drm_gem_object *gobj; u64 offset; uint64_t userptr; struct task_struct *usertask; uint32_t userflags; + bool bound; #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) struct hmm_range *range; #endif }; #ifdef CONFIG_DRM_AMDGPU_USERPTR -/* flags used by HMM internal, not related to CPU/GPU PTE flags */ -static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = { - (1 << 0), /* HMM_PFN_VALID */ - (1 << 1), /* HMM_PFN_WRITE */ - 0 /* HMM_PFN_DEVICE_PRIVATE */ -}; - -static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = { - 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */ - 0, /* HMM_PFN_NONE */ - 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */ -}; - -/** +/* * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user * memory and start HMM tracking CPU page table update * @@ -797,10 +664,8 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) struct amdgpu_ttm_tt *gtt = (void *)ttm; unsigned long start = gtt->userptr; struct vm_area_struct *vma; - struct hmm_range *range; - unsigned long timeout; struct mm_struct *mm; - unsigned long i; + bool readonly; int r = 0; mm = bo->notifier.mm; @@ -816,31 +681,9 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) if (!mmget_not_zero(mm)) /* Happens during process shutdown */ return -ESRCH; - range = kzalloc(sizeof(*range), GFP_KERNEL); - if (unlikely(!range)) { - r = -ENOMEM; - goto out; - } - range->notifier = &bo->notifier; - range->flags = hmm_range_flags; - range->values = hmm_range_values; - range->pfn_shift = PAGE_SHIFT; - range->start = bo->notifier.interval_tree.start; - range->end = bo->notifier.interval_tree.last + 1; - range->default_flags = hmm_range_flags[HMM_PFN_VALID]; - if (!amdgpu_ttm_tt_is_readonly(ttm)) - range->default_flags |= range->flags[HMM_PFN_WRITE]; - - range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns), - 
GFP_KERNEL); - if (unlikely(!range->pfns)) { - r = -ENOMEM; - goto out_free_ranges; - } - - down_read(&mm->mmap_sem); - vma = find_vma(mm, start); - if (unlikely(!vma || start < vma->vm_start)) { + mmap_read_lock(mm); + vma = vma_lookup(mm, start); + if (unlikely(!vma)) { r = -EFAULT; goto out_unlock; } @@ -849,54 +692,22 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) r = -EPERM; goto out_unlock; } - up_read(&mm->mmap_sem); - timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); -retry: - range->notifier_seq = mmu_interval_read_begin(&bo->notifier); - - down_read(&mm->mmap_sem); - r = hmm_range_fault(range, 0); - up_read(&mm->mmap_sem); - if (unlikely(r <= 0)) { - /* - * FIXME: This timeout should encompass the retry from - * mmu_interval_read_retry() as well. - */ - if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout)) - goto retry; - goto out_free_pfns; - } - - for (i = 0; i < ttm->num_pages; i++) { - /* FIXME: The pages cannot be touched outside the notifier_lock */ - pages[i] = hmm_device_entry_to_page(range, range->pfns[i]); - if (unlikely(!pages[i])) { - pr_err("Page fault failed for pfn[%lu] = 0x%llx\n", - i, range->pfns[i]); - r = -ENOMEM; - - goto out_free_pfns; - } - } + readonly = amdgpu_ttm_tt_is_readonly(ttm); + r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start, + ttm->num_pages, >t->range, readonly, + true, NULL); +out_unlock: + mmap_read_unlock(mm); + if (r) + pr_debug("failed %d to get user pages 0x%lx\n", r, start); - gtt->range = range; mmput(mm); - return 0; - -out_unlock: - up_read(&mm->mmap_sem); -out_free_pfns: - kvfree(range->pfns); -out_free_ranges: - kfree(range); -out: - mmput(mm); return r; } -/** +/* * amdgpu_ttm_tt_userptr_range_done - stop HMM track the CPU page table change * Check if the pages backing this ttm range have been invalidated * @@ -910,10 +721,10 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) if (!gtt || !gtt->userptr) return false; - DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n", + DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n", gtt->userptr, ttm->num_pages); - WARN_ONCE(!gtt->range || !gtt->range->pfns, + WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns, "No user pages to check\n"); if (gtt->range) { @@ -921,10 +732,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) * FIXME: Must always hold notifier_lock for this, and must * not ignore the return code. */ - r = mmu_interval_read_retry(gtt->range->notifier, - gtt->range->notifier_seq); - kvfree(gtt->range->pfns); - kfree(gtt->range); + r = amdgpu_hmm_range_get_pages_done(gtt->range); gtt->range = NULL; } @@ -932,7 +740,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) } #endif -/** +/* * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary. * * Called by amdgpu_cs_list_validate(). This creates the page list @@ -947,65 +755,63 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) ttm->pages[i] = pages ? 
pages[i] : NULL; } -/** - * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages +/* + * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages * * Called by amdgpu_ttm_backend_bind() **/ -static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) +static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev, + struct ttm_tt *ttm) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; - unsigned nents; - int r; - int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); enum dma_data_direction direction = write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + int r; /* Allocate an SG array and squash pages into it */ r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, - ttm->num_pages << PAGE_SHIFT, + (u64)ttm->num_pages << PAGE_SHIFT, GFP_KERNEL); if (r) goto release_sg; /* Map SG to device */ - r = -ENOMEM; - nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); - if (nents != ttm->sg->nents) + r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); + if (r) goto release_sg; /* convert SG to linear array of pages and dma addresses */ - drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, - gtt->ttm.dma_address, ttm->num_pages); + drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, + ttm->num_pages); return 0; release_sg: kfree(ttm->sg); + ttm->sg = NULL; return r; } -/** +/* * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages */ -static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) +static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev, + struct ttm_tt *ttm) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; - int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); enum dma_data_direction direction = write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; /* double check that we don't free the table twice */ - if (!ttm->sg->sgl) + if (!ttm->sg || !ttm->sg->sgl) return; /* unmap the pages mapped to the device */ - dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); - + dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); sg_free_table(ttm->sg); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) @@ -1014,8 +820,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) for (i = 0; i < ttm->num_pages; i++) { if (ttm->pages[i] != - hmm_device_entry_to_page(gtt->range, - gtt->range->pfns[i])) + hmm_pfn_to_page(gtt->range->hmm_pfns[i])) break; } @@ -1024,7 +829,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) #endif } -int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, +static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, struct ttm_buffer_object *tbo, uint64_t flags) { @@ -1033,59 +838,87 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, struct amdgpu_ttm_tt *gtt = (void *)ttm; int r; - if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) { + if (amdgpu_bo_encrypted(abo)) + flags |= AMDGPU_PTE_TMZ; + + if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) { uint64_t page_idx = 1; r = amdgpu_gart_bind(adev, gtt->offset, page_idx, - ttm->pages, gtt->ttm.dma_address, flags); + gtt->ttm.dma_address, flags); if (r) goto gart_bind_fail; - /* Patch mtype of the second part BO */ + /* The memory type of the first page defaults to UC. Now + * modify the memory type to NC from the second page of + * the BO onward. 
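[Note on the userptr pin path above] It now builds the scatter-gather table with sg_alloc_table_from_pages() and maps it with dma_map_sgtable(). The point of the sg table is that physically contiguous pages collapse into single segments, so the DMA engine sees a short list instead of one entry per page. A standalone toy of that coalescing (not the kernel API):

#include <stdint.h>
#include <stdio.h>

struct sg_seg { uint64_t pfn; unsigned int npages; };

static unsigned int build_sg(const uint64_t *pfns, unsigned int n,
			     struct sg_seg *out)
{
	unsigned int nsegs = 0;

	for (unsigned int i = 0; i < n; i++) {
		if (nsegs &&
		    out[nsegs - 1].pfn + out[nsegs - 1].npages == pfns[i]) {
			out[nsegs - 1].npages++;	/* extend segment */
		} else {
			out[nsegs].pfn = pfns[i];	/* start new one */
			out[nsegs].npages = 1;
			nsegs++;
		}
	}
	return nsegs;
}

int main(void)
{
	uint64_t pfns[] = { 100, 101, 102, 200, 201, 500 };
	struct sg_seg segs[6];
	unsigned int n = build_sg(pfns, 6, segs);

	for (unsigned int i = 0; i < n; i++)	/* expect 3 segments */
		printf("seg: pfn %llu, %u pages\n",
		       (unsigned long long)segs[i].pfn, segs[i].npages);
	return 0;
}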
+ */ flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); r = amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT), ttm->num_pages - page_idx, - &ttm->pages[page_idx], &(gtt->ttm.dma_address[page_idx]), flags); } else { r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); + gtt->ttm.dma_address, flags); } gart_bind_fail: if (r) - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + DRM_ERROR("failed to bind %u pages at 0x%08llX\n", ttm->num_pages, gtt->offset); return r; } -/** +/* * amdgpu_ttm_backend_bind - Bind GTT memory * * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem(). * This handles binding GTT memory to the device address space. */ -static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, - struct ttm_mem_reg *bo_mem) +static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_resource *bo_mem) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void*)ttm; uint64_t flags; int r = 0; + if (!bo_mem) + return -EINVAL; + + if (gtt->bound) + return 0; + if (gtt->userptr) { - r = amdgpu_ttm_tt_pin_userptr(ttm); + r = amdgpu_ttm_tt_pin_userptr(bdev, ttm); if (r) { DRM_ERROR("failed to pin userptr\n"); return r; } + } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) { + if (!ttm->sg) { + struct dma_buf_attachment *attach; + struct sg_table *sgt; + + attach = gtt->gobj->import_attach; + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) + return PTR_ERR(sgt); + + ttm->sg = sgt; + } + + drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, + ttm->num_pages); } + if (!ttm->num_pages) { - WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", + WARN(1, "nothing to bind %u pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); } @@ -1094,7 +927,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, bo_mem->mem_type == AMDGPU_PL_OA) return -EINVAL; - if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) { + if (bo_mem->mem_type != TTM_PL_TT || + !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) { gtt->offset = AMDGPU_BO_INVALID_OFFSET; return 0; } @@ -1105,74 +939,76 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, /* bind pages into GART page tables */ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); + gtt->ttm.dma_address, flags); if (r) - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + DRM_ERROR("failed to bind %u pages at 0x%08llX\n", ttm->num_pages, gtt->offset); + gtt->bound = true; return r; } -/** - * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object +/* + * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either + * through AGP or GART aperture. + * + * If bo is accessible through AGP aperture, then use AGP aperture + * to access bo; otherwise allocate logical space in GART aperture + * and map bo to GART aperture. 
*/ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_ttm_tt *gtt = (void*)bo->ttm; - struct ttm_mem_reg tmp; + struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; struct ttm_placement placement; struct ttm_place placements; + struct ttm_resource *tmp; uint64_t addr, flags; int r; - if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET) + if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET) return 0; addr = amdgpu_gmc_agp_addr(bo); if (addr != AMDGPU_BO_INVALID_OFFSET) { - bo->mem.start = addr >> PAGE_SHIFT; - } else { + bo->resource->start = addr >> PAGE_SHIFT; + return 0; + } - /* allocate GART space */ - tmp = bo->mem; - tmp.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; - placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) | - TTM_PL_FLAG_TT; - - r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); - if (unlikely(r)) - return r; + /* allocate GART space */ + placement.num_placement = 1; + placement.placement = &placements; + placement.num_busy_placement = 1; + placement.busy_placement = &placements; + placements.fpfn = 0; + placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; + placements.mem_type = TTM_PL_TT; + placements.flags = bo->resource->placement; - /* compute PTE flags for this buffer object */ - flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); + r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); + if (unlikely(r)) + return r; - /* Bind pages */ - gtt->offset = (u64)tmp.start << PAGE_SHIFT; - r = amdgpu_ttm_gart_bind(adev, bo, flags); - if (unlikely(r)) { - ttm_bo_mem_put(bo, &tmp); - return r; - } + /* compute PTE flags for this buffer object */ + flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp); - ttm_bo_mem_put(bo, &bo->mem); - bo->mem = tmp; + /* Bind pages */ + gtt->offset = (u64)tmp->start << PAGE_SHIFT; + r = amdgpu_ttm_gart_bind(adev, bo, flags); + if (unlikely(r)) { + ttm_resource_free(bo, &tmp); + return r; } - bo->offset = (bo->mem.start << PAGE_SHIFT) + - bo->bdev->man[bo->mem.mem_type].gpu_offset; + amdgpu_gart_invalidate_tlb(adev); + ttm_resource_free(bo, &bo->resource); + ttm_bo_assign_mem(bo, tmp); return 0; } -/** +/* * amdgpu_ttm_recover_gart - Rebind GTT pages * * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to @@ -1187,199 +1023,191 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) if (!tbo->ttm) return 0; - flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem); + flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource); r = amdgpu_ttm_gart_bind(adev, tbo, flags); return r; } -/** +/* * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages * * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and * ttm_tt_destroy(). 
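[Note] The reworked amdgpu_ttm_alloc_gart() reduces to three cases: keep an existing GPU offset, use the AGP aperture directly, or carve out GART space and bind. A toy decision function under those assumptions, with a bump allocator standing in for ttm_bo_mem_space() and all addresses invented:

#include <stdint.h>
#include <stdio.h>

#define INVALID_OFFSET (~0ULL)

static uint64_t gart_next = 0x100000000ULL;	/* pretend GART base */

static uint64_t alloc_gpu_addr(uint64_t current, uint64_t agp_addr,
			       uint64_t size)
{
	uint64_t addr;

	if (current != INVALID_OFFSET)
		return current;		/* already has a GPU address */
	if (agp_addr != INVALID_OFFSET)
		return agp_addr;	/* directly visible through AGP */

	addr = gart_next;		/* otherwise carve out GART space */
	gart_next += size;
	return addr;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)
	       alloc_gpu_addr(INVALID_OFFSET, INVALID_OFFSET, 0x10000));
	return 0;
}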
*/ -static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) +static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, + struct ttm_tt *ttm) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; int r; /* if the pages have userptr pinning then clear that first */ - if (gtt->userptr) - amdgpu_ttm_tt_unpin_userptr(ttm); + if (gtt->userptr) { + amdgpu_ttm_tt_unpin_userptr(bdev, ttm); + } else if (ttm->sg && gtt->gobj->import_attach) { + struct dma_buf_attachment *attach; + + attach = gtt->gobj->import_attach; + dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); + ttm->sg = NULL; + } + + if (!gtt->bound) + return; if (gtt->offset == AMDGPU_BO_INVALID_OFFSET) - return 0; + return; /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); if (r) - DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n", - gtt->ttm.ttm.num_pages, gtt->offset); - return r; + DRM_ERROR("failed to unbind %u pages at 0x%08llX\n", + gtt->ttm.num_pages, gtt->offset); + gtt->bound = false; } -static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm) +static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev, + struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; if (gtt->usertask) put_task_struct(gtt->usertask); - ttm_dma_tt_fini(>t->ttm); + ttm_tt_fini(>t->ttm); kfree(gtt); } -static struct ttm_backend_func amdgpu_backend_func = { - .bind = &amdgpu_ttm_backend_bind, - .unbind = &amdgpu_ttm_backend_unbind, - .destroy = &amdgpu_ttm_backend_destroy, -}; - /** * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO * * @bo: The buffer object to create a GTT ttm_tt object around + * @page_flags: Page flags to be added to the ttm_tt object * * Called by ttm_tt_create(). */ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { + struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_ttm_tt *gtt; + enum ttm_caching caching; gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); if (gtt == NULL) { return NULL; } - gtt->ttm.ttm.func = &amdgpu_backend_func; gtt->gobj = &bo->base; + if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) + caching = ttm_write_combined; + else + caching = ttm_cached; + /* allocate space for the uninitialized page entries */ - if (ttm_sg_tt_init(>t->ttm, bo, page_flags)) { + if (ttm_sg_tt_init(>t->ttm, bo, page_flags, caching)) { kfree(gtt); return NULL; } - return >t->ttm.ttm; + return >t->ttm; } -/** +/* * amdgpu_ttm_tt_populate - Map GTT pages visible to the device * * Map the pages of a ttm_tt object to an address space visible * to the underlying device. 
*/ -static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, - struct ttm_operation_ctx *ctx) +static int amdgpu_ttm_tt_populate(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; + pgoff_t i; + int ret; /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */ - if (gtt && gtt->userptr) { + if (gtt->userptr) { ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); if (!ttm->sg) return -ENOMEM; - - ttm->page_flags |= TTM_PAGE_FLAG_SG; - ttm->state = tt_unbound; return 0; } - if (ttm->page_flags & TTM_PAGE_FLAG_SG) { - if (!ttm->sg) { - struct dma_buf_attachment *attach; - struct sg_table *sgt; - - attach = gtt->gobj->import_attach; - sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); - if (IS_ERR(sgt)) - return PTR_ERR(sgt); - - ttm->sg = sgt; - } - - drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, - gtt->ttm.dma_address, - ttm->num_pages); - ttm->state = tt_unbound; + if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) return 0; - } -#ifdef CONFIG_SWIOTLB - if (adev->need_swiotlb && swiotlb_nr_tbl()) { - return ttm_dma_populate(>t->ttm, adev->dev, ctx); - } -#endif + ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx); + if (ret) + return ret; - /* fall back to generic helper to populate the page array - * and map them to the device */ - return ttm_populate_and_map_pages(adev->dev, >t->ttm, ctx); + for (i = 0; i < ttm->num_pages; ++i) + ttm->pages[i]->mapping = bdev->dev_mapping; + + return 0; } -/** +/* * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays * * Unmaps pages of a ttm_tt object from the device address space and * unpopulates the page array backing it. */ -static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) +static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev, + struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; struct amdgpu_device *adev; + pgoff_t i; + + amdgpu_ttm_backend_unbind(bdev, ttm); - if (gtt && gtt->userptr) { + if (gtt->userptr) { amdgpu_ttm_tt_set_user_pages(ttm, NULL); kfree(ttm->sg); - ttm->page_flags &= ~TTM_PAGE_FLAG_SG; - return; - } - - if (ttm->sg && gtt->gobj->import_attach) { - struct dma_buf_attachment *attach; - - attach = gtt->gobj->import_attach; - dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); ttm->sg = NULL; return; } - if (ttm->page_flags & TTM_PAGE_FLAG_SG) + if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) return; - adev = amdgpu_ttm_adev(ttm->bdev); - -#ifdef CONFIG_SWIOTLB - if (adev->need_swiotlb && swiotlb_nr_tbl()) { - ttm_dma_unpopulate(>t->ttm, adev->dev); - return; - } -#endif + for (i = 0; i < ttm->num_pages; ++i) + ttm->pages[i]->mapping = NULL; - /* fall back to generic helper to unmap and unpopulate array */ - ttm_unmap_and_unpopulate_pages(adev->dev, >t->ttm); + adev = amdgpu_ttm_adev(bdev); + return ttm_pool_free(&adev->mman.bdev.pool, ttm); } /** * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current * task * - * @ttm: The ttm_tt object to bind this userptr object to + * @bo: The ttm_buffer_object to bind this userptr to * @addr: The address in the current tasks VM space to use * @flags: Requirements of userptr object. 
* * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages * to current task */ -int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, - uint32_t flags) +int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo, + uint64_t addr, uint32_t flags) { - struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct amdgpu_ttm_tt *gtt; - if (gtt == NULL) - return -EINVAL; + if (!bo->ttm) { + /* TODO: We want a separate TTM object type for userptrs */ + bo->ttm = amdgpu_ttm_tt_create(bo, 0); + if (bo->ttm == NULL) + return -ENOMEM; + } + + /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */ + bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL; + gtt = (void *)bo->ttm; gtt->userptr = addr; gtt->userflags = flags; @@ -1391,7 +1219,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, return 0; } -/** +/* * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object */ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) @@ -1407,13 +1235,13 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) return gtt->usertask->mm; } -/** +/* * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an * address range for the current task. * */ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, - unsigned long end) + unsigned long end, unsigned long *userptr) { struct amdgpu_ttm_tt *gtt = (void *)ttm; unsigned long size; @@ -1424,14 +1252,16 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, /* Return false if no part of the ttm_tt object lies within * the range */ - size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; + size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE; if (gtt->userptr > end || gtt->userptr + size <= start) return false; + if (userptr) + *userptr = gtt->userptr; return true; } -/** +/* * amdgpu_ttm_tt_is_userptr - Have the pages backing by userptr? */ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm) @@ -1444,7 +1274,7 @@ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm) return true; } -/** +/* * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only? */ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) @@ -1465,33 +1295,39 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) * * Figure out the flags to use for a VM PDE (Page Directory Entry). */ -uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem) +uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem) { uint64_t flags = 0; if (mem && mem->mem_type != TTM_PL_SYSTEM) flags |= AMDGPU_PTE_VALID; - if (mem && mem->mem_type == TTM_PL_TT) { + if (mem && (mem->mem_type == TTM_PL_TT || + mem->mem_type == AMDGPU_PL_PREEMPT)) { flags |= AMDGPU_PTE_SYSTEM; - if (ttm->caching_state == tt_cached) + if (ttm->caching == ttm_cached) flags |= AMDGPU_PTE_SNOOPED; } + if (mem && mem->mem_type == TTM_PL_VRAM && + mem->bus.caching == ttm_cached) + flags |= AMDGPU_PTE_SNOOPED; + return flags; } /** * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object * + * @adev: amdgpu_device pointer * @ttm: The ttm_tt object to compute the flags for * @mem: The memory registry backing this ttm_tt object - + * * Figure out the flags to use for a VM PTE (Page Table Entry). 
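[Note on the flag helpers around here] The PDE/PTE derivation is a small pure function of placement and caching; the hunk below also adds a rule that cached VRAM gets snooped. A sketch with made-up bit values (the real AMDGPU_PTE_* encodings are ASIC-specific):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID	(1ULL << 0)
#define PTE_SYSTEM	(1ULL << 1)
#define PTE_SNOOPED	(1ULL << 2)

enum mem_type { PL_SYSTEM, PL_TT, PL_PREEMPT, PL_VRAM };

/* Shape of amdgpu_ttm_tt_pde_flags(): anything not in plain system memory
 * is VALID; GTT and preemptible memory are system pages behind the GART,
 * so they also need SYSTEM and, when the CPU mapping is cached, SNOOPED
 * for coherency. Cached VRAM is snooped as well. */
static uint64_t pde_flags(enum mem_type type, bool cpu_cached)
{
	uint64_t flags = 0;

	if (type != PL_SYSTEM)
		flags |= PTE_VALID;
	if (type == PL_TT || type == PL_PREEMPT) {
		flags |= PTE_SYSTEM;
		if (cpu_cached)
			flags |= PTE_SNOOPED;
	}
	if (type == PL_VRAM && cpu_cached)
		flags |= PTE_SNOOPED;
	return flags;
}

int main(void)
{
	printf("GTT cached 0x%llx, VRAM 0x%llx\n",
	       (unsigned long long)pde_flags(PL_TT, true),
	       (unsigned long long)pde_flags(PL_VRAM, false));
	return 0;
}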
*/ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem); @@ -1504,7 +1340,7 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, return flags; } -/** +/* * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer * object. * @@ -1516,24 +1352,25 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place) { - unsigned long num_pages = bo->mem.num_pages; - struct drm_mm_node *node = bo->mem.mm_node; + unsigned long num_pages = bo->resource->num_pages; + struct amdgpu_res_cursor cursor; struct dma_resv_list *flist; struct dma_fence *f; int i; - /* Don't evict VM page tables while they are busy, otherwise we can't - * cleanly handle page faults. - */ + /* Swapout? */ + if (bo->resource->mem_type == TTM_PL_SYSTEM) + return true; + if (bo->type == ttm_bo_type_kernel && - !dma_resv_test_signaled_rcu(bo->base.resv, true)) + !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo))) return false; /* If bo is a KFD BO, check if the bo belongs to the current process. * If true, then return false as any KFD process needs all its BOs to * be resident to run successfully */ - flist = dma_resv_get_list(bo->base.resv); + flist = dma_resv_shared_list(bo->base.resv); if (flist) { for (i = 0; i < flist->shared_count; ++i) { f = rcu_dereference_protected(flist->shared[i], @@ -1543,19 +1380,33 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, } } - switch (bo->mem.mem_type) { + switch (bo->resource->mem_type) { + case AMDGPU_PL_PREEMPT: + /* Preemptible BOs don't own system resources managed by the + * driver (pages, VRAM, GART space). They point to resources + * owned by someone else (e.g. pageable memory in user mode + * or a DMABuf). They are used in a preemptible context so we + * can guarantee no deadlocks and good QoS in case of MMU + * notifiers or DMABuf move notifiers from the resource owner. 
+ */ + return false; case TTM_PL_TT: + if (amdgpu_bo_is_amdgpu_bo(bo) && + amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo))) + return false; return true; case TTM_PL_VRAM: /* Check each drm MM node individually */ - while (num_pages) { - if (place->fpfn < (node->start + node->size) && - !(place->lpfn && place->lpfn <= node->start)) + amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT, + &cursor); + while (cursor.remaining) { + if (place->fpfn < PFN_DOWN(cursor.start + cursor.size) + && !(place->lpfn && + place->lpfn <= PFN_DOWN(cursor.start))) return true; - num_pages -= node->size; - ++node; + amdgpu_res_next(&cursor, cursor.size); } return false; @@ -1566,6 +1417,41 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, return ttm_bo_eviction_valuable(bo, place); } +static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos, + void *buf, size_t size, bool write) +{ + while (size) { + uint64_t aligned_pos = ALIGN_DOWN(pos, 4); + uint64_t bytes = 4 - (pos & 0x3); + uint32_t shift = (pos & 0x3) * 8; + uint32_t mask = 0xffffffff << shift; + uint32_t value = 0; + + if (size < bytes) { + mask &= 0xffffffff >> (bytes - size) * 8; + bytes = size; + } + + if (mask != 0xffffffff) { + amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false); + if (write) { + value &= ~mask; + value |= (*(uint32_t *)buf << shift) & mask; + amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true); + } else { + value = (value & mask) >> shift; + memcpy(buf, &value, bytes); + } + } else { + amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write); + } + + pos += bytes; + buf += bytes; + size -= bytes; + } +} + /** * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object. * @@ -1579,78 +1465,56 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * access for debugging purposes. 
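[Note on amdgpu_ttm_vram_mm_access() above] It funnels byte-granular debug accesses through a 4-byte register window, so unaligned heads and short tails become read-modify-write on the containing word. A standalone model over a plain array (the real path goes through amdgpu_device_mm_access() and the MM_INDEX/MM_DATA registers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fake "VRAM" backing store standing in for the register window. */
static uint32_t vram[4] = { 0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c };

static void word_access(uint64_t pos, uint32_t *val, bool write)
{
	if (write)
		vram[pos / 4] = *val;
	else
		*val = vram[pos / 4];
}

/* The window only moves whole aligned 32-bit words, so partial accesses
 * are masked and shifted within the containing word. */
static void mm_access(uint64_t pos, void *buf, size_t size, bool write)
{
	while (size) {
		uint64_t aligned = pos & ~3ULL;
		uint64_t bytes = 4 - (pos & 3);	/* bytes left in this word */
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffffu << shift;
		uint32_t value, in = 0;

		if (size < bytes) {		/* clip the tail */
			mask &= 0xffffffffu >> (bytes - size) * 8;
			bytes = size;
		}

		if (mask != 0xffffffffu) {	/* partial word: RMW */
			word_access(aligned, &value, false);
			if (write) {
				memcpy(&in, buf, bytes);
				value = (value & ~mask) |
					((in << shift) & mask);
				word_access(aligned, &value, true);
			} else {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}
		} else if (write) {		/* whole word, direct */
			memcpy(&value, buf, 4);
			word_access(aligned, &value, true);
		} else {
			word_access(aligned, &value, false);
			memcpy(buf, &value, 4);
		}

		pos += bytes;
		buf = (char *)buf + bytes;
		size -= bytes;
	}
}

int main(void)
{
	uint8_t out[6];

	mm_access(2, out, sizeof(out), false);	/* unaligned 6-byte read */
	for (size_t i = 0; i < sizeof(out); i++)
		printf("%02x ", out[i]);
	printf("\n");	/* little-endian host: 02 03 04 05 06 07 */
	return 0;
}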
*/ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, - unsigned long offset, - void *buf, int len, int write) + unsigned long offset, void *buf, int len, + int write) { struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); - struct drm_mm_node *nodes; - uint32_t value = 0; + struct amdgpu_res_cursor cursor; int ret = 0; - uint64_t pos; - unsigned long flags; - if (bo->mem.mem_type != TTM_PL_VRAM) + if (bo->resource->mem_type != TTM_PL_VRAM) return -EIO; - nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset); - pos = (nodes->start << PAGE_SHIFT) + offset; - - while (len && pos < adev->gmc.mc_vram_size) { - uint64_t aligned_pos = pos & ~(uint64_t)3; - uint32_t bytes = 4 - (pos & 3); - uint32_t shift = (pos & 3) * 8; - uint32_t mask = 0xffffffff << shift; - - if (len < bytes) { - mask &= 0xffffffff >> (bytes - len) * 8; - bytes = len; + amdgpu_res_first(bo->resource, offset, len, &cursor); + while (cursor.remaining) { + size_t count, size = cursor.size; + loff_t pos = cursor.start; + + count = amdgpu_device_aper_access(adev, pos, buf, size, write); + size -= count; + if (size) { + /* using MM to access rest vram and handle un-aligned address */ + pos += count; + buf += count; + amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write); } - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31); - if (!write || mask != 0xffffffff) - value = RREG32_NO_KIQ(mmMM_DATA); - if (write) { - value &= ~mask; - value |= (*(uint32_t *)buf << shift) & mask; - WREG32_NO_KIQ(mmMM_DATA, value); - } - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - if (!write) { - value = (value & mask) >> shift; - memcpy(buf, &value, bytes); - } - - ret += bytes; - buf = (uint8_t *)buf + bytes; - pos += bytes; - len -= bytes; - if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) { - ++nodes; - pos = (nodes->start << PAGE_SHIFT); - } + ret += cursor.size; + buf += cursor.size; + amdgpu_res_next(&cursor, cursor.size); } return ret; } -static struct ttm_bo_driver amdgpu_bo_driver = { +static void +amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo) +{ + amdgpu_bo_move_notify(bo, false, NULL); +} + +static struct ttm_device_funcs amdgpu_bo_driver = { .ttm_tt_create = &amdgpu_ttm_tt_create, .ttm_tt_populate = &amdgpu_ttm_tt_populate, .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, - .invalidate_caches = &amdgpu_invalidate_caches, - .init_mem_type = &amdgpu_init_mem_type, + .ttm_tt_destroy = &amdgpu_ttm_backend_destroy, .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, .evict_flags = &amdgpu_evict_flags, .move = &amdgpu_bo_move, - .verify_access = &amdgpu_verify_access, - .move_notify = &amdgpu_bo_move_notify, + .delete_mem_notify = &amdgpu_bo_delete_mem_notify, .release_notify = &amdgpu_bo_release_notify, - .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, - .io_mem_free = &amdgpu_ttm_io_mem_free, .io_mem_pfn = amdgpu_ttm_io_mem_pfn, .access_memory = &amdgpu_ttm_access_memory, .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify @@ -1668,8 +1532,8 @@ static struct ttm_bo_driver amdgpu_bo_driver = { */ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) { - amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo, - NULL, &adev->fw_vram_usage.va); + amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo, + NULL, &adev->mman.fw_vram_usage_va); } /** @@ -1683,19 
+1547,19 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) { uint64_t vram_size = adev->gmc.visible_vram_size; - adev->fw_vram_usage.va = NULL; - adev->fw_vram_usage.reserved_bo = NULL; + adev->mman.fw_vram_usage_va = NULL; + adev->mman.fw_vram_usage_reserved_bo = NULL; - if (adev->fw_vram_usage.size == 0 || - adev->fw_vram_usage.size > vram_size) + if (adev->mman.fw_vram_usage_size == 0 || + adev->mman.fw_vram_usage_size > vram_size) return 0; return amdgpu_bo_create_kernel_at(adev, - adev->fw_vram_usage.start_offset, - adev->fw_vram_usage.size, + adev->mman.fw_vram_usage_start_offset, + adev->mman.fw_vram_usage_size, AMDGPU_GEM_DOMAIN_VRAM, - &adev->fw_vram_usage.reserved_bo, - &adev->fw_vram_usage.va); + &adev->mman.fw_vram_usage_reserved_bo, + &adev->mman.fw_vram_usage_va); } /* @@ -1717,70 +1581,90 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev) amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL); ctx->c2p_bo = NULL; - amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL); - ctx->p2c_bo = NULL; - return 0; } -/** - * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training - * - * @adev: amdgpu_device pointer - * - * create bo vram reservation from memory training. - */ -static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev) +static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev) { - int ret; struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; memset(ctx, 0, sizeof(*ctx)); - if (!adev->fw_vram_usage.mem_train_support) { - DRM_DEBUG("memory training does not support!\n"); - return 0; - } - ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc; - ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); - ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES; + ctx->c2p_train_data_offset = + ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M); + ctx->p2c_train_data_offset = + (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); + ctx->train_data_size = + GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES; DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", - ctx->train_data_size, - ctx->p2c_train_data_offset, - ctx->c2p_train_data_offset); + ctx->train_data_size, + ctx->p2c_train_data_offset, + ctx->c2p_train_data_offset); +} - ret = amdgpu_bo_create_kernel_at(adev, - ctx->p2c_train_data_offset, - ctx->train_data_size, - AMDGPU_GEM_DOMAIN_VRAM, - &ctx->p2c_bo, - NULL); - if (ret) { - DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret); - goto Err_out; +/* + * reserve TMR memory at the top of VRAM which holds + * IP Discovery data and is protected by PSP. 
+ */ +static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) +{ + int ret; + struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; + bool mem_train_support = false; + + if (!amdgpu_sriov_vf(adev)) { + if (amdgpu_atomfirmware_mem_training_supported(adev)) + mem_train_support = true; + else + DRM_DEBUG("memory training does not support!\n"); } - ret = amdgpu_bo_create_kernel_at(adev, + /* + * Query reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards for all + * the use cases (IP discovery/G6 memory training/profiling/diagnostic data.etc) + * + * Otherwise, fallback to legacy approach to check and reserve tmr block for ip + * discovery data and G6 memory training data respectively + */ + adev->mman.discovery_tmr_size = + amdgpu_atomfirmware_get_fw_reserved_fb_size(adev); + if (!adev->mman.discovery_tmr_size) + adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET; + + if (mem_train_support) { + /* reserve vram for mem train according to TMR location */ + amdgpu_ttm_training_data_block_init(adev); + ret = amdgpu_bo_create_kernel_at(adev, ctx->c2p_train_data_offset, ctx->train_data_size, AMDGPU_GEM_DOMAIN_VRAM, &ctx->c2p_bo, NULL); + if (ret) { + DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); + amdgpu_ttm_training_reserve_vram_fini(adev); + return ret; + } + ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; + } + + ret = amdgpu_bo_create_kernel_at(adev, + adev->gmc.real_vram_size - adev->mman.discovery_tmr_size, + adev->mman.discovery_tmr_size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->mman.discovery_memory, + NULL); if (ret) { - DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); - goto Err_out; + DRM_ERROR("alloc tmr failed(%d)!\n", ret); + amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); + return ret; } - ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; return 0; - -Err_out: - amdgpu_ttm_training_reserve_vram_fini(adev); - return ret; } -/** +/* * amdgpu_ttm_init - Init the memory management (ttm) as well as various * gtt/vram related fields. 
* @@ -1794,15 +1678,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) uint64_t gtt_size; int r; u64 vis_vram_limit; - void *stolen_vga_buf; mutex_init(&adev->mman.gtt_window_lock); /* No others user of address space so set it to 0 */ - r = ttm_bo_device_init(&adev->mman.bdev, - &amdgpu_bo_driver, - adev->ddev->anon_inode->i_mapping, - adev->ddev->vma_offset_manager, + r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, + adev_to_drm(adev)->anon_inode->i_mapping, + adev_to_drm(adev)->vma_offset_manager, + adev->need_swiotlb, dma_addressing_limited(adev->dev)); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); @@ -1810,12 +1693,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } adev->mman.initialized = true; - /* We opt to avoid OOM on system pages allocations */ - adev->mman.bdev.no_retry = true; - /* Initialize VRAM pool with all of VRAM divided into pages */ - r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, - adev->gmc.real_vram_size >> PAGE_SHIFT); + r = amdgpu_vram_mgr_init(adev); if (r) { DRM_ERROR("Failed initializing VRAM heap.\n"); return r; @@ -1830,8 +1709,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* Change the size here instead of the init above so only lpfn is affected */ amdgpu_ttm_set_buffer_funcs_status(adev, false); #ifdef CONFIG_64BIT - adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base, - adev->gmc.visible_vram_size); +#ifdef CONFIG_X86 + if (adev->gmc.xgmi.connected_to_cpu) + adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base, + adev->gmc.visible_vram_size); + + else +#endif + adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base, + adev->gmc.visible_vram_size); #endif /* @@ -1844,33 +1730,37 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } /* - *The reserved vram for memory training must be pinned to the specified - *place on the VRAM, so reserve it early. + * only NAVI10 and onwards ASIC support for IP discovery. + * If IP discovery enabled, a block of memory should be + * reserved for IP discovey. */ - r = amdgpu_ttm_training_reserve_vram_init(adev); - if (r) - return r; + if (adev->mman.discovery_bin) { + r = amdgpu_ttm_reserve_tmr(adev); + if (r) + return r; + } /* allocate memory as required for VGA * This is used for VGA emulation and pre-OS scanout buffers to * avoid display artifacts while transitioning between pre-OS * and driver. */ - r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->stolen_vga_memory, - NULL, &stolen_vga_buf); + r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->mman.stolen_vga_memory, + NULL); if (r) return r; - - /* - * reserve one TMR (64K) memory at the top of VRAM which holds - * IP Discovery data and is protected by PSP. 
- */ - r = amdgpu_bo_create_kernel_at(adev, - adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE, - DISCOVERY_TMR_SIZE, + r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size, + adev->mman.stolen_extended_size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->mman.stolen_extended_memory, + NULL); + if (r) + return r; + r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset, + adev->mman.stolen_reserved_size, AMDGPU_GEM_DOMAIN_VRAM, - &adev->discovery_memory, + &adev->mman.stolen_reserved_memory, NULL); if (r) return r; @@ -1892,7 +1782,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) gtt_size = (uint64_t)amdgpu_gtt_size << 20; /* Initialize GTT memory pool */ - r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT); + r = amdgpu_gtt_mgr_init(adev, gtt_size); if (r) { DRM_ERROR("Failed initializing GTT heap.\n"); return r; @@ -1900,48 +1790,36 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of GTT memory ready.\n", (unsigned)(gtt_size / (1024 * 1024))); + /* Initialize preemptible memory pool */ + r = amdgpu_preempt_mgr_init(adev); + if (r) { + DRM_ERROR("Failed initializing PREEMPT heap.\n"); + return r; + } + /* Initialize various on-chip memory pools */ - r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS, - adev->gds.gds_size); + r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size); if (r) { DRM_ERROR("Failed initializing GDS heap.\n"); return r; } - r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS, - adev->gds.gws_size); + r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size); if (r) { DRM_ERROR("Failed initializing gws heap.\n"); return r; } - r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA, - adev->gds.oa_size); + r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size); if (r) { DRM_ERROR("Failed initializing oa heap.\n"); return r; } - /* Register debugfs entries for amdgpu_ttm */ - r = amdgpu_ttm_debugfs_init(adev); - if (r) { - DRM_ERROR("Failed to init debugfs\n"); - return r; - } return 0; } -/** - * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm - */ -void amdgpu_ttm_late_init(struct amdgpu_device *adev) -{ - void *stolen_vga_buf; - /* return the VGA stolen memory (if any) back to VRAM */ - amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf); -} - -/** +/* * amdgpu_ttm_fini - De-initialize the TTM memory pools */ void amdgpu_ttm_fini(struct amdgpu_device *adev) @@ -1949,22 +1827,24 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) if (!adev->mman.initialized) return; - amdgpu_ttm_debugfs_fini(adev); amdgpu_ttm_training_reserve_vram_fini(adev); + /* return the stolen vga memory back to VRAM */ + amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); /* return the IP Discovery TMR memory back to VRAM */ - amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); + if (adev->mman.stolen_reserved_size) + amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory, + NULL, NULL); amdgpu_ttm_fw_reserve_vram_fini(adev); - if (adev->mman.aper_base_kaddr) - iounmap(adev->mman.aper_base_kaddr); - adev->mman.aper_base_kaddr = NULL; - - ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); - ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); - ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS); - ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); - ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); - 
ttm_bo_device_release(&adev->mman.bdev);
+	amdgpu_vram_mgr_fini(adev);
+	amdgpu_gtt_mgr_fini(adev);
+	amdgpu_preempt_mgr_fini(adev);
+	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
+	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
+	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
+	ttm_device_fini(&adev->mman.bdev);
 	adev->mman.initialized = false;
 	DRM_INFO("amdgpu: ttm finalized\n");
 }
@@ -1980,21 +1860,23 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
  */
 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 {
-	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
+	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 	uint64_t size;
 	int r;
 
-	if (!adev->mman.initialized || adev->in_gpu_reset ||
+	if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
 	    adev->mman.buffer_funcs_enabled == enable)
 		return;
 
 	if (enable) {
 		struct amdgpu_ring *ring;
-		struct drm_sched_rq *rq;
+		struct drm_gpu_scheduler *sched;
 
 		ring = adev->mman.buffer_funcs_ring;
-		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-		r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+		sched = &ring->sched;
+		r = drm_sched_entity_init(&adev->mman.entity,
+					  DRM_SCHED_PRIORITY_KERNEL, &sched,
+					  1, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
 				  r);
@@ -2015,86 +1897,14 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 	adev->mman.buffer_funcs_enabled = enable;
 }
 
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-	struct drm_file *file_priv = filp->private_data;
-	struct amdgpu_device *adev = file_priv->minor->dev->dev_private;
-
-	if (adev == NULL)
-		return -EINVAL;
-
-	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
-}
-
-static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
-			     struct ttm_mem_reg *mem, unsigned num_pages,
-			     uint64_t offset, unsigned window,
-			     struct amdgpu_ring *ring,
-			     uint64_t *addr)
-{
-	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
-	struct amdgpu_device *adev = ring->adev;
-	struct ttm_tt *ttm = bo->ttm;
-	struct amdgpu_job *job;
-	unsigned num_dw, num_bytes;
-	dma_addr_t *dma_address;
-	struct dma_fence *fence;
-	uint64_t src_addr, dst_addr;
-	uint64_t flags;
-	int r;
-
-	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
-	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
-
-	*addr = adev->gmc.gart_start;
-	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
-		AMDGPU_GPU_PAGE_SIZE;
-
-	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-	num_bytes = num_pages * 8;
-
-	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
-	if (r)
-		return r;
-
-	src_addr = num_dw * 4;
-	src_addr += job->ibs[0].gpu_addr;
-
-	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
-	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
-	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
-				dst_addr, num_bytes);
-
-	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-	WARN_ON(job->ibs[0].length_dw > num_dw);
-
-	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
-	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
-	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
-			    &job->ibs[0].ptr[num_dw]);
-	if (r)
-		goto error_free;
-
-	r = amdgpu_job_submit(job, &adev->mman.entity,
-			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-	if (r)
-		goto error_free;
-
-	dma_fence_put(fence);
-
-	return r;
-
-error_free:
-	amdgpu_job_free(job);
-	return r;
-}
-
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv, struct dma_fence **fence, bool direct_submit, - bool vm_needs_flush) + bool vm_needs_flush, bool tmz) { + enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT : + AMDGPU_IB_POOL_DELAYED; struct amdgpu_device *adev = ring->adev; struct amdgpu_job *job; @@ -2112,18 +1922,19 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, num_loops = DIV_ROUND_UP(byte_count, max_bytes); num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8); - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job); if (r) return r; if (vm_needs_flush) { - job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo); + job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ? + adev->gmc.pdb0_bo : adev->gart.bo); job->vm_needs_flush = true; } if (resv) { r = amdgpu_sync_resv(adev, &job->sync, resv, - AMDGPU_FENCE_OWNER_UNDEFINED, - false); + AMDGPU_SYNC_ALWAYS, + AMDGPU_FENCE_OWNER_UNDEFINED); if (r) { DRM_ERROR("sync failed (%d).\n", r); goto error_free; @@ -2134,7 +1945,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, uint32_t cur_size_in_bytes = min(byte_count, max_bytes); amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset, - dst_offset, cur_size_in_bytes); + dst_offset, cur_size_in_bytes, tmz); src_offset += cur_size_in_bytes; dst_offset += cur_size_in_bytes; @@ -2168,9 +1979,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes; struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - struct drm_mm_node *mm_node; - unsigned long num_pages; + struct amdgpu_res_cursor cursor; unsigned int num_loops, num_dw; + uint64_t num_bytes; struct amdgpu_job *job; int r; @@ -2180,61 +1991,56 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, return -EINVAL; } - if (bo->tbo.mem.mem_type == TTM_PL_TT) { + if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) { + DRM_ERROR("Trying to clear preemptible memory.\n"); + return -EINVAL; + } + + if (bo->tbo.resource->mem_type == TTM_PL_TT) { r = amdgpu_ttm_alloc_gart(&bo->tbo); if (r) return r; } - num_pages = bo->tbo.num_pages; - mm_node = bo->tbo.mem.mm_node; + num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT; num_loops = 0; - while (num_pages) { - uint64_t byte_count = mm_node->size << PAGE_SHIFT; - num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes); - num_pages -= mm_node->size; - ++mm_node; + amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor); + while (cursor.remaining) { + num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes); + amdgpu_res_next(&cursor, cursor.size); } num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; /* for IB padding */ num_dw += 64; - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, + &job); if (r) return r; if (resv) { r = amdgpu_sync_resv(adev, &job->sync, resv, - AMDGPU_FENCE_OWNER_UNDEFINED, false); + AMDGPU_SYNC_ALWAYS, + AMDGPU_FENCE_OWNER_UNDEFINED); if (r) { DRM_ERROR("sync failed (%d).\n", r); goto error_free; } } - num_pages = bo->tbo.num_pages; - mm_node = bo->tbo.mem.mm_node; - - while (num_pages) { - uint64_t byte_count = mm_node->size << PAGE_SHIFT; - uint64_t dst_addr; - - dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); - while (byte_count) { - uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count, - max_bytes); - - amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, - dst_addr, cur_size_in_bytes); + 
amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor); + while (cursor.remaining) { + uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes); + uint64_t dst_addr = cursor.start; - dst_addr += cur_size_in_bytes; - byte_count -= cur_size_in_bytes; - } + dst_addr += amdgpu_ttm_domain_start(adev, + bo->tbo.resource->mem_type); + amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr, + cur_size); - num_pages -= mm_node->size; - ++mm_node; + amdgpu_res_next(&cursor, cur_size); } amdgpu_ring_pad_ib(ring, &job->ibs[0]); @@ -2251,34 +2057,108 @@ error_free: return r; } +/** + * amdgpu_ttm_evict_resources - evict memory buffers + * @adev: amdgpu device object + * @mem_type: evicted BO's memory type + * + * Evicts all @mem_type buffers on the lru list of the memory type. + * + * Returns: + * 0 for success or a negative error code on failure. + */ +int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type) +{ + struct ttm_resource_manager *man; + + switch (mem_type) { + case TTM_PL_VRAM: + case TTM_PL_TT: + case AMDGPU_PL_GWS: + case AMDGPU_PL_GDS: + case AMDGPU_PL_OA: + man = ttm_manager_type(&adev->mman.bdev, mem_type); + break; + default: + DRM_ERROR("Trying to evict invalid memory type\n"); + return -EINVAL; + } + + return ttm_resource_manager_evict_all(&adev->mman.bdev, man); +} + #if defined(CONFIG_DEBUG_FS) -static int amdgpu_mm_dump_table(struct seq_file *m, void *data) +static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused) { - struct drm_info_node *node = (struct drm_info_node *)m->private; - unsigned ttm_pl = (uintptr_t)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl]; + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, + TTM_PL_VRAM); struct drm_printer p = drm_seq_file_printer(m); man->func->debug(man, &p); return 0; } -static const struct drm_info_list amdgpu_ttm_debugfs_list[] = { - {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM}, - {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT}, - {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS}, - {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS}, - {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA}, - {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, -#ifdef CONFIG_SWIOTLB - {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} -#endif -}; +static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; -/** + return ttm_pool_debugfs(&adev->mman.bdev.pool, m); +} + +static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, + TTM_PL_TT); + struct drm_printer p = drm_seq_file_printer(m); + + man->func->debug(man, &p); + return 0; +} + +static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_GDS); + struct drm_printer p = drm_seq_file_printer(m); + + man->func->debug(man, &p); + return 0; +} + +static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = (struct 
amdgpu_device *)m->private; + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_GWS); + struct drm_printer p = drm_seq_file_printer(m); + + man->func->debug(man, &p); + return 0; +} + +static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_OA); + struct drm_printer p = drm_seq_file_printer(m); + + man->func->debug(man, &p); + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table); +DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table); +DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table); +DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table); +DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table); +DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool); + +/* * amdgpu_ttm_vram_read - Linear read access to VRAM * * Accesses VRAM via MMIO for debugging purposes. @@ -2288,7 +2168,6 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, { struct amdgpu_device *adev = file_inode(f)->i_private; ssize_t result = 0; - int r; if (size & 0x3 || *pos & 0x3) return -EINVAL; @@ -2296,33 +2175,25 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, if (*pos >= adev->gmc.mc_vram_size) return -ENXIO; + size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos)); while (size) { - unsigned long flags; - uint32_t value; - - if (*pos >= adev->gmc.mc_vram_size) - return result; + size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4); + uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ]; - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); - value = RREG32_NO_KIQ(mmMM_DATA); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - - r = put_user(value, (uint32_t *)buf); - if (r) - return r; + amdgpu_device_vram_access(adev, *pos, value, bytes, false); + if (copy_to_user(buf, value, bytes)) + return -EFAULT; - result += 4; - buf += 4; - *pos += 4; - size -= 4; + result += bytes; + buf += bytes; + *pos += bytes; + size -= bytes; } return result; } -/** +/* * amdgpu_ttm_vram_write - Linear write access to VRAM * * Accesses VRAM via MMIO for debugging purposes. 
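Both the VRAM read handler above and the write handler below reject transfers whose offset or size is not dword aligned (size & 0x3 || *pos & 0x3 fails with -EINVAL), so userspace must issue 4-byte-aligned I/O against the amdgpu_vram node. A minimal userspace sketch of the expected access pattern; the debugfs mount point and the dri/0 minor number are assumptions that vary per system:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t buf[64];	/* 256 bytes, a multiple of 4 */
	/* hypothetical path: depends on where debugfs is mounted and
	 * which DRM minor the amdgpu device was assigned */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_RDONLY);

	if (fd < 0)
		return 1;

	/* both the offset (0x1000) and the size must be dword aligned,
	 * or the handler returns -EINVAL */
	if (pread(fd, buf, sizeof(buf), 0x1000) == (ssize_t)sizeof(buf))
		printf("first dword at 0x1000: 0x%08x\n", buf[0]);

	close(fd);
	return 0;
}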
@@ -2341,7 +2212,6 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, return -ENXIO; while (size) { - unsigned long flags; uint32_t value; if (*pos >= adev->gmc.mc_vram_size) @@ -2351,11 +2221,7 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, if (r) return r; - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); - WREG32_NO_KIQ(mmMM_DATA, value); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + amdgpu_device_mm_access(adev, *pos, &value, 4, true); result += 4; buf += 4; @@ -2373,59 +2239,7 @@ static const struct file_operations amdgpu_ttm_vram_fops = { .llseek = default_llseek, }; -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - -/** - * amdgpu_ttm_gtt_read - Linear read access to GTT memory - */ -static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = file_inode(f)->i_private; - ssize_t result = 0; - int r; - - while (size) { - loff_t p = *pos / PAGE_SIZE; - unsigned off = *pos & ~PAGE_MASK; - size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); - struct page *page; - void *ptr; - - if (p >= adev->gart.num_cpu_pages) - return result; - - page = adev->gart.pages[p]; - if (page) { - ptr = kmap(page); - ptr += off; - - r = copy_to_user(buf, ptr, cur_size); - kunmap(adev->gart.pages[p]); - } else - r = clear_user(buf, cur_size); - - if (r) - return -EFAULT; - - result += cur_size; - buf += cur_size; - *pos += cur_size; - size -= cur_size; - } - - return result; -} - -static const struct file_operations amdgpu_ttm_gtt_fops = { - .owner = THIS_MODULE, - .read = amdgpu_ttm_gtt_read, - .llseek = default_llseek -}; - -#endif - -/** +/* * amdgpu_iomem_read - Virtual read access to GPU mapped memory * * This function is used to read memory that has been mapped to the @@ -2481,7 +2295,7 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, return result; } -/** +/* * amdgpu_iomem_write - Virtual write access to GPU mapped memory * * This function is used to write memory that has been mapped to the @@ -2539,62 +2353,29 @@ static const struct file_operations amdgpu_ttm_iomem_fops = { .llseek = default_llseek }; -static const struct { - char *name; - const struct file_operations *fops; - int domain; -} ttm_debugfs_entries[] = { - { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM }, -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT }, -#endif - { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM }, -}; - #endif -static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) +void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) - unsigned count; - - struct drm_minor *minor = adev->ddev->primary; - struct dentry *ent, *root = minor->debugfs_root; - - for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) { - ent = debugfs_create_file( - ttm_debugfs_entries[count].name, - S_IFREG | S_IRUGO, root, - adev, - ttm_debugfs_entries[count].fops); - if (IS_ERR(ent)) - return PTR_ERR(ent); - if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM) - i_size_write(ent->d_inode, adev->gmc.mc_vram_size); - else if (ttm_debugfs_entries[count].domain == TTM_PL_TT) - i_size_write(ent->d_inode, adev->gmc.gart_size); - adev->mman.debugfs_entries[count] = ent; - } - - count = ARRAY_SIZE(amdgpu_ttm_debugfs_list); - -#ifdef CONFIG_SWIOTLB - if (!(adev->need_swiotlb && swiotlb_nr_tbl())) - 
--count;
-#endif
-
-	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
-#else
-	return 0;
-#endif
-}
-
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
-	unsigned i;
-
-	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
-		debugfs_remove(adev->mman.debugfs_entries[i]);
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
+	struct dentry *root = minor->debugfs_root;
+
+	debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
+				 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
+	debugfs_create_file("amdgpu_iomem", 0444, root, adev,
+			    &amdgpu_ttm_iomem_fops);
+	debugfs_create_file("amdgpu_vram_mm", 0444, root, adev,
+			    &amdgpu_mm_vram_table_fops);
+	debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev,
+			    &amdgpu_mm_tt_table_fops);
+	debugfs_create_file("amdgpu_gds_mm", 0444, root, adev,
+			    &amdgpu_mm_gds_table_fops);
+	debugfs_create_file("amdgpu_gws_mm", 0444, root, adev,
+			    &amdgpu_mm_gws_table_fops);
+	debugfs_create_file("amdgpu_oa_mm", 0444, root, adev,
+			    &amdgpu_mm_oa_table_fops);
+	debugfs_create_file("ttm_page_pool", 0444, root, adev,
+			    &amdgpu_ttm_page_pool_fops);
 #endif
 }
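The entries registered above follow the DEFINE_SHOW_ATTRIBUTE() pattern that this patch adopts in place of the old drm_info_list tables: each *_show() routine is expanded into a matching *_fops through single_open() and handed straight to debugfs_create_file(). A minimal sketch of the same pattern with a hypothetical "foo" counter, not part of amdgpu:

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static atomic_t foo_counter = ATOMIC_INIT(0);

static int foo_show(struct seq_file *m, void *unused)
{
	/* m->private carries the data pointer given to debugfs_create_file() */
	seq_printf(m, "counter: %d\n", atomic_read(&foo_counter));
	return 0;
}

/* expands to foo_fops, wired to foo_show() through single_open() */
DEFINE_SHOW_ATTRIBUTE(foo);

static void foo_debugfs_init(struct dentry *root)
{
	debugfs_create_file("foo", 0444, root, NULL, &foo_fops);
}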