Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 472
1 file changed, 177 insertions(+), 295 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a785acc09f20..3bef0432cac2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -47,7 +47,6 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 
-#include <drm/drm_debugfs.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -57,6 +56,7 @@
 #include "amdgpu_sdma.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_atomfirmware.h"
+#include "amdgpu_res_cursor.h"
 #include "bif/bif_4_1_d.h"
 
 #define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128
@@ -179,54 +179,11 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 }
 
 /**
- * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
- *
- * @bo: The bo to assign the memory to.
- * @mm_node: Memory manager node for drm allocator.
- * @mem: The region where the bo resides.
- *
- */
-static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
-				    struct drm_mm_node *mm_node,
-				    struct ttm_resource *mem)
-{
-	uint64_t addr = 0;
-
-	if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
-		addr = mm_node->start << PAGE_SHIFT;
-		addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
-						mem->mem_type);
-	}
-	return addr;
-}
-
-/**
- * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
- * @offset. It also modifies the offset to be within the drm_mm_node returned
- *
- * @mem: The region where the bo resides.
- * @offset: The offset that drm_mm_node is used for finding.
- *
- */
-static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
-					       uint64_t *offset)
-{
-	struct drm_mm_node *mm_node = mem->mm_node;
-
-	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
-		*offset -= (mm_node->size << PAGE_SHIFT);
-		++mm_node;
-	}
-	return mm_node;
-}
-
-/**
  * amdgpu_ttm_map_buffer - Map memory into the GART windows
  * @bo: buffer object to map
  * @mem: memory object to map
- * @mm_node: drm_mm node object to map
+ * @mm_cur: range to map
 * @num_pages: number of pages to map
- * @offset: offset into @mm_node where to start
 * @window: which GART window to use
 * @ring: DMA ring to use for the copy
 * @tmz: if we should setup a TMZ enabled mapping
@@ -237,10 +194,10 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
  */
 static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
				 struct ttm_resource *mem,
-				 struct drm_mm_node *mm_node,
-				 unsigned num_pages, uint64_t offset,
-				 unsigned window, struct amdgpu_ring *ring,
-				 bool tmz, uint64_t *addr)
+				 struct amdgpu_res_cursor *mm_cur,
+				 unsigned num_pages, unsigned window,
+				 struct amdgpu_ring *ring, bool tmz,
+				 uint64_t *addr)
 {
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
@@ -257,14 +214,15 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 
	/* Map only what can't be accessed directly */
	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
-		*addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
+		*addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
+			mm_cur->start;
		return 0;
	}
 
	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;
-	*addr += offset & ~PAGE_MASK;
+	*addr += mm_cur->start & ~PAGE_MASK;
 
	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = num_pages * 8;
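A note on the new helper before the remaining hunks: this diff converts the file from open-coded drm_mm_node walks to the amdgpu_res_cursor interface (the newly included amdgpu_res_cursor.h). The sketch below is paraphrased from that header as it landed around v5.12; treat the exact field and function details as an approximation rather than the verbatim header:

struct amdgpu_res_cursor {
	uint64_t start;			/* GPU offset of the current contiguous span */
	uint64_t size;			/* bytes remaining in the current span */
	uint64_t remaining;		/* bytes remaining in the whole walk */
	struct drm_mm_node *node;	/* allocator node backing the current span */
};

/* The walk pattern used throughout this diff: */
struct amdgpu_res_cursor cursor;

amdgpu_res_first(mem, offset, length, &cursor);
while (cursor.remaining) {
	/* operate on [cursor.start, cursor.start + cursor.size) */
	amdgpu_res_next(&cursor, cursor.size);
}

amdgpu_res_first() positions the cursor at a byte offset inside the resource, skipping whole nodes the way the removed amdgpu_find_mm_node() used to, and amdgpu_res_next() advances by a byte count, stepping into the next drm_mm_node when the current span is exhausted.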
@@ -292,17 +250,17 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
	cpu_addr = &job->ibs[0].ptr[num_dw];
 
	if (mem->mem_type == TTM_PL_TT) {
-		dma_addr_t *dma_address;
+		dma_addr_t *dma_addr;
 
-		dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
-		r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+		dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
+		r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
				    cpu_addr);
		if (r)
			goto error_free;
	} else {
		dma_addr_t dma_address;
 
-		dma_address = (mm_node->start << PAGE_SHIFT) + offset;
+		dma_address = mm_cur->start;
		dma_address += adev->vm_manager.vram_base_offset;
 
		for (i = 0; i < num_pages; ++i) {
@@ -354,9 +312,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
	const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);
-	uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-	struct drm_mm_node *src_mm, *dst_mm;
+	struct amdgpu_res_cursor src_mm, dst_mm;
	struct dma_fence *fence = NULL;
	int r = 0;
 
@@ -365,29 +322,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
		return -EINVAL;
	}
 
-	src_offset = src->offset;
-	if (src->mem->mm_node) {
-		src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
-		src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
-	} else {
-		src_mm = NULL;
-		src_node_size = ULLONG_MAX;
-	}
-
-	dst_offset = dst->offset;
-	if (dst->mem->mm_node) {
-		dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
-		dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
-	} else {
-		dst_mm = NULL;
-		dst_node_size = ULLONG_MAX;
-	}
+	amdgpu_res_first(src->mem, src->offset, size, &src_mm);
+	amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
 
	mutex_lock(&adev->mman.gtt_window_lock);
-
-	while (size) {
-		uint32_t src_page_offset = src_offset & ~PAGE_MASK;
-		uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
+	while (src_mm.remaining) {
+		uint32_t src_page_offset = src_mm.start & ~PAGE_MASK;
+		uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK;
		struct dma_fence *next;
		uint32_t cur_size;
		uint64_t from, to;
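The copy loop that continues in the next hunk clamps every iteration to the smaller of the current source span, the current destination span, and the GART transfer window, so no single copy crosses a discontiguity on either side. A standalone model of that chunking (userspace C, hypothetical span and window sizes):

#include <stdint.h>
#include <stdio.h>

#define GTT_MAX_BYTES (512ull * 4096)	/* stand-in for the GART window size */

static uint64_t min3_u64(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	/* hypothetical contiguous spans backing src and dst */
	uint64_t src_spans[] = { 3ull << 20, 1ull << 20 };
	uint64_t dst_spans[] = { 1ull << 20, 3ull << 20 };
	uint64_t src_left = src_spans[0], dst_left = dst_spans[0];
	unsigned si = 0, di = 0;
	uint64_t size = 4ull << 20;	/* total bytes to copy */

	while (size) {
		/* same min(min3(...), window) bound as the kernel loop */
		uint64_t cur = min3_u64(src_left, dst_left, size);

		if (cur > GTT_MAX_BYTES)
			cur = GTT_MAX_BYTES;
		printf("copy %llu KiB\n", (unsigned long long)(cur >> 10));

		size -= cur;
		src_left -= cur;
		dst_left -= cur;
		if (!src_left && ++si < 2)	/* amdgpu_res_next() analogue */
			src_left = src_spans[si];
		if (!dst_left && ++di < 2)
			dst_left = dst_spans[di];
	}
	return 0;
}

With these numbers the chunks come out as 1 MiB, 2 MiB and 1 MiB, each cut at the tightest of the three limits; the cursor gives the kernel loop exactly this behaviour without any per-node bookkeeping.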
@@ -396,19 +337,19 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = max(src_page_offset, dst_page_offset);
-		cur_size = min(min3(src_node_size, dst_node_size, size),
+		cur_size = min(min3(src_mm.size, dst_mm.size, size),
			       (uint64_t)(GTT_MAX_BYTES - cur_size));
 
		/* Map src to window 0 and dst to window 1. */
-		r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
+		r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
					  PFN_UP(cur_size + src_page_offset),
-					  src_offset, 0, ring, tmz, &from);
+					  0, ring, tmz, &from);
		if (r)
			goto error;
 
-		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
+		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
					  PFN_UP(cur_size + dst_page_offset),
-					  dst_offset, 1, ring, tmz, &to);
+					  1, ring, tmz, &to);
		if (r)
			goto error;
 
@@ -420,27 +361,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
		dma_fence_put(fence);
		fence = next;
 
-		size -= cur_size;
-		if (!size)
-			break;
-
-		src_node_size -= cur_size;
-		if (!src_node_size) {
-			++src_mm;
-			src_node_size = src_mm->size << PAGE_SHIFT;
-			src_offset = 0;
-		} else {
-			src_offset += cur_size;
-		}
-
-		dst_node_size -= cur_size;
-		if (!dst_node_size) {
-			++dst_mm;
-			dst_node_size = dst_mm->size << PAGE_SHIFT;
-			dst_offset = 0;
-		} else {
-			dst_offset += cur_size;
-		}
+		amdgpu_res_next(&src_mm, cur_size);
+		amdgpu_res_next(&dst_mm, cur_size);
	}
 error:
	mutex_unlock(&adev->mman.gtt_window_lock);
@@ -519,7 +441,8 @@ error:
 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
			       struct ttm_resource *mem)
 {
-	struct drm_mm_node *nodes = mem->mm_node;
+	uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+	struct amdgpu_res_cursor cursor;
 
	if (mem->mem_type == TTM_PL_SYSTEM ||
	    mem->mem_type == TTM_PL_TT)
@@ -527,12 +450,13 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
	if (mem->mem_type != TTM_PL_VRAM)
		return false;
 
+	amdgpu_res_first(mem, 0, mem_size, &cursor);
+
	/* ttm_resource_ioremap only supports contiguous memory */
-	if (nodes->size != mem->num_pages)
+	if (cursor.size != mem_size)
		return false;
 
-	return ((nodes->start + nodes->size) << PAGE_SHIFT)
-		<= adev->gmc.visible_vram_size;
+	return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
 }
 
 /*
@@ -674,7 +598,10 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
		mem->bus.offset += adev->gmc.aper_base;
		mem->bus.is_iomem = true;
-		mem->bus.caching = ttm_write_combined;
+		if (adev->gmc.xgmi.connected_to_cpu)
+			mem->bus.caching = ttm_cached;
+		else
+			mem->bus.caching = ttm_write_combined;
		break;
	default:
		return -EINVAL;
@@ -686,12 +613,10 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
 {
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-	uint64_t offset = (page_offset << PAGE_SHIFT);
-	struct drm_mm_node *mm;
+	struct amdgpu_res_cursor cursor;
 
-	mm = amdgpu_find_mm_node(&bo->mem, &offset);
-	offset += adev->gmc.aper_base;
-	return mm->start + (offset >> PAGE_SHIFT);
+	amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
+	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
 }
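The amdgpu_mem_visible() rewrite above keeps the same two conditions as before, just expressed through the cursor and in bytes rather than pages: a VRAM resource is CPU-accessible only if its first contiguous span covers the whole buffer and that span ends inside the visible window. A minimal restatement of the predicate (a sketch, not the kernel function itself):

#include <stdbool.h>
#include <stdint.h>

/* span_start/span_size describe the first contiguous span the cursor
 * reports; bo_size is the full buffer size in bytes. */
static bool vram_cpu_visible(uint64_t span_start, uint64_t span_size,
			     uint64_t bo_size, uint64_t visible_vram_size)
{
	/* ttm_resource_ioremap only supports contiguous memory, so one
	 * span must cover everything... */
	if (span_size != bo_size)
		return false;
	/* ...and that span must end inside the CPU-visible BAR window. */
	return span_start + span_size <= visible_vram_size;
}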
@@ -898,15 +823,14 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
 {
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	int r;
-
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+	int r;
 
	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
-				      ttm->num_pages << PAGE_SHIFT,
+				      (u64)ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;
@@ -936,13 +860,12 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
 {
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 
	/* double check that we don't free the table twice */
-	if (!ttm->sg->sgl)
+	if (!ttm->sg || !ttm->sg->sgl)
		return;
 
	/* unmap the pages mapped to the device */
@@ -1162,13 +1085,13 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;
 
-	if (!gtt->bound)
-		return;
-
	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
 
+	if (!gtt->bound)
+		return;
+
	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return;
 
@@ -1430,6 +1353,10 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
		flags |= AMDGPU_PTE_SNOOPED;
	}
 
+	if (mem && mem->mem_type == TTM_PL_VRAM &&
+	    mem->bus.caching == ttm_cached)
+		flags |= AMDGPU_PTE_SNOOPED;
+
	return flags;
 }
 
@@ -1469,7 +1396,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
 {
	unsigned long num_pages = bo->mem.num_pages;
-	struct drm_mm_node *node = bo->mem.mm_node;
+	struct amdgpu_res_cursor cursor;
	struct dma_resv_list *flist;
	struct dma_fence *f;
	int i;
@@ -1501,13 +1428,15 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
-		while (num_pages) {
-			if (place->fpfn < (node->start + node->size) &&
-			    !(place->lpfn && place->lpfn <= node->start))
+		amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT,
+				 &cursor);
+		while (cursor.remaining) {
+			if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
+			    && !(place->lpfn &&
+				 place->lpfn <= PFN_DOWN(cursor.start)))
				return true;
 
-			num_pages -= node->size;
-			++node;
+			amdgpu_res_next(&cursor, cursor.size);
		}
		return false;
 
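One easy-to-miss fix above: in amdgpu_ttm_tt_pin_userptr(), `ttm->num_pages << PAGE_SHIFT` shifts a 32-bit page count, so for buffers of 4 GiB and up the byte size wraps before it ever reaches sg_alloc_table_from_pages(); the added (u64) cast widens the operand first. A standalone illustration of the wrap (userspace C, assuming 4 KiB pages):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num_pages = 1u << 20;	/* 1 Mi pages = 4 GiB at 4 KiB pages */

	uint64_t wrapped = num_pages << 12;		/* shift done in 32 bits */
	uint64_t correct = (uint64_t)num_pages << 12;	/* widened before shifting */

	printf("wrapped=%llu correct=%llu\n",
	       (unsigned long long)wrapped, (unsigned long long)correct);
	return 0;	/* prints wrapped=0 correct=4294967296 */
}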
@@ -1531,41 +1460,36 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
  * access for debugging purposes.
  */
 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
-				    unsigned long offset,
-				    void *buf, int len, int write)
+				    unsigned long offset, void *buf, int len,
+				    int write)
 {
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
-	struct drm_mm_node *nodes;
+	struct amdgpu_res_cursor cursor;
+	unsigned long flags;
	uint32_t value = 0;
	int ret = 0;
-	uint64_t pos;
-	unsigned long flags;
 
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;
 
-	pos = offset;
-	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
-	pos += (nodes->start << PAGE_SHIFT);
-
-	while (len && pos < adev->gmc.mc_vram_size) {
-		uint64_t aligned_pos = pos & ~(uint64_t)3;
-		uint64_t bytes = 4 - (pos & 3);
-		uint32_t shift = (pos & 3) * 8;
+	amdgpu_res_first(&bo->mem, offset, len, &cursor);
+	while (cursor.remaining) {
+		uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
+		uint64_t bytes = 4 - (cursor.start & 3);
+		uint32_t shift = (cursor.start & 3) * 8;
		uint32_t mask = 0xffffffff << shift;
 
-		if (len < bytes) {
-			mask &= 0xffffffff >> (bytes - len) * 8;
-			bytes = len;
+		if (cursor.size < bytes) {
+			mask &= 0xffffffff >> (bytes - cursor.size) * 8;
+			bytes = cursor.size;
		}
 
		if (mask != 0xffffffff) {
			spin_lock_irqsave(&adev->mmio_idx_lock, flags);
			WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
			WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
-			if (!write || mask != 0xffffffff)
-				value = RREG32_NO_KIQ(mmMM_DATA);
+			value = RREG32_NO_KIQ(mmMM_DATA);
			if (write) {
				value &= ~mask;
				value |= (*(uint32_t *)buf << shift) & mask;
@@ -1577,21 +1501,15 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				memcpy(buf, &value, bytes);
			}
		} else {
-			bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
-			bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
-
-			amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
-						  bytes, write);
+			bytes = cursor.size & ~0x3ULL;
+			amdgpu_device_vram_access(adev, cursor.start,
						  (uint32_t *)buf, bytes,
						  write);
		}
 
		ret += bytes;
		buf = (uint8_t *)buf + bytes;
-		pos += bytes;
-		len -= bytes;
-
-		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
-			++nodes;
-			pos = (nodes->start << PAGE_SHIFT);
-		}
+		amdgpu_res_next(&cursor, bytes);
	}
 
	return ret;
@@ -1696,7 +1614,7 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
	ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
-	
+
	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
			ctx->train_data_size,
			ctx->p2c_train_data_offset,
@@ -1812,8 +1730,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
 #ifdef CONFIG_64BIT
-	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
-						adev->gmc.visible_vram_size);
+#ifdef CONFIG_X86
+	if (adev->gmc.xgmi.connected_to_cpu)
+		adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
+				adev->gmc.visible_vram_size);
+
+	else
+#endif
+		adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
+				adev->gmc.visible_vram_size);
 #endif
 
	/*
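In amdgpu_ttm_access_memory() above, the unaligned head and tail of an access go through the indirect MM_INDEX/MM_DATA register pair as a read-modify-write of the containing dword; the diff also drops the inner `if (!write || mask != 0xffffffff)` because that branch is only reached when mask != 0xffffffff anyway, so reading MM_DATA unconditionally is equivalent. The mask-and-shift arithmetic modelled on its own (userspace C; rmw_dword() is a made-up stand-in for the register pair):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of the indirect path: to write `bytes` (1..4) bytes at an
 * unaligned offset, read the containing dword, splice the new bits
 * in under a mask, and write the dword back. */
static void rmw_dword(uint32_t *vram_dword, const void *buf,
		      unsigned pos_in_dword, unsigned bytes)
{
	uint32_t shift = pos_in_dword * 8;
	uint32_t mask = (bytes == 4) ? 0xffffffff
				     : (((1u << (bytes * 8)) - 1) << shift);
	uint32_t value = *vram_dword;	/* RREG32(mmMM_DATA) analogue */
	uint32_t src = 0;

	memcpy(&src, buf, bytes);
	value = (value & ~mask) | ((src << shift) & mask);
	*vram_dword = value;		/* WREG32(mmMM_DATA) analogue */
}

int main(void)
{
	uint32_t dword = 0x11223344;
	uint8_t two[2] = { 0xaa, 0xbb };

	rmw_dword(&dword, two, 1, 2);	/* patch bytes 1..2 of the dword */
	printf("0x%08x\n", dword);	/* 0x11bbaa44 on little-endian */
	return 0;
}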
@@ -2053,7 +1978,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		return r;
 
	if (vm_needs_flush) {
-		job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
+		job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
+						     adev->gmc.pdb0_bo : adev->gart.bo);
		job->vm_needs_flush = true;
	}
	if (resv) {
@@ -2104,9 +2030,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-	struct drm_mm_node *mm_node;
-	unsigned long num_pages;
+	struct amdgpu_res_cursor cursor;
	unsigned int num_loops, num_dw;
+	uint64_t num_bytes;
	struct amdgpu_job *job;
	int r;
 
@@ -2122,15 +2048,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		return r;
	}
 
-	num_pages = bo->tbo.mem.num_pages;
-	mm_node = bo->tbo.mem.mm_node;
+	num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;
	num_loops = 0;
-	while (num_pages) {
-		uint64_t byte_count = mm_node->size << PAGE_SHIFT;
-		num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
-		num_pages -= mm_node->size;
-		++mm_node;
+
+	amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+	while (cursor.remaining) {
+		num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
+		amdgpu_res_next(&cursor, cursor.size);
	}
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
@@ -2152,27 +2076,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		}
	}
 
-	num_pages = bo->tbo.mem.num_pages;
-	mm_node = bo->tbo.mem.mm_node;
+	amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+	while (cursor.remaining) {
+		uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
+		uint64_t dst_addr = cursor.start;
 
-	while (num_pages) {
-		uint64_t byte_count = mm_node->size << PAGE_SHIFT;
-		uint64_t dst_addr;
+		dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
+					cur_size);
 
-		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
-		while (byte_count) {
-			uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
-							   max_bytes);
-
-			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
-						dst_addr, cur_size_in_bytes);
-
-			dst_addr += cur_size_in_bytes;
-			byte_count -= cur_size_in_bytes;
-		}
-
-		num_pages -= mm_node->size;
-		++mm_node;
+		amdgpu_res_next(&cursor, cur_size);
	}
 
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
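The two cursor walks in amdgpu_fill_buffer() above first count how many fill packets the indirect buffer needs (one per max_bytes-sized piece of each contiguous span), then emit them. The sizing arithmetic in isolation (userspace C, hypothetical span sizes and engine limit):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t spans[] = { 5ull << 20, 3ull << 20 };	/* hypothetical spans */
	uint64_t max_bytes = 1ull << 21;		/* engine fill limit, 2 MiB */
	unsigned int num_loops = 0;

	for (unsigned i = 0; i < 2; ++i)
		num_loops += DIV_ROUND_UP_ULL(spans[i], max_bytes);

	printf("num_loops=%u\n", num_loops);	/* 3 + 2 = 5 fill packets */
	return 0;
}

The emit loop then advances the cursor by min(cursor.size, max_bytes) per iteration, so the packet count computed up front matches the packets actually emitted.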
@@ -2191,36 +2104,74 @@ error_free:
 
 #if defined(CONFIG_DEBUG_FS)
 
-static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
+static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *)m->private;
-	unsigned ttm_pl = (uintptr_t)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
-	struct amdgpu_device *adev = drm_to_adev(dev);
-	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
+	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+							    TTM_PL_VRAM);
	struct drm_printer p = drm_seq_file_printer(m);
 
	man->func->debug(man, &p);
	return 0;
 }
 
-static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
+static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *)m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct amdgpu_device *adev = drm_to_adev(dev);
+	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
 
	return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
 }
 
-static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
-	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
-	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
-	{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
-	{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
-	{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
-	{"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
-};
+static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+							    TTM_PL_TT);
+	struct drm_printer p = drm_seq_file_printer(m);
+
+	man->func->debug(man, &p);
+	return 0;
+}
+
+static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+							    AMDGPU_PL_GDS);
+	struct drm_printer p = drm_seq_file_printer(m);
+
+	man->func->debug(man, &p);
+	return 0;
+}
+
+static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+							    AMDGPU_PL_GWS);
+	struct drm_printer p = drm_seq_file_printer(m);
+
+	man->func->debug(man, &p);
+	return 0;
+}
+
+static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+							    AMDGPU_PL_OA);
+	struct drm_printer p = drm_seq_file_printer(m);
+
+	man->func->debug(man, &p);
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
 
 /*
  * amdgpu_ttm_vram_read - Linear read access to VRAM
@@ -2308,58 +2259,6 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
	.llseek = default_llseek,
 };
 
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-
-/*
- * amdgpu_ttm_gtt_read - Linear read access to GTT memory
- */
-static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
-				   size_t size, loff_t *pos)
-{
-	struct amdgpu_device *adev = file_inode(f)->i_private;
-	ssize_t result = 0;
-	int r;
-
-	while (size) {
-		loff_t p = *pos / PAGE_SIZE;
-		unsigned off = *pos & ~PAGE_MASK;
-		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
-		struct page *page;
-		void *ptr;
-
-		if (p >= adev->gart.num_cpu_pages)
-			return result;
-
-		page = adev->gart.pages[p];
-		if (page) {
-			ptr = kmap(page);
-			ptr += off;
-
-			r = copy_to_user(buf, ptr, cur_size);
-			kunmap(adev->gart.pages[p]);
-		} else
-			r = clear_user(buf, cur_size);
-
-		if (r)
-			return -EFAULT;
-
-		result += cur_size;
-		buf += cur_size;
-		*pos += cur_size;
-		size -= cur_size;
-	}
-
-	return result;
-}
-
-static const struct file_operations amdgpu_ttm_gtt_fops = {
-	.owner = THIS_MODULE,
-	.read = amdgpu_ttm_gtt_read,
-	.llseek = default_llseek
-};
-
-#endif
-
 /*
 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
 *
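The debugfs rework above replaces the drm_info_list/drm_debugfs machinery with one seq_file "show" function per placement plus the generic DEFINE_SHOW_ATTRIBUTE() helper. That macro (from include/linux/seq_file.h, reproduced here approximately) generates the boilerplate open handler and file_operations, which is why each amdgpu_mm_*_table_show() above can simply take the amdgpu_device from m->private:

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}

single_open() stores inode->i_private, i.e. the adev pointer passed to debugfs_create_file() in the hunk below, as the seq_file's private pointer, so it comes back out as m->private in the show callbacks.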
@@ -2474,46 +2373,29 @@ static const struct file_operations amdgpu_ttm_iomem_fops = {
	.llseek = default_llseek
 };
 
-static const struct {
-	char *name;
-	const struct file_operations *fops;
-	int domain;
-} ttm_debugfs_entries[] = {
-	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
 #endif
-	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
-};
-#endif
-
-int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
+void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	unsigned count;
-
	struct drm_minor *minor = adev_to_drm(adev)->primary;
-	struct dentry *ent, *root = minor->debugfs_root;
-
-	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
-		ent = debugfs_create_file(
-				ttm_debugfs_entries[count].name,
-				S_IFREG | S_IRUGO, root,
-				adev,
-				ttm_debugfs_entries[count].fops);
-		if (IS_ERR(ent))
-			return PTR_ERR(ent);
-		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
-			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
-		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
-			i_size_write(ent->d_inode, adev->gmc.gart_size);
-		adev->mman.debugfs_entries[count] = ent;
-	}
-
-	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
-	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
-#else
-	return 0;
+	struct dentry *root = minor->debugfs_root;
+
+	debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
+				 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
+	debugfs_create_file("amdgpu_iomem", 0444, root, adev,
+			    &amdgpu_ttm_iomem_fops);
+	debugfs_create_file("amdgpu_vram_mm", 0444, root, adev,
+			    &amdgpu_mm_vram_table_fops);
+	debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev,
+			    &amdgpu_mm_tt_table_fops);
+	debugfs_create_file("amdgpu_gds_mm", 0444, root, adev,
+			    &amdgpu_mm_gds_table_fops);
+	debugfs_create_file("amdgpu_gws_mm", 0444, root, adev,
+			    &amdgpu_mm_gws_table_fops);
+	debugfs_create_file("amdgpu_oa_mm", 0444, root, adev,
+			    &amdgpu_mm_oa_table_fops);
+	debugfs_create_file("ttm_page_pool", 0444, root, adev,
+			    &amdgpu_ttm_page_pool_fops);
 #endif
 }
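debugfs_create_file_size() differs from plain debugfs_create_file() only in that it also sets the inode size, replacing the old i_size_write() calls, so userspace can see how many bytes the amdgpu_vram node exposes before reading it. A quick hypothetical check from userspace (the path assumes the first DRM minor, a mounted debugfs and root privileges):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	/* hypothetical path; requires CONFIG_DEBUG_FS */
	if (stat("/sys/kernel/debug/dri/0/amdgpu_vram", &st) != 0) {
		perror("stat");
		return 1;
	}
	printf("amdgpu_vram: %lld bytes of VRAM exposed\n",
	       (long long)st.st_size);
	return 0;
}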