| author | Matthew Brost <matthew.brost@intel.com> | 2023-07-07 22:23:57 -0700 |
|---|---|---|
| committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2023-12-21 11:35:18 -0500 |
| commit | b06d47be7c83165d3b3e45e1d5f9520b79c7f5cc (patch) | |
| tree | b27852e1de1904c7dc2b689f4594e63ea3c6f685 /drivers/gpu/drm/xe/xe_bo.c | |
| parent | 5cecdd0bb6bf4b8979b7d071017560daecfc9200 (diff) | |
drm/xe: Port Xe to GPUVA
Rather than open coding VM binds and VMA tracking, use the GPUVA
library. GPUVA provides common infrastructure for VM binds with
mmap / munmap semantics and supports VK sparse bindings.
The concepts are:
1) xe_vm inherits from drm_gpuva_manager
2) xe_vma inherits from drm_gpuva
3) xe_vma_op inherits from drm_gpuva_op
4) VM bind operations (MAP, UNMAP, PREFETCH, UNMAP_ALL) call into the
GPUVA code to generate a list of VMA operations, which is then parsed,
committed, and executed (see the sketches after this list).
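
Points 1-3 are the usual kernel "inheritance by embedding" pattern: the Xe
struct embeds the GPUVA base struct and recovers itself with container_of().
A minimal sketch of the idea, assuming the embedded member is named gpuva;
only the gpuva_to_vma() helper itself is visible in the diff below, the
surrounding struct layout here is illustrative:

```c
#include <linux/container_of.h>
#include <drm/drm_gpuvm.h>

/* "Subclass": xe_vma embeds the GPUVA base object as its base. */
struct xe_vma {
	struct drm_gpuva gpuva;		/* base, managed by the GPUVA library */
	/* ... Xe-specific state (rebind links, tile masks, ...) ... */
};

/* Recover the driver VMA from a base pointer handed back by GPUVA. */
static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}
```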
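For point 4, the flow is: hand the requested range to the GPUVA core, which
splits and merges it against the existing VA space (the mmap / munmap
semantics above) and returns a list of generic ops; the driver then parses
that list into its own operations, commits, and executes them. A sketch of
the parse step against the GPUVA library's op-list API; the switch body is
paraphrased pseudologic, not Xe's actual handlers:

```c
#include <drm/drm_gpuvm.h>

static int bind_sketch(struct drm_gpuvm *gpuvm, u64 addr, u64 range,
		       struct drm_gem_object *obj, u64 offset)
{
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	/* GPUVA computes the MAP / REMAP / UNMAP steps for this request. */
	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/* Parse each generic op into a driver-level operation. */
	drm_gpuva_for_each_op(op, ops) {
		switch (op->op) {
		case DRM_GPUVA_OP_MAP:
			/* create a VMA covering op->map.va.{addr,range} */
			break;
		case DRM_GPUVA_OP_REMAP:
			/* an existing VMA is split/shrunk around the new one */
			break;
		case DRM_GPUVA_OP_UNMAP:
			/* tear down the VMA behind op->unmap.va */
			break;
		default:
			break;
		}
	}

	drm_gpuva_ops_free(gpuvm, ops);
	return 0;
}
```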
v2 (CI): Add break after default in case statement.
v3: Rebase
v4: Fix some error handling
v5: Use the unlocked VMA variant in error paths
v6: Rebase, address review feedback, mainly from Thomas H
v7: Fix compile error in xe_vma_op_unwind, address checkpatch
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/gpu/drm/xe/xe_bo.c')
-rw-r--r-- | drivers/gpu/drm/xe/xe_bo.c | 31 |
1 file changed, 21 insertions(+), 10 deletions(-)
```diff
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index fb351c36cdc2..00b8b5e7f197 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -412,7 +412,9 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
 {
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
-	struct xe_vma *vma;
+	struct drm_gpuva *gpuva;
+	struct drm_gem_object *obj = &bo->ttm.base;
+	struct drm_gpuvm_bo *vm_bo;
 	int ret = 0;
 
 	dma_resv_assert_held(bo->ttm.base.resv);
@@ -425,10 +427,12 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
 		dma_resv_iter_end(&cursor);
 	}
 
-	list_for_each_entry(vma, &bo->vmas, bo_link) {
-		struct xe_vm *vm = xe_vma_vm(vma);
+	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+		drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
+			struct xe_vma *vma = gpuva_to_vma(gpuva);
+			struct xe_vm *vm = xe_vma_vm(vma);
 
-		trace_xe_vma_evict(vma);
+			trace_xe_vma_evict(vma);
 
 		if (xe_vm_in_fault_mode(vm)) {
 			/* Wait for pending binds / unbinds. */
@@ -454,7 +458,6 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
 
 		} else {
 			bool vm_resv_locked = false;
-			struct xe_vm *vm = xe_vma_vm(vma);
 
 			/*
 			 * We need to put the vma on the vm's rebind_list,
@@ -462,9 +465,9 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
 			 * that we indeed have it locked, put the vma on the
 			 * vm's notifier.rebind_list instead and scoop later.
 			 */
-			if (dma_resv_trylock(&vm->resv))
+			if (dma_resv_trylock(xe_vm_resv(vm)))
 				vm_resv_locked = true;
-			else if (ctx->resv != &vm->resv) {
+			else if (ctx->resv != xe_vm_resv(vm)) {
 				spin_lock(&vm->notifier.list_lock);
 				list_move_tail(&vma->notifier.rebind_link,
 					       &vm->notifier.rebind_list);
@@ -477,7 +480,8 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
 				list_add_tail(&vma->rebind_link, &vm->rebind_list);
 
 			if (vm_resv_locked)
-				dma_resv_unlock(&vm->resv);
+				dma_resv_unlock(xe_vm_resv(vm));
+		}
 	}
 }
@@ -1285,7 +1289,7 @@ xe_bo_create_locked_range(struct xe_device *xe,
 		}
 	}
 
-	bo = __xe_bo_create_locked(xe, bo, tile, vm ? &vm->resv : NULL,
+	bo = __xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
 				   vm && !xe_vm_in_fault_mode(vm) &&
 				   flags & XE_BO_CREATE_USER_BIT ?
 				   &vm->lru_bulk_move : NULL, size,
@@ -1293,6 +1297,13 @@ xe_bo_create_locked_range(struct xe_device *xe,
 	if (IS_ERR(bo))
 		return bo;
 
+	/*
+	 * Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
+	 * to ensure the shared resv doesn't disappear under the bo, the bo
+	 * will keep a reference to the vm, and avoid circular references
+	 * by having all the vm's bo references released at vm close
+	 * time.
+	 */
 	if (vm && xe_bo_is_user(bo))
 		xe_vm_get(vm);
 	bo->vm = vm;
@@ -1600,7 +1611,7 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
 		xe_vm_assert_held(vm);
 
 		ctx.allow_res_evict = allow_res_evict;
-		ctx.resv = &vm->resv;
+		ctx.resv = xe_vm_resv(vm);
 	}
 
 	return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
```
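
Two patterns recur in the hunks above. First, the eviction loop no longer
walks a private bo->vmas list; it iterates the GPUVA-maintained VM/BO
connections via drm_gem_for_each_gpuvm_bo() and drm_gpuvm_bo_for_each_va().
Second, every `&vm->resv` becomes `xe_vm_resv(vm)`: after the port the
reservation object is owned by the GPUVA layer rather than being a field of
xe_vm, so access goes through a helper. A plausible sketch of that helper,
assuming the manager is embedded as vm->gpuvm; the real definition lives in
xe_vm.h and may differ:

```c
/* Sketch only: resolve a VM's dma_resv via the embedded GPUVA manager. */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}
```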