path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
author    Lucas Stach <l.stach@pengutronix.de>	2017-12-06 17:49:39 +0100
committer Alex Deucher <alexander.deucher@amd.com>	2017-12-07 11:51:56 -0500
commit    1b1f42d8fde4fef1ed7873bf5aa91755f8c3de35
tree      3039b957f8ef645419b5649d28dc7ece3e9ceecd	/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent    9ce6aae12c66adf87b5861f8fa5705ea11d0b6ee
drm: move amd_gpu_scheduler into common location
This moves and renames the AMDGPU scheduler to a common location in DRM in order to facilitate re-use by other drivers. This is mostly a straightforward rename with no code changes. One notable exception is the function to_drm_sched_fence(), which is no longer an inline header function, to avoid the need to export the drm_sched_fence_ops_scheduled and drm_sched_fence_ops_finished structures.

Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
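For reference, a minimal sketch of what the out-of-line helper described above can look like once it moves from the header into the scheduler code proper (placement next to the two fence ops tables is an assumption based on the message, not shown in this diff):

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
	/* Compare against the two fence ops tables. Because this
	 * function is now compiled into the scheduler itself rather
	 * than inlined into every caller, the tables can stay private
	 * and no longer need to be exported. */
	if (f->ops == &drm_sched_fence_ops_scheduled)
		return container_of(f, struct drm_sched_fence, scheduled);

	if (f->ops == &drm_sched_fence_ops_finished)
		return container_of(f, struct drm_sched_fence, finished);

	return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);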
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3ecdbdfb04dd..dbe37d621796 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2643,7 +2643,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 				   AMDGPU_VM_PTE_COUNT(adev) * 8);
 	unsigned ring_instance;
 	struct amdgpu_ring *ring;
-	struct amd_sched_rq *rq;
+	struct drm_sched_rq *rq;
 	int r, i;
 	u64 flags;
 	uint64_t init_pde_value = 0;
@@ -2663,8 +2663,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
-	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
-	r = amd_sched_entity_init(&ring->sched, &vm->entity,
+	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+	r = drm_sched_entity_init(&ring->sched, &vm->entity,
 				  rq, amdgpu_sched_jobs, NULL);
 	if (r)
 		return r;
@@ -2744,7 +2744,7 @@ error_free_root:
 	vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-	amd_sched_entity_fini(&ring->sched, &vm->entity);
+	drm_sched_entity_fini(&ring->sched, &vm->entity);
 
 	return r;
 }
@@ -2803,7 +2803,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}
 
-	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
+	drm_sched_entity_fini(vm->entity.sched, &vm->entity);
 
 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
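
To make the mechanical nature of the rename concrete, here is a minimal caller-side sketch under the new names, modeled on the amdgpu_vm_init() hunks above; the surrounding setup is elided, so treat this as an illustration of the renamed entry points rather than a complete function:

	/* Pick the kernel-priority run queue on a ring's scheduler and
	 * bind a scheduler entity to it via the renamed drm_sched_*
	 * API (formerly amd_sched_*). */
	struct drm_sched_rq *rq;
	int r;

	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs, NULL);
	if (r)
		return r;

	/* ... and on teardown or error paths: */
	drm_sched_entity_fini(&ring->sched, &vm->entity);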