Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 107
1 file changed, 67 insertions, 40 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4fb20e870e63..bfc47bea23db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -25,21 +25,34 @@
 #include <linux/wait.h>
 #include <linux/sched.h>
 
+#include <drm/drm_drv.h>
+
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
-static void amdgpu_job_timedout(struct drm_sched_job *s_job)
+static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 	struct amdgpu_job *job = to_amdgpu_job(s_job);
 	struct amdgpu_task_info ti;
+	struct amdgpu_device *adev = ring->adev;
+	int idx;
+
+	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
+		DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
+			 __func__, s_job->sched->name);
+
+		/* Effectively the job is aborted as the device is gone */
+		return DRM_GPU_SCHED_STAT_ENODEV;
+	}
 
 	memset(&ti, 0, sizeof(struct amdgpu_task_info));
 
-	if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
+	if (amdgpu_gpu_recovery &&
+	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
 		DRM_ERROR("ring %s timeout, but soft recovered\n",
 			  s_job->sched->name);
-		return;
+		goto exit;
 	}
 
 	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
@@ -49,10 +62,17 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
 		  ti.process_name, ti.tgid, ti.task_name, ti.pid);
 
-	if (amdgpu_device_should_recover_gpu(ring->adev))
+	if (amdgpu_device_should_recover_gpu(ring->adev)) {
 		amdgpu_device_gpu_recover(ring->adev, job);
-	else
+	} else {
 		drm_sched_suspend_timeout(&ring->sched);
+		if (amdgpu_sriov_vf(adev))
+			adev->virt.tdr_debug = true;
+	}
+
+exit:
+	drm_dev_exit(idx);
+	return DRM_GPU_SCHED_STAT_NOMINAL;
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -87,7 +107,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 }
 
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-			     struct amdgpu_job **job)
+			     enum amdgpu_ib_pool_type pool_type,
+			     struct amdgpu_job **job)
 {
 	int r;
 
@@ -95,7 +116,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 	if (r)
 		return r;
 
-	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
 	if (r)
 		kfree(*job);
 
@@ -106,44 +127,52 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
 	struct dma_fence *f;
+	struct dma_fence *hw_fence;
 	unsigned i;
 
-	/* use sched fence if available */
-	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
+	if (job->hw_fence.ops == NULL)
+		hw_fence = job->external_hw_fence;
+	else
+		hw_fence = &job->hw_fence;
 
+	/* use sched fence if available */
+	f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
 	for (i = 0; i < job->num_ibs; ++i)
 		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
 
 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 {
-	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 	struct amdgpu_job *job = to_amdgpu_job(s_job);
 
 	drm_sched_job_cleanup(s_job);
 
-	amdgpu_ring_priority_put(ring, s_job->s_priority);
-	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
-	kfree(job);
+
+	/* only put the hw fence if has embedded fence */
+	if (job->hw_fence.ops != NULL)
+		dma_fence_put(&job->hw_fence);
+	else
+		kfree(job);
 }
 
 void amdgpu_job_free(struct amdgpu_job *job)
 {
 	amdgpu_job_free_resources(job);
-
-	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
-	kfree(job);
+
+	/* only put the hw fence if has embedded fence */
+	if (job->hw_fence.ops != NULL)
+		dma_fence_put(&job->hw_fence);
	else
+		kfree(job);
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 		      void *owner, struct dma_fence **f)
 {
-	enum drm_sched_priority priority;
-	struct amdgpu_ring *ring;
 	int r;
 
 	if (!f)
@@ -153,14 +182,11 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	if (r)
 		return r;
 
-	job->owner = owner;
+	drm_sched_job_arm(&job->base);
+
 	*f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
-	priority = job->base.s_priority;
-	drm_sched_entity_push_job(&job->base, entity);
-
-	ring = to_amdgpu_ring(entity->rq->sched);
-	amdgpu_ring_priority_get(ring, priority);
+	drm_sched_entity_push_job(&job->base);
 
 	return 0;
 }
@@ -172,11 +198,14 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 
 	job->base.sched = &ring->sched;
 	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
-	job->fence = dma_fence_get(*fence);
+	/* record external_hw_fence for direct submit */
+	job->external_hw_fence = dma_fence_get(*fence);
 	if (r)
 		return r;
 
 	amdgpu_job_free(job);
+	dma_fence_put(*fence);
+
 	return 0;
 }
 
@@ -187,17 +216,13 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
 	struct dma_fence *fence;
-	bool explicit = false;
 	int r;
 
-	fence = amdgpu_sync_get_fence(&job->sync, &explicit);
-	if (fence && explicit) {
-		if (drm_sched_dependency_optimized(fence, s_entity)) {
-			r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
-					      fence, false);
-			if (r)
-				DRM_ERROR("Error adding fence (%d)\n", r);
-		}
+	fence = amdgpu_sync_get_fence(&job->sync);
+	if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
+		r = amdgpu_sync_fence(&job->sched_sync, fence);
+		if (r)
+			DRM_ERROR("Error adding fence (%d)\n", r);
 	}
 
 	while (fence == NULL && vm && !job->vmid) {
@@ -207,7 +232,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
 
-		fence = amdgpu_sync_get_fence(&job->sync, NULL);
+		fence = amdgpu_sync_get_fence(&job->sync);
 	}
 
 	return fence;
@@ -238,10 +263,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 		if (r)
 			DRM_ERROR("Error scheduling IBs (%d)\n", r);
 	}
-	/* if gpu reset, hw fence will be replaced here */
-	dma_fence_put(job->fence);
-	job->fence = dma_fence_get(fence);
 
+	if (!job->job_run_counter)
+		dma_fence_get(fence);
+	else if (finished->error < 0)
+		dma_fence_put(&job->hw_fence);
+	job->job_run_counter++;
 	amdgpu_job_free_resources(job);
 
 	fence = r ? ERR_PTR(r) : fence;
@@ -258,7 +285,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	int i;
 
 	/* Signal all jobs not yet scheduled */
-	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
 		struct drm_sched_rq *rq = &sched->sched_rq[i];
 
 		if (!rq)
@@ -278,7 +305,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	}
 
 	/* Signal all jobs already scheduled to HW */
-	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+	list_for_each_entry(s_job, &sched->pending_list, list) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 
 		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
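
Note: the following is a minimal caller sketch, not part of the patch above, showing the reworked amdgpu_job_alloc_with_ib() signature that now takes an explicit IB pool. The pool choice (AMDGPU_IB_POOL_DELAYED), the 64-byte IB size, and the early-free path are assumptions for illustration only.

	/* Illustrative sketch only: allocate a single-IB job from the delayed
	 * pool, then release it again without submitting.  With the embedded
	 * hw fence, amdgpu_job_free() either drops the fence reference or
	 * kfree()s the job, as in the hunks above.
	 */
	struct amdgpu_job *job;
	int r;

	r = amdgpu_job_alloc_with_ib(adev, 64, AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	/* ... emit packets into job->ibs[0] here ... */

	amdgpu_job_free(job);
	return 0;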