author | Jack Zhang <Jack.Zhang1@amd.com> | 2021-05-12 15:06:35 +0800
committer | Alex Deucher <alexander.deucher@amd.com> | 2021-08-16 15:16:58 -0400
commit | c530b02f39850a639b72d01ebbf7e5d745c60831 (patch)
tree | 274c664d421fc31914f5b6a241094cd29231e011 /drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
parent | 554594567b1fa3da74f88ec7b2dc83d000c58e98 (diff)
drm/amd/amdgpu embed hw_fence into amdgpu_job
Why: Previously the hw fence was allocated separately from the job.
That caused long-standing lifetime issues and corner cases.
The ideal situation is to let the fence manage both the job's and the
fence's lifetime, and to simplify the design of the GPU scheduler.
How:
We propose to embed the hw_fence into amdgpu_job (see the struct sketch after this list).
1. Normal job submission is covered by this method.
2. For ib_test and for submissions without a parent job, keep the
legacy way of creating a hw fence separately.
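
For orientation, here is an abridged sketch of the relevant amdgpu_job members after this change. Member order, comments, and the exact types are illustrative; the authoritative definition lives in drivers/gpu/drm/amd/amdgpu/amdgpu_job.h.

```c
/* Abridged sketch of struct amdgpu_job after this patch; other members elided. */
struct amdgpu_job {
	struct drm_sched_job	base;
	/* ... other members elided ... */

	/* fence storage embedded in the job, used for normal job submission */
	struct dma_fence	hw_fence;
	/* separately allocated fence, used for ib_test and submissions
	 * without a parent job (the legacy path) */
	struct dma_fence	*external_hw_fence;

	/* non-zero once the job has run; v4 uses this to detect a
	 * resubmitted job during TDR */
	uint32_t		job_run_counter;
};
```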
v2:
use AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT to show that the fence is
embedded in a job.
v3:
remove redundant variable ring in amdgpu_job
v4:
add TDR sequence support for this feature. Add a job_run_counter to
indicate whether this job is a resubmitted job.
v5:
add missing handling in amdgpu_fence_enable_signaling (sketched below).
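
The v5 fix lands in amdgpu_fence.c, which this diffstat does not cover. The idea, sketched below with a hypothetical helper name and assuming AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (v2) marks job-embedded fences, is that enable_signaling can no longer assume every hw fence is wrapped in a struct amdgpu_fence, so the ring has to be resolved per fence type.

```c
/* Hypothetical helper illustrating the v5 idea; not the literal
 * amdgpu_fence.c change from this patch. */
static struct amdgpu_ring *amdgpu_hw_fence_to_ring(struct dma_fence *f)
{
	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
		/* fence is embedded in an amdgpu_job: recover the ring from
		 * the job's scheduler */
		struct amdgpu_job *job = container_of(f, struct amdgpu_job,
						      hw_fence);

		return to_amdgpu_ring(job->base.sched);
	}

	/* legacy, separately allocated fence: it carries its ring directly */
	return to_amdgpu_fence(f)->ring;
}
```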
Signed-off-by: Jingwen Chen <Jingwen.Chen2@amd.com>
Signed-off-by: Jack Zhang <Jack.Zhang7@hotmail.com>
Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 39 |
1 file changed, 28 insertions(+), 11 deletions(-)
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index d33e6d97cc89..de29518673dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -127,11 +127,16 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
 	struct dma_fence *f;
+	struct dma_fence *hw_fence;
 	unsigned i;
 
-	/* use sched fence if available */
-	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
+	if (job->hw_fence.ops == NULL)
+		hw_fence = job->external_hw_fence;
+	else
+		hw_fence = &job->hw_fence;
 
+	/* use sched fence if available */
+	f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
 	for (i = 0; i < job->num_ibs; ++i)
 		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
@@ -142,20 +147,27 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 
 	drm_sched_job_cleanup(s_job);
 
-	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
-	kfree(job);
+
+	/* only put the hw fence if has embedded fence */
+	if (job->hw_fence.ops != NULL)
+		dma_fence_put(&job->hw_fence);
+	else
+		kfree(job);
 }
 
 void amdgpu_job_free(struct amdgpu_job *job)
 {
 	amdgpu_job_free_resources(job);
-
-	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
-	kfree(job);
+
+	/* only put the hw fence if has embedded fence */
+	if (job->hw_fence.ops != NULL)
+		dma_fence_put(&job->hw_fence);
+	else
+		kfree(job);
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
@@ -184,11 +196,14 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 
 	job->base.sched = &ring->sched;
 	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
-	job->fence = dma_fence_get(*fence);
+	/* record external_hw_fence for direct submit */
+	job->external_hw_fence = dma_fence_get(*fence);
 	if (r)
 		return r;
 
 	amdgpu_job_free(job);
+	dma_fence_put(*fence);
+
 	return 0;
 }
 
@@ -246,10 +261,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 		if (r)
 			DRM_ERROR("Error scheduling IBs (%d)\n", r);
 	}
 
-	/* if gpu reset, hw fence will be replaced here */
-	dma_fence_put(job->fence);
-	job->fence = dma_fence_get(fence);
+	if (!job->job_run_counter)
+		dma_fence_get(fence);
+	else if (finished->error < 0)
+		dma_fence_put(&job->hw_fence);
+	job->job_run_counter++;
 	amdgpu_job_free_resources(job);
 
 	fence = r ? ERR_PTR(r) : fence;
```
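
For completeness, the emit side (amdgpu_fence_emit() in drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c, outside this diffstat) has to choose between the two fence representations described above. A minimal sketch of that choice, using a hypothetical helper name and assuming the emit path is told which job, if any, owns the submission:

```c
/* Hypothetical helper sketching the fence selection described in the commit
 * message; the real logic lives in amdgpu_fence_emit(). */
static struct dma_fence *amdgpu_job_pick_hw_fence(struct amdgpu_ring *ring,
						  struct amdgpu_job *job)
{
	struct amdgpu_fence *am_fence;

	if (job) {
		/* normal submission: use the fence embedded in the job; after
		 * initialization it is tagged with
		 * AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (v2) so the free paths
		 * above can tell it apart */
		return &job->hw_fence;
	}

	/* ib_test / submission without a parent job: keep the legacy path
	 * and allocate a standalone hw fence */
	am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
	if (!am_fence)
		return NULL;
	am_fence->ring = ring;
	return &am_fence->base;
}
```

In the amdgpu_job_run() hunk above, job_run_counter is what distinguishes a first run, which takes a reference on the new hw fence, from a TDR resubmission, which reuses the fence already embedded in the job.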