author     Dave Airlie <airlied@redhat.com>   2017-12-21 11:17:45 +1000
committer  Dave Airlie <airlied@redhat.com>   2017-12-21 11:17:45 +1000
commit     df2869abd92b740af141ee2eb081bfc69bd80877 (patch)
tree       61088b24d70246d16fd3d3e04d022255076decc5 /drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
parent     991eecc1c0743be7b942367af36637796e578e08 (diff)
parent     4f4b94ee616500f326650f5b25439f1f7d606ea7 (diff)
Merge branch 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux into drm-next
* 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux: (171 commits)
  drm/amdgpu: fix test for shadow page tables
  drm/amd/display: Expose dpp1_set_cursor_attributes
  drm/amd/display: Update FMT and OPPBUF functions
  drm/amd/display: check for null before calling is_blanked
  drm/amd/display: dal 3.1.27
  drm/amd/display: Fix unused variable warnings.
  drm/amd/display: Only blank DCN when we have set_blank implementation
  drm/amd/display: Put dcn_mi_registers with other structs
  drm/amd/display: hubp refactor
  drm/amd/display: integrating optc pseudocode
  drm/amd/display: Call validate_fbc should_enable_fbc
  drm/amd/display: Clean up DCN cursor code
  drm/amd/display: fix 180 full screen pipe split
  drm/amd/display: reprogram surface config on scaling change
  drm/amd/display: Remove dwbc from pipe_ctx
  drm/amd/display: Use the maximum link setting which EDP reported.
  drm/amd/display: Add hdr_supported flag
  drm/amd/display: fix global sync param retrieval when not pipe splitting
  drm/amd/display: Update HUBP
  drm/amd/display: fix rotated surface scaling
  ...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  |  22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index bdc210ac74f8..56d9ee5013a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,7 +28,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-static void amdgpu_job_timedout(struct amd_sched_job *s_job)
+static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
@@ -37,7 +37,7 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
atomic_read(&job->ring->fence_drv.last_seq),
job->ring->fence_drv.sync_seq);
- amdgpu_gpu_recover(job->adev, job);
+ amdgpu_device_gpu_recover(job->adev, job, false);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -96,7 +96,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
amdgpu_ib_free(job->adev, &job->ibs[i], f);
}
-static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
@@ -118,7 +118,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
}
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct amd_sched_entity *entity, void *owner,
+ struct drm_sched_entity *entity, void *owner,
struct dma_fence **f)
{
int r;
@@ -127,7 +127,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
if (!f)
return -EINVAL;
- r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
+ r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
if (r)
return r;
@@ -136,13 +136,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amdgpu_ring_priority_get(job->ring, job->base.s_priority);
- amd_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base, entity);
return 0;
}
-static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
- struct amd_sched_entity *s_entity)
+static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
@@ -151,7 +151,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
if (fence && explicit) {
- if (amd_sched_dependency_optimized(fence, s_entity)) {
+ if (drm_sched_dependency_optimized(fence, s_entity)) {
r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
if (r)
DRM_ERROR("Error adding fence to sync (%d)\n", r);
@@ -173,7 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
return fence;
}
-static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
struct dma_fence *fence = NULL, *finished;
struct amdgpu_device *adev;
@@ -211,7 +211,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
return fence;
}
-const struct amd_sched_backend_ops amdgpu_sched_ops = {
+const struct drm_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
.timedout_job = amdgpu_job_timedout,
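
Note: this diff is part of the switch from the amdgpu-private scheduler API (amd_sched_*) to the shared DRM GPU scheduler (drm_sched_*); the callback and call-site names change, and amdgpu_device_gpu_recover() takes an extra boolean argument (false here). For orientation only, below is a minimal, hypothetical sketch of a driver hooking into the renamed API. The my_* names are invented; drm_sched_job_init(), drm_sched_entity_push_job(), the drm_sched_job/drm_sched_entity types and struct drm_sched_backend_ops are taken from the diff above, while the free_job hook is assumed from the scheduler interface rather than shown in this excerpt.

/*
 * Minimal sketch, not from this commit: a hypothetical driver using the
 * shared DRM GPU scheduler after the amd_sched_* -> drm_sched_* rename.
 * The my_* names are invented for illustration.
 */
#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
{
	/* Hand the job to the hardware ring and return its hardware fence
	 * (amdgpu does this in amdgpu_job_run()); NULL is only a placeholder. */
	return NULL;
}

static void my_timedout_job(struct drm_sched_job *sched_job)
{
	/* Kick off reset/recovery, as amdgpu_job_timedout() does via
	 * amdgpu_device_gpu_recover(). */
}

static void my_free_job(struct drm_sched_job *sched_job)
{
	/* Release the driver-side job, as amdgpu_job_free_cb() does. */
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.run_job      = my_run_job,
	.timedout_job = my_timedout_job,
	.free_job     = my_free_job,
};

/*
 * Submission path mirroring amdgpu_job_submit(): initialize the scheduler
 * job, take a reference on its "finished" fence for the caller, then push
 * the job onto the entity's queue.
 */
static int my_submit(struct drm_sched_job *job, struct drm_gpu_scheduler *sched,
		     struct drm_sched_entity *entity, void *owner,
		     struct dma_fence **f)
{
	int r;

	r = drm_sched_job_init(job, sched, entity, owner);
	if (r)
		return r;

	*f = dma_fence_get(&job->s_fence->finished);
	drm_sched_entity_push_job(job, entity);
	return 0;
}

Scheduler and entity setup (ring and queue initialization) is driver-specific and omitted from the sketch.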