Diffstat (limited to 'drivers/gpu/drm/amd')
31 files changed, 267 insertions, 259 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 8816853e50c0..f99d4873bf22 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -673,7 +673,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
                 goto err;
         }

-        ret = amdgpu_job_alloc(adev, 1, &job, NULL);
+        ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
         if (ret)
                 goto err;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 491d4846fc02..e1320edfc527 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -26,7 +26,6 @@

 #include <drm/display/drm_dp_helper.h>
 #include <drm/drm_edid.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
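With the reworked allocator shown above, the scheduler entity and owner are passed at allocation time and drm_sched_job_init() happens inside amdgpu_job_alloc() whenever an entity is given; a NULL entity keeps the old behaviour for direct submission. A minimal sketch of the two call patterns (hypothetical helper; prototypes as in the amdgpu_job.h hunk further down):

        /* Sketch: allocate a job either for direct or for scheduled submission. */
        static int example_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                                 struct drm_sched_entity *entity, void *owner,
                                 struct amdgpu_job **job)
        {
                if (!entity)
                        /* direct submission: no scheduler entity attached */
                        return amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, job);

                /* scheduled submission: drm_sched_job_init() is done internally */
                return amdgpu_job_alloc(adev, vm, entity, owner, 1, job);
        }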
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 275da612cd87..459150460815 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -293,12 +293,8 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
         }

         for (i = 0; i < p->gang_size; ++i) {
-                ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
-                if (ret)
-                        goto free_all_kdata;
-
-                ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i],
-                                         &fpriv->vm);
+                ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
+                                       num_ibs[i], &p->jobs[i]);
                 if (ret)
                         goto free_all_kdata;
         }
@@ -432,7 +428,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
                         dma_fence_put(old);
                 }

-                r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
+                r = amdgpu_sync_fence(&p->sync, fence);
                 dma_fence_put(fence);
                 if (r)
                         return r;
@@ -454,9 +450,20 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
                 return r;
         }

-        r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
-        dma_fence_put(fence);
+        r = amdgpu_sync_fence(&p->sync, fence);
+        if (r)
+                goto error;
+
+        /*
+         * When we have an explicit dependency it might be necessary to insert a
+         * pipeline sync to make sure that all caches etc are flushed and the
+         * next job actually sees the results from the previous one.
+         */
+        if (fence->context == p->gang_leader->base.entity->fence_context)
+                r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+
+error:
+        dma_fence_put(fence);
         return r;
 }

@@ -1105,7 +1112,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
         if (r)
                 return r;

-        r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update);
+        r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
         if (r)
                 return r;

@@ -1116,7 +1123,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
                 if (r)
                         return r;

-                r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
+                r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
                 if (r)
                         return r;
         }
@@ -1135,7 +1142,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
                 if (r)
                         return r;

-                r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
+                r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
                 if (r)
                         return r;
         }
@@ -1148,7 +1155,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
         if (r)
                 return r;

-        r = amdgpu_sync_fence(&job->sync, vm->last_update);
+        r = amdgpu_sync_fence(&p->sync, vm->last_update);
         if (r)
                 return r;

@@ -1180,7 +1187,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 {
         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-        struct amdgpu_job *leader = p->gang_leader;
         struct amdgpu_bo_list_entry *e;
         unsigned int i;
         int r;
@@ -1192,14 +1198,14 @@
                 sync_mode = amdgpu_bo_explicit_sync(bo) ?
                         AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
-                r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode,
+                r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
                                      &fpriv->vm);
                 if (r)
                         return r;
         }

-        for (i = 0; i < p->gang_size - 1; ++i) {
-                r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
+        for (i = 0; i < p->gang_size; ++i) {
+                r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
                 if (r)
                         return r;
         }
@@ -1245,7 +1251,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                 struct dma_fence *fence;

                 fence = &p->jobs[i]->base.s_fence->scheduled;
-                r = amdgpu_sync_fence(&leader->sync, fence);
+                r = drm_sched_job_add_dependency(&leader->base, fence);
                 if (r)
                         goto error_cleanup;
         }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
index cbaa19b2b8a3..207e801c24ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -75,6 +75,8 @@ struct amdgpu_cs_parser {
         unsigned                num_post_deps;
         struct amdgpu_cs_post_dep *post_deps;
+
+        struct amdgpu_sync      sync;
 };

 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
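The parser now funnels every user-supplied dependency through the new parser-wide p->sync container; only fences from the gang leader's own scheduler context are additionally recorded in explicit_sync, which amdgpu_ib_schedule() (amdgpu_ib.c hunk below) consults to decide whether a pipeline sync must be emitted. The filter as a standalone sketch (hypothetical helper name):

        /* Sketch: an explicit dependency on our own ring may need a pipeline
         * sync; everything else is an ordinary scheduler dependency. */
        static int example_add_explicit_dep(struct amdgpu_cs_parser *p,
                                            struct dma_fence *fence)
        {
                int r = amdgpu_sync_fence(&p->sync, fence);

                if (r)
                        return r;
                if (fence->context == p->gang_leader->base.entity->fence_context)
                        r = amdgpu_sync_fence(&p->gang_leader->explicit_sync,
                                              fence);
                return r;
        }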
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3d5d5d49cfab..b2b1c66bfe39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -37,6 +37,7 @@
 #include <linux/pci-p2pdma.h>

 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include <linux/vgaarb.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 83a271578265..b22471b3bd63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -39,8 +39,8 @@
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
-#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_vblank.h>

@@ -1249,7 +1249,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,

 const struct drm_mode_config_funcs amdgpu_mode_funcs = {
         .fb_create = amdgpu_display_user_framebuffer_create,
-        .output_poll_changed = drm_fb_helper_output_poll_changed,
 };

 static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 72b77b3ac4fb..5697d456d7d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -25,6 +25,7 @@
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_aperture.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_vblank.h>
 #include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 258cffe3c06a..774c77bb8f4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -182,7 +182,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
         need_ctx_switch = ring->current_ctx != fence_ctx;
         if (ring->funcs->emit_pipeline_sync && job &&
-            ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
+            ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
              (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
              amdgpu_vm_need_pipeline_sync(ring, job))) {
                 need_pipe_sync = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 03d115d2b5ed..2a9a2593dc18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -170,26 +170,27 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @idle: resulting idle VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to find an idle VMID, if none is idle add a fence to wait to the sync
  * object. Returns -ENOMEM when we are out of memory.
  */
 static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
                                  struct amdgpu_ring *ring,
-                                 struct amdgpu_sync *sync,
-                                 struct amdgpu_vmid **idle)
+                                 struct amdgpu_vmid **idle,
+                                 struct dma_fence **fence)
 {
         struct amdgpu_device *adev = ring->adev;
         unsigned vmhub = ring->funcs->vmhub;
         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
         struct dma_fence **fences;
         unsigned i;
-        int r;

-        if (!dma_fence_is_signaled(ring->vmid_wait))
-                return amdgpu_sync_fence(sync, ring->vmid_wait);
+        if (!dma_fence_is_signaled(ring->vmid_wait)) {
+                *fence = dma_fence_get(ring->vmid_wait);
+                return 0;
+        }

         fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
         if (!fences)
@@ -228,10 +229,10 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
                         return -ENOMEM;
                 }

-                r = amdgpu_sync_fence(sync, &array->base);
+                *fence = dma_fence_get(&array->base);
                 dma_fence_put(ring->vmid_wait);
                 ring->vmid_wait = &array->base;
-                return r;
+                return 0;
         }

         kfree(fences);
@@ -243,19 +244,17 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
  * @job: job who wants to use the VMID
  * @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to assign a reserved VMID.
  */
 static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
                                      struct amdgpu_ring *ring,
-                                     struct amdgpu_sync *sync,
-                                     struct dma_fence *fence,
                                      struct amdgpu_job *job,
-                                     struct amdgpu_vmid **id)
+                                     struct amdgpu_vmid **id,
+                                     struct dma_fence **fence)
 {
         struct amdgpu_device *adev = ring->adev;
         unsigned vmhub = ring->funcs->vmhub;
@@ -282,7 +281,8 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
                 tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
                 if (tmp) {
                         *id = NULL;
-                        return amdgpu_sync_fence(sync, tmp);
+                        *fence = dma_fence_get(tmp);
+                        return 0;
                 }
                 needs_flush = true;
         }
@@ -290,7 +290,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
         /* Good we can use this VMID. Remember this submission as
          * user of the VMID.
          */
-        r = amdgpu_sync_fence(&(*id)->active, fence);
+        r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
         if (r)
                 return r;

@@ -304,19 +304,17 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
  * @job: job who wants to use the VMID
  * @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to reuse a VMID for this submission.
  */
 static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                                  struct amdgpu_ring *ring,
-                                 struct amdgpu_sync *sync,
-                                 struct dma_fence *fence,
                                  struct amdgpu_job *job,
-                                 struct amdgpu_vmid **id)
+                                 struct amdgpu_vmid **id,
+                                 struct dma_fence **fence)
 {
         struct amdgpu_device *adev = ring->adev;
         unsigned vmhub = ring->funcs->vmhub;
@@ -352,7 +350,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                 /* Good, we can use this VMID. Remember this submission as
                  * user of the VMID.
                  */
-                r = amdgpu_sync_fence(&(*id)->active, fence);
+                r = amdgpu_sync_fence(&(*id)->active,
+                                      &job->base.s_fence->finished);
                 if (r)
                         return r;

@@ -370,15 +369,13 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
  * @job: job who wants to use the VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  */
 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                     struct amdgpu_sync *sync, struct dma_fence *fence,
-                     struct amdgpu_job *job)
+                     struct amdgpu_job *job, struct dma_fence **fence)
 {
         struct amdgpu_device *adev = ring->adev;
         unsigned vmhub = ring->funcs->vmhub;
@@ -388,16 +385,16 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
         int r = 0;

         mutex_lock(&id_mgr->lock);
-        r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
+        r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence);
         if (r || !idle)
                 goto error;

         if (vm->reserved_vmid[vmhub]) {
-                r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
+                r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
                 if (r || !id)
                         goto error;
         } else {
-                r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
+                r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
                 if (r)
                         goto error;

@@ -406,7 +403,8 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                         id = idle;

                         /* Remember this submission as user of the VMID */
-                        r = amdgpu_sync_fence(&id->active, fence);
+                        r = amdgpu_sync_fence(&id->active,
+                                              &job->base.s_fence->finished);
                         if (r)
                                 goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 06c8a0034fa5..57efe61dceed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -84,8 +84,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                unsigned vmhub);
 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                     struct amdgpu_sync *sync, struct dma_fence *fence,
-                     struct amdgpu_job *job);
+                     struct amdgpu_job *job, struct dma_fence **fence);
 void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
                        unsigned vmid);
 void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
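With the sync object gone from the interface, amdgpu_vmid_grab() now reports "not yet" by handing back the fence to wait for through an output parameter; the scheduler's prepare_job callback (amdgpu_job.c, below) simply loops until no fence comes back. The contract as a sketch (hypothetical wrapper, mirroring amdgpu_job_prepare_job()):

        /* Sketch: a NULL result plus an assigned vmid means the job can run;
         * a non-NULL fence must signal before trying again. */
        static struct dma_fence *example_grab_vmid(struct amdgpu_vm *vm,
                                                   struct amdgpu_ring *ring,
                                                   struct amdgpu_job *job)
        {
                struct dma_fence *fence = NULL;

                while (!fence && !job->vmid) {
                        if (amdgpu_vmid_grab(vm, ring, job, &fence))
                                break;  /* hard error, handled by the caller */
                }
                return fence;
        }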
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index cd968e781077..032651a655f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -88,8 +88,9 @@ exit:
         return DRM_GPU_SCHED_STAT_NOMINAL;
 }

-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-                     struct amdgpu_job **job, struct amdgpu_vm *vm)
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                     struct drm_sched_entity *entity, void *owner,
+                     unsigned int num_ibs, struct amdgpu_job **job)
 {
         if (num_ibs == 0)
                 return -EINVAL;
@@ -105,28 +106,34 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
         (*job)->base.sched = &adev->rings[0]->sched;
         (*job)->vm = vm;

-        amdgpu_sync_create(&(*job)->sync);
-        amdgpu_sync_create(&(*job)->sched_sync);
+        amdgpu_sync_create(&(*job)->explicit_sync);
         (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
         (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

-        return 0;
+        if (!entity)
+                return 0;
+
+        return drm_sched_job_init(&(*job)->base, entity, owner);
 }

-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                             enum amdgpu_ib_pool_type pool_type,
-                             struct amdgpu_job **job)
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+                             struct drm_sched_entity *entity, void *owner,
+                             size_t size, enum amdgpu_ib_pool_type pool_type,
+                             struct amdgpu_job **job)
 {
         int r;

-        r = amdgpu_job_alloc(adev, 1, job, NULL);
+        r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
         if (r)
                 return r;

         (*job)->num_ibs = 1;
         r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
-        if (r)
+        if (r) {
+                if (entity)
+                        drm_sched_job_cleanup(&(*job)->base);
                 kfree(*job);
+        }

         return r;
 }
@@ -166,9 +173,7 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)

         drm_sched_job_cleanup(s_job);

-        amdgpu_sync_free(&job->sync);
-        amdgpu_sync_free(&job->sched_sync);
-
+        amdgpu_sync_free(&job->explicit_sync);
         dma_fence_put(&job->hw_fence);
 }

@@ -190,9 +195,11 @@ void amdgpu_job_set_gang_leader(struct amdgpu_job *job,

 void amdgpu_job_free(struct amdgpu_job *job)
 {
+        if (job->base.entity)
+                drm_sched_job_cleanup(&job->base);
+
         amdgpu_job_free_resources(job);
-        amdgpu_sync_free(&job->sync);
-        amdgpu_sync_free(&job->sched_sync);
+        amdgpu_sync_free(&job->explicit_sync);
         if (job->gang_submit != &job->base.s_fence->scheduled)
                 dma_fence_put(job->gang_submit);

@@ -202,25 +209,16 @@ void amdgpu_job_free(struct amdgpu_job *job)
                 dma_fence_put(&job->hw_fence);
 }

-int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
-                      void *owner, struct dma_fence **f)
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
 {
-        int r;
-
-        if (!f)
-                return -EINVAL;
-
-        r = drm_sched_job_init(&job->base, entity, owner);
-        if (r)
-                return r;
+        struct dma_fence *f;

         drm_sched_job_arm(&job->base);
-
-        *f = dma_fence_get(&job->base.s_fence->finished);
+        f = dma_fence_get(&job->base.s_fence->finished);
         amdgpu_job_free_resources(job);
         drm_sched_entity_push_job(&job->base);
-        return 0;
+        return f;
 }

 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
@@ -238,30 +236,19 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
         return 0;
 }

-static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
-                                               struct drm_sched_entity *s_entity)
+static struct dma_fence *
+amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
+                       struct drm_sched_entity *s_entity)
 {
         struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
         struct amdgpu_job *job = to_amdgpu_job(sched_job);
-        struct amdgpu_vm *vm = job->vm;
-        struct dma_fence *fence;
+        struct dma_fence *fence = NULL;
         int r;

-        fence = amdgpu_sync_get_fence(&job->sync);
-        if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
-                r = amdgpu_sync_fence(&job->sched_sync, fence);
-                if (r)
-                        DRM_ERROR("Error adding fence (%d)\n", r);
-        }
-
-        while (fence == NULL && vm && !job->vmid) {
-                r = amdgpu_vmid_grab(vm, ring, &job->sync,
-                                     &job->base.s_fence->finished,
-                                     job);
+        while (!fence && job->vm && !job->vmid) {
+                r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
                 if (r)
                         DRM_ERROR("Error getting VM ID (%d)\n", r);
-
-                fence = amdgpu_sync_get_fence(&job->sync);
         }

         if (!fence && job->gang_submit)
@@ -281,8 +268,6 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
         job = to_amdgpu_job(sched_job);
         finished = &job->base.s_fence->finished;

-        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
-
         trace_amdgpu_sched_run_job(job);

         /* Skip job if VRAM is lost and never resubmit gangs */
@@ -341,7 +326,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 }

 const struct drm_sched_backend_ops amdgpu_sched_ops = {
-        .dependency = amdgpu_job_dependency,
+        .prepare_job = amdgpu_job_prepare_job,
         .run_job = amdgpu_job_run,
         .timedout_job = amdgpu_job_timedout,
         .free_job = amdgpu_job_free_cb
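After this rework amdgpu_job_submit() cannot fail: the fallible drm_sched_job_init() has already been paid for at allocation time, so submission just arms the job and returns the finished fence with a reference for the caller. Typical caller pattern, as a sketch (hypothetical helper):

        /* Sketch: submit and optionally wait; the job is consumed either way. */
        static int example_submit(struct amdgpu_job *job)
        {
                struct dma_fence *fence = amdgpu_job_submit(job);
                long r = dma_fence_wait(fence, false);

                dma_fence_put(fence);
                return r < 0 ? r : 0;
        }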
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index ab7b150e5d50..a372802ea4e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -47,8 +47,7 @@ enum amdgpu_ib_pool_type;

 struct amdgpu_job {
         struct drm_sched_job    base;
         struct amdgpu_vm        *vm;
-        struct amdgpu_sync      sync;
-        struct amdgpu_sync      sched_sync;
+        struct amdgpu_sync      explicit_sync;
         struct dma_fence        hw_fence;
         struct dma_fence        *gang_submit;
         uint32_t                preamble_status;
@@ -78,18 +77,20 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
         return to_amdgpu_ring(job->base.entity->rq->sched);
 }

-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-                     struct amdgpu_job **job, struct amdgpu_vm *vm);
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                             enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                     struct drm_sched_entity *entity, void *owner,
+                     unsigned int num_ibs, struct amdgpu_job **job);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+                             struct drm_sched_entity *entity, void *owner,
+                             size_t size, enum amdgpu_ib_pool_type pool_type,
+                             struct amdgpu_job **job);
 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
                               struct amdgpu_bo *gws, struct amdgpu_bo *oa);
 void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
                                 struct amdgpu_job *leader);
 void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
-                      void *owner, struct dma_fence **f);
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job);
 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
                              struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 8f472517d181..6f81ed4fb0d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -150,14 +150,15 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
         const unsigned ib_size_dw = 16;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;

         ib = &job->ibs[0];

-        ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
+        ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0,
+                             PACKETJ_TYPE0);
         ib->ptr[1] = 0xDEADBEEF;
         for (i = 2; i < 16; i += 2) {
                 ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d75e0370a074..ba348046fcec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -29,6 +29,7 @@
 #include "amdgpu.h"
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
 #include "atom.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 37322550d750..8a39300b1a84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -36,7 +36,6 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_fixed.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_probe_helper.h>
 #include <linux/i2c.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 090e66a1b284..bac7976975bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -259,6 +259,14 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
         return 0;
 }

+/* Free the entry back to the slab */
+static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
+{
+        hash_del(&e->node);
+        dma_fence_put(e->fence);
+        kmem_cache_free(amdgpu_sync_slab, e);
+}
+
 /**
  * amdgpu_sync_peek_fence - get the next fence not signaled yet
  *
@@ -280,9 +288,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                 struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

                 if (dma_fence_is_signaled(f)) {
-                        hash_del(&e->node);
-                        dma_fence_put(f);
-                        kmem_cache_free(amdgpu_sync_slab, e);
+                        amdgpu_sync_entry_free(e);
                         continue;
                 }
                 if (ring && s_fence) {
@@ -355,15 +361,42 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
                         if (r)
                                 return r;
                 } else {
-                        hash_del(&e->node);
-                        dma_fence_put(f);
-                        kmem_cache_free(amdgpu_sync_slab, e);
+                        amdgpu_sync_entry_free(e);
                 }
         }

         return 0;
 }

+/**
+ * amdgpu_sync_push_to_job - push fences into job
+ * @sync: sync object to get the fences from
+ * @job: job to push the fences into
+ *
+ * Add all unsignaled fences from sync to job.
+ */
+int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
+{
+        struct amdgpu_sync_entry *e;
+        struct hlist_node *tmp;
+        struct dma_fence *f;
+        int i, r;
+
+        hash_for_each_safe(sync->fences, i, tmp, e, node) {
+                f = e->fence;
+                if (dma_fence_is_signaled(f)) {
+                        amdgpu_sync_entry_free(e);
+                        continue;
+                }
+
+                dma_fence_get(f);
+                r = drm_sched_job_add_dependency(&job->base, f);
+                if (r)
+                        return r;
+        }
+        return 0;
+}
+
 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
 {
         struct amdgpu_sync_entry *e;
@@ -375,9 +408,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
                 if (r)
                         return r;

-                hash_del(&e->node);
-                dma_fence_put(e->fence);
-                kmem_cache_free(amdgpu_sync_slab, e);
+                amdgpu_sync_entry_free(e);
         }

         return 0;
@@ -396,11 +427,8 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
         struct hlist_node *tmp;
         unsigned int i;

-        hash_for_each_safe(sync->fences, i, tmp, e, node) {
-                hash_del(&e->node);
-                dma_fence_put(e->fence);
-                kmem_cache_free(amdgpu_sync_slab, e);
-        }
+        hash_for_each_safe(sync->fences, i, tmp, e, node)
+                amdgpu_sync_entry_free(e);
 }

 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 2d5c613cda10..cf1e9e858efd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -30,6 +30,7 @@ struct dma_fence;
 struct dma_resv;
 struct amdgpu_device;
 struct amdgpu_ring;
+struct amdgpu_job;

 enum amdgpu_sync_mode {
         AMDGPU_SYNC_ALWAYS,
@@ -54,6 +55,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                                          struct amdgpu_ring *ring);
 struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
+int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job);
 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
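The container stays useful even though jobs now track dependencies in the scheduler: amdgpu_sync hashes fences by context and keeps only the most recent fence per context, so pushing to the job at the end deduplicates what drm_sched_job_add_dependency() would otherwise store one by one. A hypothetical caller-side sketch (fence_a and fence_b assumed to share a context; the dedup behaviour is the long-standing amdgpu_sync semantic, not something added here):

        struct amdgpu_sync deps;
        int r;

        amdgpu_sync_create(&deps);
        r = amdgpu_sync_fence(&deps, fence_a);       /* stored */
        if (!r)
                r = amdgpu_sync_fence(&deps, fence_b);   /* replaces fence_a */
        if (!r)
                r = amdgpu_sync_push_to_job(&deps, job); /* one dependency */
        amdgpu_sync_free(&deps);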
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c8169cea3032..7b5074e776f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -190,7 +190,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
         struct amdgpu_device *adev = ring->adev;
         unsigned offset, num_pages, num_dw, num_bytes;
         uint64_t src_addr, dst_addr;
-        struct dma_fence *fence;
         struct amdgpu_job *job;
         void *cpu_addr;
         uint64_t flags;
@@ -230,7 +229,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
         num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;

-        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+        r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     num_dw * 4 + num_bytes,
                                      AMDGPU_IB_POOL_DELAYED, &job);
         if (r)
                 return r;
@@ -270,18 +271,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
                 }
         }

-        r = amdgpu_job_submit(job, &adev->mman.entity,
-                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-        if (r)
-                goto error_free;
-
-        dma_fence_put(fence);
-
-        return r;
-
-error_free:
-        amdgpu_job_free(job);
-        return r;
+        dma_fence_put(amdgpu_job_submit(job));
+        return 0;
 }

 /**
@@ -1395,7 +1386,8 @@ static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
 }

 static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
-                                        unsigned long offset, void *buf, int len, int write)
+                                         unsigned long offset, void *buf,
+                                         int len, int write)
 {
         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
         struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
@@ -1419,26 +1411,27 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
                 memcpy(adev->mman.sdma_access_ptr, buf, len);

         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
+        r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+                                     &job);
         if (r)
                 goto out;

         amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
-        src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
+        src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
+                src_mm.start;
         dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
         if (write)
                 swap(src_addr, dst_addr);

-        amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
+        amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
+                                PAGE_SIZE, false);

         amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
         WARN_ON(job->ibs[0].length_dw > num_dw);

-        r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-        if (r) {
-                amdgpu_job_free(job);
-                goto out;
-        }
+        fence = amdgpu_job_submit(job);

         if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
                 r = -ETIMEDOUT;
@@ -1988,7 +1981,9 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
                 AMDGPU_IB_POOL_DELAYED;
         int r;

-        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
+        r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     num_dw * 4, pool, job);
         if (r)
                 return r;

@@ -1998,17 +1993,11 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
                         adev->gart.bo);
                 (*job)->vm_needs_flush = true;
         }
-        if (resv) {
-                r = amdgpu_sync_resv(adev, &(*job)->sync, resv,
-                                     AMDGPU_SYNC_ALWAYS,
-                                     AMDGPU_FENCE_OWNER_UNDEFINED);
-                if (r) {
-                        DRM_ERROR("sync failed (%d).\n", r);
-                        amdgpu_job_free(*job);
-                        return r;
-                }
-        }
-        return 0;
+        if (!resv)
+                return 0;
+
+        return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
+                                                   DMA_RESV_USAGE_BOOKKEEP);
 }

 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
@@ -2053,8 +2042,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
         if (direct_submit)
                 r = amdgpu_job_submit_direct(job, ring, fence);
         else
-                r = amdgpu_job_submit(job, &adev->mman.entity,
-                                      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+                *fence = amdgpu_job_submit(job);
         if (r)
                 goto error_free;

@@ -2099,16 +2087,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
         WARN_ON(job->ibs[0].length_dw > num_dw);

-        r = amdgpu_job_submit(job, &adev->mman.entity,
-                              AMDGPU_FENCE_OWNER_UNDEFINED, fence);
-        if (r)
-                goto error_free;
-
+        *fence = amdgpu_job_submit(job);
         return 0;
-
-error_free:
-        amdgpu_job_free(job);
-        return r;
 }

 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 6eac649499d3..e00bb654e24b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1132,7 +1132,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
         unsigned offset_idx = 0;
         unsigned offset[3] = { UVD_BASE_SI, 0, 0 };

-        r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
+        r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     64, direct ? AMDGPU_IB_POOL_DIRECT :
                                      AMDGPU_IB_POOL_DELAYED, &job);
         if (r)
                 return r;
@@ -1175,16 +1177,13 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                 if (r)
                         goto err_free;
         } else {
-                r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
-                                     AMDGPU_SYNC_ALWAYS,
-                                     AMDGPU_FENCE_OWNER_UNDEFINED);
+                r = drm_sched_job_add_resv_dependencies(&job->base,
+                                                        bo->tbo.base.resv,
+                                                        DMA_RESV_USAGE_KERNEL);
                 if (r)
                         goto err_free;

-                r = amdgpu_job_submit(job, &adev->uvd.entity,
-                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-                if (r)
-                        goto err_free;
+                f = amdgpu_job_submit(job);
         }

         amdgpu_bo_reserve(bo, true);
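Where no owner-based filtering is needed, the sync-object round trip disappears entirely and the scheduler reads the reservation object itself: amdgpu_ttm_prepare_job() above uses DMA_RESV_USAGE_BOOKKEEP to depend on everything, while amdgpu_uvd_send_msg() waits only on kernel-internal work via DMA_RESV_USAGE_KERNEL. A minimal sketch (hypothetical wrapper):

        /* Sketch: dependencies taken straight from a reservation object; the
         * usage class selects how much to wait on. */
        static int example_resv_deps(struct amdgpu_job *job,
                                     struct dma_resv *resv, bool kernel_only)
        {
                return drm_sched_job_add_resv_dependencies(&job->base, resv,
                                kernel_only ? DMA_RESV_USAGE_KERNEL
                                            : DMA_RESV_USAGE_BOOKKEEP);
        }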
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 02cb3a12dd76..b239e874f2d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -450,8 +450,10 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
         uint64_t addr;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+                                     &job);
         if (r)
                 return r;

@@ -538,7 +540,9 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
         struct dma_fence *f = NULL;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+        r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     ib_size_dw * 4,
                                      direct ? AMDGPU_IB_POOL_DIRECT :
                                      AMDGPU_IB_POOL_DELAYED, &job);
         if (r)
@@ -570,8 +574,7 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
         if (direct)
                 r = amdgpu_job_submit_direct(job, ring, &f);
         else
-                r = amdgpu_job_submit(job, &ring->adev->vce.entity,
-                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+                f = amdgpu_job_submit(job);
         if (r)
                 goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index c448c1bdf84d..33f3415096f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -600,15 +600,16 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
                                    struct amdgpu_ib *ib_msg,
                                    struct dma_fence **fence)
 {
+        u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
         struct amdgpu_device *adev = ring->adev;
         struct dma_fence *f = NULL;
         struct amdgpu_job *job;
         struct amdgpu_ib *ib;
-        uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
         int i, r;

-        r = amdgpu_job_alloc_with_ib(adev, 64,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+                                     64, AMDGPU_IB_POOL_DIRECT,
+                                     &job);
         if (r)
                 goto err;

@@ -787,8 +788,9 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
         if (sq)
                 ib_size_dw += 8;

-        r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+                                     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+                                     &job);
         if (r)
                 goto err;

@@ -916,8 +918,9 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
         if (sq)
                 ib_size_dw += 8;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+                                     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+                                     &job);
         if (r)
                 return r;

@@ -982,8 +985,9 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
         if (sq)
                 ib_size_dw += 8;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+                                     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+                                     &job);
         if (r)
                 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 69e105fa41f6..59cf64216fbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -47,6 +47,32 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
         return r;
 }

+/* Allocate a new job for @count PTE updates */
+static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
+                                    unsigned int count)
+{
+        enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+                : AMDGPU_IB_POOL_DELAYED;
+        struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate
+                : &p->vm->delayed;
+        unsigned int ndw;
+        int r;
+
+        /* estimate how many dw we need */
+        ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+        if (p->pages_addr)
+                ndw += count * 2;
+        ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
+
+        r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
+                                     ndw * 4, pool, &p->job);
+        if (r)
+                return r;
+
+        p->num_dw_left = ndw;
+        return 0;
+}
+
 /**
  * amdgpu_vm_sdma_prepare - prepare SDMA command submission
  *
@@ -61,21 +87,22 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
                                   struct dma_resv *resv,
                                   enum amdgpu_sync_mode sync_mode)
 {
-        enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
-                : AMDGPU_IB_POOL_DELAYED;
-        unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+        struct amdgpu_sync sync;
         int r;

-        r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
+        r = amdgpu_vm_sdma_alloc_job(p, 0);
         if (r)
                 return r;

-        p->num_dw_left = ndw;
-
         if (!resv)
                 return 0;

-        return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
+        amdgpu_sync_create(&sync);
+        r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
+        if (!r)
+                r = amdgpu_sync_push_to_job(&sync, p->job);
+        amdgpu_sync_free(&sync);
+        return r;
 }

 /**
@@ -91,20 +118,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
                                  struct dma_fence **fence)
 {
         struct amdgpu_ib *ib = p->job->ibs;
-        struct drm_sched_entity *entity;
         struct amdgpu_ring *ring;
         struct dma_fence *f;
-        int r;

-        entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
-        ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
+        ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
+                            sched);

         WARN_ON(ib->length_dw == 0);
         amdgpu_ring_pad_ib(ring, ib);
         WARN_ON(ib->length_dw > p->num_dw_left);
-        r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
-        if (r)
-                goto error;
+        f = amdgpu_job_submit(p->job);

         if (p->unlocked) {
                 struct dma_fence *tmp = dma_fence_get(f);
@@ -127,10 +150,6 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
         }
         dma_fence_put(f);
         return 0;
-
-error:
-        amdgpu_job_free(p->job);
-        return r;
 }

 /**
@@ -210,8 +229,6 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
                                  uint64_t flags)
 {
         struct amdgpu_bo *bo = &vmbo->bo;
-        enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
-                : AMDGPU_IB_POOL_DELAYED;
         struct dma_resv_iter cursor;
         unsigned int i, ndw, nptes;
         struct dma_fence *fence;
@@ -221,7 +238,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
         /* Wait for PD/PT moves to be completed */
         dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
         dma_resv_for_each_fence_unlocked(&cursor, fence) {
-                r = amdgpu_sync_fence(&p->job->sync, fence);
+                r = drm_sched_job_add_dependency(&p->job->base, fence);
                 if (r) {
                         dma_resv_iter_end(&cursor);
                         return r;
@@ -238,19 +255,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
                         if (r)
                                 return r;

-                        /* estimate how many dw we need */
-                        ndw = 32;
-                        if (p->pages_addr)
-                                ndw += count * 2;
-                        ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
-                        ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
-
-                        r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
-                                                     &p->job);
+                        r = amdgpu_vm_sdma_alloc_job(p, count);
                         if (r)
                                 return r;
-
-                        p->num_dw_left = ndw;
                 }

                 if (!p->pages_addr) {
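Because the job no longer embeds a sync container, amdgpu_vm_sdma_prepare() above builds one on the stack: amdgpu_sync_resv() still applies the amdgpu-specific sync_mode filtering (e.g. AMDGPU_SYNC_NE_OWNER) that plain drm_sched_job_add_resv_dependencies() cannot express, and the surviving fences are then pushed into the job. The same pattern as a standalone sketch (hypothetical helper):

        /* Sketch: filter reservation fences through a temporary container
         * before handing them to the scheduler job. */
        static int example_prepare_deps(struct amdgpu_device *adev,
                                        struct dma_resv *resv,
                                        enum amdgpu_sync_mode mode,
                                        struct amdgpu_vm *vm,
                                        struct amdgpu_job *job)
        {
                struct amdgpu_sync sync;
                int r;

                amdgpu_sync_create(&sync);
                r = amdgpu_sync_resv(adev, &sync, resv, mode, vm);
                if (!r)
                        r = amdgpu_sync_push_to_job(&sync, job);
                amdgpu_sync_free(&sync);
                return r;
        }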
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index a2452fc304c5..248f1a4e915f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -21,6 +21,7 @@
  *
  */

+#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_vblank.h>

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 6ac680c4c6e2..cd9c19060d89 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -21,6 +21,7 @@
  *
  */

+#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_vblank.h>

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 354ae09cc9a2..76323deecc58 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -23,6 +23,7 @@

 #include <linux/pci.h>

+#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_vblank.h>

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 33977b0ba19d..01cf3ab111cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -21,6 +21,7 @@
  *
  */

+#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_vblank.h>

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index a83efdc8aa0c..21e46817d82d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -371,7 +371,9 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
          * translation. Avoid this by doing the invalidation from the SDMA
          * itself.
          */
-        r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+        r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
                                      &job);
         if (r)
                 goto error_alloc;
@@ -380,10 +382,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
         job->vm_needs_flush = true;
         job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-        r = amdgpu_job_submit(job, &adev->mman.entity,
-                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-        if (r)
-                goto error_submit;
+        fence = amdgpu_job_submit(job);

         mutex_unlock(&adev->mman.gtt_window_lock);

@@ -392,9 +391,6 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,

         return;

-error_submit:
-        amdgpu_job_free(job);
-
 error_alloc:
         mutex_unlock(&adev->mman.gtt_window_lock);
         DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 375c440957dc..5fe872f4bea7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -216,8 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
         uint64_t addr;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;

@@ -280,8 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
         uint64_t addr;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index e668b3baa8c6..e407be6cb63c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -213,7 +213,7 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
  *
  * Open up a stream for HW test
  */
-static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
                                        struct amdgpu_bo *bo,
                                        struct dma_fence **fence)
 {
@@ -224,8 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
         uint64_t addr;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;

@@ -276,7 +276,7 @@ err:
  *
  * Close up a stream for HW test or if userspace failed to do so
  */
-static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
                                         struct amdgpu_bo *bo,
                                         struct dma_fence **fence)
 {
@@ -287,8 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
         uint64_t addr;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 77227761e669..10048ce16aea 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -64,8 +64,11 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
         num_bytes = npages * 8;

-        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
-                                     AMDGPU_IB_POOL_DELAYED, &job);
+        r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     num_dw * 4 + num_bytes,
+                                     AMDGPU_IB_POOL_DELAYED,
+                                     &job);
         if (r)
                 return r;

@@ -88,18 +91,10 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
         cpu_addr = &job->ibs[0].ptr[num_dw];

         amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
-        r = amdgpu_job_submit(job, &adev->mman.entity,
-                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-        if (r)
-                goto error_free;
-
+        fence = amdgpu_job_submit(job);
         dma_fence_put(fence);

         return r;
-
-error_free:
-        amdgpu_job_free(job);
-        return r;
 }

 /**
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ef4f788edabe..a8772082883e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -82,7 +82,6 @@
 #include <drm/drm_atomic_uapi.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_blend.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_vblank.h>
@@ -2820,7 +2819,6 @@ const struct amdgpu_ip_block_version dm_ip_block =

 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
         .fb_create = amdgpu_display_user_framebuffer_create,
         .get_format_info = amd_get_format_info,
-        .output_poll_changed = drm_fb_helper_output_poll_changed,
         .atomic_check = amdgpu_dm_atomic_check,
         .atomic_commit = drm_atomic_helper_commit,
 };