author     Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 14:44:44 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 14:44:44 -0800
commit     d99676af540c2dc829999928fb81c58c80a1dce4 (patch)
tree       a78602eb6fa5d46d867c00ee187179ced6c18766 /drivers/gpu/drm/scheduler/sched_main.c
parent     10e2ec8edece2566b40f69bae035a555ece71ab4 (diff)
parent     f730f39eb981af249d57336b47cfe3925632a7fd (diff)
Merge tag 'drm-next-2021-02-19' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
"A pretty normal tree, lots of refactoring across the board, ttm, i915,
nouveau, and a bunch of features in various drivers.
docs:
- lots of updated docs
core:
- require crtc to have unique primary plane
- fourcc macro fix
- PCI bar quirk for bar resizing
- don't send hotplug on error
- move vm code to legacy
- nuke hose, only used on old obsolete alpha
dma-buf:
- kernel doc updates
- improved lock tracking
dp/hdmi:
- DP-HDMI2.1 protocol converter support
ttm:
- bo size handling cleanup
- release a pinned bo warning
- cleanup lru handler
- avoid using pages with drm_prime_sg_to_page_addr_arrays
cma-helper:
- prime/mmap fixes
bridge:
- add DP support
gma500:
- remove gma3600 support
i915:
- try eDP fast/narrow link again with fallback
- Intel eDP backlight control
- replace display register read/write macros
- refactor intel_display.c
- display power improvements
- HPD code cleanup
- Rocketlake display fixes
- Power/backlight/RPM fixes
- DG1 display fix
- IVB/BYT clear residuals security fix again
- make i915 mitigations selectable via module parameter
- HSW GT1 GPU hangs fixes
- DG1 workaround hang fixes
- TGL DMAR hang avoidance
- Lots of GT fixes
- follow-on fixes for residuals clear
- gen7 per-engine-reset support
- HDCP2.2 + HDCP1.4 GEN12 DP MST support
- TGL clear color support
- backlight refactoring
- VRR/Adaptive sync enabling on DP/EDP for TGL+
- async flips for all ilk+
amdgpu:
- rework IH ring handling (Vega/Navi)
- rework HDP handling (Vega/Navi)
- swSMU updates for renoir/vangogh
- Sienna Cichlid overdrive support
- FP16 on DCE8-11 support
- GPU reset on navy flounder/vangogh
- SMU profile fixes for APU
- SR-IOV fixes
- Vangogh SMU fixes
- fan speed control fixes
amdkfd:
- config handling fix
- buffer free fix
- recursive lock warnings fix
nouveau:
- Turing MMU fault recovery fixes
- mDP connectors reporting fix
- audio locking fixes
- rework engines/instances code to support new scheme
tegra:
- VIC newer firmware support
- display/gr2d fixes for older tegra
- pm reference leak fix
mediatek:
- SOC MT8183 support
- decouple sub driver + share mtk mutex driver
radeon:
- PCI resource fix for some platforms
ingenic:
- pm support
- 8-bit delta RGB panels
vmwgfx:
- managed driver helpers
vc4:
- BCM2711 DSI1 support
- converted to atomic helpers
- enable 10/12 bpc outputs
- gem prime mmap helpers
- CEC fix
omap:
- use degamma table
- CTM support
- rework DSI support
imx:
- stack usage fixes
- drm managed support
- imx-tve clock provider leak fix
rcar-du:
- default mode fixes
- conversion to managed API
hisilicon:
- use simple encoder
vkms:
- writeback connector support
d3:
- BT2020 support"
* tag 'drm-next-2021-02-19' of git://anongit.freedesktop.org/drm/drm: (1459 commits)
drm/amdgpu: Set reference clock to 100Mhz on Renoir (v2)
drm/radeon: OLAND boards don't have VCE
drm/amdkfd: Fix recursive lock warnings
drm/amd/display: Add FPU wrappers to dcn21_validate_bandwidth()
drm/amd/display: Fix potential integer overflow
drm/amdgpu/display: remove hdcp_srm sysfs on device removal
drm/amdgpu: fix CGTS_TCC_DISABLE register offset on gfx10.3
drm/i915/gt: Correct surface base address for renderclear
drm/i915: Disallow plane x+w>stride on ilk+ with X-tiling
drm/nouveau/top/ga100: initial support
drm/nouveau/top: add ioctrl/nvjpg
drm/nouveau/privring: rename from ibus
drm/nouveau/nvkm: remove nvkm_subdev.index
drm/nouveau/nvkm: determine subdev id/order from layout
drm/nouveau/vic: switch to instanced constructor
drm/nouveau/sw: switch to instanced constructor
drm/nouveau/sec2: switch to instanced constructor
drm/nouveau/sec: switch to instanced constructor
drm/nouveau/pm: switch to instanced constructor
drm/nouveau/nvenc: switch to instanced constructor
...
Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c  123
1 file changed, 67 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index b498d474ef9e..92637b70c9bf 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,8 +60,6 @@
 #define to_drm_sched_job(sched_job)		\
 		container_of((sched_job), struct drm_sched_job, queue_node)
 
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
-
 /**
  * drm_sched_rq_init - initialize a given run queue struct
  *
@@ -164,6 +162,40 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
 }
 
 /**
+ * drm_sched_job_done - complete a job
+ * @s_job: pointer to the job which is done
+ *
+ * Finish the job's fence and wake up the worker thread.
+ */
+static void drm_sched_job_done(struct drm_sched_job *s_job)
+{
+	struct drm_sched_fence *s_fence = s_job->s_fence;
+	struct drm_gpu_scheduler *sched = s_fence->sched;
+
+	atomic_dec(&sched->hw_rq_count);
+	atomic_dec(&sched->score);
+
+	trace_drm_sched_process_job(s_fence);
+
+	dma_fence_get(&s_fence->finished);
+	drm_sched_fence_finished(s_fence);
+	dma_fence_put(&s_fence->finished);
+	wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * drm_sched_job_done_cb - the callback for a done job
+ * @f: fence
+ * @cb: fence callbacks
+ */
+static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
+
+	drm_sched_job_done(s_job);
+}
+
+/**
  * drm_sched_dependency_optimized
  *
  * @fence: the dependency fence
@@ -199,7 +231,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 {
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    !list_empty(&sched->ring_mirror_list))
+	    !list_empty(&sched->pending_list))
 		schedule_delayed_work(&sched->work_tdr, sched->timeout);
 }
 
@@ -259,7 +291,7 @@ void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
 {
 	spin_lock(&sched->job_list_lock);
 
-	if (list_empty(&sched->ring_mirror_list))
+	if (list_empty(&sched->pending_list))
 		cancel_delayed_work(&sched->work_tdr);
 	else
 		mod_delayed_work(system_wq, &sched->work_tdr, remaining);
@@ -273,7 +305,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
 	struct drm_gpu_scheduler *sched = s_job->sched;
 
 	spin_lock(&sched->job_list_lock);
-	list_add_tail(&s_job->node, &sched->ring_mirror_list);
+	list_add_tail(&s_job->list, &sched->pending_list);
 	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
@@ -287,8 +319,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
 
 	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
 	spin_lock(&sched->job_list_lock);
-	job = list_first_entry_or_null(&sched->ring_mirror_list,
-				       struct drm_sched_job, node);
+	job = list_first_entry_or_null(&sched->pending_list,
+				       struct drm_sched_job, list);
 
 	if (job) {
 		/*
@@ -296,7 +328,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
 		 * is parked at which point it's safe.
 		 */
-		list_del_init(&job->node);
+		list_del_init(&job->list);
 		spin_unlock(&sched->job_list_lock);
 
 		job->sched->ops->timedout_job(job);
@@ -372,7 +404,7 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
  * Stop the scheduler and also removes and frees all completed jobs.
  * Note: bad job will not be freed as it might be used later and so it's
  * callers responsibility to release it manually if it's not part of the
- * mirror list any more.
+ * pending list any more.
  *
  */
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
@@ -393,26 +425,27 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 	 * Add at the head of the queue to reflect it was the earliest
 	 * job extracted.
 	 */
-	list_add(&bad->node, &sched->ring_mirror_list);
+	list_add(&bad->list, &sched->pending_list);
 
 	/*
 	 * Iterate the job list from later to earlier one and either deactive
-	 * their HW callbacks or remove them from mirror list if they already
+	 * their HW callbacks or remove them from pending list if they already
 	 * signaled.
 	 * This iteration is thread safe as sched thread is stopped.
 	 */
-	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
+					 list) {
 		if (s_job->s_fence->parent &&
 		    dma_fence_remove_callback(s_job->s_fence->parent,
 					      &s_job->cb)) {
 			atomic_dec(&sched->hw_rq_count);
 		} else {
 			/*
-			 * remove job from ring_mirror_list.
+			 * remove job from pending_list.
 			 * Locking here is for concurrent resume timeout
 			 */
 			spin_lock(&sched->job_list_lock);
-			list_del_init(&s_job->node);
+			list_del_init(&s_job->list);
 			spin_unlock(&sched->job_list_lock);
 
 			/*
@@ -463,7 +496,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 	 * so no new jobs are being inserted or removed. Also concurrent
 	 * GPU recovers can't run in parallel.
 	 */
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
 		struct dma_fence *fence = s_job->s_fence->parent;
 
 		atomic_inc(&sched->hw_rq_count);
@@ -473,14 +506,14 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 
 		if (fence) {
 			r = dma_fence_add_callback(fence, &s_job->cb,
-						   drm_sched_process_job);
+						   drm_sched_job_done_cb);
 			if (r == -ENOENT)
-				drm_sched_process_job(fence, &s_job->cb);
+				drm_sched_job_done(s_job);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
 		} else
-			drm_sched_process_job(NULL, &s_job->cb);
+			drm_sched_job_done(s_job);
 	}
 
 	if (full_recovery) {
@@ -494,7 +527,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 EXPORT_SYMBOL(drm_sched_start);
 
 /**
- * drm_sched_resubmit_jobs - helper to relunch job from mirror ring list
+ * drm_sched_resubmit_jobs - helper to relunch job from pending ring list
 *
 * @sched: scheduler instance
 *
@@ -506,7 +539,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 	bool found_guilty = false;
 	struct dma_fence *fence;
 
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 
 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
@@ -566,7 +599,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		return -ENOMEM;
 	job->id = atomic64_inc_return(&sched->job_id_count);
 
-	INIT_LIST_HEAD(&job->node);
+	INIT_LIST_HEAD(&job->list);
 
 	return 0;
 }
@@ -636,36 +669,11 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 }
 
 /**
- * drm_sched_process_job - process a job
- *
- * @f: fence
- * @cb: fence callbacks
- *
- * Called after job has finished execution.
- */
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
-{
-	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
-	struct drm_sched_fence *s_fence = s_job->s_fence;
-	struct drm_gpu_scheduler *sched = s_fence->sched;
-
-	atomic_dec(&sched->hw_rq_count);
-	atomic_dec(&sched->score);
-
-	trace_drm_sched_process_job(s_fence);
-
-	dma_fence_get(&s_fence->finished);
-	drm_sched_fence_finished(s_fence);
-	dma_fence_put(&s_fence->finished);
-	wake_up_interruptible(&sched->wake_up_worker);
-}
-
-/**
  * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
- * Returns the next finished job from the mirror list (if there is one)
+ * Returns the next finished job from the pending list (if there is one)
 * ready for it to be destroyed.
 */
 static struct drm_sched_job *
@@ -675,7 +683,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 
 	/*
	 * Don't destroy jobs while the timeout worker is running OR thread
-	 * is being parked and hence assumed to not touch ring_mirror_list
+	 * is being parked and hence assumed to not touch pending_list
	 */
 	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 	    !cancel_delayed_work(&sched->work_tdr)) ||
@@ -684,12 +692,12 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 
 	spin_lock(&sched->job_list_lock);
 
-	job = list_first_entry_or_null(&sched->ring_mirror_list,
-				       struct drm_sched_job, node);
+	job = list_first_entry_or_null(&sched->pending_list,
+				       struct drm_sched_job, list);
 
 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
-		/* remove job from ring_mirror_list */
-		list_del_init(&job->node);
+		/* remove job from pending_list */
+		list_del_init(&job->list);
 	} else {
 		job = NULL;
 		/* queue timeout for next job */
@@ -809,9 +817,9 @@ static int drm_sched_main(void *param)
 		if (!IS_ERR_OR_NULL(fence)) {
 			s_fence->parent = dma_fence_get(fence);
 			r = dma_fence_add_callback(fence, &sched_job->cb,
-						   drm_sched_process_job);
+						   drm_sched_job_done_cb);
 			if (r == -ENOENT)
-				drm_sched_process_job(fence, &sched_job->cb);
+				drm_sched_job_done(sched_job);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
@@ -820,7 +828,7 @@ static int drm_sched_main(void *param)
 			if (IS_ERR(fence))
 				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
 
-			drm_sched_process_job(NULL, &sched_job->cb);
+			drm_sched_job_done(sched_job);
 		}
 
 		wake_up(&sched->job_scheduled);
@@ -858,7 +866,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
-	INIT_LIST_HEAD(&sched->ring_mirror_list);
+	INIT_LIST_HEAD(&sched->pending_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
@@ -891,6 +899,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 	if (sched->thread)
 		kthread_stop(sched->thread);
 
+	/* Confirm no work left behind accessing device structures */
+	cancel_delayed_work_sync(&sched->work_tdr);
+
 	sched->ready = false;
 }
 EXPORT_SYMBOL(drm_sched_fini);
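A note on the completion pattern both call sites in this diff converge on:
dma_fence_add_callback() returns -ENOENT when the fence has already signaled,
meaning the callback will never fire, so the scheduler must complete the job
inline. The stand-alone C sketch below mirrors only that control flow; it is a
toy model, not kernel code, and every identifier in it (toy_fence, toy_job,
toy_submit, ...) is invented for illustration.

/*
 * Toy model of the "attach callback or complete inline" pattern.
 * Mirrors the shape of drm_sched_main()/drm_sched_start() after this
 * diff, where both paths funnel into drm_sched_job_done().
 */
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

struct toy_fence {
	bool signaled;
	void (*cb)(struct toy_fence *f, void *data);
	void *cb_data;
};

struct toy_job {
	const char *name;
	struct toy_fence *hw_fence;
};

/* Mimics dma_fence_add_callback(): -ENOENT means "already signaled". */
static int toy_fence_add_callback(struct toy_fence *f,
				  void (*cb)(struct toy_fence *, void *),
				  void *data)
{
	if (f->signaled)
		return -ENOENT;
	f->cb = cb;
	f->cb_data = data;
	return 0;
}

/* Fire the registered callback, as the HW interrupt path would. */
static void toy_fence_signal(struct toy_fence *f)
{
	f->signaled = true;
	if (f->cb)
		f->cb(f, f->cb_data);
}

/* Counterpart of drm_sched_job_done(): bookkeeping + wake-up. */
static void toy_job_done(struct toy_job *job)
{
	printf("job %s done, waking scheduler\n", job->name);
}

/* Counterpart of drm_sched_job_done_cb(): unwrap the job, delegate. */
static void toy_job_done_cb(struct toy_fence *f, void *data)
{
	(void)f;
	toy_job_done(data);
}

static void toy_submit(struct toy_job *job)
{
	int r = toy_fence_add_callback(job->hw_fence, toy_job_done_cb, job);

	if (r == -ENOENT)	/* fence already signaled: complete now */
		toy_job_done(job);
	else if (r)
		fprintf(stderr, "fence add callback failed (%d)\n", r);
}

int main(void)
{
	struct toy_fence live = { .signaled = false };
	struct toy_fence done = { .signaled = true };
	struct toy_job a = { "a", &live }, b = { "b", &done };

	toy_submit(&a);		/* callback armed, fires on signal below */
	toy_submit(&b);		/* -ENOENT path: completed inline */
	toy_fence_signal(&live);
	return 0;
}

The kernel version additionally drops hw_rq_count/score and signals the
scheduler's finished fence; the toy keeps only the branch structure that the
refactor deduplicates into drm_sched_job_done().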