author     Daniel Vetter <daniel.vetter@ffwll.ch>  2023-11-20 09:50:08 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2023-11-20 09:50:09 +0100
commit     c79b972eb88b077d2765e7790d0902b3dc94d55c (patch)
tree       1ba476c2339679189ed5fdd8b7afe9bb5792605a /include/drm/gpu_scheduler.h
parent     98b1cc82c4affc16f5598d4fa14b1858671b2263 (diff)
parent     3b434a3445fff3149128db0169da864d67057325 (diff)
Merge tag 'drm-misc-next-2023-11-17' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 6.8:
UAPI Changes:
- drm: Introduce CLOSE_FB ioctl
- drm/dp-mst: Documentation for the PATH property
- fdinfo: Do not align to a MiB if the size is larger than 1 MiB
- virtio-gpu: add explicit virtgpu context debug name
Cross-subsystem Changes:
- dma-buf: Add dma_fence_timestamp helper
Core Changes:
- client: Do not acquire module reference
- edid: split out drm_eld, add SAD helpers
- format-helper: Cache format conversion buffers
- sched: Move from a kthread to a workqueue, rename some internal
  functions for clarity, implement dynamic job-flow control
- gpuvm: Provide more features to handle GEM objects
- tests: Remove slow kunit tests
Driver Changes:
- ivpu: Update FW API, new debugfs file, a new NOP job submission test
mode, improve suspend/resume, PM improvements, MMU PT optimizations,
firmware profiling frequency support, support for uncached buffers,
switch to gem shmem helpers, replace kthread with threaded
interrupts
- panfrost: PM improvements
- qaic: Allow running with a single MSI, support host/device time
  synchronization, misc improvements
- simplefb: Support memory-regions, support power-domains
- ssd130x: Uninitialized variable fixes
- omapdrm: dma-fence lockdep annotation fix
- tidss: dma-fence lockdep annotation fix
- v3d: Support BCM2712 (Raspberry Pi 5), support fdinfo and gputop
- panel:
- edp: Support AUO B116XTN02, BOE NT116WHM-N21,836X2, NV116WHM-N49
  V8.0, plus a whole bunch of panels used on MediaTek Chromebooks.
Note that the one missing s-o-b for 0da611a87021 ("dma-buf: add
dma_fence_timestamp helper") has been supplied here, and rebasing the
entire tree and upsetting committers didn't seem worth the trouble:
https://lore.kernel.org/dri-devel/ce94020e-a7d4-4799-b87d-fbea7b14a268@gmail.com/
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/y4awn5vcfy2lr2hpauo7rc4nfpnc6kksr7btmnwaz7zk63pwoi@gwwef5iqpzva
Diffstat (limited to 'include/drm/gpu_scheduler.h')
-rw-r--r--  include/drm/gpu_scheduler.h | 50 ++++++++++++++++++++++++++++++++++++++++++------------
1 file changed, 38 insertions(+), 12 deletions(-)
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d2fb81e34174..9a50348bd5c0 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -320,6 +320,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  * @sched: the scheduler instance on which this job is scheduled.
  * @s_fence: contains the fences for the scheduling of job.
  * @finish_cb: the callback for the finished fence.
+ * @credits: the number of credits this job contributes to the scheduler
  * @work: Helper to reschdeule job kill to different context.
  * @id: a unique id assigned to each job scheduled on the scheduler.
  * @karma: increment on every hang caused by this job. If this exceeds the hang
@@ -339,6 +340,8 @@ struct drm_sched_job {
 	struct drm_gpu_scheduler	*sched;
 	struct drm_sched_fence		*s_fence;
 
+	u32				credits;
+
 	/*
 	 * work is used only after finish_cb has been used and will not be
 	 * accessed anymore.
@@ -462,29 +465,42 @@ struct drm_sched_backend_ops {
 	 * and it's time to clean it up.
 	 */
 	void (*free_job)(struct drm_sched_job *sched_job);
+
+	/**
+	 * @update_job_credits: Called when the scheduler is considering this
+	 * job for execution.
+	 *
+	 * This callback returns the number of credits the job would take if
+	 * pushed to the hardware. Drivers may use this to dynamically update
+	 * the job's credit count. For instance, deduct the number of credits
+	 * for already signalled native fences.
+	 *
+	 * This callback is optional.
+	 */
+	u32 (*update_job_credits)(struct drm_sched_job *sched_job);
 };
 
 /**
  * struct drm_gpu_scheduler - scheduler instance-specific data
  *
  * @ops: backend operations provided by the driver.
- * @hw_submission_limit: the max size of the hardware queue.
+ * @credit_limit: the credit limit of this scheduler
+ * @credit_count: the current credit count of this scheduler
  * @timeout: the time after which a job is removed from the scheduler.
  * @name: name of the ring for which this scheduler is being used.
  * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
  *           as there's usually one run-queue per priority, but could be less.
  * @sched_rq: An allocated array of run-queues of size @num_rqs;
- * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
- *                  is ready to be scheduled.
  * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
  *                 waits on this wait queue until all the scheduled jobs are
  *                 finished.
- * @hw_rq_count: the number of jobs currently in the hardware queue.
  * @job_id_count: used to assign unique id to the each job.
+ * @submit_wq: workqueue used to queue @work_run_job and @work_free_job
  * @timeout_wq: workqueue used to queue @work_tdr
+ * @work_run_job: work which calls run_job op of each scheduler.
+ * @work_free_job: work which calls free_job op of each scheduler.
  * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
  *            timeout interval is over.
- * @thread: the kthread on which the scheduler which run.
  * @pending_list: the list of jobs which are currently in the job queue.
  * @job_list_lock: lock to protect the pending_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
@@ -493,24 +509,27 @@ struct drm_sched_backend_ops {
  * @_score: score used when the driver doesn't provide one
  * @ready: marks if the underlying HW is ready to work
  * @free_guilty: A hit to time out handler to free the guilty job.
+ * @pause_submit: pause queuing of @work_run_job on @submit_wq
+ * @own_submit_wq: scheduler owns allocation of @submit_wq
  * @dev: system &struct device
  *
  * One scheduler is implemented for each hardware ring.
  */
 struct drm_gpu_scheduler {
 	const struct drm_sched_backend_ops	*ops;
-	uint32_t			hw_submission_limit;
+	u32				credit_limit;
+	atomic_t			credit_count;
 	long				timeout;
 	const char			*name;
 	u32				num_rqs;
 	struct drm_sched_rq		**sched_rq;
-	wait_queue_head_t		wake_up_worker;
 	wait_queue_head_t		job_scheduled;
-	atomic_t			hw_rq_count;
 	atomic64_t			job_id_count;
+	struct workqueue_struct		*submit_wq;
 	struct workqueue_struct		*timeout_wq;
+	struct work_struct		work_run_job;
+	struct work_struct		work_free_job;
 	struct delayed_work		work_tdr;
-	struct task_struct		*thread;
 	struct list_head		pending_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
@@ -518,19 +537,22 @@ struct drm_gpu_scheduler {
 	atomic_t			_score;
 	bool				ready;
 	bool				free_guilty;
+	bool				pause_submit;
+	bool				own_submit_wq;
 	struct device			*dev;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
-		   u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit,
+		   struct workqueue_struct *submit_wq,
+		   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
 		   long timeout, struct workqueue_struct *timeout_wq,
 		   atomic_t *score, const char *name, struct device *dev);
 
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
-		       void *owner);
+		       u32 credits, void *owner);
 void drm_sched_job_arm(struct drm_sched_job *job);
 int drm_sched_job_add_dependency(struct drm_sched_job *job,
 				 struct dma_fence *fence);
@@ -550,8 +572,12 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
 				   struct drm_gpu_scheduler **sched_list,
 				   unsigned int num_sched_list);
 
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
 void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
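
For drivers porting to 6.8, the shape of the change above is: the fixed
hw_submission limit becomes a credit_limit, each job declares its credit
cost when it is initialized, and the optional update_job_credits op lets
the driver revise that cost whenever the scheduler considers the job.
Below is a minimal sketch of one possible wiring; the drm_sched_*
signatures are taken from the header above, while every foo_* identifier,
FOO_CREDIT_LIMIT, and the per-job credit bookkeeping are hypothetical
illustration, not code from this merge.

#include <linux/container_of.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <drm/gpu_scheduler.h>

#define FOO_CREDIT_LIMIT 64		/* hypothetical ring capacity in credits */

struct foo_device {			/* hypothetical driver scaffolding */
	struct device *dev;
	struct drm_gpu_scheduler sched;
	struct drm_sched_entity entity;
};

struct foo_job {
	struct drm_sched_job base;
	u32 credits_still_needed;	/* hypothetical per-job bookkeeping */
};

/* The mandatory ops are assumed to be implemented elsewhere. */
static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job);
static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *sched_job);
static void foo_free_job(struct drm_sched_job *sched_job);

/*
 * Optional op: re-evaluate the job's credit cost each time the scheduler
 * considers it, e.g. after some of the job's native fences have signalled
 * and no longer cost ring space.
 */
static u32 foo_update_job_credits(struct drm_sched_job *sched_job)
{
	struct foo_job *job = container_of(sched_job, struct foo_job, base);

	return job->credits_still_needed;
}

static const struct drm_sched_backend_ops foo_sched_ops = {
	.run_job		= foo_run_job,
	.timedout_job		= foo_timedout_job,
	.free_job		= foo_free_job,
	.update_job_credits	= foo_update_job_credits,
};

static int foo_sched_setup(struct foo_device *fdev)
{
	/*
	 * A NULL submit_wq asks drm_sched_init() to allocate an ordered
	 * workqueue of its own (own_submit_wq is then set, and the
	 * workqueue is released again by drm_sched_fini()).
	 */
	return drm_sched_init(&fdev->sched, &foo_sched_ops,
			      NULL,			/* submit_wq */
			      DRM_SCHED_PRIORITY_COUNT,	/* num_rqs */
			      FOO_CREDIT_LIMIT,		/* credit_limit */
			      0,			/* hang_limit */
			      msecs_to_jiffies(500),	/* timeout */
			      NULL,			/* timeout_wq */
			      NULL,			/* score */
			      "foo-ring", fdev->dev);
}

static int foo_job_submit(struct foo_device *fdev, struct foo_job *job)
{
	/* Each job now declares a (non-zero) credit cost up front. */
	return drm_sched_job_init(&job->base, &fdev->entity,
				  job->credits_still_needed, fdev);
}

Drivers that used to park and unpark the scheduler kthread around reset
handling would switch to the drm_sched_wqueue_stop()/drm_sched_wqueue_start()
pair, with drm_sched_wqueue_ready() standing in for checks on the removed
thread pointer.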