Diffstat (limited to 'include/drm/gpu_scheduler.h')
-rw-r--r--  include/drm/gpu_scheduler.h | 56
1 file changed, 34 insertions(+), 22 deletions(-)
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 9c437a057e5d..95e17504e46a 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -33,11 +33,11 @@
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
/**
- * DRM_SCHED_FENCE_DONT_PIPELINE - Prefent dependency pipelining
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
*
* Setting this flag on a scheduler fence prevents pipelining of jobs depending
* on this fence. In other words we always insert a full CPU round trip before
- * dependen jobs are pushed to the hw queue.
+ * dependent jobs are pushed to the hw queue.
*/
#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS
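
For illustration, a driver opts a fence out of pipelining by setting this flag
on the scheduled fence before handing it out as a dependency. A minimal sketch,
assuming a submitted job `job` and a dependent job `next_job`:

        struct dma_fence *fence = &job->s_fence->scheduled;

        /* Force a full CPU round trip before dependent jobs reach the hw queue. */
        set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags);
        drm_sched_job_add_dependency(next_job, dma_fence_get(fence));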
@@ -71,7 +71,7 @@ enum drm_sched_priority {
DRM_SCHED_PRIORITY_COUNT
};
-/* Used to chose between FIFO and RR jobs scheduling */
+/* Used to choose between FIFO and RR job-scheduling */
extern int drm_sched_policy;
#define DRM_SCHED_POLICY_RR 0
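
The header also defines DRM_SCHED_POLICY_FIFO, and the core branches on the
module parameter when picking the next entity. A simplified paraphrase of the
selection logic in sched_main.c:

        entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
                drm_sched_rq_select_entity_fifo(sched, rq) :
                drm_sched_rq_select_entity_rr(sched, rq);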
@@ -97,13 +97,21 @@ struct drm_sched_entity {
struct list_head list;
/**
+ * @lock:
+ *
+ * Lock protecting the run-queue (@rq) to which this entity belongs,
+ * @priority and the list of schedulers (@sched_list, @num_sched_list).
+ */
+ spinlock_t lock;
+
+ /**
* @rq:
*
* Runqueue on which this entity is currently scheduled.
*
* FIXME: Locking is very unclear for this. Writers are protected by
- * @rq_lock, but readers are generally lockless and seem to just race
- * with not even a READ_ONCE.
+ * @lock, but readers are generally lockless and seem to just race with
+ * not even a READ_ONCE.
*/
struct drm_sched_rq *rq;
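
Writers update @rq under the new entity-wide @lock; a simplified sketch
modeled on drm_sched_entity_select_rq(), with error paths omitted:

        spin_lock(&entity->lock);
        sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
        rq = sched ? sched->sched_rq[entity->priority] : NULL;
        if (rq != entity->rq) {
                drm_sched_rq_remove_entity(entity->rq, entity);
                entity->rq = rq;
        }
        spin_unlock(&entity->lock);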
@@ -136,18 +144,11 @@ struct drm_sched_entity {
* @priority:
*
* Priority of the entity. This can be modified by calling
- * drm_sched_entity_set_priority(). Protected by &rq_lock.
+ * drm_sched_entity_set_priority(). Protected by @lock.
*/
enum drm_sched_priority priority;
/**
- * @rq_lock:
- *
- * Lock to modify the runqueue to which this entity belongs.
- */
- spinlock_t rq_lock;
-
- /**
* @job_queue: the list of jobs of this entity.
*/
struct spsc_queue job_queue;
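
With @rq_lock removed in favor of the per-entity @lock, the priority setter
reduces to taking that one lock; a sketch of drm_sched_entity_set_priority()
after this change:

        void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                           enum drm_sched_priority priority)
        {
                spin_lock(&entity->lock);
                entity->priority = priority;
                spin_unlock(&entity->lock);
        }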
@@ -198,7 +199,7 @@ struct drm_sched_entity {
*
* Points to the finished fence of the last scheduled job. Only written
* by the scheduler thread, can be accessed locklessly from
- * drm_sched_job_arm() iff the queue is empty.
+ * drm_sched_job_arm() if the queue is empty.
*/
struct dma_fence __rcu *last_scheduled;
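
Since @last_scheduled is RCU-annotated, one safe lockless read-side pattern
(a sketch, not necessarily the exact code in drm_sched_entity.c) is:

        struct dma_fence *prev;

        rcu_read_lock();
        prev = dma_fence_get_rcu_safe(&entity->last_scheduled);
        rcu_read_unlock();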
@@ -243,21 +244,23 @@ struct drm_sched_entity {
/**
* struct drm_sched_rq - queue of entities to be scheduled.
*
- * @lock: to modify the entities list.
* @sched: the scheduler to which this rq belongs to.
- * @entities: list of the entities to be scheduled.
+ * @lock: protects @entities, @rb_tree_root and @current_entity.
* @current_entity: the entity which is to be scheduled.
- * @rb_tree_root: root of time based priory queue of entities for FIFO scheduling
+ * @entities: list of the entities to be scheduled.
+ * @rb_tree_root: root of time based priority queue of entities for FIFO scheduling
*
* Run queue is a set of entities scheduling command submissions for
* one specific ring. It implements the scheduling policy that selects
* the next entity to emit commands from.
*/
struct drm_sched_rq {
- spinlock_t lock;
struct drm_gpu_scheduler *sched;
- struct list_head entities;
+
+ spinlock_t lock;
+ /* Following members are protected by the @lock: */
struct drm_sched_entity *current_entity;
+ struct list_head entities;
struct rb_root_cached rb_tree_root;
};
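
All of the annotated members now change only under @lock; a minimal sketch
modeled on drm_sched_rq_add_entity(), with score accounting omitted:

        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);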
@@ -321,7 +324,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
* @credits: the number of credits this job contributes to the scheduler
- * @work: Helper to reschdeule job kill to different context.
+ * @work: Helper to reschedule job kill to different context.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
@@ -337,6 +340,14 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
struct drm_sched_job {
struct spsc_node queue_node;
struct list_head list;
+
+ /**
+ * @sched:
+ *
+ * The scheduler this job is or will be scheduled on. Gets set by
+ * drm_sched_job_arm(). Valid until drm_sched_backend_ops.free_job()
+ * has finished.
+ */
struct drm_gpu_scheduler *sched;
struct drm_sched_fence *s_fence;
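
The documented lifetime of @sched follows the standard submission flow; a
hedged sketch, assuming a driver job that embeds drm_sched_job as `base`
(error handling omitted):

        ret = drm_sched_job_init(&job->base, entity, 1, owner);
        if (ret)
                return ret;

        drm_sched_job_arm(&job->base);          /* @sched becomes valid here */
        drm_sched_entity_push_job(&job->base);  /* valid until free_job() completes */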
@@ -579,7 +590,7 @@ bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched);
+void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
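
drm_sched_start() now takes an errno that is applied to jobs completed while
restarting; passing 0 restarts without forcing an error. A hedged
timeout-handler sketch (my_timedout_job and the reset step are
driver-specific placeholders):

        static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *bad)
        {
                struct drm_gpu_scheduler *sched = bad->sched;

                drm_sched_stop(sched, bad);
                /* ... driver-specific hardware reset ... */
                drm_sched_start(sched, 0);

                return DRM_GPU_SCHED_STAT_NOMINAL;
        }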
@@ -593,7 +604,8 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity);
-void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);
+void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq, ktime_t ts);
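
The _locked variant now takes the run queue explicitly and expects the caller
to hold both the entity lock and the rq lock, entity lock outermost. A caller
sketch modeled on the push-job path:

        spin_lock(&entity->lock);
        rq = entity->rq;
        spin_lock(&rq->lock);
        drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);
        spin_unlock(&rq->lock);
        spin_unlock(&entity->lock);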
int drm_sched_entity_init(struct drm_sched_entity *entity,
enum drm_sched_priority priority,