Diffstat (limited to 'drivers/gpu/drm/amd/scheduler')
-rw-r--r-- | drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 82
-rw-r--r-- | drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 744
-rw-r--r-- | drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 186
-rw-r--r-- | drivers/gpu/drm/amd/scheduler/sched_fence.c | 173
-rw-r--r-- | drivers/gpu/drm/amd/scheduler/spsc_queue.h | 121
5 files changed, 0 insertions, 1306 deletions
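Before the removed sources, a quick orientation: the API declared in gpu_scheduler.h below shows how a driver wired a hardware ring into this scheduler — fill in an amd_sched_backend_ops, create one amd_gpu_scheduler per ring, and one amd_sched_entity per submitting context. A minimal sketch reconstructed from those declarations; the my_* names are hypothetical, and the parameters (4 in-flight jobs, NORMAL priority run queue, no TDR timeout) are illustrative only:

```c
#include <linux/slab.h>
#include <linux/sched.h>
#include "gpu_scheduler.h"

struct my_job {
	struct amd_sched_job base;	/* scheduler bookkeeping must be embedded */
	/* driver-private payload would follow here */
};

static struct dma_fence *my_dependency(struct amd_sched_job *sched_job,
				       struct amd_sched_entity *s_entity)
{
	return NULL;	/* nothing to wait for: the job may run immediately */
}

static struct dma_fence *my_run_job(struct amd_sched_job *sched_job)
{
	/*
	 * Submit to the hardware ring and return the HW fence; returning
	 * NULL makes the scheduler complete the job immediately (see
	 * amd_sched_main() below).
	 */
	return NULL;
}

static void my_timedout_job(struct amd_sched_job *sched_job)
{
	/* TDR hook: typically amd_sched_hw_job_reset() + amd_sched_job_recovery() */
}

static void my_free_job(struct amd_sched_job *sched_job)
{
	kfree(container_of(sched_job, struct my_job, base));
}

static const struct amd_sched_backend_ops my_ops = {
	.dependency	= my_dependency,
	.run_job	= my_run_job,
	.timedout_job	= my_timedout_job,
	.free_job	= my_free_job,
};

/* One scheduler per hardware ring, one entity per submitting context. */
static int my_ring_init(struct amd_gpu_scheduler *sched,
			struct amd_sched_entity *entity)
{
	int r;

	r = amd_sched_init(sched, &my_ops, 4, 0, MAX_SCHEDULE_TIMEOUT, "my-ring");
	if (r)
		return r;

	return amd_sched_entity_init(sched, entity,
				     &sched->sched_rq[AMD_SCHED_PRIORITY_NORMAL],
				     32, NULL);
}
```

Submission then amounts to amd_sched_job_init() on the embedded amd_sched_job followed by amd_sched_entity_push_job(), both of which appear in the removed code below.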
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h deleted file mode 100644 index b42a78922505..000000000000 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _GPU_SCHED_TRACE_H_ - -#include <linux/stringify.h> -#include <linux/types.h> -#include <linux/tracepoint.h> - -#include <drm/drmP.h> - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM gpu_sched -#define TRACE_INCLUDE_FILE gpu_sched_trace - -TRACE_EVENT(amd_sched_job, - TP_PROTO(struct amd_sched_job *sched_job, struct amd_sched_entity *entity), - TP_ARGS(sched_job, entity), - TP_STRUCT__entry( - __field(struct amd_sched_entity *, entity) - __field(struct dma_fence *, fence) - __field(const char *, name) - __field(uint64_t, id) - __field(u32, job_count) - __field(int, hw_job_count) - ), - - TP_fast_assign( - __entry->entity = entity; - __entry->id = sched_job->id; - __entry->fence = &sched_job->s_fence->finished; - __entry->name = sched_job->sched->name; - __entry->job_count = spsc_queue_count(&entity->job_queue); - __entry->hw_job_count = atomic_read( - &sched_job->sched->hw_rq_count); - ), - TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d", - __entry->entity, __entry->id, - __entry->fence, __entry->name, - __entry->job_count, __entry->hw_job_count) -); - -TRACE_EVENT(amd_sched_process_job, - TP_PROTO(struct amd_sched_fence *fence), - TP_ARGS(fence), - TP_STRUCT__entry( - __field(struct dma_fence *, fence) - ), - - TP_fast_assign( - __entry->fence = &fence->finished; - ), - TP_printk("fence=%p signaled", __entry->fence) -); - -#endif - -/* This part must be outside protection */ -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . -#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c deleted file mode 100644 index dcb987e6d94a..000000000000 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ /dev/null @@ -1,744 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * - */ -#include <linux/kthread.h> -#include <linux/wait.h> -#include <linux/sched.h> -#include <uapi/linux/sched/types.h> -#include <drm/drmP.h> -#include "gpu_scheduler.h" - -#include "spsc_queue.h" - -#define CREATE_TRACE_POINTS -#include "gpu_sched_trace.h" - -#define to_amd_sched_job(sched_job) \ - container_of((sched_job), struct amd_sched_job, queue_node) - -static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); -static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); -static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); - -/* Initialize a given run queue struct */ -static void amd_sched_rq_init(struct amd_sched_rq *rq) -{ - spin_lock_init(&rq->lock); - INIT_LIST_HEAD(&rq->entities); - rq->current_entity = NULL; -} - -static void amd_sched_rq_add_entity(struct amd_sched_rq *rq, - struct amd_sched_entity *entity) -{ - if (!list_empty(&entity->list)) - return; - spin_lock(&rq->lock); - list_add_tail(&entity->list, &rq->entities); - spin_unlock(&rq->lock); -} - -static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, - struct amd_sched_entity *entity) -{ - if (list_empty(&entity->list)) - return; - spin_lock(&rq->lock); - list_del_init(&entity->list); - if (rq->current_entity == entity) - rq->current_entity = NULL; - spin_unlock(&rq->lock); -} - -/** - * Select an entity which could provide a job to run - * - * @rq The run queue to check. - * - * Try to find a ready entity; returns NULL if none is found. - */ -static struct amd_sched_entity * -amd_sched_rq_select_entity(struct amd_sched_rq *rq) -{ - struct amd_sched_entity *entity; - - spin_lock(&rq->lock); - - entity = rq->current_entity; - if (entity) { - list_for_each_entry_continue(entity, &rq->entities, list) { - if (amd_sched_entity_is_ready(entity)) { - rq->current_entity = entity; - spin_unlock(&rq->lock); - return entity; - } - } - } - - list_for_each_entry(entity, &rq->entities, list) { - - if (amd_sched_entity_is_ready(entity)) { - rq->current_entity = entity; - spin_unlock(&rq->lock); - return entity; - } - - if (entity == rq->current_entity) - break; - } - - spin_unlock(&rq->lock); - - return NULL; -} - -/** - * Init a context entity used by the scheduler when submitting to the HW ring.
- * - * @sched The pointer to the scheduler - * @entity The pointer to a valid amd_sched_entity - * @rq The run queue this entity belongs to - * @jobs The max number of jobs in the job queue - * @guilty The pointer to the context's guilty flag - * - * Return 0 on success, a negative error code on failure. -*/ -int amd_sched_entity_init(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - struct amd_sched_rq *rq, - uint32_t jobs, atomic_t *guilty) -{ - if (!(sched && entity && rq)) - return -EINVAL; - - memset(entity, 0, sizeof(struct amd_sched_entity)); - INIT_LIST_HEAD(&entity->list); - entity->rq = rq; - entity->sched = sched; - entity->guilty = guilty; - - spin_lock_init(&entity->rq_lock); - spin_lock_init(&entity->queue_lock); - spsc_queue_init(&entity->job_queue); - - atomic_set(&entity->fence_seq, 0); - entity->fence_context = dma_fence_context_alloc(2); - - return 0; -} - -/** - * Query if entity is initialized - * - * @sched Pointer to scheduler instance - * @entity The pointer to a valid scheduler entity - * - * Return true if the entity is initialized, false otherwise -*/ -static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity) -{ - return entity->sched == sched && - entity->rq != NULL; -} - -/** - * Check if entity is idle - * - * @entity The pointer to a valid scheduler entity - * - * Return true if the entity doesn't have any unscheduled jobs. - */ -static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity) -{ - rmb(); - if (spsc_queue_peek(&entity->job_queue) == NULL) - return true; - - return false; -} - -/** - * Check if entity is ready - * - * @entity The pointer to a valid scheduler entity - * - * Return true if the entity could provide a job. - */ -static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity) -{ - if (spsc_queue_peek(&entity->job_queue) == NULL) - return false; - - if (READ_ONCE(entity->dependency)) - return false; - - return true; -} - -/** - * Destroy a context entity - * - * @sched Pointer to scheduler instance - * @entity The pointer to a valid scheduler entity - * - * Cleanup and free the allocated resources. - */ -void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity) -{ - int r; - - if (!amd_sched_entity_is_initialized(sched, entity)) - return; - /** - * The client will not queue more IBs during this fini, consume existing - * queued IBs or discard them on SIGKILL - */ - if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) - r = -ERESTARTSYS; - else - r = wait_event_killable(sched->job_scheduled, - amd_sched_entity_is_idle(entity)); - amd_sched_entity_set_rq(entity, NULL); - if (r) { - struct amd_sched_job *job; - - /* Park the kernel thread for a moment to make sure it isn't processing - * our entity.
- */ - kthread_park(sched->thread); - kthread_unpark(sched->thread); - if (entity->dependency) { - dma_fence_remove_callback(entity->dependency, - &entity->cb); - dma_fence_put(entity->dependency); - entity->dependency = NULL; - } - - while ((job = to_amd_sched_job(spsc_queue_pop(&entity->job_queue)))) { - struct amd_sched_fence *s_fence = job->s_fence; - amd_sched_fence_scheduled(s_fence); - dma_fence_set_error(&s_fence->finished, -ESRCH); - amd_sched_fence_finished(s_fence); - WARN_ON(s_fence->parent); - dma_fence_put(&s_fence->finished); - sched->ops->free_job(job); - } - } -} - -static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) -{ - struct amd_sched_entity *entity = - container_of(cb, struct amd_sched_entity, cb); - entity->dependency = NULL; - dma_fence_put(f); - amd_sched_wakeup(entity->sched); -} - -static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) -{ - struct amd_sched_entity *entity = - container_of(cb, struct amd_sched_entity, cb); - entity->dependency = NULL; - dma_fence_put(f); -} - -void amd_sched_entity_set_rq(struct amd_sched_entity *entity, - struct amd_sched_rq *rq) -{ - if (entity->rq == rq) - return; - - spin_lock(&entity->rq_lock); - - if (entity->rq) - amd_sched_rq_remove_entity(entity->rq, entity); - - entity->rq = rq; - if (rq) - amd_sched_rq_add_entity(rq, entity); - - spin_unlock(&entity->rq_lock); -} - -bool amd_sched_dependency_optimized(struct dma_fence* fence, - struct amd_sched_entity *entity) -{ - struct amd_gpu_scheduler *sched = entity->sched; - struct amd_sched_fence *s_fence; - - if (!fence || dma_fence_is_signaled(fence)) - return false; - if (fence->context == entity->fence_context) - return true; - s_fence = to_amd_sched_fence(fence); - if (s_fence && s_fence->sched == sched) - return true; - - return false; -} - -static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) -{ - struct amd_gpu_scheduler *sched = entity->sched; - struct dma_fence * fence = entity->dependency; - struct amd_sched_fence *s_fence; - - if (fence->context == entity->fence_context) { - /* We can ignore fences from ourself */ - dma_fence_put(entity->dependency); - return false; - } - - s_fence = to_amd_sched_fence(fence); - if (s_fence && s_fence->sched == sched) { - - /* - * Fence is from the same scheduler, only need to wait for - * it to be scheduled - */ - fence = dma_fence_get(&s_fence->scheduled); - dma_fence_put(entity->dependency); - entity->dependency = fence; - if (!dma_fence_add_callback(fence, &entity->cb, - amd_sched_entity_clear_dep)) - return true; - - /* Ignore it when it is already scheduled */ - dma_fence_put(fence); - return false; - } - - if (!dma_fence_add_callback(entity->dependency, &entity->cb, - amd_sched_entity_wakeup)) - return true; - - dma_fence_put(entity->dependency); - return false; -} - -static struct amd_sched_job * -amd_sched_entity_pop_job(struct amd_sched_entity *entity) -{ - struct amd_gpu_scheduler *sched = entity->sched; - struct amd_sched_job *sched_job = to_amd_sched_job( - spsc_queue_peek(&entity->job_queue)); - - if (!sched_job) - return NULL; - - while ((entity->dependency = sched->ops->dependency(sched_job, entity))) - if (amd_sched_entity_add_dependency_cb(entity)) - return NULL; - - /* skip jobs from entity that marked guilty */ - if (entity->guilty && atomic_read(entity->guilty)) - dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); - - spsc_queue_pop(&entity->job_queue); - return sched_job; -} - -/** - * Submit a job to the job 
queue - * - * @sched_job The pointer to the job to submit - * - * The first job pushed to an empty queue wakes up the scheduler. - */ -void amd_sched_entity_push_job(struct amd_sched_job *sched_job, - struct amd_sched_entity *entity) -{ - struct amd_gpu_scheduler *sched = sched_job->sched; - bool first = false; - - trace_amd_sched_job(sched_job, entity); - - spin_lock(&entity->queue_lock); - first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); - - spin_unlock(&entity->queue_lock); - - /* first job wakes up scheduler */ - if (first) { - /* Add the entity to the run queue */ - spin_lock(&entity->rq_lock); - amd_sched_rq_add_entity(entity->rq, entity); - spin_unlock(&entity->rq_lock); - amd_sched_wakeup(sched); - } -} - -/* job_finish is called after the hw fence signaled - */ -static void amd_sched_job_finish(struct work_struct *work) -{ - struct amd_sched_job *s_job = container_of(work, struct amd_sched_job, - finish_work); - struct amd_gpu_scheduler *sched = s_job->sched; - - /* remove job from ring_mirror_list */ - spin_lock(&sched->job_list_lock); - list_del_init(&s_job->node); - if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { - struct amd_sched_job *next; - - spin_unlock(&sched->job_list_lock); - cancel_delayed_work_sync(&s_job->work_tdr); - spin_lock(&sched->job_list_lock); - - /* queue TDR for next job */ - next = list_first_entry_or_null(&sched->ring_mirror_list, - struct amd_sched_job, node); - - if (next) - schedule_delayed_work(&next->work_tdr, sched->timeout); - } - spin_unlock(&sched->job_list_lock); - dma_fence_put(&s_job->s_fence->finished); - sched->ops->free_job(s_job); -} - -static void amd_sched_job_finish_cb(struct dma_fence *f, - struct dma_fence_cb *cb) -{ - struct amd_sched_job *job = container_of(cb, struct amd_sched_job, - finish_cb); - schedule_work(&job->finish_work); -} - -static void amd_sched_job_begin(struct amd_sched_job *s_job) -{ - struct amd_gpu_scheduler *sched = s_job->sched; - - dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb, - amd_sched_job_finish_cb); - - spin_lock(&sched->job_list_lock); - list_add_tail(&s_job->node, &sched->ring_mirror_list); - if (sched->timeout != MAX_SCHEDULE_TIMEOUT && - list_first_entry_or_null(&sched->ring_mirror_list, - struct amd_sched_job, node) == s_job) - schedule_delayed_work(&s_job->work_tdr, sched->timeout); - spin_unlock(&sched->job_list_lock); -} - -static void amd_sched_job_timedout(struct work_struct *work) -{ - struct amd_sched_job *job = container_of(work, struct amd_sched_job, - work_tdr.work); - - job->sched->ops->timedout_job(job); -} - -void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad) -{ - struct amd_sched_job *s_job; - struct amd_sched_entity *entity, *tmp; - int i; - - spin_lock(&sched->job_list_lock); - list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { - if (s_job->s_fence->parent && - dma_fence_remove_callback(s_job->s_fence->parent, - &s_job->s_fence->cb)) { - dma_fence_put(s_job->s_fence->parent); - s_job->s_fence->parent = NULL; - atomic_dec(&sched->hw_rq_count); - } - } - spin_unlock(&sched->job_list_lock); - - if (bad && bad->s_priority != AMD_SCHED_PRIORITY_KERNEL) { - atomic_inc(&bad->karma); - /* don't increase @bad's karma if it's from the KERNEL RQ, - * because a GPU hang can sometimes corrupt kernel jobs (like VM updating jobs), - * but keep in mind that kernel jobs are always considered good.
- */ - for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_KERNEL; i++) { - struct amd_sched_rq *rq = &sched->sched_rq[i]; - - spin_lock(&rq->lock); - list_for_each_entry_safe(entity, tmp, &rq->entities, list) { - if (bad->s_fence->scheduled.context == entity->fence_context) { - if (atomic_read(&bad->karma) > bad->sched->hang_limit) - if (entity->guilty) - atomic_set(entity->guilty, 1); - break; - } - } - spin_unlock(&rq->lock); - if (&entity->list != &rq->entities) - break; - } - } -} - -void amd_sched_job_kickout(struct amd_sched_job *s_job) -{ - struct amd_gpu_scheduler *sched = s_job->sched; - - spin_lock(&sched->job_list_lock); - list_del_init(&s_job->node); - spin_unlock(&sched->job_list_lock); -} - -void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) -{ - struct amd_sched_job *s_job, *tmp; - bool found_guilty = false; - int r; - - spin_lock(&sched->job_list_lock); - s_job = list_first_entry_or_null(&sched->ring_mirror_list, - struct amd_sched_job, node); - if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT) - schedule_delayed_work(&s_job->work_tdr, sched->timeout); - - list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { - struct amd_sched_fence *s_fence = s_job->s_fence; - struct dma_fence *fence; - uint64_t guilty_context; - - if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) { - found_guilty = true; - guilty_context = s_job->s_fence->scheduled.context; - } - - if (found_guilty && s_job->s_fence->scheduled.context == guilty_context) - dma_fence_set_error(&s_fence->finished, -ECANCELED); - - spin_unlock(&sched->job_list_lock); - fence = sched->ops->run_job(s_job); - atomic_inc(&sched->hw_rq_count); - if (fence) { - s_fence->parent = dma_fence_get(fence); - r = dma_fence_add_callback(fence, &s_fence->cb, - amd_sched_process_job); - if (r == -ENOENT) - amd_sched_process_job(fence, &s_fence->cb); - else if (r) - DRM_ERROR("fence add callback failed (%d)\n", - r); - dma_fence_put(fence); - } else { - amd_sched_process_job(NULL, &s_fence->cb); - } - spin_lock(&sched->job_list_lock); - } - spin_unlock(&sched->job_list_lock); -} - -/* init a sched_job with basic fields */ -int amd_sched_job_init(struct amd_sched_job *job, - struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - void *owner) -{ - job->sched = sched; - job->s_priority = entity->rq - sched->sched_rq; - job->s_fence = amd_sched_fence_create(entity, owner); - if (!job->s_fence) - return -ENOMEM; - job->id = atomic64_inc_return(&sched->job_id_count); - - INIT_WORK(&job->finish_work, amd_sched_job_finish); - INIT_LIST_HEAD(&job->node); - INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout); - - return 0; -} - -/** - * Return true if we can push more jobs to the hw.
- */ -static bool amd_sched_ready(struct amd_gpu_scheduler *sched) -{ - return atomic_read(&sched->hw_rq_count) < - sched->hw_submission_limit; -} - -/** - * Wake up the scheduler when it is ready - */ -static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) -{ - if (amd_sched_ready(sched)) - wake_up_interruptible(&sched->wake_up_worker); -} - -/** - * Select next entity to process -*/ -static struct amd_sched_entity * -amd_sched_select_entity(struct amd_gpu_scheduler *sched) -{ - struct amd_sched_entity *entity; - int i; - - if (!amd_sched_ready(sched)) - return NULL; - - /* Kernel run queue has higher priority than normal run queue */ - for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) { - entity = amd_sched_rq_select_entity(&sched->sched_rq[i]); - if (entity) - break; - } - - return entity; -} - -static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) -{ - struct amd_sched_fence *s_fence = - container_of(cb, struct amd_sched_fence, cb); - struct amd_gpu_scheduler *sched = s_fence->sched; - - dma_fence_get(&s_fence->finished); - atomic_dec(&sched->hw_rq_count); - amd_sched_fence_finished(s_fence); - - trace_amd_sched_process_job(s_fence); - dma_fence_put(&s_fence->finished); - wake_up_interruptible(&sched->wake_up_worker); -} - -static bool amd_sched_blocked(struct amd_gpu_scheduler *sched) -{ - if (kthread_should_park()) { - kthread_parkme(); - return true; - } - - return false; -} - -static int amd_sched_main(void *param) -{ - struct sched_param sparam = {.sched_priority = 1}; - struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param; - int r; - - sched_setscheduler(current, SCHED_FIFO, &sparam); - - while (!kthread_should_stop()) { - struct amd_sched_entity *entity = NULL; - struct amd_sched_fence *s_fence; - struct amd_sched_job *sched_job; - struct dma_fence *fence; - - wait_event_interruptible(sched->wake_up_worker, - (!amd_sched_blocked(sched) && - (entity = amd_sched_select_entity(sched))) || - kthread_should_stop()); - - if (!entity) - continue; - - sched_job = amd_sched_entity_pop_job(entity); - if (!sched_job) - continue; - - s_fence = sched_job->s_fence; - - atomic_inc(&sched->hw_rq_count); - amd_sched_job_begin(sched_job); - - fence = sched->ops->run_job(sched_job); - amd_sched_fence_scheduled(s_fence); - - if (fence) { - s_fence->parent = dma_fence_get(fence); - r = dma_fence_add_callback(fence, &s_fence->cb, - amd_sched_process_job); - if (r == -ENOENT) - amd_sched_process_job(fence, &s_fence->cb); - else if (r) - DRM_ERROR("fence add callback failed (%d)\n", - r); - dma_fence_put(fence); - } else { - amd_sched_process_job(NULL, &s_fence->cb); - } - - wake_up(&sched->job_scheduled); - } - return 0; -} - -/** - * Init a gpu scheduler instance - * - * @sched The pointer to the scheduler - * @ops The backend operations for this scheduler. - * @hw_submission Number of hw submissions that can be in flight - * @hang_limit Karma threshold after which a job's context is marked guilty - * @timeout Job timeout in jiffies before the TDR handler runs - * @name Name used for debugging - * - * Return 0 on success, otherwise error code.
-*/ -int amd_sched_init(struct amd_gpu_scheduler *sched, - const struct amd_sched_backend_ops *ops, - unsigned hw_submission, - unsigned hang_limit, - long timeout, - const char *name) -{ - int i; - sched->ops = ops; - sched->hw_submission_limit = hw_submission; - sched->name = name; - sched->timeout = timeout; - sched->hang_limit = hang_limit; - for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++) - amd_sched_rq_init(&sched->sched_rq[i]); - - init_waitqueue_head(&sched->wake_up_worker); - init_waitqueue_head(&sched->job_scheduled); - INIT_LIST_HEAD(&sched->ring_mirror_list); - spin_lock_init(&sched->job_list_lock); - atomic_set(&sched->hw_rq_count, 0); - atomic64_set(&sched->job_id_count, 0); - - /* Each scheduler will run on a separate kernel thread */ - sched->thread = kthread_run(amd_sched_main, sched, sched->name); - if (IS_ERR(sched->thread)) { - DRM_ERROR("Failed to create scheduler for %s.\n", name); - return PTR_ERR(sched->thread); - } - - return 0; -} - -/** - * Destroy a gpu scheduler - * - * @sched The pointer to the scheduler - */ -void amd_sched_fini(struct amd_gpu_scheduler *sched) -{ - if (sched->thread) - kthread_stop(sched->thread); -} diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h deleted file mode 100644 index b590fcc2786a..000000000000 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _GPU_SCHEDULER_H_ -#define _GPU_SCHEDULER_H_ -
-#include <linux/kfifo.h> -#include <linux/dma-fence.h> -#include "spsc_queue.h" - -struct amd_gpu_scheduler; -struct amd_sched_rq; - -enum amd_sched_priority { - AMD_SCHED_PRIORITY_MIN, - AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN, - AMD_SCHED_PRIORITY_NORMAL, - AMD_SCHED_PRIORITY_HIGH_SW, - AMD_SCHED_PRIORITY_HIGH_HW, - AMD_SCHED_PRIORITY_KERNEL, - AMD_SCHED_PRIORITY_MAX, - AMD_SCHED_PRIORITY_INVALID = -1, - AMD_SCHED_PRIORITY_UNSET = -2 -}; - - -/** - * A scheduler entity is a wrapper around a job queue or a group - * of other entities. Entities take turns emitting jobs from their - * job queues to the corresponding hardware ring based on scheduling - * policy.
-*/ -struct amd_sched_entity { - struct list_head list; - struct amd_sched_rq *rq; - spinlock_t rq_lock; - struct amd_gpu_scheduler *sched; - - spinlock_t queue_lock; - struct spsc_queue job_queue; - - atomic_t fence_seq; - uint64_t fence_context; - - struct dma_fence *dependency; - struct dma_fence_cb cb; - atomic_t *guilty; /* points to ctx's guilty */ -}; - -/** - * Run queue is a set of entities scheduling command submissions for - * one specific ring. It implements the scheduling policy that selects - * the next entity to emit commands from. -*/ -struct amd_sched_rq { - spinlock_t lock; - struct list_head entities; - struct amd_sched_entity *current_entity; -}; - -struct amd_sched_fence { - struct dma_fence scheduled; - struct dma_fence finished; - struct dma_fence_cb cb; - struct dma_fence *parent; - struct amd_gpu_scheduler *sched; - spinlock_t lock; - void *owner; -}; - -struct amd_sched_job { - struct spsc_node queue_node; - struct amd_gpu_scheduler *sched; - struct amd_sched_fence *s_fence; - struct dma_fence_cb finish_cb; - struct work_struct finish_work; - struct list_head node; - struct delayed_work work_tdr; - uint64_t id; - atomic_t karma; - enum amd_sched_priority s_priority; -}; - -extern const struct dma_fence_ops amd_sched_fence_ops_scheduled; -extern const struct dma_fence_ops amd_sched_fence_ops_finished; -static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f) -{ - if (f->ops == &amd_sched_fence_ops_scheduled) - return container_of(f, struct amd_sched_fence, scheduled); - - if (f->ops == &amd_sched_fence_ops_finished) - return container_of(f, struct amd_sched_fence, finished); - - return NULL; -} - -static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job, int threshold) -{ - return (s_job && atomic_inc_return(&s_job->karma) > threshold); -} - -/** - * Define the backend operations called by the scheduler; - * these functions should be implemented on the driver side -*/ -struct amd_sched_backend_ops { - struct dma_fence *(*dependency)(struct amd_sched_job *sched_job, - struct amd_sched_entity *s_entity); - struct dma_fence *(*run_job)(struct amd_sched_job *sched_job); - void (*timedout_job)(struct amd_sched_job *sched_job); - void (*free_job)(struct amd_sched_job *sched_job); -}; - -/** - * One scheduler is implemented for each hardware ring -*/ -struct amd_gpu_scheduler { - const struct amd_sched_backend_ops *ops; - uint32_t hw_submission_limit; - long timeout; - const char *name; - struct amd_sched_rq sched_rq[AMD_SCHED_PRIORITY_MAX]; - wait_queue_head_t wake_up_worker; - wait_queue_head_t job_scheduled; - atomic_t hw_rq_count; - atomic64_t job_id_count; - struct task_struct *thread; - struct list_head ring_mirror_list; - spinlock_t job_list_lock; - int hang_limit; -}; - -int amd_sched_init(struct amd_gpu_scheduler *sched, - const struct amd_sched_backend_ops *ops, - uint32_t hw_submission, unsigned hang_limit, long timeout, const char *name); -void amd_sched_fini(struct amd_gpu_scheduler *sched); - -int amd_sched_entity_init(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - struct amd_sched_rq *rq, - uint32_t jobs, atomic_t *guilty); -void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity); -void amd_sched_entity_push_job(struct amd_sched_job *sched_job, - struct amd_sched_entity *entity); -void amd_sched_entity_set_rq(struct amd_sched_entity *entity, - struct amd_sched_rq *rq); - -int amd_sched_fence_slab_init(void); -void amd_sched_fence_slab_fini(void); - -struct
amd_sched_fence *amd_sched_fence_create( - struct amd_sched_entity *s_entity, void *owner); -void amd_sched_fence_scheduled(struct amd_sched_fence *fence); -void amd_sched_fence_finished(struct amd_sched_fence *fence); -int amd_sched_job_init(struct amd_sched_job *job, - struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - void *owner); -void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *job); -void amd_sched_job_recovery(struct amd_gpu_scheduler *sched); -bool amd_sched_dependency_optimized(struct dma_fence* fence, - struct amd_sched_entity *entity); -void amd_sched_job_kickout(struct amd_sched_job *s_job); - -#endif diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c deleted file mode 100644 index 33f54d0a5c4f..000000000000 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * - */ -#include <linux/kthread.h> -#include <linux/wait.h> -#include <linux/sched.h> -#include <drm/drmP.h> -#include "gpu_scheduler.h" - -static struct kmem_cache *sched_fence_slab; - -int amd_sched_fence_slab_init(void) -{ - sched_fence_slab = kmem_cache_create( - "amd_sched_fence", sizeof(struct amd_sched_fence), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (!sched_fence_slab) - return -ENOMEM; - - return 0; -} - -void amd_sched_fence_slab_fini(void) -{ - rcu_barrier(); - kmem_cache_destroy(sched_fence_slab); -} - -struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, - void *owner) -{ - struct amd_sched_fence *fence = NULL; - unsigned seq; - - fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); - if (fence == NULL) - return NULL; - - fence->owner = owner; - fence->sched = entity->sched; - spin_lock_init(&fence->lock); - - seq = atomic_inc_return(&entity->fence_seq); - dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, - &fence->lock, entity->fence_context, seq); - dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished, - &fence->lock, entity->fence_context + 1, seq); - - return fence; -} - -void amd_sched_fence_scheduled(struct amd_sched_fence *fence) -{ - int ret = dma_fence_signal(&fence->scheduled); - - if (!ret) - DMA_FENCE_TRACE(&fence->scheduled, - "signaled from irq context\n"); - else - DMA_FENCE_TRACE(&fence->scheduled, - "was already signaled\n"); -} - -void amd_sched_fence_finished(struct amd_sched_fence *fence) -{ - int ret = dma_fence_signal(&fence->finished); - - if (!ret) - DMA_FENCE_TRACE(&fence->finished, - "signaled from irq context\n"); - else - DMA_FENCE_TRACE(&fence->finished, - "was already signaled\n"); -} - -static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence) -{ - return "amd_sched"; -} - -static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f) -{ - struct amd_sched_fence *fence = to_amd_sched_fence(f); - return (const char *)fence->sched->name; -} - -static bool amd_sched_fence_enable_signaling(struct dma_fence *f) -{ - return true; -} - -/** - * amd_sched_fence_free - free up the fence memory - * - * @rcu: RCU callback head - * - * Free up the fence memory after the RCU grace period. - */ -static void amd_sched_fence_free(struct rcu_head *rcu) -{ - struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); - struct amd_sched_fence *fence = to_amd_sched_fence(f); - - dma_fence_put(fence->parent); - kmem_cache_free(sched_fence_slab, fence); -} - -/** - * amd_sched_fence_release_scheduled - callback that fence can be freed - * - * @fence: fence - * - * This function is called when the reference count becomes zero. - * It just RCU schedules freeing up the fence. - */ -static void amd_sched_fence_release_scheduled(struct dma_fence *f) -{ - struct amd_sched_fence *fence = to_amd_sched_fence(f); - - call_rcu(&fence->finished.rcu, amd_sched_fence_free); -} - -/** - * amd_sched_fence_release_finished - drop extra reference - * - * @f: fence - * - * Drop the extra reference from the scheduled fence to the base fence. 
- */ -static void amd_sched_fence_release_finished(struct dma_fence *f) -{ - struct amd_sched_fence *fence = to_amd_sched_fence(f); - - dma_fence_put(&fence->scheduled); -} - -const struct dma_fence_ops amd_sched_fence_ops_scheduled = { - .get_driver_name = amd_sched_fence_get_driver_name, - .get_timeline_name = amd_sched_fence_get_timeline_name, - .enable_signaling = amd_sched_fence_enable_signaling, - .signaled = NULL, - .wait = dma_fence_default_wait, - .release = amd_sched_fence_release_scheduled, -}; - -const struct dma_fence_ops amd_sched_fence_ops_finished = { - .get_driver_name = amd_sched_fence_get_driver_name, - .get_timeline_name = amd_sched_fence_get_timeline_name, - .enable_signaling = amd_sched_fence_enable_signaling, - .signaled = NULL, - .wait = dma_fence_default_wait, - .release = amd_sched_fence_release_finished, -}; diff --git a/drivers/gpu/drm/amd/scheduler/spsc_queue.h b/drivers/gpu/drm/amd/scheduler/spsc_queue.h deleted file mode 100644 index 5902f35ce759..000000000000 --- a/drivers/gpu/drm/amd/scheduler/spsc_queue.h +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef AMD_SCHEDULER_SPSC_QUEUE_H_ -#define AMD_SCHEDULER_SPSC_QUEUE_H_ - -#include <linux/atomic.h> - -/** SPSC lockless queue */ - -struct spsc_node { - - /* Stores spsc_node* */ - struct spsc_node *next; -}; - -struct spsc_queue { - - struct spsc_node *head; - - /* atomic pointer to struct spsc_node* */ - atomic_long_t tail; - - atomic_t job_count; -}; - -static inline void spsc_queue_init(struct spsc_queue *queue) -{ - queue->head = NULL; - atomic_long_set(&queue->tail, (long)&queue->head); - atomic_set(&queue->job_count, 0); -} - -static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue) -{ - return queue->head; -} - -static inline int spsc_queue_count(struct spsc_queue *queue) -{ - return atomic_read(&queue->job_count); -} - -static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node) -{ - struct spsc_node **tail; - - node->next = NULL; - - preempt_disable(); - - tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); - WRITE_ONCE(*tail, node); - atomic_inc(&queue->job_count); - - /* - * In case of first element verify new node will be visible to the consumer - * thread when we ping the kernel thread that there is new work to do. 
- */ - smp_wmb(); - - preempt_enable(); - - return tail == &queue->head; -} - - -static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue) -{ - struct spsc_node *next, *node; - - /* Verify reading from memory and not the cache */ - smp_rmb(); - - node = READ_ONCE(queue->head); - - if (!node) - return NULL; - - next = READ_ONCE(node->next); - WRITE_ONCE(queue->head, next); - - if (unlikely(!next)) { - /* slowpath for the last element in the queue */ - - if (atomic_long_cmpxchg(&queue->tail, - (long)&node->next, (long) &queue->head) != (long)&node->next) { - /* Updating tail failed, wait for the new next to appear */ - do { - smp_rmb(); - } while (unlikely(!(queue->head = READ_ONCE(node->next)))); - } - } - - atomic_dec(&queue->job_count); - return node; -} - - - -#endif /* AMD_SCHEDULER_SPSC_QUEUE_H_ */
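The push/pop pair above is the whole producer/consumer contract: spsc_queue_push() returns true exactly when it appended to an empty queue (so the producer knows to wake the consumer), and spsc_queue_pop() is only safe from a single consumer thread — in the scheduler above, pushes are serialized by entity->queue_lock and the lone consumer is the scheduler kthread. A minimal usage sketch under those assumptions; struct my_node and the my_* helpers are hypothetical:

```c
#include <linux/kernel.h>	/* container_of */
#include "spsc_queue.h"

struct my_node {
	struct spsc_node node;	/* queue linkage must be embedded */
	int payload;
};

static struct spsc_queue my_queue;

static void my_setup(void)
{
	spsc_queue_init(&my_queue);
}

/* Producer side: at most one concurrent pusher. */
static bool my_push(struct my_node *item)
{
	/* Returns true when the queue was empty: time to wake the consumer. */
	return spsc_queue_push(&my_queue, &item->node);
}

/* Consumer side: the single consumer thread. */
static struct my_node *my_pop(void)
{
	struct spsc_node *node = spsc_queue_pop(&my_queue);

	return node ? container_of(node, struct my_node, node) : NULL;
}
```

This mirrors how amd_sched_job embeds its spsc_node as queue_node and how amd_sched_entity_pop_job() recovers the job with the to_amd_sched_job() container_of wrapper.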