| author | Nirmoy Das <nirmoy.das@intel.com> | 2024-03-21 17:11:42 +0100 |
|---|---|---|
| committer | Lucas De Marchi <lucas.demarchi@intel.com> | 2024-03-26 15:40:19 -0700 |
| commit | 5dffaa1bb94a6bc75393476fbe3c8a704ff4fcf8 (patch) | |
| tree | bdb67868f4d9824611be80f93e2d1576d3d57537 | |
| parent | 4b217c7fa6ba8b80fcc5cd36086d7ee51b2cc54f (diff) | |
drm/xe: Create a helper function to init job's user fence
Refactor xe_sync_entry_signal so that it no longer has to modify the
xe_sched_job struct; instead, create a new helper function that sets
the user fence values for a job.

v2: Move the sync type check into xe_sched_job_init_user_fence (Lucas)
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240321161142.4954-1-nirmoy.das@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
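
To make the shape of the refactor easier to see at a glance, here is the call-site pattern condensed from the xe_exec.c hunk below. This is an illustrative fragment, not additional code from the patch; `job`, `syncs`, `num_syncs` and `i` are the variables already in scope at that call site.

```c
/* Before: xe_sync_entry_signal() took the job so it could fill in
 * job->user_fence for DRM_XE_SYNC_TYPE_USER_FENCE syncs; call sites
 * without a job had to pass NULL.
 */
for (i = 0; i < num_syncs; i++)
	xe_sync_entry_signal(&syncs[i], job, &job->drm.s_fence->finished);

/* After: signalling no longer touches the job; the user-fence fields
 * are set by the new helper, which is a no-op for other sync types.
 */
for (i = 0; i < num_syncs; i++) {
	xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
	xe_sched_job_init_user_fence(job, &syncs[i]);
}
```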
| -rw-r--r-- | drivers/gpu/drm/xe/xe_exec.c | 9 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_sched_job.c | 18 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_sched_job.h | 3 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_sync.c | 7 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_sync.h | 1 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_vm.c | 10 |
6 files changed, 32 insertions, 16 deletions
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 7692ebfe7d47..9d53ef8c49cc 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -249,7 +249,7 @@ retry:
 				goto err_unlock_list;
 			}
 			for (i = 0; i < num_syncs; i++)
-				xe_sync_entry_signal(&syncs[i], NULL, fence);
+				xe_sync_entry_signal(&syncs[i], fence);
 			xe_exec_queue_last_fence_set(q, vm, fence);
 			dma_fence_put(fence);
 		}
@@ -359,9 +359,10 @@ retry:
 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
 
-	for (i = 0; i < num_syncs; i++)
-		xe_sync_entry_signal(&syncs[i], job,
-				     &job->drm.s_fence->finished);
+	for (i = 0; i < num_syncs; i++) {
+		xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
+		xe_sched_job_init_user_fence(job, &syncs[i]);
+	}
 
 	if (xe_exec_queue_is_lr(q))
 		q->ring_ops->emit_job(job);
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 8151ddafb940..add5a8b89be8 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -5,6 +5,7 @@
 
 #include "xe_sched_job.h"
 
+#include <drm/xe_drm.h>
 #include <linux/dma-fence-array.h>
 #include <linux/slab.h>
 
@@ -15,6 +16,7 @@
 #include "xe_hw_fence.h"
 #include "xe_lrc.h"
 #include "xe_macros.h"
+#include "xe_sync_types.h"
 #include "xe_trace.h"
 #include "xe_vm.h"
 
@@ -278,6 +280,22 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
 	return drm_sched_job_add_dependency(&job->drm, fence);
 }
 
+/**
+ * xe_sched_job_init_user_fence - Initialize user_fence for the job
+ * @job: job whose user_fence needs an init
+ * @sync: sync to be use to init user_fence
+ */
+void xe_sched_job_init_user_fence(struct xe_sched_job *job,
+				  struct xe_sync_entry *sync)
+{
+	if (sync->type != DRM_XE_SYNC_TYPE_USER_FENCE)
+		return;
+
+	job->user_fence.used = true;
+	job->user_fence.addr = sync->addr;
+	job->user_fence.value = sync->timeline_value;
+}
+
 struct xe_sched_job_snapshot *
 xe_sched_job_snapshot_capture(struct xe_sched_job *job)
 {
diff --git a/drivers/gpu/drm/xe/xe_sched_job.h b/drivers/gpu/drm/xe/xe_sched_job.h
index f1a660648cf0..c75018f4660d 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.h
+++ b/drivers/gpu/drm/xe/xe_sched_job.h
@@ -10,6 +10,7 @@
 
 struct drm_printer;
 struct xe_vm;
+struct xe_sync_entry;
 
 #define XE_SCHED_HANG_LIMIT 1
 #define XE_SCHED_JOB_TIMEOUT LONG_MAX
@@ -58,6 +59,8 @@ void xe_sched_job_arm(struct xe_sched_job *job);
 void xe_sched_job_push(struct xe_sched_job *job);
 
 int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm);
+void xe_sched_job_init_user_fence(struct xe_sched_job *job,
+				  struct xe_sync_entry *sync);
 
 static inline struct xe_sched_job *
 to_xe_sched_job(struct drm_sched_job *drm)
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 02c9577fe418..65f1f1628235 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -224,8 +224,7 @@ int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
 	return 0;
 }
 
-void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
-			  struct dma_fence *fence)
+void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
 {
 	if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
 		return;
@@ -254,10 +253,6 @@ void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
 			user_fence_put(sync->ufence);
 			dma_fence_put(fence);
 		}
-	} else if (sync->type == DRM_XE_SYNC_TYPE_USER_FENCE) {
-		job->user_fence.used = true;
-		job->user_fence.addr = sync->addr;
-		job->user_fence.value = sync->timeline_value;
 	}
 }
 
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index 0fd0d51208e6..3e03396af2c6 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -26,7 +26,6 @@ int xe_sync_entry_wait(struct xe_sync_entry *sync);
 int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
 			   struct xe_sched_job *job);
 void xe_sync_entry_signal(struct xe_sync_entry *sync,
-			  struct xe_sched_job *job,
 			  struct dma_fence *fence);
 void xe_sync_entry_cleanup(struct xe_sync_entry *sync);
 struct dma_fence *
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d82d7cd27123..694fbb546372 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1700,7 +1700,7 @@ next:
 		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
 	if (last_op) {
 		for (i = 0; i < num_syncs; i++)
-			xe_sync_entry_signal(&syncs[i], NULL, fence);
+			xe_sync_entry_signal(&syncs[i], fence);
 	}
 
 	return fence;
@@ -1774,7 +1774,7 @@ next:
 
 	if (last_op) {
 		for (i = 0; i < num_syncs; i++)
-			xe_sync_entry_signal(&syncs[i], NULL,
+			xe_sync_entry_signal(&syncs[i],
 					     cf ? &cf->base : fence);
 	}
 
@@ -1835,7 +1835,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
 		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
 
 		if (last_op) {
 			for (i = 0; i < num_syncs; i++)
-				xe_sync_entry_signal(&syncs[i], NULL, fence);
+				xe_sync_entry_signal(&syncs[i], fence);
 		}
 	}
@@ -2056,7 +2056,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
 			struct dma_fence *fence =
 				xe_exec_queue_last_fence_get(wait_exec_queue, vm);
 
-			xe_sync_entry_signal(&syncs[i], NULL, fence);
+			xe_sync_entry_signal(&syncs[i], fence);
 			dma_fence_put(fence);
 		}
 	}
@@ -2934,7 +2934,7 @@ static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
 		return PTR_ERR(fence);
 
 	for (i = 0; i < num_syncs; i++)
-		xe_sync_entry_signal(&syncs[i], NULL, fence);
+		xe_sync_entry_signal(&syncs[i], fence);
 
 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
 				     fence);
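
As a sanity check on the helper's intended behavior, the following self-contained user-space mock mirrors the logic added in xe_sched_job.c: only a DRM_XE_SYNC_TYPE_USER_FENCE sync populates the job's user_fence fields, and every other sync type leaves the job untouched. The struct layouts and the numeric value of DRM_XE_SYNC_TYPE_USER_FENCE here are simplified stand-ins for illustration, not the real driver or uAPI definitions.

```c
/* Standalone mock of the helper introduced by this patch. Field names
 * mirror those used in the hunks above; the types themselves are
 * simplified stand-ins, not the real xe structures.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DRM_XE_SYNC_TYPE_USER_FENCE 2	/* placeholder value for the mock */

struct xe_sync_entry {
	uint32_t type;
	uint64_t addr;
	uint64_t timeline_value;
};

struct xe_sched_job {
	struct {
		bool used;
		uint64_t addr;
		uint64_t value;
	} user_fence;
};

/* Mirrors xe_sched_job_init_user_fence(): only user-fence syncs touch the job. */
static void xe_sched_job_init_user_fence(struct xe_sched_job *job,
					 struct xe_sync_entry *sync)
{
	if (sync->type != DRM_XE_SYNC_TYPE_USER_FENCE)
		return;

	job->user_fence.used = true;
	job->user_fence.addr = sync->addr;
	job->user_fence.value = sync->timeline_value;
}

int main(void)
{
	struct xe_sched_job job = {0};
	struct xe_sync_entry syncs[] = {
		{ .type = 0 },	/* non-user-fence sync: job must stay untouched */
		{ .type = DRM_XE_SYNC_TYPE_USER_FENCE,
		  .addr = 0x1000, .timeline_value = 42 },
	};

	for (size_t i = 0; i < sizeof(syncs) / sizeof(syncs[0]); i++)
		xe_sched_job_init_user_fence(&job, &syncs[i]);

	printf("used=%d addr=0x%llx value=%llu\n", job.user_fence.used,
	       (unsigned long long)job.user_fence.addr,
	       (unsigned long long)job.user_fence.value);
	return 0;
}
```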