Diffstat (limited to 'drivers/misc/habanalabs/common/command_submission.c')
-rw-r--r-- | drivers/misc/habanalabs/common/command_submission.c | 1016
1 file changed, 617 insertions, 399 deletions
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index b2b974ecc431..beb482310a58 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -11,11 +11,25 @@
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 
-#define HL_CS_FLAGS_SIG_WAIT    (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT)
+#define HL_CS_FLAGS_TYPE_MASK   (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
+                        HL_CS_FLAGS_COLLECTIVE_WAIT)
+
+/**
+ * enum hl_cs_wait_status - cs wait status
+ * @CS_WAIT_STATUS_BUSY: cs was not completed yet
+ * @CS_WAIT_STATUS_COMPLETED: cs completed
+ * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
+ */
+enum hl_cs_wait_status {
+        CS_WAIT_STATUS_BUSY,
+        CS_WAIT_STATUS_COMPLETED,
+        CS_WAIT_STATUS_GONE
+};
 
 static void job_wq_completion(struct work_struct *work);
-static long _hl_cs_wait_ioctl(struct hl_device *hdev,
-                struct hl_ctx *ctx, u64 timeout_us, u64 seq);
+static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
+                                u64 timeout_us, u64 seq,
+                                enum hl_cs_wait_status *status, s64 *timestamp);
 static void cs_do_release(struct kref *ref);
 
 static void hl_sob_reset(struct kref *ref)
@@ -38,6 +52,38 @@ void hl_sob_reset_error(struct kref *ref)
                 hw_sob->q_idx, hw_sob->sob_id);
 }
 
+/**
+ * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
+ * @sob_base: sob base id
+ * @sob_mask: sob user mask, each bit represents a sob offset from sob base
+ * @mask: generated mask
+ *
+ * Return: 0 if given parameters are valid
+ */
+int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
+{
+        int i;
+
+        if (sob_mask == 0)
+                return -EINVAL;
+
+        if (sob_mask == 0x1) {
+                *mask = ~(1 << (sob_base & 0x7));
+        } else {
+                /* find msb in order to verify sob range is valid */
+                for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
+                        if (BIT(i) & sob_mask)
+                                break;
+
+                if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
+                        return -EINVAL;
+
+                *mask = ~sob_mask;
+        }
+
+        return 0;
+}
+
 static void hl_fence_release(struct kref *kref)
 {
         struct hl_fence *fence =
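The new helper is self-contained enough to model outside the driver. Below is a minimal standalone sketch of the same computation, assuming HL_MAX_SOBS_PER_MONITOR is 8 (as implied by the byte-wide mask); gen_sob_mask() is a hypothetical userspace mirror of the kernel helper, not driver API:

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8
#define MAX_SOBS_PER_MONITOR 8 /* assumption for illustration */

static int gen_sob_mask(uint16_t sob_base, uint8_t sob_mask, uint8_t *mask)
{
        int i;

        if (sob_mask == 0)
                return -1;

        if (sob_mask == 0x1) {
                /* single sob: clear only its bit within the 8-sob window */
                *mask = ~(1 << (sob_base & 0x7));
        } else {
                /* find msb to verify the sob range fits the monitor window */
                for (i = BITS_PER_BYTE - 1; i >= 0; i--)
                        if ((1 << i) & sob_mask)
                                break;
                if (i > (MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
                        return -1;
                *mask = ~sob_mask;
        }
        return 0;
}

int main(void)
{
        uint8_t mask;

        /* sob at offset 5 in the window: mask clears bit 5, prints 0xdf */
        if (!gen_sob_mask(5, 0x1, &mask))
                printf("mask = 0x%02x\n", mask);
        return 0;
}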
@@ -53,7 +99,8 @@ static void hl_fence_release(struct kref *kref)
                 goto free;
 
         if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
-                        (hl_cs_cmpl->type == CS_TYPE_WAIT)) {
+                        (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
+                        (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)) {
 
                 dev_dbg(hdev->dev,
                         "CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",
@@ -80,6 +127,10 @@
          * hence the above scenario is avoided.
          */
         kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
+
+        if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
+                hdev->asic_funcs->reset_sob_group(hdev,
+                                hl_cs_cmpl->sob_group);
 }
 
 free:
@@ -102,10 +153,11 @@ static void hl_fence_init(struct hl_fence *fence)
 {
         kref_init(&fence->refcount);
         fence->error = 0;
+        fence->timestamp = ktime_set(0, 0);
         init_completion(&fence->completion);
 }
 
-static void cs_get(struct hl_cs *cs)
+void cs_get(struct hl_cs *cs)
 {
         kref_get(&cs->refcount);
 }
@@ -120,6 +172,18 @@ static void cs_put(struct hl_cs *cs)
 {
         kref_put(&cs->refcount, cs_do_release);
 }
 
+static void cs_job_do_release(struct kref *ref)
+{
+        struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);
+
+        kfree(job);
+}
+
+static void cs_job_put(struct hl_cs_job *job)
+{
+        kref_put(&job->refcount, cs_job_do_release);
+}
+
 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
 {
         /*
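cs_job_do_release()/cs_job_put() adopt the usual kref pattern so a job's memory is freed only at the last reference drop. A hedged C11 sketch of the same lifetime rule (job_alloc/job_get/job_put are hypothetical names; plain atomics stand in for struct kref):

#include <stdatomic.h>
#include <stdlib.h>

struct job {
        atomic_int refcount;
        /* payload ... */
};

static struct job *job_alloc(void)
{
        struct job *job = calloc(1, sizeof(*job));

        if (job)
                atomic_init(&job->refcount, 1); /* like kref_init() */
        return job;
}

static void job_get(struct job *job)
{
        atomic_fetch_add(&job->refcount, 1); /* like kref_get() */
}

static void job_put(struct job *job)
{
        /* last put frees, like kref_put(&job->refcount, cs_job_do_release) */
        if (atomic_fetch_sub(&job->refcount, 1) == 1)
                free(job);
}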
@@ -169,10 +233,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
                 job->patched_cb = parser.patched_cb;
                 job->job_cb_size = parser.patched_cb_size;
                 job->contains_dma_pkt = parser.contains_dma_pkt;
-
-                spin_lock(&job->patched_cb->lock);
-                job->patched_cb->cs_cnt++;
-                spin_unlock(&job->patched_cb->lock);
+                atomic_inc(&job->patched_cb->cs_cnt);
         }
 
         /*
@@ -180,9 +241,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
                  * original CB anymore because it was already parsed and
                  * won't be accessed again for this CS
                  */
-                spin_lock(&job->user_cb->lock);
-                job->user_cb->cs_cnt--;
-                spin_unlock(&job->user_cb->lock);
+                atomic_dec(&job->user_cb->cs_cnt);
                 hl_cb_put(job->user_cb);
                 job->user_cb = NULL;
         } else if (!rc) {
@@ -192,7 +251,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
         return rc;
 }
 
-static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
+static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
 {
         struct hl_cs *cs = job->cs;
 
@@ -204,10 +263,7 @@
          * created, so we need to check it's not NULL
          */
         if (job->patched_cb) {
-                spin_lock(&job->patched_cb->lock);
-                job->patched_cb->cs_cnt--;
-                spin_unlock(&job->patched_cb->lock);
-
+                atomic_dec(&job->patched_cb->cs_cnt);
                 hl_cb_put(job->patched_cb);
         }
 }
@@ -215,13 +271,12 @@ static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
         /* For H/W queue jobs, if a user CB was allocated by driver and MMU is
          * enabled, the user CB isn't released in cs_parser() and thus should be
          * released here.
+         * This is also true for INT queues jobs which were allocated by driver
          */
-        if (job->queue_type == QUEUE_TYPE_HW &&
-                        job->is_kernel_allocated_cb && hdev->mmu_enable) {
-                spin_lock(&job->user_cb->lock);
-                job->user_cb->cs_cnt--;
-                spin_unlock(&job->user_cb->lock);
-
+        if (job->is_kernel_allocated_cb &&
+                ((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
+                job->queue_type == QUEUE_TYPE_INT)) {
+                atomic_dec(&job->user_cb->cs_cnt);
                 hl_cb_put(job->user_cb);
         }
 
@@ -239,27 +294,12 @@
                         job->queue_type == QUEUE_TYPE_HW)
                 cs_put(cs);
 
-        kfree(job);
-}
-
-static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx)
-{
-        hdev->aggregated_cs_counters.device_in_reset_drop_cnt +=
-                        ctx->cs_counters.device_in_reset_drop_cnt;
-        hdev->aggregated_cs_counters.out_of_mem_drop_cnt +=
-                        ctx->cs_counters.out_of_mem_drop_cnt;
-        hdev->aggregated_cs_counters.parsing_drop_cnt +=
-                        ctx->cs_counters.parsing_drop_cnt;
-        hdev->aggregated_cs_counters.queue_full_drop_cnt +=
-                        ctx->cs_counters.queue_full_drop_cnt;
-        hdev->aggregated_cs_counters.max_cs_in_flight_drop_cnt +=
-                        ctx->cs_counters.max_cs_in_flight_drop_cnt;
+        cs_job_put(job);
 }
 
 static void cs_do_release(struct kref *ref)
 {
-        struct hl_cs *cs = container_of(ref, struct hl_cs,
-                        refcount);
+        struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
         struct hl_device *hdev = cs->ctx->hdev;
         struct hl_cs_job *job, *tmp;
 
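The recurring change in these hunks replaces a spinlock-protected cs_cnt with an atomic counter, which is safe because the count is never read together with other CB state under the same lock. A minimal C11 analogue of the transformation (struct cb and the helper names are hypothetical):

#include <stdatomic.h>

/* before: int cs_cnt guarded by a lock; after: one atomic counter */
struct cb {
        atomic_int cs_cnt;
};

static void cb_get_for_cs(struct cb *cb)
{
        /* was: lock(); cb->cs_cnt++; unlock(); */
        atomic_fetch_add(&cb->cs_cnt, 1);
}

static void cb_put_for_cs(struct cb *cb)
{
        /* was: lock(); cb->cs_cnt--; unlock(); */
        atomic_fetch_sub(&cb->cs_cnt, 1);
}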
@@ -268,77 +308,78 @@ static void cs_do_release(struct kref *ref)
         /*
          * Although if we reached here it means that all external jobs have
          * finished, because each one of them took refcnt to CS, we still
-         * need to go over the internal jobs and free them. Otherwise, we
+         * need to go over the internal jobs and complete them. Otherwise, we
          * will have leaked memory and what's worse, the CS object (and
          * potentially the CTX object) could be released, while the JOB
          * still holds a pointer to them (but no reference).
          */
         list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
-                free_job(hdev, job);
+                complete_job(hdev, job);
 
-        /* We also need to update CI for internal queues */
-        if (cs->submitted) {
-                hdev->asic_funcs->hw_queues_lock(hdev);
+        if (!cs->submitted) {
+                /* In case the wait for signal CS was submitted, the put occurs
+                 * in init_signal_wait_cs() or collective_wait_init_cs()
+                 * right before hanging on the PQ.
+                 */
+                if (cs->type == CS_TYPE_WAIT ||
+                                cs->type == CS_TYPE_COLLECTIVE_WAIT)
+                        hl_fence_put(cs->signal_fence);
 
-                hdev->cs_active_cnt--;
-                if (!hdev->cs_active_cnt) {
-                        struct hl_device_idle_busy_ts *ts;
+                goto out;
+        }
 
-                        ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
-                        ts->busy_to_idle_ts = ktime_get();
+        hdev->asic_funcs->hw_queues_lock(hdev);
 
-                        if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
-                                hdev->idle_busy_ts_idx = 0;
-                } else if (hdev->cs_active_cnt < 0) {
-                        dev_crit(hdev->dev, "CS active cnt %d is negative\n",
-                                hdev->cs_active_cnt);
-                }
+        hdev->cs_active_cnt--;
+        if (!hdev->cs_active_cnt) {
+                struct hl_device_idle_busy_ts *ts;
 
-                hdev->asic_funcs->hw_queues_unlock(hdev);
+                ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
+                ts->busy_to_idle_ts = ktime_get();
 
-                hl_int_hw_queue_update_ci(cs);
+                if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
+                        hdev->idle_busy_ts_idx = 0;
+        } else if (hdev->cs_active_cnt < 0) {
+                dev_crit(hdev->dev, "CS active cnt %d is negative\n",
+                        hdev->cs_active_cnt);
+        }
 
-                spin_lock(&hdev->hw_queues_mirror_lock);
-                /* remove CS from hw_queues mirror list */
-                list_del_init(&cs->mirror_node);
-                spin_unlock(&hdev->hw_queues_mirror_lock);
+        hdev->asic_funcs->hw_queues_unlock(hdev);
 
-                /*
-                 * Don't cancel TDR in case this CS was timedout because we
-                 * might be running from the TDR context
-                 */
-                if ((!cs->timedout) &&
-                        (hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) {
-                        struct hl_cs *next;
+        /* Need to update CI for internal queues */
+        hl_int_hw_queue_update_ci(cs);
 
-                        if (cs->tdr_active)
-                                cancel_delayed_work_sync(&cs->work_tdr);
+        /* remove CS from CS mirror list */
+        spin_lock(&hdev->cs_mirror_lock);
+        list_del_init(&cs->mirror_node);
+        spin_unlock(&hdev->cs_mirror_lock);
 
-                        spin_lock(&hdev->hw_queues_mirror_lock);
+        /* Don't cancel TDR in case this CS was timedout because we might be
+         * running from the TDR context
+         */
+        if (!cs->timedout && hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
+                struct hl_cs *next;
 
-                        /* queue TDR for next CS */
-                        next = list_first_entry_or_null(
-                                        &hdev->hw_queues_mirror_list,
-                                        struct hl_cs, mirror_node);
+                if (cs->tdr_active)
+                        cancel_delayed_work_sync(&cs->work_tdr);
 
-                        if ((next) && (!next->tdr_active)) {
-                                next->tdr_active = true;
-                                schedule_delayed_work(&next->work_tdr,
-                                                        hdev->timeout_jiffies);
-                        }
+                spin_lock(&hdev->cs_mirror_lock);
 
-                        spin_unlock(&hdev->hw_queues_mirror_lock);
+                /* queue TDR for next CS */
+                next = list_first_entry_or_null(&hdev->cs_mirror_list,
+                                struct hl_cs, mirror_node);
+
+                if (next && !next->tdr_active) {
+                        next->tdr_active = true;
+                        schedule_delayed_work(&next->work_tdr,
+                                        hdev->timeout_jiffies);
                 }
-        } else if (cs->type == CS_TYPE_WAIT) {
-                /*
-                 * In case the wait for signal CS was submitted, the put occurs
-                 * in init_signal_wait_cs() right before hanging on the PQ.
-                 */
-                hl_fence_put(cs->signal_fence);
+
+                spin_unlock(&hdev->cs_mirror_lock);
         }
 
-        /*
-         * Must be called before hl_ctx_put because inside we use ctx to get
+out:
+        /* Must be called before hl_ctx_put because inside we use ctx to get
          * the device
          */
         hl_debugfs_remove_cs(cs);
@@ -356,9 +397,10 @@ static void cs_do_release(struct kref *ref)
         else if (!cs->submitted)
                 cs->fence->error = -EBUSY;
 
+        if (cs->timestamp)
+                cs->fence->timestamp = ktime_get();
         complete_all(&cs->fence->completion);
         hl_fence_put(cs->fence);
-        cs_counters_aggregate(hdev, cs->ctx);
 
         kfree(cs->jobs_in_queue_cnt);
         kfree(cs);
@@ -384,24 +426,51 @@ static void cs_timedout(struct work_struct *work)
 
         hdev = cs->ctx->hdev;
 
-        dev_err(hdev->dev,
-                "Command submission %llu has not finished in time!\n",
-                cs->sequence);
+        switch (cs->type) {
+        case CS_TYPE_SIGNAL:
+                dev_err(hdev->dev,
+                        "Signal command submission %llu has not finished in time!\n",
+                        cs->sequence);
+                break;
+
+        case CS_TYPE_WAIT:
+                dev_err(hdev->dev,
+                        "Wait command submission %llu has not finished in time!\n",
+                        cs->sequence);
+                break;
+
+        case CS_TYPE_COLLECTIVE_WAIT:
+                dev_err(hdev->dev,
+                        "Collective Wait command submission %llu has not finished in time!\n",
+                        cs->sequence);
+                break;
+
+        default:
+                dev_err(hdev->dev,
+                        "Command submission %llu has not finished in time!\n",
+                        cs->sequence);
+                break;
+        }
 
         cs_put(cs);
 
         if (hdev->reset_on_lockup)
                 hl_device_reset(hdev, false, false);
+        else
+                hdev->needs_reset = true;
 }
 
 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
                 enum hl_cs_type cs_type, struct hl_cs **cs_new)
 {
-        struct hl_cs_compl *cs_cmpl;
+        struct hl_cs_counters_atomic *cntr;
         struct hl_fence *other = NULL;
+        struct hl_cs_compl *cs_cmpl;
         struct hl_cs *cs;
         int rc;
 
+        cntr = &hdev->aggregated_cs_counters;
+
         cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
         if (!cs)
                 return -ENOMEM;
@@ -435,7 +504,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
         if (other && !completion_done(&other->completion)) {
                 dev_dbg_ratelimited(hdev->dev,
                         "Rejecting CS because of too many in-flights CS\n");
-                ctx->cs_counters.max_cs_in_flight_drop_cnt++;
+                atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
+                atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
                 rc = -EAGAIN;
                 goto free_fence;
         }
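cs_do_release() now stamps the fence with ktime_get() when the CS asked for a timestamp, and the wait path later reports it via ktime_to_ns(). A hedged userspace analogue of that record-then-report flow using CLOCK_MONOTONIC (struct fence here is hypothetical, not the driver's hl_fence):

#include <stdint.h>
#include <time.h>

struct fence {
        int completed;
        struct timespec timestamp;
};

static void fence_complete(struct fence *f, int want_timestamp)
{
        /* like: if (cs->timestamp) cs->fence->timestamp = ktime_get(); */
        if (want_timestamp)
                clock_gettime(CLOCK_MONOTONIC, &f->timestamp);
        f->completed = 1;
}

static int64_t fence_timestamp_ns(const struct fence *f)
{
        /* like ktime_to_ns(fence->timestamp) on the wait side */
        return (int64_t)f->timestamp.tv_sec * 1000000000LL +
               f->timestamp.tv_nsec;
}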
@@ -480,7 +550,7 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
         struct hl_cs_job *job, *tmp;
 
         list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
-                free_job(hdev, job);
+                complete_job(hdev, job);
 }
 
 void hl_cs_rollback_all(struct hl_device *hdev)
@@ -493,8 +563,7 @@ void hl_cs_rollback_all(struct hl_device *hdev)
                 flush_workqueue(hdev->cq_wq[i]);
 
         /* Make sure we don't have leftovers in the H/W queues mirror list */
-        list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
-                        mirror_node) {
+        list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
                 cs_get(cs);
                 cs->aborted = true;
                 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
@@ -512,7 +581,7 @@ static void job_wq_completion(struct work_struct *work)
         struct hl_device *hdev = cs->ctx->hdev;
 
         /* job is no longer needed */
-        free_job(hdev, job);
+        complete_job(hdev, job);
 }
 
 static int validate_queue_index(struct hl_device *hdev,
@@ -547,9 +616,36 @@ static int validate_queue_index(struct hl_device *hdev,
                 return -EINVAL;
         }
 
-        *queue_type = hw_queue_prop->type;
-        *is_kernel_allocated_cb = !!hw_queue_prop->requires_kernel_cb;
+        /* When hw queue type isn't QUEUE_TYPE_HW,
+         * USER_ALLOC_CB flag shall be referred as "don't care".
+         */
+        if (hw_queue_prop->type == QUEUE_TYPE_HW) {
+                if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
+                        if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
+                                dev_err(hdev->dev,
+                                        "Queue index %d doesn't support user CB\n",
+                                        chunk->queue_index);
+                                return -EINVAL;
+                        }
+
+                        *is_kernel_allocated_cb = false;
+                } else {
+                        if (!(hw_queue_prop->cb_alloc_flags &
+                                        CB_ALLOC_KERNEL)) {
+                                dev_err(hdev->dev,
+                                        "Queue index %d doesn't support kernel CB\n",
+                                        chunk->queue_index);
+                                return -EINVAL;
+                        }
+
+                        *is_kernel_allocated_cb = true;
+                }
+        } else {
+                *is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
+                                                & CB_ALLOC_KERNEL);
+        }
+
+        *queue_type = hw_queue_prop->type;
         return 0;
 }
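The validate_queue_index() rework boils down to a capability check on cb_alloc_flags: a user-CB request needs the user-alloc capability, anything else needs the kernel-alloc capability. A compact restatement under assumed flag names (pick_cb_owner() is hypothetical, not driver code):

#include <stdbool.h>
#include <stdint.h>

/* hypothetical capability bits mirroring CB_ALLOC_KERNEL/CB_ALLOC_USER */
#define ALLOC_KERNEL 0x1
#define ALLOC_USER   0x2

/* Returns true and sets *kernel_cb when the request is satisfiable. */
static bool pick_cb_owner(uint32_t caps, bool want_user_cb, bool *kernel_cb)
{
        if (want_user_cb) {
                if (!(caps & ALLOC_USER))
                        return false;
                *kernel_cb = false;
        } else {
                if (!(caps & ALLOC_KERNEL))
                        return false;
                *kernel_cb = true;
        }
        return true;
}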
@@ -573,9 +669,7 @@ static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
                 goto release_cb;
         }
 
-        spin_lock(&cb->lock);
-        cb->cs_cnt++;
-        spin_unlock(&cb->lock);
+        atomic_inc(&cb->cs_cnt);
 
         return cb;
 
@@ -593,6 +687,7 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
         if (!job)
                 return NULL;
 
+        kref_init(&job->refcount);
         job->queue_type = queue_type;
         job->is_kernel_allocated_cb = is_kernel_allocated_cb;
 
@@ -605,42 +700,115 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
         return job;
 }
 
-static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
-                                u32 num_chunks, u64 *cs_seq)
+static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
+{
+        if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
+                return CS_TYPE_SIGNAL;
+        else if (cs_type_flags & HL_CS_FLAGS_WAIT)
+                return CS_TYPE_WAIT;
+        else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
+                return CS_TYPE_COLLECTIVE_WAIT;
+        else
+                return CS_TYPE_DEFAULT;
+}
+
+static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
 {
         struct hl_device *hdev = hpriv->hdev;
-        struct hl_cs_chunk *cs_chunk_array;
-        struct hl_cs_job *job;
-        struct hl_cs *cs;
-        struct hl_cb *cb;
-        bool int_queues_only = true;
-        u32 size_to_copy;
-        int rc, i;
+        struct hl_ctx *ctx = hpriv->ctx;
+        u32 cs_type_flags, num_chunks;
+        enum hl_device_status status;
+        enum hl_cs_type cs_type;
 
-        *cs_seq = ULLONG_MAX;
+        if (!hl_device_operational(hdev, &status)) {
+                dev_warn_ratelimited(hdev->dev,
+                        "Device is %s. Can't submit new CS\n",
+                        hdev->status[status]);
+                return -EBUSY;
+        }
+
+        cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
+
+        if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
+                dev_err(hdev->dev,
+                        "CS type flags are mutually exclusive, context %d\n",
+                        ctx->asid);
+                return -EINVAL;
+        }
+
+        cs_type = hl_cs_get_cs_type(cs_type_flags);
+        num_chunks = args->in.num_chunks_execute;
+
+        if (unlikely((cs_type != CS_TYPE_DEFAULT) &&
+                                !hdev->supports_sync_stream)) {
+                dev_err(hdev->dev, "Sync stream CS is not supported\n");
+                return -EINVAL;
+        }
+
+        if (cs_type == CS_TYPE_DEFAULT) {
+                if (!num_chunks) {
+                        dev_err(hdev->dev,
+                                "Got execute CS with 0 chunks, context %d\n",
+                                ctx->asid);
+                        return -EINVAL;
+                }
+        } else if (num_chunks != 1) {
+                dev_err(hdev->dev,
+                        "Sync stream CS mandates one chunk only, context %d\n",
+                        ctx->asid);
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+static int hl_cs_copy_chunk_array(struct hl_device *hdev,
+                struct hl_cs_chunk **cs_chunk_array,
+                void __user *chunks, u32 num_chunks)
+{
+        u32 size_to_copy;
 
         if (num_chunks > HL_MAX_JOBS_PER_CS) {
                 dev_err(hdev->dev,
                         "Number of chunks can NOT be larger than %d\n",
                         HL_MAX_JOBS_PER_CS);
-                rc = -EINVAL;
-                goto out;
+                return -EINVAL;
         }
 
-        cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
+        *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
                         GFP_ATOMIC);
-        if (!cs_chunk_array) {
-                rc = -ENOMEM;
-                goto out;
-        }
+        if (!*cs_chunk_array)
+                return -ENOMEM;
 
         size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
-        if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
+        if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
                 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
-                rc = -EFAULT;
-                goto free_cs_chunk_array;
+                kfree(*cs_chunk_array);
+                return -EFAULT;
         }
 
+        return 0;
+}
+
+static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
+                                u32 num_chunks, u64 *cs_seq, bool timestamp)
+{
+        bool int_queues_only = true;
+        struct hl_device *hdev = hpriv->hdev;
+        struct hl_cs_chunk *cs_chunk_array;
+        struct hl_cs_counters_atomic *cntr;
+        struct hl_cs_job *job;
+        struct hl_cs *cs;
+        struct hl_cb *cb;
+        int rc, i;
+
+        cntr = &hdev->aggregated_cs_counters;
+        *cs_seq = ULLONG_MAX;
+
+        rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+        if (rc)
+                goto out;
+
         /* increment refcnt for context */
         hl_ctx_get(hdev, hpriv->ctx);
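hl_cs_sanity_checks() enforces the mutual exclusivity of the type flags with `cs_type_flags && !is_power_of_2(cs_type_flags)`, i.e. the masked flags may be zero or have exactly one bit set. The equivalent one-liner, sketched standalone (at_most_one_flag() is a hypothetical name):

#include <stdbool.h>
#include <stdint.h>

/* true when flags is 0 or a power of two, i.e. at most one bit set */
static bool at_most_one_flag(uint32_t flags)
{
        return (flags & (flags - 1)) == 0;
}

So a submission carrying, say, both the signal and wait bits fails this test and is rejected, while a plain execute CS (no type bits) passes.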
@@ -650,6 +818,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
                 goto free_cs_chunk_array;
         }
 
+        cs->timestamp = !!timestamp;
         *cs_seq = cs->sequence;
 
         hl_debugfs_add_cs(cs);
@@ -663,14 +832,17 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
                 rc = validate_queue_index(hdev, chunk, &queue_type,
                                 &is_kernel_allocated_cb);
                 if (rc) {
-                        hpriv->ctx->cs_counters.parsing_drop_cnt++;
+                        atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+                        atomic64_inc(&cntr->parsing_drop_cnt);
                         goto free_cs_object;
                 }
 
                 if (is_kernel_allocated_cb) {
                         cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
                         if (!cb) {
-                                hpriv->ctx->cs_counters.parsing_drop_cnt++;
+                                atomic64_inc(
+                                        &hpriv->ctx->cs_counters.parsing_drop_cnt);
+                                atomic64_inc(&cntr->parsing_drop_cnt);
                                 rc = -EINVAL;
                                 goto free_cs_object;
                         }
@@ -684,7 +856,9 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
                 job = hl_cs_allocate_job(hdev, queue_type,
                                 is_kernel_allocated_cb);
                 if (!job) {
-                        hpriv->ctx->cs_counters.out_of_mem_drop_cnt++;
+                        atomic64_inc(
+                                &hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
+                        atomic64_inc(&cntr->out_of_mem_drop_cnt);
                         dev_err(hdev->dev, "Failed to allocate a new job\n");
                         rc = -ENOMEM;
                         if (is_kernel_allocated_cb)
@@ -717,7 +891,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
 
                 rc = cs_parser(hpriv, job);
                 if (rc) {
-                        hpriv->ctx->cs_counters.parsing_drop_cnt++;
+                        atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+                        atomic64_inc(&cntr->parsing_drop_cnt);
                         dev_err(hdev->dev,
                                 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
                                 cs->ctx->asid, cs->sequence, job->id, rc);
@@ -726,7 +901,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
         }
 
         if (int_queues_only) {
-                hpriv->ctx->cs_counters.parsing_drop_cnt++;
+                atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+                atomic64_inc(&cntr->parsing_drop_cnt);
                 dev_err(hdev->dev,
                         "Reject CS %d.%llu because only internal queues jobs are present\n",
                         cs->ctx->asid, cs->sequence);
@@ -747,9 +923,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
         goto put_cs;
 
 release_cb:
-        spin_lock(&cb->lock);
-        cb->cs_cnt--;
-        spin_unlock(&cb->lock);
+        atomic_dec(&cb->cs_cnt);
         hl_cb_put(cb);
 free_cs_object:
         cs_rollback(hdev, cs);
@@ -764,47 +938,234 @@ out:
         return rc;
 }
 
-static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
-                                void __user *chunks, u32 num_chunks,
+static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
                                 u64 *cs_seq)
 {
         struct hl_device *hdev = hpriv->hdev;
         struct hl_ctx *ctx = hpriv->ctx;
-        struct hl_cs_chunk *cs_chunk_array, *chunk;
-        struct hw_queue_properties *hw_queue_prop;
-        struct hl_fence *sig_fence = NULL;
-        struct hl_cs_job *job;
-        struct hl_cs *cs;
-        struct hl_cb *cb;
-        enum hl_queue_type q_type;
-        u64 *signal_seq_arr = NULL, signal_seq;
-        u32 size_to_copy, q_idx, signal_seq_arr_len, cb_size;
-        int rc;
+        bool need_soft_reset = false;
+        int rc = 0, do_ctx_switch;
+        void __user *chunks;
+        u32 num_chunks, tmp;
+        int ret;
 
-        *cs_seq = ULLONG_MAX;
+        do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
 
-        if (num_chunks > HL_MAX_JOBS_PER_CS) {
+        if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
+                mutex_lock(&hpriv->restore_phase_mutex);
+
+                if (do_ctx_switch) {
+                        rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
+                        if (rc) {
+                                dev_err_ratelimited(hdev->dev,
+                                        "Failed to switch to context %d, rejecting CS! %d\n",
+                                        ctx->asid, rc);
+                                /*
+                                 * If we timedout, or if the device is not IDLE
+                                 * while we want to do context-switch (-EBUSY),
+                                 * we need to soft-reset because QMAN is
+                                 * probably stuck. However, we can't call to
+                                 * reset here directly because of deadlock, so
+                                 * need to do it at the very end of this
+                                 * function
+                                 */
+                                if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
+                                        need_soft_reset = true;
+                                mutex_unlock(&hpriv->restore_phase_mutex);
+                                goto out;
+                        }
+                }
+
+                hdev->asic_funcs->restore_phase_topology(hdev);
+
+                chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
+                num_chunks = args->in.num_chunks_restore;
+
+                if (!num_chunks) {
+                        dev_dbg(hdev->dev,
+                                "Need to run restore phase but restore CS is empty\n");
+                        rc = 0;
+                } else {
+                        rc = cs_ioctl_default(hpriv, chunks, num_chunks,
+                                                cs_seq, false);
+                }
+
+                mutex_unlock(&hpriv->restore_phase_mutex);
+
+                if (rc) {
+                        dev_err(hdev->dev,
+                                "Failed to submit restore CS for context %d (%d)\n",
+                                ctx->asid, rc);
+                        goto out;
+                }
+
+                /* Need to wait for restore completion before execution phase */
+                if (num_chunks) {
+                        enum hl_cs_wait_status status;
+wait_again:
+                        ret = _hl_cs_wait_ioctl(hdev, ctx,
+                                        jiffies_to_usecs(hdev->timeout_jiffies),
+                                        *cs_seq, &status, NULL);
+                        if (ret) {
+                                if (ret == -ERESTARTSYS) {
+                                        usleep_range(100, 200);
+                                        goto wait_again;
+                                }
+
+                                dev_err(hdev->dev,
+                                        "Restore CS for context %d failed to complete %d\n",
+                                        ctx->asid, ret);
+                                rc = -ENOEXEC;
+                                goto out;
+                        }
+                }
+
+                ctx->thread_ctx_switch_wait_token = 1;
+
+        } else if (!ctx->thread_ctx_switch_wait_token) {
+                rc = hl_poll_timeout_memory(hdev,
+                        &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
+                        100, jiffies_to_usecs(hdev->timeout_jiffies), false);
+
+                if (rc == -ETIMEDOUT) {
+                        dev_err(hdev->dev,
+                                "context switch phase timeout (%d)\n", tmp);
+                        goto out;
+                }
+        }
+
+out:
+        if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
+                hl_device_reset(hdev, false, false);
+
+        return rc;
+}
+
+static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
+                struct hl_cs_chunk *chunk, u64 *signal_seq)
+{
+        u64 *signal_seq_arr = NULL;
+        u32 size_to_copy, signal_seq_arr_len;
+        int rc = 0;
+
+        signal_seq_arr_len = chunk->num_signal_seq_arr;
+
+        /* currently only one signal seq is supported */
+        if (signal_seq_arr_len != 1) {
                 dev_err(hdev->dev,
-                        "Number of chunks can NOT be larger than %d\n",
-                        HL_MAX_JOBS_PER_CS);
-                rc = -EINVAL;
-                goto out;
+                        "Wait for signal CS supports only one signal CS seq\n");
+                return -EINVAL;
         }
 
-        cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
+        signal_seq_arr = kmalloc_array(signal_seq_arr_len,
+                                        sizeof(*signal_seq_arr),
                                         GFP_ATOMIC);
-        if (!cs_chunk_array) {
-                rc = -ENOMEM;
+        if (!signal_seq_arr)
+                return -ENOMEM;
+
+        size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
+        if (copy_from_user(signal_seq_arr,
+                                u64_to_user_ptr(chunk->signal_seq_arr),
+                                size_to_copy)) {
+                dev_err(hdev->dev,
+                        "Failed to copy signal seq array from user\n");
+                rc = -EFAULT;
                 goto out;
         }
 
-        size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
-        if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
-                dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
-                rc = -EFAULT;
-                goto free_cs_chunk_array;
+        /* currently it is guaranteed to have only one signal seq */
+        *signal_seq = signal_seq_arr[0];
+
+out:
+        kfree(signal_seq_arr);
+
+        return rc;
+}
+
+static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
+                struct hl_ctx *ctx, struct hl_cs *cs, enum hl_queue_type q_type,
+                u32 q_idx)
+{
+        struct hl_cs_counters_atomic *cntr;
+        struct hl_cs_job *job;
+        struct hl_cb *cb;
+        u32 cb_size;
+
+        cntr = &hdev->aggregated_cs_counters;
+
+        job = hl_cs_allocate_job(hdev, q_type, true);
+        if (!job) {
+                atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+                atomic64_inc(&cntr->out_of_mem_drop_cnt);
+                dev_err(hdev->dev, "Failed to allocate a new job\n");
+                return -ENOMEM;
         }
 
+        if (cs->type == CS_TYPE_WAIT)
+                cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
+        else
+                cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
+
+        cb = hl_cb_kernel_create(hdev, cb_size,
+                        q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
+        if (!cb) {
+                atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+                atomic64_inc(&cntr->out_of_mem_drop_cnt);
+                kfree(job);
+                return -EFAULT;
+        }
+
+        job->id = 0;
+        job->cs = cs;
+        job->user_cb = cb;
+        atomic_inc(&job->user_cb->cs_cnt);
+        job->user_cb_size = cb_size;
+        job->hw_queue_id = q_idx;
+
+        /*
+         * No need in parsing, user CB is the patched CB.
+         * We call hl_cb_destroy() out of two reasons - we don't need the CB in
+         * the CB idr anymore and to decrement its refcount as it was
+         * incremented inside hl_cb_kernel_create().
+         */
+        job->patched_cb = job->user_cb;
+        job->job_cb_size = job->user_cb_size;
+        hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+        /* increment refcount as for external queues we get completion */
+        cs_get(cs);
+
+        cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+
+        list_add_tail(&job->cs_node, &cs->job_list);
+
+        hl_debugfs_add_job(hdev, job);
+
+        return 0;
+}
+
+static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
+                                void __user *chunks, u32 num_chunks,
+                                u64 *cs_seq, bool timestamp)
+{
+        struct hl_cs_chunk *cs_chunk_array, *chunk;
+        struct hw_queue_properties *hw_queue_prop;
+        struct hl_device *hdev = hpriv->hdev;
+        struct hl_cs_compl *sig_waitcs_cmpl;
+        u32 q_idx, collective_engine_id = 0;
+        struct hl_fence *sig_fence = NULL;
+        struct hl_ctx *ctx = hpriv->ctx;
+        enum hl_queue_type q_type;
+        struct hl_cs *cs;
+        u64 signal_seq;
+        int rc;
+
+        *cs_seq = ULLONG_MAX;
+
+        rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+        if (rc)
+                goto out;
+
         /* currently it is guaranteed to have only one chunk */
         chunk = &cs_chunk_array[0];
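hl_cs_ctx_switch() keys off atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0): a one-shot token that lets exactly one submitting thread run the context-switch and restore phase, while later threads poll the wait token instead. A C11 sketch of the same idiom (names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int ctx_switch_token = 1; /* armed once per context */

/* Returns true for exactly one caller; everyone else sees false. */
static bool take_ctx_switch_token(void)
{
        int expected = 1;

        return atomic_compare_exchange_strong(&ctx_switch_token,
                                              &expected, 0);
}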
@@ -819,60 +1180,43 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
         hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
         q_type = hw_queue_prop->type;
 
-        if ((q_idx >= hdev->asic_prop.max_queues) ||
-                        (!hw_queue_prop->supports_sync_stream)) {
-                dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx);
+        if (!hw_queue_prop->supports_sync_stream) {
+                dev_err(hdev->dev,
+                        "Queue index %d does not support sync stream operations\n",
+                        q_idx);
                 rc = -EINVAL;
                 goto free_cs_chunk_array;
         }
 
-        if (cs_type == CS_TYPE_WAIT) {
-                struct hl_cs_compl *sig_waitcs_cmpl;
-
-                signal_seq_arr_len = chunk->num_signal_seq_arr;
-
-                /* currently only one signal seq is supported */
-                if (signal_seq_arr_len != 1) {
+        if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
+                if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
                         dev_err(hdev->dev,
-                                "Wait for signal CS supports only one signal CS seq\n");
+                                "Queue index %d is invalid\n", q_idx);
                         rc = -EINVAL;
                         goto free_cs_chunk_array;
                 }
 
-                signal_seq_arr = kmalloc_array(signal_seq_arr_len,
-                                                sizeof(*signal_seq_arr),
-                                                GFP_ATOMIC);
-                if (!signal_seq_arr) {
-                        rc = -ENOMEM;
-                        goto free_cs_chunk_array;
-                }
+                collective_engine_id = chunk->collective_engine_id;
+        }
 
-                size_to_copy = chunk->num_signal_seq_arr *
-                                sizeof(*signal_seq_arr);
-                if (copy_from_user(signal_seq_arr,
-                                        u64_to_user_ptr(chunk->signal_seq_arr),
-                                        size_to_copy)) {
-                        dev_err(hdev->dev,
-                                "Failed to copy signal seq array from user\n");
-                        rc = -EFAULT;
-                        goto free_signal_seq_array;
-                }
+        if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
+                rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq);
+                if (rc)
+                        goto free_cs_chunk_array;
 
-                /* currently it is guaranteed to have only one signal seq */
-                signal_seq = signal_seq_arr[0];
                 sig_fence = hl_ctx_get_fence(ctx, signal_seq);
                 if (IS_ERR(sig_fence)) {
                         dev_err(hdev->dev,
                                 "Failed to get signal CS with seq 0x%llx\n",
                                 signal_seq);
                         rc = PTR_ERR(sig_fence);
-                        goto free_signal_seq_array;
+                        goto free_cs_chunk_array;
                 }
 
                 if (!sig_fence) {
                         /* signal CS already finished */
                         rc = 0;
-                        goto free_signal_seq_array;
+                        goto free_cs_chunk_array;
                 }
 
                 sig_waitcs_cmpl =
@@ -884,14 +1228,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
                                 signal_seq);
                         hl_fence_put(sig_fence);
                         rc = -EINVAL;
-                        goto free_signal_seq_array;
+                        goto free_cs_chunk_array;
                 }
 
                 if (completion_done(&sig_fence->completion)) {
                         /* signal CS already finished */
                         hl_fence_put(sig_fence);
                         rc = 0;
-                        goto free_signal_seq_array;
+                        goto free_cs_chunk_array;
                 }
         }
 
@@ -900,70 +1244,37 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
         rc = allocate_cs(hdev, ctx, cs_type, &cs);
         if (rc) {
-                if (cs_type == CS_TYPE_WAIT)
+                if (cs_type == CS_TYPE_WAIT ||
+                                cs_type == CS_TYPE_COLLECTIVE_WAIT)
                         hl_fence_put(sig_fence);
                 hl_ctx_put(ctx);
-                goto free_signal_seq_array;
+                goto free_cs_chunk_array;
         }
 
+        cs->timestamp = !!timestamp;
+
         /*
          * Save the signal CS fence for later initialization right before
          * hanging the wait CS on the queue.
          */
-        if (cs->type == CS_TYPE_WAIT)
+        if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT)
                 cs->signal_fence = sig_fence;
 
         hl_debugfs_add_cs(cs);
 
         *cs_seq = cs->sequence;
 
-        job = hl_cs_allocate_job(hdev, q_type, true);
-        if (!job) {
-                ctx->cs_counters.out_of_mem_drop_cnt++;
-                dev_err(hdev->dev, "Failed to allocate a new job\n");
-                rc = -ENOMEM;
-                goto put_cs;
-        }
-
-        if (cs->type == CS_TYPE_WAIT)
-                cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
+        if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
+                rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
+                                q_idx);
+        else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
+                rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
+                                cs, q_idx, collective_engine_id);
         else
-                cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
-
-        cb = hl_cb_kernel_create(hdev, cb_size,
-                        q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
-        if (!cb) {
-                ctx->cs_counters.out_of_mem_drop_cnt++;
-                kfree(job);
-                rc = -EFAULT;
-                goto put_cs;
-        }
-
-        job->id = 0;
-        job->cs = cs;
-        job->user_cb = cb;
-        job->user_cb->cs_cnt++;
-        job->user_cb_size = cb_size;
-        job->hw_queue_id = q_idx;
-
-        /*
-         * No need in parsing, user CB is the patched CB.
-         * We call hl_cb_destroy() out of two reasons - we don't need the CB in
-         * the CB idr anymore and to decrement its refcount as it was
-         * incremented inside hl_cb_kernel_create().
-         */
-        job->patched_cb = job->user_cb;
-        job->job_cb_size = job->user_cb_size;
-        hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
-
-        cs->jobs_in_queue_cnt[job->hw_queue_id]++;
-
-        list_add_tail(&job->cs_node, &cs->job_list);
-
-        /* increment refcount as for external queues we get completion */
-        cs_get(cs);
+                rc = -EINVAL;
 
-        hl_debugfs_add_job(hdev, job);
+        if (rc)
+                goto free_cs_object;
 
         rc = hl_hw_queue_schedule_cs(cs);
         if (rc) {
@@ -984,9 +1295,6 @@ free_cs_object:
 put_cs:
         /* We finished with the CS in this function, so put the ref */
         cs_put(cs);
-free_signal_seq_array:
-        if (cs_type == CS_TYPE_WAIT)
-                kfree(signal_seq_arr);
 free_cs_chunk_array:
         kfree(cs_chunk_array);
 out:
@@ -995,156 +1303,39 @@ out:
 
 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
 {
-        struct hl_device *hdev = hpriv->hdev;
         union hl_cs_args *args = data;
-        struct hl_ctx *ctx = hpriv->ctx;
-        void __user *chunks_execute, *chunks_restore;
         enum hl_cs_type cs_type;
-        u32 num_chunks_execute, num_chunks_restore, sig_wait_flags;
         u64 cs_seq = ULONG_MAX;
-        int rc, do_ctx_switch;
-        bool need_soft_reset = false;
-
-        if (hl_device_disabled_or_in_reset(hdev)) {
-                dev_warn_ratelimited(hdev->dev,
-                        "Device is %s. Can't submit new CS\n",
-                        atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
-                rc = -EBUSY;
-                goto out;
-        }
-
-        sig_wait_flags = args->in.cs_flags & HL_CS_FLAGS_SIG_WAIT;
+        void __user *chunks;
+        u32 num_chunks;
+        int rc;
 
-        if (unlikely(sig_wait_flags == HL_CS_FLAGS_SIG_WAIT)) {
-                dev_err(hdev->dev,
-                        "Signal and wait CS flags are mutually exclusive, context %d\n",
-                        ctx->asid);
-                rc = -EINVAL;
+        rc = hl_cs_sanity_checks(hpriv, args);
+        if (rc)
                 goto out;
-        }
 
-        if (unlikely((sig_wait_flags & HL_CS_FLAGS_SIG_WAIT) &&
-                        (!hdev->supports_sync_stream))) {
-                dev_err(hdev->dev, "Sync stream CS is not supported\n");
-                rc = -EINVAL;
+        rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
+        if (rc)
                 goto out;
-        }
 
-        if (args->in.cs_flags & HL_CS_FLAGS_SIGNAL)
-                cs_type = CS_TYPE_SIGNAL;
-        else if (args->in.cs_flags & HL_CS_FLAGS_WAIT)
-                cs_type = CS_TYPE_WAIT;
-        else
-                cs_type = CS_TYPE_DEFAULT;
-
-        chunks_execute = (void __user *) (uintptr_t) args->in.chunks_execute;
-        num_chunks_execute = args->in.num_chunks_execute;
-
-        if (cs_type == CS_TYPE_DEFAULT) {
-                if (!num_chunks_execute) {
-                        dev_err(hdev->dev,
-                                "Got execute CS with 0 chunks, context %d\n",
-                                ctx->asid);
-                        rc = -EINVAL;
-                        goto out;
-                }
-        } else if (num_chunks_execute != 1) {
-                dev_err(hdev->dev,
-                        "Sync stream CS mandates one chunk only, context %d\n",
-                        ctx->asid);
-                rc = -EINVAL;
-                goto out;
+        cs_type = hl_cs_get_cs_type(args->in.cs_flags &
+                                        ~HL_CS_FLAGS_FORCE_RESTORE);
+        chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
+        num_chunks = args->in.num_chunks_execute;
+
+        switch (cs_type) {
+        case CS_TYPE_SIGNAL:
+        case CS_TYPE_WAIT:
+        case CS_TYPE_COLLECTIVE_WAIT:
+                rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
+                        &cs_seq, args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP);
+                break;
+        default:
+                rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
+                                args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP);
+                break;
         }
 
-        do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
-
-        if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
-                long ret;
-
-                chunks_restore =
-                        (void __user *) (uintptr_t) args->in.chunks_restore;
-                num_chunks_restore = args->in.num_chunks_restore;
-
-                mutex_lock(&hpriv->restore_phase_mutex);
-
-                if (do_ctx_switch) {
-                        rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
-                        if (rc) {
-                                dev_err_ratelimited(hdev->dev,
-                                        "Failed to switch to context %d, rejecting CS! %d\n",
-                                        ctx->asid, rc);
-                                /*
-                                 * If we timedout, or if the device is not IDLE
-                                 * while we want to do context-switch (-EBUSY),
-                                 * we need to soft-reset because QMAN is
-                                 * probably stuck. However, we can't call to
-                                 * reset here directly because of deadlock, so
-                                 * need to do it at the very end of this
-                                 * function
-                                 */
-                                if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
-                                        need_soft_reset = true;
-                                mutex_unlock(&hpriv->restore_phase_mutex);
-                                goto out;
-                        }
-                }
-
-                hdev->asic_funcs->restore_phase_topology(hdev);
-
-                if (!num_chunks_restore) {
-                        dev_dbg(hdev->dev,
-                                "Need to run restore phase but restore CS is empty\n");
-                        rc = 0;
-                } else {
-                        rc = cs_ioctl_default(hpriv, chunks_restore,
-                                                num_chunks_restore, &cs_seq);
-                }
-
-                mutex_unlock(&hpriv->restore_phase_mutex);
-
-                if (rc) {
-                        dev_err(hdev->dev,
-                                "Failed to submit restore CS for context %d (%d)\n",
-                                ctx->asid, rc);
-                        goto out;
-                }
-
-                /* Need to wait for restore completion before execution phase */
-                if (num_chunks_restore) {
-                        ret = _hl_cs_wait_ioctl(hdev, ctx,
-                                        jiffies_to_usecs(hdev->timeout_jiffies),
-                                        cs_seq);
-                        if (ret <= 0) {
-                                dev_err(hdev->dev,
-                                        "Restore CS for context %d failed to complete %ld\n",
-                                        ctx->asid, ret);
-                                rc = -ENOEXEC;
-                                goto out;
-                        }
-                }
-
-                ctx->thread_ctx_switch_wait_token = 1;
-        } else if (!ctx->thread_ctx_switch_wait_token) {
-                u32 tmp;
-
-                rc = hl_poll_timeout_memory(hdev,
-                        &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
-                        100, jiffies_to_usecs(hdev->timeout_jiffies), false);
-
-                if (rc == -ETIMEDOUT) {
-                        dev_err(hdev->dev,
-                                "context switch phase timeout (%d)\n", tmp);
-                        goto out;
-                }
-        }
-
-        if (cs_type == CS_TYPE_DEFAULT)
-                rc = cs_ioctl_default(hpriv, chunks_execute, num_chunks_execute,
-                                        &cs_seq);
-        else
-                rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks_execute,
-                                        num_chunks_execute, &cs_seq);
-
 out:
         if (rc != -EAGAIN) {
                 memset(args, 0, sizeof(*args));
@@ -1152,18 +1343,20 @@ out:
                 args->out.seq = cs_seq;
         }
 
-        if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset))
-                hl_device_reset(hdev, false, false);
-
         return rc;
 }
 
-static long _hl_cs_wait_ioctl(struct hl_device *hdev,
-                struct hl_ctx *ctx, u64 timeout_us, u64 seq)
+static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
+                                u64 timeout_us, u64 seq,
+                                enum hl_cs_wait_status *status, s64 *timestamp)
 {
         struct hl_fence *fence;
         unsigned long timeout;
-        long rc;
+        int rc = 0;
+        long completion_rc;
+
+        if (timestamp)
+                *timestamp = 0;
 
         if (timeout_us == MAX_SCHEDULE_TIMEOUT)
                 timeout = timeout_us;
@@ -1181,11 +1374,20 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
                         seq, ctx->cs_sequence);
         } else if (fence) {
                 if (!timeout_us)
-                        rc = completion_done(&fence->completion);
+                        completion_rc = completion_done(&fence->completion);
                 else
-                        rc = wait_for_completion_interruptible_timeout(
+                        completion_rc =
+                                wait_for_completion_interruptible_timeout(
                                         &fence->completion, timeout);
 
+                if (completion_rc > 0) {
+                        *status = CS_WAIT_STATUS_COMPLETED;
+                        if (timestamp)
+                                *timestamp = ktime_to_ns(fence->timestamp);
+                } else {
+                        *status = CS_WAIT_STATUS_BUSY;
+                }
+
                 if (fence->error == -ETIMEDOUT)
                         rc = -ETIMEDOUT;
                 else if (fence->error == -EIO)
@@ -1196,7 +1398,7 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
                 dev_dbg(hdev->dev,
                         "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
                         seq, ctx->cs_sequence);
-                rc = 1;
+                *status = CS_WAIT_STATUS_GONE;
         }
 
         hl_ctx_put(ctx);
@@ -1208,14 +1410,17 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
 {
         struct hl_device *hdev = hpriv->hdev;
         union hl_wait_cs_args *args = data;
+        enum hl_cs_wait_status status;
         u64 seq = args->in.seq;
-        long rc;
+        s64 timestamp;
+        int rc;
 
-        rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq);
+        rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
+                                &status, &timestamp);
 
         memset(args, 0, sizeof(*args));
 
-        if (rc < 0) {
+        if (rc) {
                 if (rc == -ERESTARTSYS) {
                         dev_err_ratelimited(hdev->dev,
                                 "user process got signal while waiting for CS handle %llu\n",
@@ -1236,10 +1441,23 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
                 return rc;
         }
 
-        if (rc == 0)
-                args->out.status = HL_WAIT_CS_STATUS_BUSY;
-        else
+        if (timestamp) {
+                args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
+                args->out.timestamp_nsec = timestamp;
+        }
+
+        switch (status) {
+        case CS_WAIT_STATUS_GONE:
+                args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
+                fallthrough;
+        case CS_WAIT_STATUS_COMPLETED:
                 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
+                break;
+        case CS_WAIT_STATUS_BUSY:
+        default:
+                args->out.status = HL_WAIT_CS_STATUS_BUSY;
+                break;
+        }
 
         return 0;
 }
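For completeness, a hedged userspace-side sketch of consuming the reworked wait ioctl. The field and flag names follow the uapi identifiers used in the hunks above; the <misc/habanalabs.h> include path and the HL_IOCTL_WAIT_CS request are assumed from the driver's uapi header:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h> /* assumed uapi header location */

static int wait_cs(int fd, unsigned long long seq,
                   unsigned long long timeout_us)
{
        union hl_wait_cs_args args;

        memset(&args, 0, sizeof(args));
        args.in.seq = seq;
        args.in.timeout_us = timeout_us;

        if (ioctl(fd, HL_IOCTL_WAIT_CS, &args))
                return -1;

        if (args.out.status == HL_WAIT_CS_STATUS_BUSY) {
                printf("CS %llu still running\n", seq);
        } else {
                /* timestamp is only valid when the driver set the flag */
                if (args.out.flags & HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD)
                        printf("CS %llu done at %lld ns\n", seq,
                               (long long)args.out.timestamp_nsec);
        }
        return 0;
}

Note the tri-state result: BUSY maps from CS_WAIT_STATUS_BUSY, while both COMPLETED and GONE report HL_WAIT_CS_STATUS_COMPLETED, with GONE additionally flagged via HL_WAIT_CS_STATUS_FLAG_GONE so callers can tell the fence had already been recycled.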