author      Dave Airlie <airlied@redhat.com>    2024-10-25 16:55:35 +1000
committer   Dave Airlie <airlied@redhat.com>    2024-10-25 16:55:39 +1000
commit      4d95a12beba146b0ca2de59c7ce905bc0aadfd0c (patch)
tree        ff397b94ff6d9ab55fb61b7fb925a146e4debee2
parent      e3e1cfe33f932860e070eafec8df8780e3b889a8 (diff)
parent      cdc21021f0351226a4845715564afd5dc50ed44b (diff)
Merge tag 'drm-xe-fixes-2024-10-24-1' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes
Driver Changes:
- Increase invalidation timeout to avoid errors in some hosts (Shuicheng)
- Flush worker on timeout (Badal)
- Better handling for force wake failure (Shuicheng)
- Improve argument check on user fence creation (Nirmoy)
- Don't restart parallel queues multiple times on GT reset (Nirmoy)
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/trlkoiewtc4x2cyhsxmj3atayyq4zwto4iryea5pvya2ymc3yp@fdx5nhwmiyem
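
The "flush worker on timeout" fix is the least obvious of the five, so here is a hedged sketch of the underlying pattern before the diffs; all names (struct resp_ctx, wait_for_resp) are hypothetical stand-ins, not xe driver API. The idea: when a wait times out, the completion worker may merely have been scheduled late, so flush it and re-check the condition before treating the timeout as fatal.

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

struct resp_ctx {
	wait_queue_head_t wq;
	struct work_struct worker;	/* queued by the IRQ/event path */
	bool done;			/* set by the worker, read here */
};

static int wait_for_resp(struct resp_ctx *ctx)
{
	long ret = wait_event_timeout(ctx->wq, ctx->done, HZ);

	if (!ret) {
		/*
		 * The worker may be queued but not yet run because of CPU
		 * scheduling latency. flush_work() waits for any pending
		 * instance to finish, so re-checking the condition afterwards
		 * separates "late" from "lost".
		 */
		flush_work(&ctx->worker);
		if (ctx->done)
			return 0;	/* completed late */
		return -ETIMEDOUT;	/* genuinely timed out */
	}

	return 0;
}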
-rw-r--r--   drivers/gpu/drm/xe/xe_device.c     |  2
-rw-r--r--   drivers/gpu/drm/xe/xe_force_wake.c | 12
-rw-r--r--   drivers/gpu/drm/xe/xe_guc_ct.c     | 18
-rw-r--r--   drivers/gpu/drm/xe/xe_guc_submit.c | 14
-rw-r--r--   drivers/gpu/drm/xe/xe_sync.c       |  3
5 files changed, 42 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 0a9ffc19e92f..10fd4601b9f2 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -890,7 +890,7 @@ void xe_device_l2_flush(struct xe_device *xe)
 	spin_lock(&gt->global_invl_lock);
 	xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
 
-	if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
+	if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
 		xe_gt_err_once(gt, "Global invalidation timeout\n");
 	spin_unlock(&gt->global_invl_lock);
 
diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
index b263fff15273..7d9fc489dcb8 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.c
+++ b/drivers/gpu/drm/xe/xe_force_wake.c
@@ -115,9 +115,15 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain,
 			     XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
 			     &value, true);
 	if (ret)
-		xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
-			     domain->id, str_wake_sleep(wake), ERR_PTR(ret),
-			     domain->reg_ack.addr, value);
+		xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
+			  domain->id, str_wake_sleep(wake), ERR_PTR(ret),
+			  domain->reg_ack.addr, value);
+	if (value == ~0) {
+		xe_gt_err(gt,
+			  "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
+			  domain->id, str_wake_sleep(wake));
+		ret = -EIO;
+	}
 
 	return ret;
 }
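
The force-wake hunk above keys off a property of PCI: reads from a device that has dropped off the bus (or lost power) complete as all-ones, so a register that can never legitimately hold ~0 doubles as a liveness probe. A minimal sketch of that check follows; mmio_alive is a hypothetical helper, not xe code.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical: 0 if the BAR still responds, -EIO if reads float high */
static int mmio_alive(void __iomem *reg)
{
	u32 value = readl(reg);

	/* all-ones means the MMIO space is gone, not merely slow to ack */
	return value == ~0 ? -EIO : 0;
}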
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index d16eb9ab49fb..17986bfd8818 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -898,6 +898,24 @@ retry_same_fence:
 	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
 
 	/*
+	 * Occasionally the G2H worker is seen to start running after a delay of more
+	 * than a second, even after being queued and activated by the Linux workqueue
+	 * subsystem. This leads to a G2H timeout error. The root cause lies in the
+	 * scheduling latency of the Lunar Lake hybrid CPU: the issue disappears if the
+	 * Lunar Lake atom cores are disabled in the BIOS, which is beyond the xe KMD.
+	 *
+	 * TODO: Drop this change once the workqueue scheduling delay is fixed on the LNL hybrid CPU.
+	 */
+	if (!ret) {
+		flush_work(&ct->g2h_worker);
+		if (g2h_fence.done) {
+			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
+				   g2h_fence.seqno, action[0]);
+			ret = 1;
+		}
+	}
+
+	/*
 	 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
 	 * the stack, since we have no clue if it will fire after the timeout before we can erase
 	 * from the xa. Also we have some dependent loads and stores below for which we need the
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 8a9254e5af6e..d333be9c4227 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1726,8 +1726,13 @@ void xe_guc_submit_stop(struct xe_guc *guc)
 
 	mutex_lock(&guc->submission_state.lock);
 
-	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+		/* Prevent redundant attempts to stop parallel queues */
+		if (q->guc->id != index)
+			continue;
+
 		guc_exec_queue_stop(guc, q);
+	}
 
 	mutex_unlock(&guc->submission_state.lock);
@@ -1765,8 +1770,13 @@ int xe_guc_submit_start(struct xe_guc *guc)
 
 	mutex_lock(&guc->submission_state.lock);
 	atomic_dec(&guc->submission_state.stopped);
-	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+		/* Prevent redundant attempts to start parallel queues */
+		if (q->guc->id != index)
+			continue;
+
 		guc_exec_queue_start(q);
+	}
 	mutex_unlock(&guc->submission_state.lock);
 
 	wake_up_all(&guc->ct.wq);
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index c6cf227ead40..2e72c06fd40d 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -54,8 +54,9 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
 {
 	struct xe_user_fence *ufence;
 	u64 __user *ptr = u64_to_user_ptr(addr);
+	u64 __maybe_unused prefetch_val;
 
-	if (!access_ok(ptr, sizeof(*ptr)))
+	if (get_user(prefetch_val, ptr))
 		return ERR_PTR(-EFAULT);
 
 	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
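
On the xe_guc_submit.c hunks: as the fix implies, a parallel exec queue occupies several consecutive slots in the lookup xarray, all pointing at the same structure, so a plain xa_for_each() would visit it once per slot. Comparing the object's primary id with the iteration index keeps one visit per object. A self-contained illustration of the idiom, with hypothetical names (struct pqueue, stop_each_once):

#include <linux/types.h>
#include <linux/xarray.h>

struct pqueue {
	u32 id;		/* primary id == first xarray index it is stored at */
	u32 width;	/* number of consecutive slots it occupies */
};

static void stop_each_once(struct xarray *lookup)
{
	struct pqueue *pq;
	unsigned long index;

	xa_for_each(lookup, index, pq) {
		if (pq->id != index)
			continue;	/* alias slot; already visited at pq->id */

		/* ... stop pq exactly once ... */
	}
}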
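
And on the xe_sync.c hunk: access_ok() only verifies the range lies within the user address space, while get_user() actually performs the read, faulting the page in and rejecting addresses that are in range but unmapped. A sketch of the stricter check; validate_fence_addr is a hypothetical wrapper around the same call the diff uses.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static int validate_fence_addr(u64 addr)
{
	u64 __user *ptr = u64_to_user_ptr(addr);
	u64 __maybe_unused val;

	/* get_user() range-checks *and* dereferences, unlike access_ok() */
	if (get_user(val, ptr))
		return -EFAULT;	/* bad pointer, or in range but unmapped */

	return 0;
}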