From 2ec5b3dc18bab1108f49262e59fc22bb5939fe0b Mon Sep 17 00:00:00 2001
From: Abhinav Kumar <abhinavk@codeaurora.org>
Date: Fri, 16 Apr 2021 13:57:25 -0700
Subject: drm/msm: add disp snapshot points across dpu driver

Add snapshot points across dpu driver to trigger dumps when critical
errors are hit.

changes in v5:
 - change the callers to use the snapshot function directly

Signed-off-by: Abhinav Kumar <abhinavk@codeaurora.org>
Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Link: https://lore.kernel.org/r/1618606645-19695-8-git-send-email-abhinavk@codeaurora.org
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

(limited to 'drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c')

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 8d942052db8a..f1642def3ff1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  */
@@ -26,6 +26,7 @@
 #include "dpu_crtc.h"
 #include "dpu_trace.h"
 #include "dpu_core_irq.h"
+#include "disp/msm_disp_snapshot.h"
 
 #define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
@@ -1336,6 +1337,11 @@ static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
 
 	DPU_ATRACE_BEGIN("encoder_underrun_callback");
 	atomic_inc(&phy_enc->underrun_cnt);
+
+	/* trigger dump only on the first underrun */
+	if (atomic_read(&phy_enc->underrun_cnt) == 1)
+		msm_disp_snapshot_state(drm_enc->dev);
+
 	trace_dpu_enc_underrun_cb(DRMID(drm_enc),
 				  atomic_read(&phy_enc->underrun_cnt));
 	DPU_ATRACE_END("encoder_underrun_callback");
@@ -1565,19 +1571,23 @@ static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
 	struct dpu_encoder_virt *dpu_enc;
 	struct dpu_hw_ctl *ctl;
 	int rc;
+	struct drm_encoder *drm_enc;
 
 	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
 	ctl = phys_enc->hw_ctl;
+	drm_enc = phys_enc->parent;
 
 	if (!ctl->ops.reset)
 		return;
 
-	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
+	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
 		      ctl->idx);
 
 	rc = ctl->ops.reset(ctl);
-	if (rc)
+	if (rc) {
 		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n",  ctl->idx);
+		msm_disp_snapshot_state(drm_enc->dev);
+	}
 
 	phys_enc->enable_state = DPU_ENC_ENABLED;
 }
-- 
cgit v1.2.3-70-g09d2
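
The underrun hook above follows an "act only on the first event" idiom: bump a
per-encoder atomic counter and take the expensive action (here, capturing a
display snapshot) only when the counter reaches 1. Below is a minimal userspace
sketch of that idiom; the names underrun_count, take_snapshot() and
handle_underrun() are purely illustrative stand-ins rather than driver code,
and atomic_fetch_add() is used so the increment and the first-event test happen
in a single atomic step.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int underrun_count;           /* one counter per encoder in the real driver */

    static void take_snapshot(void)             /* stand-in for msm_disp_snapshot_state() */
    {
            puts("snapshot captured");
    }

    static void handle_underrun(void)
    {
            /* fetch_add returns the old value, so only the very first underrun dumps state */
            if (atomic_fetch_add(&underrun_count, 1) == 0)
                    take_snapshot();
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    handle_underrun();          /* prints "snapshot captured" exactly once */
            return 0;
    }

Note that the patch itself increments and then reads the counter in two
separate steps; combining them as above is simply the tighter form of the same
idiom.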


From 667e9985ee24caec46799eb481fcb3b227d8a503 Mon Sep 17 00:00:00 2001
From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Date: Sun, 16 May 2021 23:29:09 +0300
Subject: drm/msm/dpu: replace IRQ lookup with the data in hw catalog

The IRQ table in dpu_hw_interrupts.h is big, ugly, and hard to maintain,
and only a few interrupts from it are actually used. Newer generations
also use different IRQ locations. Move this data to the hw catalog.

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Reviewed-by: Abhinav Kumar <abhinavk@codeaurora.org>
Link: https://lore.kernel.org/r/20210516202910.2141079-5-dmitry.baryshkov@linaro.org
[fixup tracepoint compile warns/err]
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c       |  20 +--
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h       |  13 --
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c        |  64 ++++-----
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h   |   2 -
 .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c   |  36 ++---
 .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c   |  31 ++---
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c     | 150 +++++++++++++--------
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h     |  12 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c  | 137 ++++++++-----------
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h  |  17 +--
 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h          |  40 +++---
 11 files changed, 234 insertions(+), 288 deletions(-)

(limited to 'drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c')

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index fd11a2aeab6c..11c0abed21ee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -43,16 +43,6 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
 	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
 }
 
-int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
-		enum dpu_intr_type intr_type, u32 instance_idx)
-{
-	if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.irq_idx_lookup)
-		return -EINVAL;
-
-	return dpu_kms->hw_intr->ops.irq_idx_lookup(dpu_kms->hw_intr,
-			intr_type, instance_idx);
-}
-
 /**
  * _dpu_core_irq_enable - enable core interrupt given by the index
  * @dpu_kms:		Pointer to dpu kms context
@@ -70,7 +60,7 @@ static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
 		return -EINVAL;
 	}
 
-	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
 		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
@@ -133,7 +123,7 @@ static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
 		return -EINVAL;
 	}
 
-	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
 		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
@@ -208,7 +198,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
 		return -EINVAL;
 	}
 
-	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
 		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
@@ -243,7 +233,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
 		return -EINVAL;
 	}
 
-	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
 		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
@@ -328,7 +318,7 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
 	spin_lock_init(&dpu_kms->irq_obj.cb_lock);
 
 	/* Create irq callbacks for all possible irq_idx */
-	dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
+	dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->total_irqs;
 	dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
 			sizeof(struct list_head), GFP_KERNEL);
 	dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
index e30775e6585b..d147784d5531 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -29,19 +29,6 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
  */
 irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
 
-/**
- * dpu_core_irq_idx_lookup - IRQ helper function for lookup irq_idx from HW
- *                      interrupt mapping table.
- * @dpu_kms:		DPU handle
- * @intr_type:		DPU HW interrupt type for lookup
- * @instance_idx:	DPU HW block instance defined in dpu_hw_mdss.h
- * @return:		irq_idx or -EINVAL when fail to lookup
- */
-int dpu_core_irq_idx_lookup(
-		struct dpu_kms *dpu_kms,
-		enum dpu_intr_type intr_type,
-		uint32_t instance_idx);
-
 /**
  * dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
  * @dpu_kms:		DPU handle
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index f1642def3ff1..f36603ffe913 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -254,7 +254,7 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
 }
 
 static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
-		int32_t hw_id, struct dpu_encoder_wait_info *info);
+		u32 irq_idx, struct dpu_encoder_wait_info *info);
 
 int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
 		enum dpu_intr_idx intr_idx,
@@ -274,27 +274,27 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
 
 	/* return EWOULDBLOCK since we know the wait isn't necessary */
 	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
-		DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
-			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+		DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d",
+			  DRMID(phys_enc->parent), intr_idx,
 			  irq->irq_idx);
 		return -EWOULDBLOCK;
 	}
 
 	if (irq->irq_idx < 0) {
-		DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
-			      DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+		DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s",
+			      DRMID(phys_enc->parent), intr_idx,
 			      irq->name);
 		return 0;
 	}
 
-	DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
-		      DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+	DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d",
+		      DRMID(phys_enc->parent), intr_idx,
 		      irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
 		      atomic_read(wait_info->atomic_cnt));
 
 	ret = dpu_encoder_helper_wait_event_timeout(
 			DRMID(phys_enc->parent),
-			irq->hw_idx,
+			irq->irq_idx,
 			wait_info);
 
 	if (ret <= 0) {
@@ -304,9 +304,9 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
 			unsigned long flags;
 
 			DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
-				      "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+				      "irq=%d, pp=%d, atomic_cnt=%d",
 				      DRMID(phys_enc->parent), intr_idx,
-				      irq->hw_idx, irq->irq_idx,
+				      irq->irq_idx,
 				      phys_enc->hw_pp->idx - PINGPONG_0,
 				      atomic_read(wait_info->atomic_cnt));
 			local_irq_save(flags);
@@ -316,16 +316,16 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
 		} else {
 			ret = -ETIMEDOUT;
 			DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
-				      "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+				      "irq=%d, pp=%d, atomic_cnt=%d",
 				      DRMID(phys_enc->parent), intr_idx,
-				      irq->hw_idx, irq->irq_idx,
+				      irq->irq_idx,
 				      phys_enc->hw_pp->idx - PINGPONG_0,
 				      atomic_read(wait_info->atomic_cnt));
 		}
 	} else {
 		ret = 0;
 		trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
-			intr_idx, irq->hw_idx, irq->irq_idx,
+			intr_idx, irq->irq_idx,
 			phys_enc->hw_pp->idx - PINGPONG_0,
 			atomic_read(wait_info->atomic_cnt));
 	}
@@ -345,19 +345,9 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
 	}
 	irq = &phys_enc->irq[intr_idx];
 
-	if (irq->irq_idx >= 0) {
-		DPU_DEBUG_PHYS(phys_enc,
-				"skipping already registered irq %s type %d\n",
-				irq->name, irq->intr_type);
-		return 0;
-	}
-
-	irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms,
-			irq->intr_type, irq->hw_idx);
 	if (irq->irq_idx < 0) {
 		DPU_ERROR_PHYS(phys_enc,
-			"failed to lookup IRQ index for %s type:%d\n",
-			irq->name, irq->intr_type);
+			"invalid IRQ index:%d\n", irq->irq_idx);
 		return -EINVAL;
 	}
 
@@ -373,8 +363,8 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
 
 	ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
 	if (ret) {
-		DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
-			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+		DRM_ERROR("enable failed id=%u, intr=%d, irq=%d",
+			  DRMID(phys_enc->parent), intr_idx,
 			  irq->irq_idx);
 		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
 				irq->irq_idx, &irq->cb);
@@ -383,7 +373,7 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
 	}
 
 	trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
-				irq->hw_idx, irq->irq_idx);
+				irq->irq_idx);
 
 	return ret;
 }
@@ -398,31 +388,29 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
 
 	/* silently skip irqs that weren't registered */
 	if (irq->irq_idx < 0) {
-		DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
-			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+		DRM_ERROR("duplicate unregister id=%u, intr=%d, irq=%d",
+			  DRMID(phys_enc->parent), intr_idx,
 			  irq->irq_idx);
 		return 0;
 	}
 
 	ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
 	if (ret) {
-		DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
-			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+		DRM_ERROR("disable failed id=%u, intr=%d, irq=%d ret=%d",
+			  DRMID(phys_enc->parent), intr_idx,
 			  irq->irq_idx, ret);
 	}
 
 	ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
 			&irq->cb);
 	if (ret) {
-		DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
-			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+		DRM_ERROR("unreg cb fail id=%u, intr=%d, irq=%d ret=%d",
+			  DRMID(phys_enc->parent), intr_idx,
 			  irq->irq_idx, ret);
 	}
 
 	trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
-					     irq->hw_idx, irq->irq_idx);
-
-	irq->irq_idx = -EINVAL;
+					     irq->irq_idx);
 
 	return 0;
 }
@@ -1543,7 +1531,7 @@ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
 
 static int dpu_encoder_helper_wait_event_timeout(
 		int32_t drm_id,
-		int32_t hw_id,
+		u32 irq_idx,
 		struct dpu_encoder_wait_info *info)
 {
 	int rc = 0;
@@ -1556,7 +1544,7 @@ static int dpu_encoder_helper_wait_event_timeout(
 				atomic_read(info->atomic_cnt) == 0, jiffies);
 		time = ktime_to_ms(ktime_get());
 
-		trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
+		trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
 						 expected_time,
 						 atomic_read(info->atomic_cnt));
 	/* If we timed out, counter is valid and time is less, wait again */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index ecbc4be98980..3bd12ce45a80 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -167,7 +167,6 @@ enum dpu_intr_idx {
  * @name:		string name of interrupt
  * @intr_type:		Encoder interrupt type
  * @intr_idx:		Encoder interrupt enumeration
- * @hw_idx:		HW Block ID
  * @irq_idx:		IRQ interface lookup index from DPU IRQ framework
  *			will be -EINVAL if IRQ is not registered
  * @irq_cb:		interrupt callback
@@ -176,7 +175,6 @@ struct dpu_encoder_irq {
 	const char *name;
 	enum dpu_intr_type intr_type;
 	enum dpu_intr_idx intr_idx;
-	int hw_idx;
 	int irq_idx;
 	struct dpu_irq_callback cb;
 };
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 9999a73edc2c..7ce51782d112 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -144,28 +144,6 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
 			phys_enc);
 }
 
-static void _dpu_encoder_phys_cmd_setup_irq_hw_idx(
-		struct dpu_encoder_phys *phys_enc)
-{
-	struct dpu_encoder_irq *irq;
-
-	irq = &phys_enc->irq[INTR_IDX_CTL_START];
-	irq->hw_idx = phys_enc->hw_ctl->idx;
-	irq->irq_idx = -EINVAL;
-
-	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
-	irq->hw_idx = phys_enc->hw_pp->idx;
-	irq->irq_idx = -EINVAL;
-
-	irq = &phys_enc->irq[INTR_IDX_RDPTR];
-	irq->hw_idx = phys_enc->hw_pp->idx;
-	irq->irq_idx = -EINVAL;
-
-	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
-	irq->hw_idx = phys_enc->intf_idx;
-	irq->irq_idx = -EINVAL;
-}
-
 static void dpu_encoder_phys_cmd_mode_set(
 		struct dpu_encoder_phys *phys_enc,
 		struct drm_display_mode *mode,
@@ -173,6 +151,7 @@ static void dpu_encoder_phys_cmd_mode_set(
 {
 	struct dpu_encoder_phys_cmd *cmd_enc =
 		to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_encoder_irq *irq;
 
 	if (!mode || !adj_mode) {
 		DPU_ERROR("invalid args\n");
@@ -182,7 +161,17 @@ static void dpu_encoder_phys_cmd_mode_set(
 	DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
 	drm_mode_debug_printmodeline(adj_mode);
 
-	_dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
+	irq = &phys_enc->irq[INTR_IDX_CTL_START];
+	irq->irq_idx = phys_enc->hw_ctl->caps->intr_start;
+
+	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+	irq->irq_idx = phys_enc->hw_pp->caps->intr_done;
+
+	irq = &phys_enc->irq[INTR_IDX_RDPTR];
+	irq->irq_idx = phys_enc->hw_pp->caps->intr_rdptr;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->irq_idx = phys_enc->hw_intf->cap->intr_underrun;
 }
 
 static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
@@ -799,7 +788,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
 		irq = &phys_enc->irq[i];
 		INIT_LIST_HEAD(&irq->cb.list);
 		irq->irq_idx = -EINVAL;
-		irq->hw_idx = -EINVAL;
 		irq->cb.arg = phys_enc;
 	}
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index fed019fcbac5..3bec7761c6fd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -364,38 +364,24 @@ static bool dpu_encoder_phys_vid_needs_single_flush(
 	return phys_enc->split_role != ENC_ROLE_SOLO;
 }
 
-static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
-		struct dpu_encoder_phys *phys_enc)
-{
-	struct dpu_encoder_irq *irq;
-
-	/*
-	 * Initialize irq->hw_idx only when irq is not registered.
-	 * Prevent invalidating irq->irq_idx as modeset may be
-	 * called many times during dfps.
-	 */
-
-	irq = &phys_enc->irq[INTR_IDX_VSYNC];
-	if (irq->irq_idx < 0)
-		irq->hw_idx = phys_enc->intf_idx;
-
-	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
-	if (irq->irq_idx < 0)
-		irq->hw_idx = phys_enc->intf_idx;
-}
-
 static void dpu_encoder_phys_vid_mode_set(
 		struct dpu_encoder_phys *phys_enc,
 		struct drm_display_mode *mode,
 		struct drm_display_mode *adj_mode)
 {
+	struct dpu_encoder_irq *irq;
+
 	if (adj_mode) {
 		phys_enc->cached_mode = *adj_mode;
 		drm_mode_debug_printmodeline(adj_mode);
 		DPU_DEBUG_VIDENC(phys_enc, "caching mode:\n");
 	}
 
-	_dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
+	irq = &phys_enc->irq[INTR_IDX_VSYNC];
+	irq->irq_idx = phys_enc->hw_intf->cap->intr_vsync;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->irq_idx = phys_enc->hw_intf->cap->intr_underrun;
 }
 
 static int dpu_encoder_phys_vid_control_vblank_irq(
@@ -642,7 +628,7 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
 
 	if (enable) {
 		ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
-		if (ret)
+		if (WARN_ON(ret))
 			return;
 
 		dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
@@ -744,7 +730,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
 		irq = &phys_enc->irq[i];
 		INIT_LIST_HEAD(&irq->cb.list);
 		irq->irq_idx = -EINVAL;
-		irq->hw_idx = -EINVAL;
 		irq->cb.arg = phys_enc;
 	}
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 9a77d64d3fd4..f929131ed260 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -350,27 +350,32 @@ static const struct dpu_ctl_cfg sdm845_ctl[] = {
 	{
 	.name = "ctl_0", .id = CTL_0,
 	.base = 0x1000, .len = 0xE4,
-	.features = BIT(DPU_CTL_SPLIT_DISPLAY)
+	.features = BIT(DPU_CTL_SPLIT_DISPLAY),
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
 	},
 	{
 	.name = "ctl_1", .id = CTL_1,
 	.base = 0x1200, .len = 0xE4,
-	.features = BIT(DPU_CTL_SPLIT_DISPLAY)
+	.features = BIT(DPU_CTL_SPLIT_DISPLAY),
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
 	},
 	{
 	.name = "ctl_2", .id = CTL_2,
 	.base = 0x1400, .len = 0xE4,
-	.features = 0
+	.features = 0,
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
 	},
 	{
 	.name = "ctl_3", .id = CTL_3,
 	.base = 0x1600, .len = 0xE4,
-	.features = 0
+	.features = 0,
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
 	},
 	{
 	.name = "ctl_4", .id = CTL_4,
 	.base = 0x1800, .len = 0xE4,
-	.features = 0
+	.features = 0,
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
 	},
 };
 
@@ -378,17 +383,20 @@ static const struct dpu_ctl_cfg sc7180_ctl[] = {
 	{
 	.name = "ctl_0", .id = CTL_0,
 	.base = 0x1000, .len = 0xE4,
-	.features = BIT(DPU_CTL_ACTIVE_CFG)
+	.features = BIT(DPU_CTL_ACTIVE_CFG),
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
 	},
 	{
 	.name = "ctl_1", .id = CTL_1,
 	.base = 0x1200, .len = 0xE4,
-	.features = BIT(DPU_CTL_ACTIVE_CFG)
+	.features = BIT(DPU_CTL_ACTIVE_CFG),
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
 	},
 	{
 	.name = "ctl_2", .id = CTL_2,
 	.base = 0x1400, .len = 0xE4,
-	.features = BIT(DPU_CTL_ACTIVE_CFG)
+	.features = BIT(DPU_CTL_ACTIVE_CFG),
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
 	},
 };
 
@@ -396,32 +404,38 @@ static const struct dpu_ctl_cfg sm8150_ctl[] = {
 	{
 	.name = "ctl_0", .id = CTL_0,
 	.base = 0x1000, .len = 0x1e0,
-	.features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY)
+	.features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
 	},
 	{
 	.name = "ctl_1", .id = CTL_1,
 	.base = 0x1200, .len = 0x1e0,
-	.features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY)
+	.features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
 	},
 	{
 	.name = "ctl_2", .id = CTL_2,
 	.base = 0x1400, .len = 0x1e0,
-	.features = BIT(DPU_CTL_ACTIVE_CFG)
+	.features = BIT(DPU_CTL_ACTIVE_CFG),
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
 	},
 	{
 	.name = "ctl_3", .id = CTL_3,
 	.base = 0x1600, .len = 0x1e0,
-	.features = BIT(DPU_CTL_ACTIVE_CFG)
+	.features = BIT(DPU_CTL_ACTIVE_CFG),
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
 	},
 	{
 	.name = "ctl_4", .id = CTL_4,
 	.base = 0x1800, .len = 0x1e0,
-	.features = BIT(DPU_CTL_ACTIVE_CFG)
+	.features = BIT(DPU_CTL_ACTIVE_CFG),
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
 	},
 	{
 	.name = "ctl_5", .id = CTL_5,
 	.base = 0x1a00, .len = 0x1e0,
-	.features = BIT(DPU_CTL_ACTIVE_CFG)
+	.features = BIT(DPU_CTL_ACTIVE_CFG),
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
 	},
 };
 
@@ -429,22 +443,26 @@ static const struct dpu_ctl_cfg sc7280_ctl[] = {
 	{
 	.name = "ctl_0", .id = CTL_0,
 	.base = 0x15000, .len = 0x1E8,
-	.features = CTL_SC7280_MASK
+	.features = CTL_SC7280_MASK,
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
 	},
 	{
 	.name = "ctl_1", .id = CTL_1,
 	.base = 0x16000, .len = 0x1E8,
-	.features = CTL_SC7280_MASK
+	.features = CTL_SC7280_MASK,
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
 	},
 	{
 	.name = "ctl_2", .id = CTL_2,
 	.base = 0x17000, .len = 0x1E8,
-	.features = CTL_SC7280_MASK
+	.features = CTL_SC7280_MASK,
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
 	},
 	{
 	.name = "ctl_3", .id = CTL_3,
 	.base = 0x18000, .len = 0x1E8,
-	.features = CTL_SC7280_MASK
+	.features = CTL_SC7280_MASK,
+	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
 	},
 };
 
@@ -725,42 +743,66 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
 	.len = 0x20, .version = 0x20000},
 };
 
-#define PP_BLK_TE(_name, _id, _base, _merge_3d, _sblk) \
+#define PP_BLK_TE(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
 	{\
 	.name = _name, .id = _id, \
 	.base = _base, .len = 0xd4, \
 	.features = PINGPONG_SDM845_SPLIT_MASK, \
 	.merge_3d = _merge_3d, \
-	.sblk = &_sblk \
+	.sblk = &_sblk, \
+	.intr_done = _done, \
+	.intr_rdptr = _rdptr, \
 	}
-#define PP_BLK(_name, _id, _base, _merge_3d, _sblk) \
+#define PP_BLK(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
 	{\
 	.name = _name, .id = _id, \
 	.base = _base, .len = 0xd4, \
 	.features = PINGPONG_SDM845_MASK, \
 	.merge_3d = _merge_3d, \
-	.sblk = &_sblk \
+	.sblk = &_sblk, \
+	.intr_done = _done, \
+	.intr_rdptr = _rdptr, \
 	}
 
 static const struct dpu_pingpong_cfg sdm845_pp[] = {
-	PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te),
-	PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te),
-	PP_BLK("pingpong_2", PINGPONG_2, 0x71000, 0, sdm845_pp_sblk),
-	PP_BLK("pingpong_3", PINGPONG_3, 0x71800, 0, sdm845_pp_sblk),
+	PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te,
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
+	PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te,
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
+	PP_BLK("pingpong_2", PINGPONG_2, 0x71000, 0, sdm845_pp_sblk,
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
+	PP_BLK("pingpong_3", PINGPONG_3, 0x71800, 0, sdm845_pp_sblk,
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
 };
 
 static struct dpu_pingpong_cfg sc7180_pp[] = {
-	PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te),
-	PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te),
+	PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te, -1, -1),
+	PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te, -1, -1),
 };
 
 static const struct dpu_pingpong_cfg sm8150_pp[] = {
-	PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te),
-	PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te),
-	PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk),
-	PP_BLK("pingpong_3", PINGPONG_3, 0x71800, MERGE_3D_1, sdm845_pp_sblk),
-	PP_BLK("pingpong_4", PINGPONG_4, 0x72000, MERGE_3D_2, sdm845_pp_sblk),
-	PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk),
+	PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te,
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
+	PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te,
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
+	PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
+	PP_BLK("pingpong_3", PINGPONG_3, 0x71800, MERGE_3D_1, sdm845_pp_sblk,
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
+	PP_BLK("pingpong_4", PINGPONG_4, 0x72000, MERGE_3D_2, sdm845_pp_sblk,
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+			-1),
+	PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk,
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+			-1),
 };
 
 /*************************************************************
@@ -781,47 +823,49 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
 };
 
 static const struct dpu_pingpong_cfg sc7280_pp[] = {
-	PP_BLK("pingpong_0", PINGPONG_0, 0x59000, 0, sc7280_pp_sblk),
-	PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk),
-	PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk),
-	PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk),
+	PP_BLK("pingpong_0", PINGPONG_0, 0x59000, 0, sc7280_pp_sblk, -1, -1),
+	PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
+	PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
+	PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
 };
 /*************************************************************
  * INTF sub blocks config
  *************************************************************/
-#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _progfetch, _features) \
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _progfetch, _features, _reg, _underrun_bit, _vsync_bit) \
 	{\
 	.name = _name, .id = _id, \
 	.base = _base, .len = 0x280, \
 	.features = _features, \
 	.type = _type, \
 	.controller_id = _ctrl_id, \
-	.prog_fetch_lines_worst_case = _progfetch \
+	.prog_fetch_lines_worst_case = _progfetch, \
+	.intr_underrun = DPU_IRQ_IDX(_reg, _underrun_bit), \
+	.intr_vsync = DPU_IRQ_IDX(_reg, _vsync_bit), \
 	}
 
 static const struct dpu_intf_cfg sdm845_intf[] = {
-	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SDM845_MASK),
-	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SDM845_MASK),
-	INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SDM845_MASK),
-	INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SDM845_MASK),
+	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+	INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+	INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
 };
 
 static const struct dpu_intf_cfg sc7180_intf[] = {
-	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
-	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
+	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
 };
 
 static const struct dpu_intf_cfg sm8150_intf[] = {
-	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
-	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
-	INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK),
-	INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK),
+	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+	INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+	INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
 };
 
 static const struct dpu_intf_cfg sc7280_intf[] = {
-	INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, 0, 24, INTF_SC7280_MASK),
-	INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK),
-	INTF_BLK("intf_5", INTF_5, 0x39000, INTF_EDP, 0, 24, INTF_SC7280_MASK),
+	INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+	INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+	INTF_BLK("intf_5", INTF_5, 0x39000, INTF_EDP, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
 };
 
 /*************************************************************
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 0f9f0a520fc2..9024cf0c0bc4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -464,13 +464,15 @@ struct dpu_mdp_cfg {
 	struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
 };
 
-/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
+/* struct dpu_ctl_cfg : MDP CTL instance info
  * @id:                index identifying this block
  * @base:              register base offset to mdss
  * @features           bit mask identifying sub-blocks/features
+ * @intr_start:        interrupt index for CTL_START
  */
 struct dpu_ctl_cfg {
 	DPU_HW_BLK_INFO;
+	s32 intr_start;
 };
 
 /**
@@ -526,11 +528,15 @@ struct dpu_dspp_cfg  {
  * @id                 enum identifying this block
  * @base               register offset of this block
  * @features           bit mask identifying sub-blocks/features
+ * @intr_done:         index for PINGPONG done interrupt
+ * @intr_rdptr:        index for PINGPONG readpointer done interrupt
  * @sblk               sub-blocks information
  */
 struct dpu_pingpong_cfg  {
 	DPU_HW_BLK_INFO;
 	u32 merge_3d;
+	s32 intr_done;
+	s32 intr_rdptr;
 	const struct dpu_pingpong_sub_blks *sblk;
 };
 
@@ -555,12 +561,16 @@ struct dpu_merge_3d_cfg  {
  * @type:              Interface type(DSI, DP, HDMI)
  * @controller_id:     Controller Instance ID in case of multiple of intf type
  * @prog_fetch_lines_worst_case	Worst case latency num lines needed to prefetch
+ * @intr_underrun:	index for INTF underrun interrupt
+ * @intr_vsync:	        index for INTF VSYNC interrupt
  */
 struct dpu_intf_cfg  {
 	DPU_HW_BLK_INFO;
 	u32 type;   /* interface type*/
 	u32 controller_id;
 	u32 prog_fetch_lines_worst_case;
+	s32 intr_underrun;
+	s32 intr_vsync;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 8bd22e060437..adc1f04ac257 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -196,6 +196,8 @@ struct dpu_irq_type {
 
 /*
  * struct dpu_intr_reg -  List of DPU interrupt registers
+ *
+ * When making changes be sure to sync with dpu_hw_intr_reg
  */
 static const struct dpu_intr_reg dpu_intr_set[] = {
 	{
@@ -265,6 +267,9 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
 	},
 };
 
+#define DPU_IRQ_REG(irq_idx)	(irq_idx / 32)
+#define DPU_IRQ_MASK(irq_idx)	(BIT(irq_idx % 32))
+
 /*
  * struct dpu_irq_type - IRQ mapping table use for lookup an irq_idx in this
  *			 table that have a matching interface type and
@@ -1345,23 +1350,6 @@ static const struct dpu_irq_type dpu_irq_map[] = {
 	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
 };
 
-static int dpu_hw_intr_irqidx_lookup(struct dpu_hw_intr *intr,
-	enum dpu_intr_type intr_type, u32 instance_idx)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++) {
-		if (intr_type == dpu_irq_map[i].intr_type &&
-			instance_idx == dpu_irq_map[i].instance_idx &&
-			!(intr->obsolete_irq & BIT(dpu_irq_map[i].intr_type)))
-			return i;
-	}
-
-	pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
-			intr_type, instance_idx);
-	return -EINVAL;
-}
-
 static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
 		int irq_idx)
 {
@@ -1370,9 +1358,8 @@ static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
 	if (!intr)
 		return;
 
-	reg_idx = dpu_irq_map[irq_idx].reg_idx;
-	DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
-			dpu_irq_map[irq_idx].irq_mask);
+	reg_idx = DPU_IRQ_REG(irq_idx);
+	DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off, DPU_IRQ_MASK(irq_idx));
 
 	/* ensure register writes go through */
 	wmb();
@@ -1384,10 +1371,9 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
 {
 	int reg_idx;
 	int irq_idx;
-	int start_idx;
-	int end_idx;
 	u32 irq_status;
 	u32 enable_mask;
+	int bit;
 	unsigned long irq_flags;
 
 	if (!intr)
@@ -1400,15 +1386,7 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
 	 */
 	spin_lock_irqsave(&intr->irq_lock, irq_flags);
 	for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
-		/*
-		 * Each Interrupt register has a range of 64 indexes, and
-		 * that is static for dpu_irq_map.
-		 */
-		start_idx = reg_idx * 64;
-		end_idx = start_idx + 64;
-
-		if (!test_bit(reg_idx, &intr->irq_mask) ||
-			start_idx >= ARRAY_SIZE(dpu_irq_map))
+		if (!test_bit(reg_idx, &intr->irq_mask))
 			continue;
 
 		/* Read interrupt status */
@@ -1425,37 +1403,33 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
 		/* Finally update IRQ status based on enable mask */
 		irq_status &= enable_mask;
 
+		if (!irq_status)
+			continue;
+
 		/*
-		 * Search through matching intr status from irq map.
-		 * start_idx and end_idx defined the search range in
-		 * the dpu_irq_map.
+		 * Search through matching intr status.
 		 */
-		for (irq_idx = start_idx;
-				(irq_idx < end_idx) && irq_status;
-				irq_idx++)
-			if ((irq_status & dpu_irq_map[irq_idx].irq_mask) &&
-				(dpu_irq_map[irq_idx].reg_idx == reg_idx) &&
-				!(intr->obsolete_irq &
-				BIT(dpu_irq_map[irq_idx].intr_type))) {
-				/*
-				 * Once a match on irq mask, perform a callback
-				 * to the given cbfunc. cbfunc will take care
-				 * the interrupt status clearing. If cbfunc is
-				 * not provided, then the interrupt clearing
-				 * is here.
-				 */
-				if (cbfunc)
-					cbfunc(arg, irq_idx);
-
-				dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
-
-				/*
-				 * When callback finish, clear the irq_status
-				 * with the matching mask. Once irq_status
-				 * is all cleared, the search can be stopped.
-				 */
-				irq_status &= ~dpu_irq_map[irq_idx].irq_mask;
-			}
+		while ((bit = ffs(irq_status)) != 0) {
+			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);
+			/*
+			 * Once a match on irq mask, perform a callback
+			 * to the given cbfunc. cbfunc will take care
+			 * the interrupt status clearing. If cbfunc is
+			 * not provided, then the interrupt clearing
+			 * is here.
+			 */
+			if (cbfunc)
+				cbfunc(arg, irq_idx);
+
+			dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+
+			/*
+			 * When callback finish, clear the irq_status
+			 * with the matching mask. Once irq_status
+			 * is all cleared, the search can be stopped.
+			 */
+			irq_status &= ~BIT(bit - 1);
+		}
 	}
 
 	/* ensure register writes go through */
@@ -1469,32 +1443,30 @@ static int dpu_hw_intr_enable_irq(struct dpu_hw_intr *intr, int irq_idx)
 	int reg_idx;
 	unsigned long irq_flags;
 	const struct dpu_intr_reg *reg;
-	const struct dpu_irq_type *irq;
 	const char *dbgstr = NULL;
 	uint32_t cache_irq_mask;
 
 	if (!intr)
 		return -EINVAL;
 
-	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
 		pr_err("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
 
-	irq = &dpu_irq_map[irq_idx];
-	reg_idx = irq->reg_idx;
+	reg_idx = DPU_IRQ_REG(irq_idx);
 	reg = &dpu_intr_set[reg_idx];
 
 	spin_lock_irqsave(&intr->irq_lock, irq_flags);
 	cache_irq_mask = intr->cache_irq_mask[reg_idx];
-	if (cache_irq_mask & irq->irq_mask) {
+	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
 		dbgstr = "DPU IRQ already set:";
 	} else {
 		dbgstr = "DPU IRQ enabled:";
 
-		cache_irq_mask |= irq->irq_mask;
+		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
 		/* Cleaning any pending interrupt */
-		DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
 		/* Enabling interrupts with the new mask */
 		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
 
@@ -1505,8 +1477,8 @@ static int dpu_hw_intr_enable_irq(struct dpu_hw_intr *intr, int irq_idx)
 	}
 	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
 
-	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
-			irq->irq_mask, cache_irq_mask);
+	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
+			DPU_IRQ_MASK(irq_idx), cache_irq_mask);
 
 	return 0;
 }
@@ -1515,33 +1487,31 @@ static int dpu_hw_intr_disable_irq_nolock(struct dpu_hw_intr *intr, int irq_idx)
 {
 	int reg_idx;
 	const struct dpu_intr_reg *reg;
-	const struct dpu_irq_type *irq;
 	const char *dbgstr = NULL;
 	uint32_t cache_irq_mask;
 
 	if (!intr)
 		return -EINVAL;
 
-	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
 		pr_err("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
 
-	irq = &dpu_irq_map[irq_idx];
-	reg_idx = irq->reg_idx;
+	reg_idx = DPU_IRQ_REG(irq_idx);
 	reg = &dpu_intr_set[reg_idx];
 
 	cache_irq_mask = intr->cache_irq_mask[reg_idx];
-	if ((cache_irq_mask & irq->irq_mask) == 0) {
+	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
 		dbgstr = "DPU IRQ is already cleared:";
 	} else {
 		dbgstr = "DPU IRQ mask disable:";
 
-		cache_irq_mask &= ~irq->irq_mask;
+		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
 		/* Disable interrupts based on the new mask */
 		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
 		/* Cleaning any pending interrupt */
-		DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
 
 		/* ensure register write goes through */
 		wmb();
@@ -1549,8 +1519,8 @@ static int dpu_hw_intr_disable_irq_nolock(struct dpu_hw_intr *intr, int irq_idx)
 		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
 	}
 
-	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
-			irq->irq_mask, cache_irq_mask);
+	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
+			DPU_IRQ_MASK(irq_idx), cache_irq_mask);
 
 	return 0;
 }
@@ -1562,7 +1532,7 @@ static int dpu_hw_intr_disable_irq(struct dpu_hw_intr *intr, int irq_idx)
 	if (!intr)
 		return -EINVAL;
 
-	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
 		pr_err("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
@@ -1622,17 +1592,17 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
 	if (!intr)
 		return 0;
 
-	if (irq_idx >= ARRAY_SIZE(dpu_irq_map) || irq_idx < 0) {
+	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
 		pr_err("invalid IRQ index: [%d]\n", irq_idx);
 		return 0;
 	}
 
 	spin_lock_irqsave(&intr->irq_lock, irq_flags);
 
-	reg_idx = dpu_irq_map[irq_idx].reg_idx;
+	reg_idx = DPU_IRQ_REG(irq_idx);
 	intr_status = DPU_REG_READ(&intr->hw,
 			dpu_intr_set[reg_idx].status_off) &
-					dpu_irq_map[irq_idx].irq_mask;
+		DPU_IRQ_MASK(irq_idx);
 	if (intr_status && clear)
 		DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
 				intr_status);
@@ -1647,7 +1617,6 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
 
 static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
 {
-	ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
 	ops->enable_irq = dpu_hw_intr_enable_irq;
 	ops->disable_irq = dpu_hw_intr_disable_irq;
 	ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
@@ -1679,7 +1648,7 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
 	__intr_offset(m, addr, &intr->hw);
 	__setup_intr_ops(&intr->ops);
 
-	intr->irq_idx_tbl_size = ARRAY_SIZE(dpu_irq_map);
+	intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;
 
 	intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
 			GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index b26a3445a8eb..c6b3d373ce75 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -92,23 +92,14 @@ enum dpu_hw_intr_reg {
 	MDP_INTR_MAX,
 };
 
+#define DPU_IRQ_IDX(reg_idx, offset)	(reg_idx * 32 + offset)
+
 struct dpu_hw_intr;
 
 /**
  * Interrupt operations.
  */
 struct dpu_hw_intr_ops {
-	/**
-	 * irq_idx_lookup - Lookup IRQ index on the HW interrupt type
-	 *                 Used for all irq related ops
-	 * @intr:		HW interrupt handle
-	 * @intr_type:		Interrupt type defined in dpu_intr_type
-	 * @instance_idx:	HW interrupt block instance
-	 * @return:		irq_idx or -EINVAL for lookup fail
-	 */
-	int (*irq_idx_lookup)(struct dpu_hw_intr *intr,
-			enum dpu_intr_type intr_type,
-			u32 instance_idx);
 
 	/**
 	 * enable_irq - Enable IRQ based on lookup IRQ index
@@ -179,7 +170,7 @@ struct dpu_hw_intr_ops {
  * @ops:              function pointer mapping for IRQ handling
  * @cache_irq_mask:   array of IRQ enable masks reg storage created during init
  * @save_irq_status:  array of IRQ status reg storage created during init
- * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
+ * @total_irqs: total number of irq_idx mapped in the hw_interrupts
  * @irq_lock:         spinlock for accessing IRQ resources
  * @obsolete_irq:      irq types that are obsolete for a particular target
  */
@@ -188,7 +179,7 @@ struct dpu_hw_intr {
 	struct dpu_hw_intr_ops ops;
 	u32 *cache_irq_mask;
 	u32 *save_irq_status;
-	u32 irq_idx_tbl_size;
+	u32 total_irqs;
 	spinlock_t irq_lock;
 	unsigned long irq_mask;
 	unsigned long obsolete_irq;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index 6714b088970f..0a48628b8422 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -168,44 +168,41 @@ TRACE_EVENT(dpu_perf_crtc_update,
 );
 
 DECLARE_EVENT_CLASS(dpu_enc_irq_template,
-	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
 		 int irq_idx),
-	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx),
+	TP_ARGS(drm_id, intr_idx, irq_idx),
 	TP_STRUCT__entry(
 		__field(	uint32_t,		drm_id		)
 		__field(	enum dpu_intr_idx,	intr_idx	)
-		__field(	int,			hw_idx		)
 		__field(	int,			irq_idx		)
 	),
 	TP_fast_assign(
 		__entry->drm_id = drm_id;
 		__entry->intr_idx = intr_idx;
-		__entry->hw_idx = hw_idx;
 		__entry->irq_idx = irq_idx;
 	),
-	TP_printk("id=%u, intr=%d, hw=%d, irq=%d",
-		  __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+	TP_printk("id=%u, intr=%d, irq=%d",
+		  __entry->drm_id, __entry->intr_idx,
 		  __entry->irq_idx)
 );
 DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
-	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
 		 int irq_idx),
-	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+	TP_ARGS(drm_id, intr_idx, irq_idx)
 );
 DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
-	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
 		 int irq_idx),
-	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+	TP_ARGS(drm_id, intr_idx, irq_idx)
 );
 
 TRACE_EVENT(dpu_enc_irq_wait_success,
-	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
 		 int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
-	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx, pp_idx, atomic_cnt),
+	TP_ARGS(drm_id, intr_idx, irq_idx, pp_idx, atomic_cnt),
 	TP_STRUCT__entry(
 		__field(	uint32_t,		drm_id		)
 		__field(	enum dpu_intr_idx,	intr_idx	)
-		__field(	int,			hw_idx		)
 		__field(	int,			irq_idx		)
 		__field(	enum dpu_pingpong,	pp_idx		)
 		__field(	int,			atomic_cnt	)
@@ -213,13 +210,12 @@ TRACE_EVENT(dpu_enc_irq_wait_success,
 	TP_fast_assign(
 		__entry->drm_id = drm_id;
 		__entry->intr_idx = intr_idx;
-		__entry->hw_idx = hw_idx;
 		__entry->irq_idx = irq_idx;
 		__entry->pp_idx = pp_idx;
 		__entry->atomic_cnt = atomic_cnt;
 	),
-	TP_printk("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
-		  __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+	TP_printk("id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d",
+		  __entry->drm_id, __entry->intr_idx,
 		  __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
 );
 
@@ -514,12 +510,12 @@ DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
 );
 
 TRACE_EVENT(dpu_enc_wait_event_timeout,
-	TP_PROTO(uint32_t drm_id, int32_t hw_id, int rc, s64 time,
+	TP_PROTO(uint32_t drm_id, int irq_idx, int rc, s64 time,
 		 s64 expected_time, int atomic_cnt),
-	TP_ARGS(drm_id, hw_id, rc, time, expected_time, atomic_cnt),
+	TP_ARGS(drm_id, irq_idx, rc, time, expected_time, atomic_cnt),
 	TP_STRUCT__entry(
 		__field(	uint32_t,	drm_id		)
-		__field(	int32_t,	hw_id		)
+		__field(	int,		irq_idx		)
 		__field(	int,		rc		)
 		__field(	s64,		time		)
 		__field(	s64,		expected_time	)
@@ -527,14 +523,14 @@ TRACE_EVENT(dpu_enc_wait_event_timeout,
 	),
 	TP_fast_assign(
 		__entry->drm_id = drm_id;
-		__entry->hw_id = hw_id;
+		__entry->irq_idx = irq_idx;
 		__entry->rc = rc;
 		__entry->time = time;
 		__entry->expected_time = expected_time;
 		__entry->atomic_cnt = atomic_cnt;
 	),
-	TP_printk("id=%u, hw_id=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
-		  __entry->drm_id, __entry->hw_id, __entry->rc, __entry->time,
+	TP_printk("id=%u, irq_idx=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+		  __entry->drm_id, __entry->irq_idx, __entry->rc, __entry->time,
 		  __entry->expected_time, __entry->atomic_cnt)
 );
 
-- 
cgit v1.2.3-70-g09d2
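
With the lookup table gone, an interrupt is identified by a flat index that
encodes the register and the bit within it: DPU_IRQ_IDX(reg, bit) = reg * 32 +
bit, with DPU_IRQ_REG() and DPU_IRQ_MASK() recovering the two halves (and the
dispatch loop walking set bits via ffs()). A small standalone sketch of that
round trip is below, assuming a register index of 1 purely for illustration;
the real value comes from the dpu_hw_intr_reg enum, and the macros are mirrored
here in userspace form with BIT() spelled out.

    #include <assert.h>
    #include <stdio.h>

    /* userspace mirror of the macros added by the patch */
    #define DPU_IRQ_IDX(reg_idx, offset)    ((reg_idx) * 32 + (offset))
    #define DPU_IRQ_REG(irq_idx)            ((irq_idx) / 32)
    #define DPU_IRQ_MASK(irq_idx)           (1u << ((irq_idx) % 32))

    int main(void)
    {
            int irq_idx = DPU_IRQ_IDX(1, 9);        /* register index 1, bit 9 -> 41 */

            assert(DPU_IRQ_REG(irq_idx) == 1);              /* back to the register index */
            assert(DPU_IRQ_MASK(irq_idx) == (1u << 9));     /* and to the bit mask within it */

            printf("irq_idx=%d reg=%d mask=0x%x\n",
                   irq_idx, DPU_IRQ_REG(irq_idx), DPU_IRQ_MASK(irq_idx));
            return 0;
    }

Because every register contributes exactly 32 indexes, the total number of
indexes is ARRAY_SIZE(dpu_intr_set) * 32, which is what the bounds checks in
the patch compare irq_idx against.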


From d94fc8f36f78e3a288ffd8b61809c433ca6999bd Mon Sep 17 00:00:00 2001
From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Date: Sun, 16 May 2021 23:29:11 +0300
Subject: drm/msm/dpu: simplify IRQ enabling/disabling

Merge dpu_core_irq_enable() into dpu_core_irq_register_callback() and
dpu_core_irq_disable() into dpu_core_irq_unregister_callback(), because
they are called in pairs. There is no need for a separate enable/disable
pair: we can enable the hardware IRQ when the first callback is
registered and disable it when the last callback is unregistered.

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Reviewed-by: Abhinav Kumar <abhinavk@codeaurora.org>
Link: https://lore.kernel.org/r/20210516202910.2141079-7-dmitry.baryshkov@linaro.org
[fixup tracepoint compile warns]
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c | 168 +++++----------------------
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h |  30 -----
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c  |  18 ---
 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h      |   2 -
 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h    |  23 ----
 5 files changed, 27 insertions(+), 214 deletions(-)

(limited to 'drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c')

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index 11c0abed21ee..4f110c428b60 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -26,10 +26,8 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
 
 	pr_debug("irq_idx=%d\n", irq_idx);
 
-	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
-		DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
-			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
-	}
+	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx]))
+		DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
 
 	atomic_inc(&irq_obj->irq_counts[irq_idx]);
 
@@ -43,127 +41,6 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
 	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
 }
 
-/**
- * _dpu_core_irq_enable - enable core interrupt given by the index
- * @dpu_kms:		Pointer to dpu kms context
- * @irq_idx:		interrupt index
- */
-static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
-{
-	unsigned long irq_flags;
-	int ret = 0, enable_count;
-
-	if (!dpu_kms->hw_intr ||
-			!dpu_kms->irq_obj.enable_counts ||
-			!dpu_kms->irq_obj.irq_counts) {
-		DPU_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
-		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
-		return -EINVAL;
-	}
-
-	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
-	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
-	trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
-
-	if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
-		ret = dpu_kms->hw_intr->ops.enable_irq(
-				dpu_kms->hw_intr,
-				irq_idx);
-		if (ret)
-			DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
-					irq_idx);
-
-		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
-
-		spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
-		/* empty callback list but interrupt is enabled */
-		if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
-			DPU_ERROR("irq_idx=%d enabled with no callback\n",
-					irq_idx);
-		spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
-	}
-
-	return ret;
-}
-
-int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
-{
-	int i, ret = 0, counts;
-
-	if (!irq_idxs || !irq_count) {
-		DPU_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
-	if (counts)
-		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
-
-	for (i = 0; (i < irq_count) && !ret; i++)
-		ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
-
-	return ret;
-}
-
-/**
- * _dpu_core_irq_disable - disable core interrupt given by the index
- * @dpu_kms:		Pointer to dpu kms context
- * @irq_idx:		interrupt index
- */
-static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
-{
-	int ret = 0, enable_count;
-
-	if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
-		DPU_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
-		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
-		return -EINVAL;
-	}
-
-	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
-	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
-	trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
-
-	if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
-		ret = dpu_kms->hw_intr->ops.disable_irq(
-				dpu_kms->hw_intr,
-				irq_idx);
-		if (ret)
-			DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
-					irq_idx);
-		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
-	}
-
-	return ret;
-}
-
-int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
-{
-	int i, ret = 0, counts;
-
-	if (!irq_idxs || !irq_count) {
-		DPU_ERROR("invalid params\n");
-		return -EINVAL;
-	}
-
-	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
-	if (counts == 2)
-		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
-
-	for (i = 0; (i < irq_count) && !ret; i++)
-		ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
-
-	return ret;
-}
-
 u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
 {
 	if (!dpu_kms->hw_intr ||
@@ -210,6 +87,16 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
 	list_del_init(&register_irq_cb->list);
 	list_add_tail(&register_irq_cb->list,
 			&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
+	if (list_is_first(&register_irq_cb->list,
+			&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
+		int ret = dpu_kms->hw_intr->ops.enable_irq(
+				dpu_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+					irq_idx);
+	}
+
 	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
 
 	return 0;
@@ -244,9 +131,15 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
 	trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
 	list_del_init(&register_irq_cb->list);
 	/* empty callback list but interrupt is still enabled */
-	if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
-			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
-		DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+	if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
+		int ret = dpu_kms->hw_intr->ops.disable_irq(
+				dpu_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+					irq_idx);
+		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+	}
 	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
 
 	return 0;
@@ -274,23 +167,22 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
 	struct dpu_irq *irq_obj = s->private;
 	struct dpu_irq_callback *cb;
 	unsigned long irq_flags;
-	int i, irq_count, enable_count, cb_count;
+	int i, irq_count, cb_count;
 
-	if (WARN_ON(!irq_obj->enable_counts || !irq_obj->irq_cb_tbl))
+	if (WARN_ON(!irq_obj->irq_cb_tbl))
 		return 0;
 
 	for (i = 0; i < irq_obj->total_irqs; i++) {
 		spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
 		cb_count = 0;
 		irq_count = atomic_read(&irq_obj->irq_counts[i]);
-		enable_count = atomic_read(&irq_obj->enable_counts[i]);
 		list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
 			cb_count++;
 		spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
 
-		if (irq_count || enable_count || cb_count)
-			seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
-					i, irq_count, enable_count, cb_count);
+		if (irq_count || cb_count)
+			seq_printf(s, "idx:%d irq:%d cb:%d\n",
+					i, irq_count, cb_count);
 	}
 
 	return 0;
@@ -321,13 +213,10 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
 	dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->total_irqs;
 	dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
 			sizeof(struct list_head), GFP_KERNEL);
-	dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
-			sizeof(atomic_t), GFP_KERNEL);
 	dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
 			sizeof(atomic_t), GFP_KERNEL);
 	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
 		INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
-		atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
 		atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
 	}
 }
@@ -338,8 +227,7 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
 
 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
 	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
-		if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
-				!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
+		if (!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
 			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
 
 	dpu_clear_all_irqs(dpu_kms);
@@ -347,10 +235,8 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
 
 	kfree(dpu_kms->irq_obj.irq_cb_tbl);
-	kfree(dpu_kms->irq_obj.enable_counts);
 	kfree(dpu_kms->irq_obj.irq_counts);
 	dpu_kms->irq_obj.irq_cb_tbl = NULL;
-	dpu_kms->irq_obj.enable_counts = NULL;
 	dpu_kms->irq_obj.irq_counts = NULL;
 	dpu_kms->irq_obj.total_irqs = 0;
 }
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
index d147784d5531..90ae6c9ccc95 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -29,36 +29,6 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
  */
 irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
 
-/**
- * dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
- * @dpu_kms:		DPU handle
- * @irq_idxs:		Array of irq index
- * @irq_count:		Number of irq_idx provided in the array
- * @return:		0 for success enabling IRQ, otherwise failure
- *
- * This function increments count on each enable and decrements on each
- * disable.  Interrupts is enabled if count is 0 before increment.
- */
-int dpu_core_irq_enable(
-		struct dpu_kms *dpu_kms,
-		int *irq_idxs,
-		uint32_t irq_count);
-
-/**
- * dpu_core_irq_disable - IRQ helper function for disabling one of more IRQs
- * @dpu_kms:		DPU handle
- * @irq_idxs:		Array of irq index
- * @irq_count:		Number of irq_idx provided in the array
- * @return:		0 for success disabling IRQ, otherwise failure
- *
- * This function increments count on each enable and decrements on each
- * disable.  Interrupts is disabled if count is 0 after decrement.
- */
-int dpu_core_irq_disable(
-		struct dpu_kms *dpu_kms,
-		int *irq_idxs,
-		uint32_t irq_count);
-
 /**
  * dpu_core_irq_read - IRQ helper function for reading IRQ status
  * @dpu_kms:		DPU handle
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index f36603ffe913..324d5d928531 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -361,17 +361,6 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
 		return ret;
 	}
 
-	ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
-	if (ret) {
-		DRM_ERROR("enable failed id=%u, intr=%d, irq=%d",
-			  DRMID(phys_enc->parent), intr_idx,
-			  irq->irq_idx);
-		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
-				irq->irq_idx, &irq->cb);
-		irq->irq_idx = -EINVAL;
-		return ret;
-	}
-
 	trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
 				irq->irq_idx);
 
@@ -394,13 +383,6 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
 		return 0;
 	}
 
-	ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
-	if (ret) {
-		DRM_ERROR("disable failed id=%u, intr=%d, irq=%d ret=%d",
-			  DRMID(phys_enc->parent), intr_idx,
-			  irq->irq_idx, ret);
-	}
-
 	ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
 			&irq->cb);
 	if (ret) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index d6717d6672f7..f6840b1af6e4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -82,14 +82,12 @@ struct dpu_irq_callback {
  * struct dpu_irq: IRQ structure contains callback registration info
  * @total_irq:    total number of irq_idx obtained from HW interrupts mapping
  * @irq_cb_tbl:   array of IRQ callbacks setting
- * @enable_counts array of IRQ enable counts
  * @cb_lock:      callback lock
  * @debugfs_file: debugfs file for irq statistics
  */
 struct dpu_irq {
 	u32 total_irqs;
 	struct list_head *irq_cb_tbl;
-	atomic_t *enable_counts;
 	atomic_t *irq_counts;
 	spinlock_t cb_lock;
 };
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index 0a48628b8422..37bba57675a8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -875,29 +875,6 @@ TRACE_EVENT(dpu_pp_connect_ext_te,
 	TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
 );
 
-DECLARE_EVENT_CLASS(dpu_core_irq_idx_cnt_template,
-	TP_PROTO(int irq_idx, int enable_count),
-	TP_ARGS(irq_idx, enable_count),
-	TP_STRUCT__entry(
-		__field(	int,	irq_idx		)
-		__field(	int,	enable_count	)
-	),
-	TP_fast_assign(
-		__entry->irq_idx = irq_idx;
-		__entry->enable_count = enable_count;
-	),
-	TP_printk("irq_idx:%d enable_count:%u", __entry->irq_idx,
-		  __entry->enable_count)
-);
-DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_enable_idx,
-	TP_PROTO(int irq_idx, int enable_count),
-	TP_ARGS(irq_idx, enable_count)
-);
-DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_idx,
-	TP_PROTO(int irq_idx, int enable_count),
-	TP_ARGS(irq_idx, enable_count)
-);
-
 DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
 	TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
 	TP_ARGS(irq_idx, callback),
-- 
cgit v1.2.3-70-g09d2
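
[Editor's note] The hunks above drop the separate enable_counts refcounting: the
hardware IRQ line is now enabled when the first callback is registered for an
irq_idx and disabled again when the last one is unregistered. Below is a minimal
userspace sketch of that pattern, illustrative only; the names and the plain
linked list are made up and are not the kernel data structures.

#include <stdbool.h>
#include <stdio.h>

#define NUM_IRQS 4

struct cb {
	void (*fn)(int irq_idx);
	struct cb *next;
};

static struct cb *cb_tbl[NUM_IRQS];	/* one callback list per irq_idx */

static void hw_enable_irq(int irq_idx)  { printf("enable irq %d\n", irq_idx); }
static void hw_disable_irq(int irq_idx) { printf("disable irq %d\n", irq_idx); }

static void register_callback(int irq_idx, struct cb *cb)
{
	bool was_empty = (cb_tbl[irq_idx] == NULL);

	cb->next = cb_tbl[irq_idx];
	cb_tbl[irq_idx] = cb;
	if (was_empty)			/* first user of this line: turn hw irq on */
		hw_enable_irq(irq_idx);
}

static void unregister_callback(int irq_idx, struct cb *cb)
{
	struct cb **p = &cb_tbl[irq_idx];

	while (*p && *p != cb)
		p = &(*p)->next;
	if (!*p)
		return;			/* not registered */
	*p = cb->next;
	if (cb_tbl[irq_idx] == NULL)	/* last callback gone: turn hw irq off */
		hw_disable_irq(irq_idx);
}

static void dispatch(int irq_idx)
{
	for (struct cb *c = cb_tbl[irq_idx]; c; c = c->next)
		c->fn(irq_idx);
}

static void vblank_cb(int irq_idx) { printf("vblank on %d\n", irq_idx); }

int main(void)
{
	struct cb a = { .fn = vblank_cb }, b = { .fn = vblank_cb };

	register_callback(2, &a);	/* enables irq 2 */
	register_callback(2, &b);	/* already enabled, no extra enable */
	dispatch(2);
	unregister_callback(2, &a);
	unregister_callback(2, &b);	/* list empty -> disables irq 2 */
	return 0;
}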


From 5b702d787b47e19f5aebb4f11360678493759a3b Mon Sep 17 00:00:00 2001
From: Stephen Boyd <swboyd@chromium.org>
Date: Fri, 30 Apr 2021 12:31:04 -0700
Subject: drm/msm/disp: Move various debug logs to atomic bucket

These prints flood the logs when drm debugging is set to enable kms and
driver logging (DRM_UT_KMS and DRM_UT_DRIVER). Let's move these prints
to the atomic bucket (DRM_UT_ATOMIC), as they relate to the atomic
paths.
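
[Editor's note] As a rough illustration, not part of this patch: the buckets boil
down to a per-category bitmask check, so a message tagged DRM_UT_ATOMIC is only
emitted when its bit is set in drm.debug. The userspace sketch below mimics that
gating; the category values are assumed to mirror enum drm_debug_category at the
time of this series, and dbg() stands in for the real drm_print helpers.

#include <stdarg.h>
#include <stdio.h>

/* Assumed mirror of the relevant drm_debug_category bits. */
enum debug_category {
	UT_CORE   = 0x01,
	UT_DRIVER = 0x02,
	UT_KMS    = 0x04,
	UT_ATOMIC = 0x10,
};

static unsigned int debug_mask = UT_ATOMIC;	/* think "drm.debug=0x10" */

static void dbg(enum debug_category cat, const char *fmt, ...)
{
	va_list ap;

	if (!(debug_mask & cat))
		return;			/* message is in a disabled bucket */
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	dbg(UT_KMS, "kms: hidden unless 0x04 is set\n");
	dbg(UT_ATOMIC, "atomic: shown, 0x10 is set\n");
	return 0;
}

With the prints moved to the atomic bucket, enabling only KMS and driver logging
no longer emits them.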

Cc: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Cc: Abhinav Kumar <abhinavk@codeaurora.org>
Cc: Kuogee Hsieh <khsieh@codeaurora.org>
Cc: aravindh@codeaurora.org
Cc: Sean Paul <sean@poorly.run>
Signed-off-by: Stephen Boyd <swboyd@chromium.org>
Link: https://lore.kernel.org/r/20210430193104.1770538-7-swboyd@chromium.org
Reviewed-by: Abhinav Kumar <abhinavk@codeaurora.org>
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 22 +++++++--------
 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c      | 39 +++++++++++++--------------
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c   | 10 +++----
 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c   |  6 ++---
 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c     |  2 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c      | 14 +++++-----
 6 files changed, 45 insertions(+), 48 deletions(-)

(limited to 'drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c')

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 7cba5bbdf4b7..60fe06018581 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -132,7 +132,7 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
 		perf->core_clk_rate = _dpu_core_perf_calc_clk(kms, crtc, state);
 	}
 
-	DPU_DEBUG(
+	DRM_DEBUG_ATOMIC(
 		"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
 			crtc->base.id, perf->core_clk_rate,
 			perf->max_per_pipe_ib, perf->bw_ctl);
@@ -178,7 +178,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
 			struct dpu_crtc_state *tmp_cstate =
 				to_dpu_crtc_state(tmp_crtc->state);
 
-			DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+			DRM_DEBUG_ATOMIC("crtc:%d bw:%llu ctrl:%d\n",
 				tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
 				tmp_cstate->bw_control);
 
@@ -187,11 +187,11 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
 
 		/* convert bandwidth to kb */
 		bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
-		DPU_DEBUG("calculated bandwidth=%uk\n", bw);
+		DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
 
 		threshold = kms->catalog->perf.max_bw_high;
 
-		DPU_DEBUG("final threshold bw limit = %d\n", threshold);
+		DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
 
 		if (!threshold) {
 			DPU_ERROR("no bandwidth limits specified\n");
@@ -228,7 +228,7 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
 
 			perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
 
-			DPU_DEBUG("crtc=%d bw=%llu paths:%d\n",
+			DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
 				  tmp_crtc->base.id,
 				  dpu_cstate->new_perf.bw_ctl, kms->num_paths);
 		}
@@ -278,7 +278,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
 	/* Release the bandwidth */
 	if (kms->perf.enable_bw_release) {
 		trace_dpu_cmd_release_bw(crtc->base.id);
-		DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+		DRM_DEBUG_ATOMIC("Release BW crtc=%d\n", crtc->base.id);
 		dpu_crtc->cur_perf.bw_ctl = 0;
 		_dpu_core_perf_crtc_update_bus(kms, crtc);
 	}
@@ -314,7 +314,7 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
 	if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
 		clk_rate = kms->perf.fix_core_clk_rate;
 
-	DPU_DEBUG("clk:%llu\n", clk_rate);
+	DRM_DEBUG_ATOMIC("clk:%llu\n", clk_rate);
 
 	return clk_rate;
 }
@@ -344,7 +344,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
 	dpu_crtc = to_dpu_crtc(crtc);
 	dpu_cstate = to_dpu_crtc_state(crtc->state);
 
-	DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
+	DRM_DEBUG_ATOMIC("crtc:%d stop_req:%d core_clk:%llu\n",
 			crtc->base.id, stop_req, kms->perf.core_clk_rate);
 
 	old = &dpu_crtc->cur_perf;
@@ -362,7 +362,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
 			(new->max_per_pipe_ib > old->max_per_pipe_ib)))	||
 			(!params_changed && ((new->bw_ctl < old->bw_ctl) ||
 			(new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
-			DPU_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+			DRM_DEBUG_ATOMIC("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
 				crtc->base.id, params_changed,
 				new->bw_ctl, old->bw_ctl);
 			old->bw_ctl = new->bw_ctl;
@@ -378,7 +378,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
 			update_clk = true;
 		}
 	} else {
-		DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
+		DRM_DEBUG_ATOMIC("crtc=%d disable\n", crtc->base.id);
 		memset(old, 0, sizeof(*old));
 		update_bus = true;
 		update_clk = true;
@@ -413,7 +413,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
 		}
 
 		kms->perf.core_clk_rate = clk_rate;
-		DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
+		DRM_DEBUG_ATOMIC("update clk rate = %lld HZ\n", clk_rate);
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 18bc76b7f1a3..3e54dafc2f8a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -57,8 +57,6 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
 
-	DPU_DEBUG("\n");
-
 	if (!crtc)
 		return;
 
@@ -163,7 +161,7 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
 	lm->ops.setup_blend_config(lm, pstate->stage,
 				0xFF, 0, blend_op);
 
-	DPU_DEBUG("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
+	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
 		  &format->base.pixel_format, format->alpha_enable, blend_op);
 }
 
@@ -220,7 +218,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 
 		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
 		set_bit(dpu_plane_pipe(plane), fetch_active);
-		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+
+		DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
 				crtc->base.id,
 				pstate->stage,
 				plane->base.id,
@@ -278,7 +277,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
 	struct dpu_hw_mixer *lm;
 	int i;
 
-	DPU_DEBUG("%s\n", dpu_crtc->name);
+	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
 
 	for (i = 0; i < cstate->num_mixers; i++) {
 		mixer[i].mixer_op_mode = 0;
@@ -305,7 +304,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
 		/* stage config flush mask */
 		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
 
-		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
 			mixer[i].hw_lm->idx - LM_0,
 			mixer[i].mixer_op_mode,
 			ctl->idx - CTL_0,
@@ -388,7 +387,7 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
 
 	DPU_ATRACE_BEGIN("crtc_frame_event");
 
-	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
 			ktime_to_ns(fevent->ts));
 
 	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
@@ -558,7 +557,7 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
 		/* stage config flush mask */
 		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
 
-		DPU_DEBUG("lm %d, ctl %d, flush mask 0x%x\n",
+		DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
 			mixer[i].hw_lm->idx - DSPP_0,
 			ctl->idx - CTL_0,
 			mixer[i].flush_mask);
@@ -572,12 +571,12 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
 	struct drm_encoder *encoder;
 
 	if (!crtc->state->enable) {
-		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
 				crtc->base.id, crtc->state->enable);
 		return;
 	}
 
-	DPU_DEBUG("crtc%d\n", crtc->base.id);
+	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
 
 	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
 
@@ -617,12 +616,12 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
 	struct dpu_crtc_state *cstate;
 
 	if (!crtc->state->enable) {
-		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
 				crtc->base.id, crtc->state->enable);
 		return;
 	}
 
-	DPU_DEBUG("crtc%d\n", crtc->base.id);
+	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
 
 	dpu_crtc = to_dpu_crtc(crtc);
 	cstate = to_dpu_crtc_state(crtc->state);
@@ -675,7 +674,7 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
 {
 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
 
-	DPU_DEBUG("crtc%d\n", crtc->base.id);
+	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
 
 	__drm_atomic_helper_crtc_destroy_state(state);
 
@@ -688,7 +687,7 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
 	int ret, rc = 0;
 
 	if (!atomic_read(&dpu_crtc->frame_pending)) {
-		DPU_DEBUG("no frames pending\n");
+		DRM_DEBUG_ATOMIC("no frames pending\n");
 		return 0;
 	}
 
@@ -731,9 +730,9 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
 
 	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
 		/* acquire bandwidth and other resources */
-		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
 	} else
-		DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);
 
 	dpu_crtc->play_count++;
 
@@ -908,7 +907,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
 
 	if (!crtc_state->enable || !crtc_state->active) {
-		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
 				crtc->base.id, crtc_state->enable,
 				crtc_state->active);
 		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
@@ -916,7 +915,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 	}
 
 	mode = &crtc_state->adjusted_mode;
-	DPU_DEBUG("%s: check\n", dpu_crtc->name);
+	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
 
 	/* force a full mode set if active state changed */
 	if (crtc_state->active_changed)
@@ -1024,7 +1023,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 		}
 
 		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
-		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
+		DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos);
 	}
 
 	for (i = 0; i < multirect_count; i++) {
@@ -1376,6 +1375,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
 	/* initialize event handling */
 	spin_lock_init(&dpu_crtc->event_lock);
 
-	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
+	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
 	return crtc;
 }
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 324d5d928531..7f06238a7c64 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -28,13 +28,13 @@
 #include "dpu_core_irq.h"
 #include "disp/msm_disp_snapshot.h"
 
-#define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
+#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
 
 #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
 
-#define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
+#define DPU_DEBUG_PHYS(p, fmt, ...) DRM_DEBUG_ATOMIC("enc%d intf%d pp%d " fmt,\
 		(p) ? (p)->parent->base.id : -1, \
 		(p) ? (p)->intf_idx - INTF_0 : -1, \
 		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
@@ -791,13 +791,13 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
 
 		/* return if the resource control is already in ON state */
 		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
-			DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
+			DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n",
 				      DRMID(drm_enc), sw_event);
 			mutex_unlock(&dpu_enc->rc_lock);
 			return 0;
 		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
 				dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
-			DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
+			DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n",
 				      DRMID(drm_enc), sw_event,
 				      dpu_enc->rc_state);
 			mutex_unlock(&dpu_enc->rc_lock);
@@ -2048,8 +2048,6 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
 	phys_params.parent_ops = &dpu_encoder_parent_ops;
 	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
 
-	DPU_DEBUG("\n");
-
 	switch (disp_info->intf_type) {
 	case DRM_MODE_ENCODER_DSI:
 		intf_type = INTF_DSI;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 21ff8f9e5dfd..440ae93d7bd1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -992,7 +992,7 @@ const struct dpu_format *dpu_get_dpu_format_ext(
 	 * Currently only support exactly zero or one modifier.
 	 * All planes use the same modifier.
 	 */
-	DPU_DEBUG("plane format modifier 0x%llX\n", modifier);
+	DRM_DEBUG_ATOMIC("plane format modifier 0x%llX\n", modifier);
 
 	switch (modifier) {
 	case 0:
@@ -1002,7 +1002,7 @@ const struct dpu_format *dpu_get_dpu_format_ext(
 	case DRM_FORMAT_MOD_QCOM_COMPRESSED:
 		map = dpu_format_map_ubwc;
 		map_size = ARRAY_SIZE(dpu_format_map_ubwc);
-		DPU_DEBUG("found fmt: %4.4s  DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+		DRM_DEBUG_ATOMIC("found fmt: %4.4s  DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
 				(char *)&format);
 		break;
 	default:
@@ -1021,7 +1021,7 @@ const struct dpu_format *dpu_get_dpu_format_ext(
 		DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
 			(char *)&format, modifier);
 	else
-		DPU_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+		DRM_DEBUG_ATOMIC("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
 				(char *)&format, modifier,
 				DPU_FORMAT_IS_UBWC(fmt),
 				DPU_FORMAT_IS_YUV(fmt));
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 4863b378610d..e9cb8c00920b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -25,7 +25,7 @@
 #include "dpu_vbif.h"
 #include "dpu_plane.h"
 
-#define DPU_DEBUG_PLANE(pl, fmt, ...) DPU_DEBUG("plane%d " fmt,\
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DRM_DEBUG_ATOMIC("plane%d " fmt,\
 		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
 
 #define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 7e08f40e7e6f..21d20373eb8b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -46,7 +46,7 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
 				vbif->idx - VBIF_0, xin_id);
 	} else {
 		rc = 0;
-		DPU_DEBUG("VBIF %d client %d is halted\n",
+		DRM_DEBUG_ATOMIC("VBIF %d client %d is halted\n",
 				vbif->idx - VBIF_0, xin_id);
 	}
 
@@ -87,7 +87,7 @@ static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
 		}
 	}
 
-	DPU_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+	DRM_DEBUG_ATOMIC("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
 			vbif->idx - VBIF_0, params->xin_id,
 			params->width, params->height, params->frame_rate,
 			pps, *ot_lim);
@@ -133,7 +133,7 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
 	}
 
 exit:
-	DPU_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+	DRM_DEBUG_ATOMIC("vbif:%d xin:%d ot_lim:%d\n",
 			vbif->idx - VBIF_0, params->xin_id, ot_lim);
 	return ot_lim;
 }
@@ -163,7 +163,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
 	}
 
 	if (!vbif || !mdp) {
-		DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
+		DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
 				vbif != NULL, mdp != NULL);
 		return;
 	}
@@ -230,7 +230,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
 	}
 
 	if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
-		DPU_DEBUG("qos remap not supported\n");
+		DRM_DEBUG_ATOMIC("qos remap not supported\n");
 		return;
 	}
 
@@ -238,14 +238,14 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
 			&vbif->cap->qos_nrt_tbl;
 
 	if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
-		DPU_DEBUG("qos tbl not defined\n");
+		DRM_DEBUG_ATOMIC("qos tbl not defined\n");
 		return;
 	}
 
 	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
 
 	for (i = 0; i < qos_tbl->npriority_lvl; i++) {
-		DPU_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+		DRM_DEBUG_ATOMIC("vbif:%d xin:%d lvl:%d/%d\n",
 				params->vbif_idx, params->xin_id, i,
 				qos_tbl->priority_lvl[i]);
 		vbif->ops.set_qos_remap(vbif, params->xin_id, i,
-- 
cgit v1.2.3-70-g09d2


From 9389a0e7b17c7d781ef77003138f0c82f5a0aa4c Mon Sep 17 00:00:00 2001
From: Guenter Roeck <linux@roeck-us.net>
Date: Tue, 25 May 2021 04:29:04 -0700
Subject: drm/msm/dpu: Drop unnecessary NULL checks after container_of in
 dpu_encoder

The result of container_of() is never NULL unless the embedded member is
the first element of the structure and the pointer passed in is itself
NULL. That is not the case here. The NULL checks on the result of
container_of() are therefore unnecessary and misleading. Remove them.

This change was made automatically with the following Coccinelle script.

@@
type t;
identifier v;
statement s;
@@

<+...
(
  t v = container_of(...);
|
  v = container_of(...);
)
  ...
  when != v
- if (\( !v \| v == NULL \) ) s
...+>
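
[Editor's note] To see why the removed checks can never fire, here is an
illustrative userspace sketch; this is a simplified container_of(), not the
kernel's, which additionally type-checks the pointer. The macro only subtracts
the member offset, so the result could be NULL only if the input pointer were
NULL and the member sat at offset 0, and here the work structs are not the
first members.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct encoder {
	int id;			/* 'off_work' is NOT the first member ... */
	struct work off_work;	/* ... so its offset is non-zero */
};

int main(void)
{
	struct encoder enc = { .id = 7 };
	struct work *w = &enc.off_work;	/* what the work callback receives */
	struct encoder *e = container_of(w, struct encoder, off_work);

	printf("offset=%zu id=%d non-NULL=%d\n",
	       offsetof(struct encoder, off_work), e->id, e != NULL);
	return 0;
}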

Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Link: https://lore.kernel.org/r/20210525112904.1747066-1-linux@roeck-us.net
Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
[DB: fixed patch subject]
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 10 ----------
 1 file changed, 10 deletions(-)

(limited to 'drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c')

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 7f06238a7c64..1c04b7cce43e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1429,11 +1429,6 @@ static void dpu_encoder_off_work(struct work_struct *work)
 	struct dpu_encoder_virt *dpu_enc = container_of(work,
 			struct dpu_encoder_virt, delayed_off_work.work);
 
-	if (!dpu_enc) {
-		DPU_ERROR("invalid dpu encoder\n");
-		return;
-	}
-
 	dpu_encoder_resource_control(&dpu_enc->base,
 						DPU_ENC_RC_EVENT_ENTER_IDLE);
 
@@ -1777,11 +1772,6 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
 			struct dpu_encoder_virt, vsync_event_work);
 	ktime_t wakeup_time;
 
-	if (!dpu_enc) {
-		DPU_ERROR("invalid dpu encoder\n");
-		return;
-	}
-
 	if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
 		return;
 
-- 
cgit v1.2.3-70-g09d2