author        Dave Jiang <dave.jiang@intel.com>          2023-04-07 13:31:31 -0700
committer     Vinod Koul <vkoul@kernel.org>              2023-04-12 23:18:45 +0530
commit        2f431ba908d2ef05da478d10925207728f1ff483 (patch)
tree          32295e95fc493a45e01d8b81ea388fab8afcb421 /drivers/dma
parent        244da66cda359227d80ccb41dbcb99da40eae186 (diff)
dmaengine: idxd: add interrupt handling for event log
An event log interrupt is raised in the misc interrupt INTCAUSE register when an event is written by the hardware. Add basic event log processing support to the interrupt handler. The event log is a ring where the hardware owns the tail and the software owns the head. The hardware will advance the tail index when an additional event has been pushed to memory. The software will process the log entry and then advance the head. The log is full when (tail + 1) % log_size == head. The hardware will stop writing when the log is full. The user is expected to configure a log size large enough to handle all the expected events.

Tested-by: Tony Zhu <tony.zhu@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Co-developed-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Link: https://lore.kernel.org/r/20230407203143.2189681-5-fenghua.yu@intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
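For illustration only, a minimal userspace sketch of the head/tail ring protocol described above; LOG_SIZE, ring_is_full() and the simulated producer/consumer roles are hypothetical stand-ins, not part of the driver:

#include <stdio.h>

#define LOG_SIZE 8        /* hypothetical ring size; the driver uses the configured event log size */

/* The ring is full when advancing the tail would catch up to the head. */
static int ring_is_full(unsigned int head, unsigned int tail)
{
        return ((tail + 1) % LOG_SIZE) == head;
}

int main(void)
{
        unsigned int head = 0, tail = 0;

        /* Hardware role: push events by advancing the tail until the ring is full. */
        while (!ring_is_full(head, tail))
                tail = (tail + 1) % LOG_SIZE;

        /* Software role: process each entry, then advance the head until it meets the tail. */
        while (head != tail) {
                printf("processing entry at index %u\n", head);
                head = (head + 1) % LOG_SIZE;
        }

        return 0;
}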
Diffstat (limited to 'drivers/dma')
-rw-r--r--        drivers/dma/idxd/irq.c            48
-rw-r--r--        drivers/dma/idxd/registers.h      19
2 files changed, 67 insertions, 0 deletions
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 0d639303b515..52b8b7d9db22 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -217,6 +217,49 @@ static void idxd_int_handle_revoke(struct work_struct *work)
kfree(revoke);
}
+static void process_evl_entry(struct idxd_device *idxd, struct __evl_entry *entry_head)
+{
+        struct device *dev = &idxd->pdev->dev;
+        u8 status;
+
+        status = DSA_COMP_STATUS(entry_head->error);
+        dev_warn_ratelimited(dev, "Device error %#x operation: %#x fault addr: %#llx\n",
+                             status, entry_head->operation, entry_head->fault_addr);
+}
+
+static void process_evl_entries(struct idxd_device *idxd)
+{
+        union evl_status_reg evl_status;
+        unsigned int h, t;
+        struct idxd_evl *evl = idxd->evl;
+        struct __evl_entry *entry_head;
+        unsigned int ent_size = evl_ent_size(idxd);
+        u32 size;
+
+        evl_status.bits = 0;
+        evl_status.int_pending = 1;
+
+        spin_lock(&evl->lock);
+        /* Clear interrupt pending bit */
+        iowrite32(evl_status.bits_upper32,
+                  idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
+        h = evl->head;
+        evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
+        t = evl_status.tail;
+        size = idxd->evl->size;
+
+        while (h != t) {
+                entry_head = (struct __evl_entry *)(evl->log + (h * ent_size));
+                process_evl_entry(idxd, entry_head);
+                h = (h + 1) % size;
+        }
+
+        evl->head = h;
+        evl_status.head = h;
+        iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
+        spin_unlock(&evl->lock);
+}
+
irqreturn_t idxd_misc_thread(int vec, void *data)
{
struct idxd_irq_entry *irq_entry = data;
@@ -304,6 +347,11 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
perfmon_counter_overflow(idxd);
}
+        if (cause & IDXD_INTC_EVL) {
+                val |= IDXD_INTC_EVL;
+                process_evl_entries(idxd);
+        }
+
val ^= cause;
if (val)
dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 11bb97cf7481..148db94f9373 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -168,6 +168,7 @@ enum idxd_device_reset_type {
#define IDXD_INTC_OCCUPY 0x04
#define IDXD_INTC_PERFMON_OVFL 0x08
#define IDXD_INTC_HALT_STATE 0x10
+#define IDXD_INTC_EVL 0x20
#define IDXD_INTC_INT_HANDLE_REVOKED 0x80000000
#define IDXD_CMD_OFFSET 0xa0
@@ -558,6 +559,24 @@ union filter_cfg {
u64 val;
} __packed;
+#define IDXD_EVLSTATUS_OFFSET 0xf0
+
+union evl_status_reg {
+        struct {
+                u32 head:16;
+                u32 rsvd:16;
+                u32 tail:16;
+                u32 rsvd2:14;
+                u32 int_pending:1;
+                u32 rsvd3:1;
+        };
+        struct {
+                u32 bits_lower32;
+                u32 bits_upper32;
+        };
+        u64 bits;
+} __packed;
+
struct __evl_entry {
u64 rsvd:2;
u64 desc_valid:1;