authorAlexander Lobakin <aleksander.lobakin@intel.com>2024-05-09 16:46:16 +0200
committerChristoph Hellwig <hch@lst.de>2024-05-09 19:00:29 +0200
commita6016aac5252da9d22a4dc0b98121b0acdf6d2f5 (patch)
tree3c77ee2d2df0c886af162b6d85f6d562c3d6b1b7 /kernel
parent163943ac00cb31ac1a88ce5f78a7e2ead37329ec (diff)
dma: fix DMA sync for drivers not calling dma_set_mask*()
There are several reports that the DMA sync shortcut broke non-coherent devices.

dev->dma_need_sync is false after the struct device allocation and, if a driver didn't call dma_set_mask*(), it will still be false even when the device is not DMA-coherent and thus needs synchronizing. For historical reasons, many drivers still don't call it.

Invert the boolean, so that the sync is performed by default and the shortcut is enabled only when dma_set_mask*() is called.

Reported-by: Steven Price <steven.price@arm.com>
Closes: https://lore.kernel.org/lkml/010686f5-3049-46a1-8230-7752a1b433ff@arm.com
Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Closes: https://lore.kernel.org/lkml/46160534-5003-4809-a408-6b3a3f4921e9@samsung.com
Fixes: f406c8e4b770 ("dma: avoid redundant calls for sync operations")
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Steven Price <steven.price@arm.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
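To illustrate the inversion described above, a minimal sketch of how the sync fast path is meant to consult the flag; the helper shown here is illustrative and simplified, not code taken from this patch:

/*
 * Illustrative sketch only. After the inversion, a freshly allocated
 * struct device has dma_skip_sync == false, so syncs are performed by
 * default; the shortcut kicks in only once dma_set_mask*() has run
 * dma_setup_need_sync() and determined that syncing can be skipped.
 */
static inline bool dma_dev_need_sync(const struct device *dev)
{
	return !dev->dma_skip_sync;
}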
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/dma/mapping.c | 10
-rw-r--r--  kernel/dma/swiotlb.c |  2
2 files changed, 6 insertions, 6 deletions
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 3524bc92c37f..3f77c3f8d16d 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -392,7 +392,7 @@ bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr)
if (dma_map_direct(dev, ops))
/*
- * dma_need_sync could've been reset on first SWIOTLB buffer
+ * dma_skip_sync could've been reset on first SWIOTLB buffer
* mapping, but @dma_addr is not necessary an SWIOTLB buffer.
* In this case, fall back to more granular check.
*/
@@ -407,20 +407,20 @@ static void dma_setup_need_sync(struct device *dev)
if (dma_map_direct(dev, ops) || (ops->flags & DMA_F_CAN_SKIP_SYNC))
/*
- * dma_need_sync will be reset to %true on first SWIOTLB buffer
+ * dma_skip_sync will be reset to %false on first SWIOTLB buffer
* mapping, if any. During the device initialization, it's
* enough to check only for the DMA coherence.
*/
- dev->dma_need_sync = !dev_is_dma_coherent(dev);
+ dev->dma_skip_sync = dev_is_dma_coherent(dev);
else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu &&
!ops->sync_sg_for_device && !ops->sync_sg_for_cpu)
/*
* Synchronization is not possible when none of DMA sync ops
* is set.
*/
- dev->dma_need_sync = false;
+ dev->dma_skip_sync = true;
else
- dev->dma_need_sync = true;
+ dev->dma_skip_sync = false;
}
#else /* !CONFIG_DMA_NEED_SYNC */
static inline void dma_setup_need_sync(struct device *dev) { }
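For orientation (not part of this diff): dma_setup_need_sync() is reached from the dma_set_mask*() path, which is why devices whose drivers never call dma_set_mask*() keep the safe default after this patch. A hedged, abridged sketch of the caller:

/* Abridged sketch of the caller; see kernel/dma/mapping.c for the real code. */
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	dma_setup_need_sync(dev);	/* the sync shortcut is evaluated only here */

	return 0;
}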
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index ae3e593eaadb..068134697cf1 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1409,7 +1409,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
}
/*
- * If dma_need_sync wasn't set, reset it on first SWIOTLB buffer
+ * If dma_skip_sync was set, reset it on first SWIOTLB buffer
* mapping to always sync SWIOTLB buffers.
*/
dma_reset_need_sync(dev);
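For reference, a minimal sketch of what dma_reset_need_sync() plausibly does under the inverted flag; this is an assumption based on the comment above, the helper body is not shown in this diff:

/* Sketch: clear the skip flag once an SWIOTLB bounce buffer is in use. */
static inline void dma_reset_need_sync(struct device *dev)
{
#ifdef CONFIG_DMA_NEED_SYNC
	/* Write the flag only once so the helper stays cheap on the hot path. */
	if (unlikely(dev->dma_skip_sync))
		dev->dma_skip_sync = false;
#endif /* CONFIG_DMA_NEED_SYNC */
}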