author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-06 10:56:45 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-06 10:56:45 -0700
commit | c993e07be023acdeec8e84e2e0743c52adb5fc94 (patch)
tree | 873b039ee47b424a31829ffcda3c316c52bf78e4 /drivers/iommu/dma-iommu.c
parent | 1d239c1eb873c7d6c6cbc80d68330c939fd86136 (diff)
parent | 5c850d31880e00f063fa2a3746ba212c4bcc510f (diff)
Merge tag 'dma-mapping-5.20-2022-08-06' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- convert arm32 to the common dma-direct code (Arnd Bergmann, Robin
Murphy, Christoph Hellwig)
- restructure the PCIe peer to peer mapping support (Logan Gunthorpe)
- allow the IOMMU code to communicate an optional DMA mapping length
  and use that in scsi and libata (John Garry); a brief usage sketch
  follows this list
- split the global swiotlb lock (Tianyu Lan)
- various fixes and cleanup (Chao Gao, Dan Carpenter, Dongli Zhang,
Lukas Bulwahn, Robin Murphy)
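For the mapping-length item above, the consumer-facing helper is dma_opt_mapping_size(); the diff below wires it into the IOMMU path via iommu_dma_opt_mapping_size(). As a minimal sketch of the driver side, assuming a driver that wants to cap its transfer size in 512-byte sectors (cap_opt_sectors() and its placement are illustrative, not code from this series; dma_opt_mapping_size(), SECTOR_SHIFT and min_t() are the real kernel interfaces):

#include <linux/blkdev.h>	/* SECTOR_SHIFT */
#include <linux/dma-mapping.h>	/* dma_opt_mapping_size() */
#include <linux/minmax.h>

/*
 * Hypothetical helper: clamp a driver's maximum transfer size (in
 * 512-byte sectors) to the DMA-mapping-optimal limit for @dma_dev.
 */
static unsigned int cap_opt_sectors(struct device *dma_dev,
				    unsigned int max_sectors)
{
	/*
	 * dma_opt_mapping_size() falls back to dma_max_mapping_size()
	 * when the DMA ops provide no .opt_mapping_size, so it is safe
	 * to call unconditionally.
	 */
	return min_t(u64, max_sectors,
		     dma_opt_mapping_size(dma_dev) >> SECTOR_SHIFT);
}

The scsi and libata commits in the list below apply this kind of clamp to the SCSI host's optimal and maximum sector limits.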
* tag 'dma-mapping-5.20-2022-08-06' of git://git.infradead.org/users/hch/dma-mapping: (45 commits)
swiotlb: fix passing local variable to debugfs_create_ulong()
dma-mapping: reformat comment to suppress htmldoc warning
PCI/P2PDMA: Remove pci_p2pdma_[un]map_sg()
RDMA/rw: drop pci_p2pdma_[un]map_sg()
RDMA/core: introduce ib_dma_pci_p2p_dma_supported()
nvme-pci: convert to using dma_map_sgtable()
nvme-pci: check DMA ops when indicating support for PCI P2PDMA
iommu/dma: support PCI P2PDMA pages in dma-iommu map_sg
iommu: Explicitly skip bus address marked segments in __iommu_map_sg()
dma-mapping: add flags to dma_map_ops to indicate PCI P2PDMA support
dma-direct: support PCI P2PDMA pages in dma-direct map_sg
dma-mapping: allow EREMOTEIO return code for P2PDMA transfers
PCI/P2PDMA: Introduce helpers for dma_map_sg implementations
PCI/P2PDMA: Attempt to set map_type if it has not been set
lib/scatterlist: add flag for indicating P2PDMA segments in an SGL
swiotlb: clean up some coding style and minor issues
dma-mapping: update comment after dmabounce removal
scsi: sd: Add a comment about limiting max_sectors to shost optimal limit
ata: libata-scsi: cap ata_device->max_sectors according to shost->max_sectors
scsi: scsi_transport_sas: cap shost opt_sectors according to DMA optimal limit
...
Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r-- | drivers/iommu/dma-iommu.c | 105
1 file changed, 91 insertions(+), 14 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 458fb6738223..376c4e3ae0e6 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -21,6 +21,7 @@
 #include <linux/iova.h>
 #include <linux/irq.h>
 #include <linux/list_sort.h>
+#include <linux/memremap.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
@@ -1062,15 +1063,30 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 	for_each_sg(sg, s, nents, i) {
 		/* Restore this segment's original unaligned fields first */
+		dma_addr_t s_dma_addr = sg_dma_address(s);
 		unsigned int s_iova_off = sg_dma_address(s);
 		unsigned int s_length = sg_dma_len(s);
 		unsigned int s_iova_len = s->length;
 
-		s->offset += s_iova_off;
-		s->length = s_length;
 		sg_dma_address(s) = DMA_MAPPING_ERROR;
 		sg_dma_len(s) = 0;
 
+		if (sg_is_dma_bus_address(s)) {
+			if (i > 0)
+				cur = sg_next(cur);
+
+			sg_dma_unmark_bus_address(s);
+			sg_dma_address(cur) = s_dma_addr;
+			sg_dma_len(cur) = s_length;
+			sg_dma_mark_bus_address(cur);
+			count++;
+			cur_len = 0;
+			continue;
+		}
+
+		s->offset += s_iova_off;
+		s->length = s_length;
+
 		/*
 		 * Now fill in the real DMA data. If...
 		 * - there is a valid output segment to append to
@@ -1111,10 +1127,14 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
-			s->offset += sg_dma_address(s);
-		if (sg_dma_len(s))
-			s->length = sg_dma_len(s);
+		if (sg_is_dma_bus_address(s)) {
+			sg_dma_unmark_bus_address(s);
+		} else {
+			if (sg_dma_address(s) != DMA_MAPPING_ERROR)
+				s->offset += sg_dma_address(s);
+			if (sg_dma_len(s))
+				s->length = sg_dma_len(s);
+		}
 		sg_dma_address(s) = DMA_MAPPING_ERROR;
 		sg_dma_len(s) = 0;
 	}
@@ -1167,6 +1187,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	struct iova_domain *iovad = &cookie->iovad;
 	struct scatterlist *s, *prev = NULL;
 	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
+	struct pci_p2pdma_map_state p2pdma_state = {};
+	enum pci_p2pdma_map_type map;
 	dma_addr_t iova;
 	size_t iova_len = 0;
 	unsigned long mask = dma_get_seg_boundary(dev);
@@ -1196,6 +1218,30 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		size_t s_length = s->length;
 		size_t pad_len = (mask - iova_len + 1) & mask;
 
+		if (is_pci_p2pdma_page(sg_page(s))) {
+			map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
+			switch (map) {
+			case PCI_P2PDMA_MAP_BUS_ADDR:
+				/*
+				 * iommu_map_sg() will skip this segment as
+				 * it is marked as a bus address,
+				 * __finalise_sg() will copy the dma address
+				 * into the output segment.
+				 */
+				continue;
+			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+				/*
+				 * Mapping through host bridge should be
+				 * mapped with regular IOVAs, thus we
+				 * do nothing here and continue below.
+				 */
+				break;
+			default:
+				ret = -EREMOTEIO;
+				goto out_restore_sg;
+			}
+		}
+
 		sg_dma_address(s) = s_iova_off;
 		sg_dma_len(s) = s_length;
 		s->offset -= s_iova_off;
@@ -1224,6 +1270,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		prev = s;
 	}
 
+	if (!iova_len)
+		return __finalise_sg(dev, sg, nents, 0);
+
 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
 	if (!iova) {
 		ret = -ENOMEM;
@@ -1245,7 +1294,7 @@ out_free_iova:
 out_restore_sg:
 	__invalidate_sg(sg, nents);
 out:
-	if (ret != -ENOMEM)
+	if (ret != -ENOMEM && ret != -EREMOTEIO)
 		return -EINVAL;
 	return ret;
 }
@@ -1253,7 +1302,7 @@ out:
 static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	dma_addr_t start, end;
+	dma_addr_t end = 0, start;
 	struct scatterlist *tmp;
 	int i;
 
@@ -1267,16 +1316,37 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 
 	/*
 	 * The scatterlist segments are mapped into a single
-	 * contiguous IOVA allocation, so this is incredibly easy.
+	 * contiguous IOVA allocation, the start and end points
+	 * just have to be determined.
 	 */
-	start = sg_dma_address(sg);
-	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+	for_each_sg(sg, tmp, nents, i) {
+		if (sg_is_dma_bus_address(tmp)) {
+			sg_dma_unmark_bus_address(tmp);
+			continue;
+		}
+
 		if (sg_dma_len(tmp) == 0)
 			break;
-		sg = tmp;
+
+		start = sg_dma_address(tmp);
+		break;
 	}
-	end = sg_dma_address(sg) + sg_dma_len(sg);
-	__iommu_dma_unmap(dev, start, end - start);
+
+	nents -= i;
+	for_each_sg(tmp, tmp, nents, i) {
+		if (sg_is_dma_bus_address(tmp)) {
+			sg_dma_unmark_bus_address(tmp);
+			continue;
+		}
+
+		if (sg_dma_len(tmp) == 0)
+			break;
+
+		end = sg_dma_address(tmp) + sg_dma_len(tmp);
+	}
+
+	if (end)
+		__iommu_dma_unmap(dev, start, end - start);
 }
 
 static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -1468,7 +1538,13 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
 	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
 }
 
+static size_t iommu_dma_opt_mapping_size(void)
+{
+	return iova_rcache_range();
+}
+
 static const struct dma_map_ops iommu_dma_ops = {
+	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED,
 	.alloc			= iommu_dma_alloc,
 	.free			= iommu_dma_free,
 	.alloc_pages		= dma_common_alloc_pages,
@@ -1488,6 +1564,7 @@ static const struct dma_map_ops iommu_dma_ops = {
 	.map_resource		= iommu_dma_map_resource,
 	.unmap_resource		= iommu_dma_unmap_resource,
 	.get_merge_boundary	= iommu_dma_get_merge_boundary,
+	.opt_mapping_size	= iommu_dma_opt_mapping_size,
 };
 
 /*
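With DMA_F_PCI_P2PDMA_SUPPORTED now set on iommu_dma_ops, a scatterlist passed to dma_map_sgtable() may contain PCI P2PDMA pages, and the mapping can fail with -EREMOTEIO when the peer-to-peer transfer is not routable for the device. A caller-side sketch, not taken from this series (map_request_sgt() and has_p2pdma_pages are hypothetical; dma_pci_p2pdma_supported() and dma_map_sgtable() are the real interfaces), mirroring the gating the nvme-pci commits in the list above add:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical caller: map an sg_table that may contain PCI P2PDMA
 * pages.  Support must be gated on dma_pci_p2pdma_supported(), and
 * -EREMOTEIO from dma_map_sgtable() means the peer-to-peer transfer
 * is not routable for this device and should not be retried as-is.
 */
static int map_request_sgt(struct device *dev, struct sg_table *sgt,
			   bool has_p2pdma_pages)
{
	int ret;

	if (has_p2pdma_pages && !dma_pci_p2pdma_supported(dev))
		return -EOPNOTSUPP;

	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret == -EREMOTEIO)
		dev_dbg(dev, "P2PDMA transfer rejected by the DMA layer\n");

	return ret;
}

Ordinary mapping failures still come back as -ENOMEM or -EINVAL, which is why the out: path in iommu_dma_map_sg() above lets only -EREMOTEIO through alongside them.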