From 04867a7a33324c9c562ee7949dbcaab7aaad1fb4 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 8 Mar 2024 15:28:24 +0000
Subject: swiotlb: Fix double-allocation of slots due to broken alignment handling

Commit bbb73a103fbb ("swiotlb: fix a braino in the alignment check fix"),
which was a fix for commit 0eee5ae10256 ("swiotlb: fix slot alignment
checks"), causes a functional regression with vsock in a virtual machine
using bouncing via a restricted DMA SWIOTLB pool.

When virtio allocates the virtqueues for the vsock device using
dma_alloc_coherent(), the SWIOTLB search can return page-unaligned
allocations if 'area->index' was left unaligned by a previous allocation
from the buffer:

# Final address in brackets is the SWIOTLB address returned to the caller
| virtio-pci 0000:00:07.0: orig_addr 0x0 alloc_size 0x2000, iotlb_align_mask 0x800 stride 0x2: got slot 1645-1649/7168 (0x98326800)
| virtio-pci 0000:00:07.0: orig_addr 0x0 alloc_size 0x2000, iotlb_align_mask 0x800 stride 0x2: got slot 1649-1653/7168 (0x98328800)
| virtio-pci 0000:00:07.0: orig_addr 0x0 alloc_size 0x2000, iotlb_align_mask 0x800 stride 0x2: got slot 1653-1657/7168 (0x9832a800)

This ends badly (typically buffer corruption and/or a hang) because
swiotlb_alloc() is expecting a page-aligned allocation and so blindly
returns a pointer to the 'struct page' corresponding to the allocation,
therefore double-allocating the first half (2KiB slot) of the 4KiB page.

Fix the problem by treating the allocation alignment separately from any
additional alignment requirements from the device, using the maximum of
the two as the stride to search the buffer slots and taking care to
ensure a minimum of page-alignment for buffers larger than a page.

This also resolves swiotlb allocation failures occurring due to the
inclusion of ~PAGE_MASK in 'iotlb_align_mask' for large allocations and
resulting in alignment requirements exceeding swiotlb_max_mapping_size().

Fixes: bbb73a103fbb ("swiotlb: fix a braino in the alignment check fix")
Fixes: 0eee5ae10256 ("swiotlb: fix slot alignment checks")
Signed-off-by: Will Deacon
Reviewed-by: Michael Kelley
Reviewed-by: Petr Tesarik
Tested-by: Nicolin Chen
Tested-by: Michael Kelley
Signed-off-by: Christoph Hellwig
---
 kernel/dma/swiotlb.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)

(limited to 'kernel/dma')

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 77974cea3e69..980a7ec70418 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1004,7 +1004,7 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 		phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
 	unsigned long max_slots = get_max_slots(boundary_mask);
 	unsigned int iotlb_align_mask =
-		dma_get_min_align_mask(dev) | alloc_align_mask;
+		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
 	unsigned int nslots = nr_slots(alloc_size), stride;
 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
 	unsigned int index, slots_checked, count = 0, i;
@@ -1016,18 +1016,17 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	BUG_ON(area_index >= pool->nareas);
 
 	/*
-	 * For allocations of PAGE_SIZE or larger only look for page aligned
-	 * allocations.
+	 * For mappings with an alignment requirement don't bother looping to
+	 * unaligned slots once we found an aligned one.
 	 */
-	if (alloc_size >= PAGE_SIZE)
-		iotlb_align_mask |= ~PAGE_MASK;
-	iotlb_align_mask &= ~(IO_TLB_SIZE - 1);
+	stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
 
 	/*
-	 * For mappings with an alignment requirement don't bother looping to
-	 * unaligned slots once we found an aligned one.
+	 * For allocations of PAGE_SIZE or larger only look for page aligned
+	 * allocations.
 	 */
-	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
+	if (alloc_size >= PAGE_SIZE)
+		stride = umax(stride, PAGE_SHIFT - IO_TLB_SHIFT + 1);
 
 	spin_lock_irqsave(&area->lock, flags);
 	if (unlikely(nslots > pool->area_nslabs - area->used))
@@ -1037,11 +1036,14 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	index = area->index;
 
 	for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
+		phys_addr_t tlb_addr;
+
 		slot_index = slot_base + index;
+		tlb_addr = slot_addr(tbl_dma_addr, slot_index);
 
-		if (orig_addr &&
-		    (slot_addr(tbl_dma_addr, slot_index) &
-		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
+		if ((tlb_addr & alloc_align_mask) ||
+		    (orig_addr && (tlb_addr & iotlb_align_mask) !=
+				  (orig_addr & iotlb_align_mask))) {
 			index = wrap_area_index(pool, index + 1);
 			slots_checked++;
 			continue;
-- cgit v1.2.3-70-g09d2
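
The effect of the reworked stride calculation is easiest to see with concrete
numbers. The following stand-alone C sketch is an illustration only, not
kernel code: it assumes the usual 2KiB swiotlb slot size and 4KiB pages, and
re-implements nr_slots(), get_max_slots() and umax() locally so that
stride_for() can mirror the two steps introduced by the patch above.

/*
 * Illustration only, not kernel code: a user-space sketch of the fixed
 * stride calculation, assuming 2KiB swiotlb slots and 4KiB pages.  The
 * helpers below stand in for the kernel's nr_slots(), get_max_slots()
 * and umax() for the purposes of the demo.
 */
#include <stdio.h>

#define IO_TLB_SHIFT	11			/* 2KiB slots */
#define IO_TLB_SIZE	(1UL << IO_TLB_SHIFT)
#define PAGE_SHIFT	12			/* 4KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static unsigned long nr_slots(unsigned long val)
{
	return (val + IO_TLB_SIZE - 1) >> IO_TLB_SHIFT;	/* round up to slots */
}

static unsigned long get_max_slots(unsigned long mask)
{
	return nr_slots(mask + 1);
}

static unsigned long umax(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

static unsigned long stride_for(unsigned long alloc_size,
				unsigned long alloc_align_mask,
				unsigned long iotlb_align_mask)
{
	/* Allocation alignment and device alignment are treated separately. */
	unsigned long stride = get_max_slots(umax(alloc_align_mask,
						  iotlb_align_mask));

	/* Allocations of a page or more are searched in page-sized steps. */
	if (alloc_size >= PAGE_SIZE)
		stride = umax(stride, PAGE_SHIFT - IO_TLB_SHIFT + 1);
	return stride;
}

int main(void)
{
	/* 8KiB virtqueue-style allocation with no extra device alignment. */
	printf("stride = %lu slots\n", stride_for(0x2000, 0, 0));
	/* The same allocation if it had to sit on a 64KiB boundary. */
	printf("stride = %lu slots\n", stride_for(0x2000, 0xffff, 0));
	return 0;
}

An 8KiB allocation with no device constraint yields a stride of 2 slots, i.e.
page-sized steps, while an assumed 64KiB allocation alignment widens it to 32
slots; the explicit 'tlb_addr & alloc_align_mask' test added to the search
loop then rejects any candidate that still lands off the required boundary.
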
From 823353b7cf0ea9dfb09f5181d5fb2825d727200b Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 8 Mar 2024 15:28:25 +0000
Subject: swiotlb: Enforce page alignment in swiotlb_alloc()

When allocating pages from a restricted DMA pool in swiotlb_alloc(),
the buffer address is blindly converted to a 'struct page *' that is
returned to the caller. In the unlikely event of an allocation bug,
page-unaligned addresses are not detected and slots can silently be
double-allocated.

Add a simple check of the buffer alignment in swiotlb_alloc() to make
debugging a little easier if something has gone wonky.

Signed-off-by: Will Deacon
Reviewed-by: Michael Kelley
Reviewed-by: Petr Tesarik
Tested-by: Nicolin Chen
Tested-by: Michael Kelley
Signed-off-by: Christoph Hellwig
---
 kernel/dma/swiotlb.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'kernel/dma')

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 980a7ec70418..88114433f1e6 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1689,6 +1689,12 @@ struct page *swiotlb_alloc(struct device *dev, size_t size)
 		return NULL;
 
 	tlb_addr = slot_addr(pool->start, index);
+	if (unlikely(!PAGE_ALIGNED(tlb_addr))) {
+		dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr 0x%pa.\n",
+			      &tlb_addr);
+		swiotlb_release_slots(dev, tlb_addr);
+		return NULL;
+	}
 
 	return pfn_to_page(PFN_DOWN(tlb_addr));
 }
-- cgit v1.2.3-70-g09d2
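
The new check is a plain page-mask test. The short sketch below is a
user-space illustration only; it assumes 4KiB pages and that PAGE_ALIGNED()
reduces to a simple mask comparison, and shows how the check would have
tripped on the unaligned slot address from the vsock log in the first patch.

/*
 * Illustration only: assumes 4KiB pages and that PAGE_ALIGNED() reduces
 * to a simple mask comparison.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_ALIGNED(addr)	(((addr) & (PAGE_SIZE - 1)) == 0)

int main(void)
{
	/* Slot address seen in the vsock regression from the previous patch. */
	unsigned long tlb_addr = 0x98326800;

	if (!PAGE_ALIGNED(tlb_addr)) {
		/* swiotlb_alloc() now warns, releases the slots and fails... */
		printf("%#lx is not page-aligned, failing the allocation\n",
		       tlb_addr);
		return 1;
	}
	/* ...instead of blindly returning the enclosing struct page. */
	return 0;
}
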
From cbf53074a528191df82b4dba1e3d21191102255e Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 8 Mar 2024 15:28:26 +0000
Subject: swiotlb: Honour dma_alloc_coherent() alignment in swiotlb_alloc()

core-api/dma-api-howto.rst states the following properties of
dma_alloc_coherent():

| The CPU virtual address and the DMA address are both guaranteed to
| be aligned to the smallest PAGE_SIZE order which is greater than or
| equal to the requested size.

However, swiotlb_alloc() passes zero for the 'alloc_align_mask'
parameter of swiotlb_find_slots() and so this property is not upheld.
Instead, allocations larger than a page are aligned only to PAGE_SIZE.

Calculate the mask corresponding to the page order suitable for holding
the allocation and pass that to swiotlb_find_slots().

Fixes: e81e99bacc9f ("swiotlb: Support aligned swiotlb buffers")
Signed-off-by: Will Deacon
Reviewed-by: Michael Kelley
Reviewed-by: Petr Tesarik
Tested-by: Nicolin Chen
Tested-by: Michael Kelley
Signed-off-by: Christoph Hellwig
---
 kernel/dma/swiotlb.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'kernel/dma')

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 88114433f1e6..a3645a9ae68e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1679,12 +1679,14 @@ struct page *swiotlb_alloc(struct device *dev, size_t size)
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 	struct io_tlb_pool *pool;
 	phys_addr_t tlb_addr;
+	unsigned int align;
 	int index;
 
 	if (!mem)
 		return NULL;
 
-	index = swiotlb_find_slots(dev, 0, size, 0, &pool);
+	align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
+	index = swiotlb_find_slots(dev, 0, size, align, &pool);
 	if (index == -1)
 		return NULL;
 
-- cgit v1.2.3-70-g09d2

From 51b30ecb73b481d5fac6ccf2ecb4a309c9ee3310 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 8 Mar 2024 15:28:27 +0000
Subject: swiotlb: Fix alignment checks when both allocation and DMA masks are present

Nicolin reports that swiotlb buffer allocations fail for an NVME device
behind an IOMMU using 64KiB pages. This is because we end up with a
minimum allocation alignment of 64KiB (for the IOMMU to map the buffer
safely) but a minimum DMA alignment mask corresponding to a 4KiB NVME
page (i.e. preserving the 4KiB page offset from the original allocation).
If the original address is not 4KiB-aligned, the allocation will fail
because swiotlb_search_pool_area() erroneously compares these unmasked
bits with the 64KiB-aligned candidate allocation.

Tweak swiotlb_search_pool_area() so that the DMA alignment mask is
reduced based on the required alignment of the allocation.

Fixes: 82612d66d51d ("iommu: Allow the dma-iommu api to use bounce buffers")
Link: https://lore.kernel.org/r/cover.1707851466.git.nicolinc@nvidia.com
Reported-by: Nicolin Chen
Signed-off-by: Will Deacon
Reviewed-by: Michael Kelley
Tested-by: Nicolin Chen
Tested-by: Michael Kelley
Signed-off-by: Christoph Hellwig
---
 kernel/dma/swiotlb.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'kernel/dma')

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index a3645a9ae68e..f212943e51ca 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1003,8 +1003,7 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	dma_addr_t tbl_dma_addr =
 		phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
 	unsigned long max_slots = get_max_slots(boundary_mask);
-	unsigned int iotlb_align_mask =
-		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
+	unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
 	unsigned int nslots = nr_slots(alloc_size), stride;
 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
 	unsigned int index, slots_checked, count = 0, i;
@@ -1015,6 +1014,14 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	BUG_ON(!nslots);
 	BUG_ON(area_index >= pool->nareas);
 
+	/*
+	 * Ensure that the allocation is at least slot-aligned and update
+	 * 'iotlb_align_mask' to ignore bits that will be preserved when
+	 * offsetting into the allocation.
+	 */
+	alloc_align_mask |= (IO_TLB_SIZE - 1);
+	iotlb_align_mask &= ~alloc_align_mask;
+
 	/*
 	 * For mappings with an alignment requirement don't bother looping to
 	 * unaligned slots once we found an aligned one.
-- cgit v1.2.3-70-g09d2
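
Two of the steps above are worth illustrating with numbers: the allocation
mask that swiotlb_alloc() now derives from the size, and the trimming of the
DMA alignment mask for the reported NVME-behind-a-64KiB-IOMMU case. The
sketch below is a stand-alone illustration with assumed constants (4KiB
pages, 2KiB slots) and a locally re-implemented get_order(); it is not code
from the kernel tree.

/*
 * Illustration only, with assumed constants (4KiB pages, 2KiB slots) and a
 * locally re-implemented get_order(); not code from the kernel tree.
 */
#include <stdio.h>

#define IO_TLB_SIZE	2048UL
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Order of the smallest power-of-two number of pages that covers 'size'. */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* swiotlb_alloc(): derive the alignment mask from the size. */
	unsigned long size = 0x3000;	/* 12KiB coherent allocation */
	unsigned long align = (1UL << (get_order(size) + PAGE_SHIFT)) - 1;

	printf("size %#lx -> alloc_align_mask %#lx\n", size, align);

	/* NVME-style 4KiB min_align_mask behind a 64KiB IOMMU granule. */
	unsigned long alloc_align_mask = 0xffff;
	unsigned long iotlb_align_mask = 0xfff;

	alloc_align_mask |= (IO_TLB_SIZE - 1);
	iotlb_align_mask &= ~alloc_align_mask;
	printf("trimmed iotlb_align_mask = %#lx\n", iotlb_align_mask);
	return 0;
}

A 12KiB dma_alloc_coherent() request maps to order 2 and therefore a 16KiB
alignment mask, honouring the documented guarantee, while the 64KiB
allocation alignment clears every bit of the 4KiB device mask, so the
orig_addr offset comparison can no longer reject every candidate slot.
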
From 14cebf689a78e8a1c041138af221ef6eac6bc7da Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 8 Mar 2024 15:28:29 +0000
Subject: swiotlb: Reinstate page-alignment for mappings >= PAGE_SIZE

For swiotlb allocations >= PAGE_SIZE, the slab search historically
adjusted the stride to avoid checking unaligned slots. This had the
side-effect of aligning large mapping requests to PAGE_SIZE, but that
was broken by 0eee5ae10256 ("swiotlb: fix slot alignment checks").

Since this alignment could be relied upon by drivers, reinstate
PAGE_SIZE alignment for swiotlb mappings >= PAGE_SIZE.

Reported-by: Michael Kelley
Signed-off-by: Will Deacon
Reviewed-by: Robin Murphy
Reviewed-by: Petr Tesarik
Tested-by: Nicolin Chen
Tested-by: Michael Kelley
Signed-off-by: Christoph Hellwig
---
 kernel/dma/swiotlb.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

(limited to 'kernel/dma')

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index f212943e51ca..86fe172b5958 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1014,6 +1014,17 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	BUG_ON(!nslots);
 	BUG_ON(area_index >= pool->nareas);
 
+	/*
+	 * Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be
+	 * page-aligned in the absence of any other alignment requirements.
+	 * 'alloc_align_mask' was later introduced to specify the alignment
+	 * explicitly, however this is passed as zero for streaming mappings
+	 * and so we preserve the old behaviour there in case any drivers are
+	 * relying on it.
+	 */
+	if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
+		alloc_align_mask = PAGE_SIZE - 1;
+
 	/*
 	 * Ensure that the allocation is at least slot-aligned and update
 	 * 'iotlb_align_mask' to ignore bits that will be preserved when
@@ -1028,13 +1039,6 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	 */
 	stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
 
-	/*
-	 * For allocations of PAGE_SIZE or larger only look for page aligned
-	 * allocations.
-	 */
-	if (alloc_size >= PAGE_SIZE)
-		stride = umax(stride, PAGE_SHIFT - IO_TLB_SHIFT + 1);
-
 	spin_lock_irqsave(&area->lock, flags);
 	if (unlikely(nslots > pool->area_nslabs - area->used))
 		goto not_found;
-- cgit v1.2.3-70-g09d2
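
Taken together, the series leaves swiotlb_search_pool_area() with the
alignment handling sketched below. This is a stand-alone user-space recap
under assumed constants (2KiB slots, 4KiB pages), not a copy of the kernel
function; search_stride() condenses the fallback for streaming mappings, the
slot-alignment of the allocation mask, the trimming of the DMA mask and the
final stride.

/*
 * Illustration only: a user-space recap of the alignment handling as it
 * stands after this series, under assumed constants (2KiB slots, 4KiB
 * pages).  It condenses the logic above; it is not the kernel function.
 */
#include <stdio.h>

#define IO_TLB_SHIFT	11
#define IO_TLB_SIZE	(1UL << IO_TLB_SHIFT)
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static unsigned long get_max_slots(unsigned long mask)
{
	return ((mask + 1) + IO_TLB_SIZE - 1) >> IO_TLB_SHIFT;
}

static unsigned long search_stride(unsigned long alloc_size,
				   unsigned long alloc_align_mask,
				   unsigned long iotlb_align_mask)
{
	/* Streaming mappings >= PAGE_SIZE keep their historical alignment. */
	if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
		alloc_align_mask = PAGE_SIZE - 1;

	/* Slot-align the allocation and trim the DMA mask accordingly. */
	alloc_align_mask |= (IO_TLB_SIZE - 1);
	iotlb_align_mask &= ~alloc_align_mask;

	return get_max_slots(alloc_align_mask > iotlb_align_mask ?
			     alloc_align_mask : iotlb_align_mask);
}

int main(void)
{
	/* 8KiB streaming mapping, no masks: page-sized steps. */
	printf("stride = %lu slots\n", search_stride(0x2000, 0, 0));
	/* 8KiB coherent allocation aligned to its own size: 8KiB steps. */
	printf("stride = %lu slots\n", search_stride(0x2000, 0x1fff, 0));
	/* 4KiB NVME mask behind a 64KiB IOMMU granule: 64KiB steps. */
	printf("stride = %lu slots\n", search_stride(0x2000, 0xffff, 0xfff));
	return 0;
}

The three calls print strides of 2, 4 and 32 slots respectively: page-sized
steps for a plain 8KiB streaming mapping, 8KiB steps for an 8KiB coherent
allocation, and 64KiB steps for the IOMMU-constrained case.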