author     Linus Torvalds <torvalds@linux-foundation.org>   2022-12-13 09:05:19 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-12-13 09:05:19 -0800
commit     e529d3507a93d3c9528580081bbaf931a50de154 (patch)
tree       c363cf495fdbec199a8a9860d8e663389ae8cdd4 /drivers/infiniband/hw/hfi1
parent     6a24711d5c0bc8fb0fc49def433ab89ecbedf095 (diff)
parent     ffcb754584603adf7039d7972564fbf6febdc542 (diff)
Merge tag 'dma-mapping-6.2-2022-12-13' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- reduce the swiotlb buffer size on allocation failure (Alexey
  Kardashevskiy); see the retry sketch after the commit list below
- clean up passing of bogus GFP flags to the dma-coherent allocator
  (Christoph Hellwig); see the calling-convention sketch after the diff
* tag 'dma-mapping-6.2-2022-12-13' of git://git.infradead.org/users/hch/dma-mapping:
dma-mapping: reject __GFP_COMP in dma_alloc_attrs
ALSA: memalloc: don't pass bogus GFP_ flags to dma_alloc_*
s390/ism: don't pass bogus GFP_ flags to dma_alloc_coherent
cnic: don't pass bogus GFP_ flags to dma_alloc_coherent
RDMA/qib: don't pass bogus GFP_ flags to dma_alloc_coherent
RDMA/hfi1: don't pass bogus GFP_ flags to dma_alloc_coherent
media: videobuf-dma-contig: use dma_mmap_coherent
swiotlb: reduce the swiotlb buffer size on allocation failure
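
The swiotlb item above changes the bounce-buffer bootstrap to retry with a
smaller buffer when the initial allocation fails, rather than giving up. A
minimal sketch of that retry-with-halving shape, assuming a hypothetical
helper name and a 1 MiB floor (the real logic lives in kernel/dma/swiotlb.c
and differs in detail):

/*
 * Illustrative only: the retry-with-a-smaller-buffer pattern.  The helper
 * name and the 1 MiB floor are assumptions for this sketch, not kernel
 * symbols or the exact swiotlb logic.
 */
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sizes.h>

static void * __init swiotlb_example_bootstrap(unsigned long *bytes)
{
	unsigned long size = *bytes;
	void *tlb;

	/*
	 * Halve the request on failure instead of giving up outright,
	 * and stop once the buffer would be too small to be useful.
	 */
	while (!(tlb = memblock_alloc(size, PAGE_SIZE))) {
		if (size <= SZ_1M)
			return NULL;
		size /= 2;
	}

	*bytes = size;	/* report how much was actually reserved */
	return tlb;
}

The point is only the control flow: a smaller bounce buffer is preferred
over booting with none at all.
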
Diffstat (limited to 'drivers/infiniband/hw/hfi1')
-rw-r--r--   drivers/infiniband/hw/hfi1/init.c | 21
1 file changed, 3 insertions, 18 deletions
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 436372b31431..24c0f0d257fc 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1761,17 +1761,11 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 	unsigned amt;
 
 	if (!rcd->rcvhdrq) {
-		gfp_t gfp_flags;
-
 		amt = rcvhdrq_size(rcd);
 
-		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
-			gfp_flags = GFP_KERNEL;
-		else
-			gfp_flags = GFP_USER;
 		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
 						  &rcd->rcvhdrq_dma,
-						  gfp_flags | __GFP_COMP);
+						  GFP_KERNEL);
 
 		if (!rcd->rcvhdrq) {
 			dd_dev_err(dd,
@@ -1785,7 +1779,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
 								    PAGE_SIZE,
 								    &rcd->rcvhdrqtailaddr_dma,
-								    gfp_flags);
+								    GFP_KERNEL);
 			if (!rcd->rcvhdrtail_kvaddr)
 				goto bail_free;
 		}
@@ -1821,20 +1815,11 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
 {
 	struct hfi1_devdata *dd = rcd->dd;
 	u32 max_entries, egrtop, alloced_bytes = 0;
-	gfp_t gfp_flags;
 	u16 order, idx = 0;
 	int ret = 0;
 	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
 
 	/*
-	 * GFP_USER, but without GFP_FS, so buffer cache can be
-	 * coalesced (we hope); otherwise, even at order 4,
-	 * heavy filesystem activity makes these fail, and we can
-	 * use compound pages.
-	 */
-	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
-
-	/*
 	 * The minimum size of the eager buffers is a groups of MTU-sized
 	 * buffers.
 	 * The global eager_buffer_size parameter is checked against the
@@ -1864,7 +1849,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
 			dma_alloc_coherent(&dd->pcidev->dev,
 					   rcd->egrbufs.rcvtid_size,
 					   &rcd->egrbufs.buffers[idx].dma,
-					   gfp_flags);
+					   GFP_KERNEL);
 		if (rcd->egrbufs.buffers[idx].addr) {
 			rcd->egrbufs.buffers[idx].len =
 				rcd->egrbufs.rcvtid_size;
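
The hfi1 hunks above are one instance of the series-wide GFP cleanup: the
coherent DMA allocator picks placement from the device's DMA mask, not from
the caller's gfp flags, and dma_alloc_attrs() now rejects __GFP_COMP (per
the first commit in the list above), so callers pass plain GFP_KERNEL, or
GFP_ATOMIC where sleeping is not allowed. A minimal sketch of that calling
convention, using a hypothetical helper rather than actual hfi1 code:

/*
 * Illustrative only: example_alloc_ring() is a made-up helper showing the
 * calling convention the cleanup converges on; it is not hfi1 code.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma_handle)
{
	/*
	 * Plain GFP_KERNEL: no GFP_USER, no zone modifiers, and no
	 * __GFP_COMP, which dma_alloc_attrs() now rejects.
	 */
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

Zone selection, contiguity and any bounce buffering are the DMA layer's
job, which is what makes the driver-side gfp juggling removed above
unnecessary.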