| author | Christoph Hellwig <hch@lst.de> | 2022-11-09 08:17:46 +0100 | 
|---|---|---|
| committer | Christoph Hellwig <hch@lst.de> | 2022-11-21 09:36:05 +0100 | 
| commit | 2fce26a15f1709090ca70f4c7da017424b3b78b3 (patch) | |
| tree | b1e061e8891144e46341d3ea9a6d4652ee9a973c /drivers/infiniband/hw/qib/qib_init.c | |
| parent | 82c310c33ace7d25c0475e49a6051727c48a8cc6 (diff) | |
RDMA/qib: don't pass bogus GFP_ flags to dma_alloc_coherent
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags
for allocation context control.  Don't pass GFP_USER, which doesn't make
sense for a kernel DMA allocation, or __GFP_COMP, which makes no sense
for an allocation that can't in any way be converted to a page pointer.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jason Gunthorpe <jgg@nvidia.com>
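
For reference, the call pattern the patch converges on is sketched below. This is a minimal illustrative fragment, not code from the qib driver; the helper name and its parameters are made up for the example. Because dma_alloc_coherent() is opaque, the GFP_ mask only selects the allocation context (whether the call may sleep, perform I/O, and so on), so plain GFP_KERNEL is the right choice in process context.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Hypothetical helper, not from the qib driver: allocate a
 * DMA-coherent buffer, returning its kernel virtual address and
 * filling in the bus address the device should use.
 */
static void *example_alloc_dma_buf(struct device *dev, size_t size,
				   dma_addr_t *dma_handle)
{
	/*
	 * Plain GFP_KERNEL: dma_alloc_coherent() only honours the
	 * allocation-context bits of the GFP_ mask.  Page-shaping
	 * flags such as __GFP_COMP, or userspace-oriented ones such
	 * as GFP_USER, buy nothing for an opaque DMA allocation.
	 */
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}
```

The buffer is later released with the matching dma_free_coherent(dev, size, cpu_addr, dma_handle) once the device is done with it.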
Diffstat (limited to 'drivers/infiniband/hw/qib/qib_init.c')
| -rw-r--r-- | drivers/infiniband/hw/qib/qib_init.c | 21 |
1 file changed, 4 insertions(+), 17 deletions(-)
```diff
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 45211008449f..33667becd52b 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1546,18 +1546,14 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
 
 	if (!rcd->rcvhdrq) {
 		dma_addr_t phys_hdrqtail;
-		gfp_t gfp_flags;
 
 		amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
 			    sizeof(u32), PAGE_SIZE);
-		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
-			GFP_USER : GFP_KERNEL;
 
 		old_node_id = dev_to_node(&dd->pcidev->dev);
 		set_dev_node(&dd->pcidev->dev, rcd->node_id);
-		rcd->rcvhdrq = dma_alloc_coherent(
-			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
-			gfp_flags | __GFP_COMP);
+		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
+				&rcd->rcvhdrq_phys, GFP_KERNEL);
 		set_dev_node(&dd->pcidev->dev, old_node_id);
 
 		if (!rcd->rcvhdrq) {
@@ -1577,7 +1573,7 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
 			set_dev_node(&dd->pcidev->dev, rcd->node_id);
 			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
 				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
-				gfp_flags);
+				GFP_KERNEL);
 			set_dev_node(&dd->pcidev->dev, old_node_id);
 			if (!rcd->rcvhdrtail_kvaddr)
 				goto bail_free;
@@ -1621,17 +1617,8 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
 	struct qib_devdata *dd = rcd->dd;
 	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
 	size_t size;
-	gfp_t gfp_flags;
 	int old_node_id;
 
-	/*
-	 * GFP_USER, but without GFP_FS, so buffer cache can be
-	 * coalesced (we hope); otherwise, even at order 4,
-	 * heavy filesystem activity makes these fail, and we can
-	 * use compound pages.
-	 */
-	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
-
 	egrcnt = rcd->rcvegrcnt;
 	egroff = rcd->rcvegr_tid_base;
 	egrsize = dd->rcvegrbufsize;
@@ -1663,7 +1650,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
 		rcd->rcvegrbuf[e] =
 			dma_alloc_coherent(&dd->pcidev->dev, size,
 					   &rcd->rcvegrbuf_phys[e],
-					   gfp_flags);
+					   GFP_KERNEL);
 		set_dev_node(&dd->pcidev->dev, old_node_id);
 		if (!rcd->rcvegrbuf[e])
 			goto bail_rcvegrbuf_phys;
```
