 drivers/s390/cio/css.c    | 20 ++++++++++++++------
 drivers/s390/cio/device.c | 26 ++++++++++++++++++++------
 2 files changed, 34 insertions(+), 12 deletions(-)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 94c6470de635..253ab4e7a415 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -225,18 +225,23 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
 
 	INIT_WORK(&sch->todo_work, css_sch_todo);
 	sch->dev.release = &css_subchannel_release;
+	sch->dev.dma_mask = &sch->dma_mask;
 	device_initialize(&sch->dev);
 	/*
-	 * The physical addresses of some the dma structures that can
+	 * The physical addresses for some of the dma structures that can
 	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
 	 */
-	sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
+	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
+	if (ret)
+		goto err;
 	/*
 	 * But we don't have such restrictions imposed on the stuff that
 	 * is handled by the streaming API.
 	 */
-	sch->dma_mask = DMA_BIT_MASK(64);
-	sch->dev.dma_mask = &sch->dma_mask;
+	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
+	if (ret)
+		goto err;
+
 	return sch;
 
 err:
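For context on this hunk: the dma_set_*() helpers validate a requested mask against what the device and its bus can actually address, which a direct write to dev->coherent_dma_mask never did. That is also why the hunk wires sch->dev.dma_mask up before device_initialize(): dma_set_mask() refuses to operate on a device whose dma_mask pointer is NULL. A minimal sketch of the resulting pattern follows; example_setup_dma_masks() is a hypothetical name used only for illustration, not part of this patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper sketching the pattern the hunk above adopts. */
static int example_setup_dma_masks(struct device *dev, u64 *mask_storage)
{
	int ret;

	/* dma_set_mask() fails (-EIO) while dev->dma_mask is NULL. */
	dev->dma_mask = mask_storage;

	/* Coherent allocations (e.g. CCWs) must fit a 31-bit address. */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(31));
	if (ret)
		return ret;

	/* Streaming mappings carry no such restriction. */
	return dma_set_mask(dev, DMA_BIT_MASK(64));
}

The ordering matters: the dma_mask pointer must be valid before the first dma_set_mask() call, which mirrors the placement of the added assignment above.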
@@ -970,8 +975,11 @@ static int __init setup_css(int nr)
 	 * css->device as the device argument with the DMA API)
 	 * and are fine with 64 bit addresses.
 	 */
-	css->device.coherent_dma_mask = DMA_BIT_MASK(64);
-	css->device.dma_mask = &css->device.coherent_dma_mask;
+	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
+	if (ret) {
+		kfree(css);
+		goto out_err;
+	}
 
 	mutex_init(&css->mutex);
 	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
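In setup_css() a single 64-bit mask serves both the coherent and the streaming path, so the hunk above can use dma_coerce_mask_and_coherent(). For reference, in kernels of this vintage that helper is essentially the following (paraphrased from include/linux/dma-mapping.h): it points dma_mask at the coherent mask and then sets both through the validating API.

static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	/* Reuse coherent_dma_mask as storage for the streaming mask too. */
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

This is why the two removed lines collapse into one call: the helper performs the same pointer wiring the old code did by hand, plus the validation.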
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index df46373d67ec..3f026021e95e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -679,33 +679,47 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
 {
 	struct ccw_device *cdev;
 	struct gen_pool *dma_pool;
+	int ret;
 
 	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
-	if (!cdev)
+	if (!cdev) {
+		ret = -ENOMEM;
 		goto err_cdev;
+	}
 	cdev->private = kzalloc(sizeof(struct ccw_device_private),
 				GFP_KERNEL | GFP_DMA);
-	if (!cdev->private)
+	if (!cdev->private) {
+		ret = -ENOMEM;
 		goto err_priv;
-	cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
+	}
+
 	cdev->dev.dma_mask = sch->dev.dma_mask;
+	ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
+	if (ret)
+		goto err_coherent_mask;
+
 	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
-	if (!dma_pool)
+	if (!dma_pool) {
+		ret = -ENOMEM;
 		goto err_dma_pool;
+	}
 	cdev->private->dma_pool = dma_pool;
 	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
 					sizeof(*cdev->private->dma_area));
-	if (!cdev->private->dma_area)
+	if (!cdev->private->dma_area) {
+		ret = -ENOMEM;
 		goto err_dma_area;
+	}
 	return cdev;
 err_dma_area:
 	cio_gp_dma_destroy(dma_pool, &cdev->dev);
 err_dma_pool:
+err_coherent_mask:
 	kfree(cdev->private);
 err_priv:
 	kfree(cdev);
 err_cdev:
-	return ERR_PTR(-ENOMEM);
+	return ERR_PTR(ret);
 }
 
 static void ccw_device_todo(struct work_struct *work);
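Beyond the mask handling, this hunk converts io_subchannel_allocate_dev() to the kernel's ERR_PTR convention: the errno is carried in ret, cleanup unwinds in reverse order of acquisition, and the stacked err_dma_pool:/err_coherent_mask: labels share the same unwind steps. A stripped-down sketch of that convention, using hypothetical names (struct thing and thing_create() are illustrations, not kernel APIs):

#include <linux/err.h>
#include <linux/slab.h>

struct thing {
	void *buf;
};

/* Hypothetical allocator following the same goto-unwind convention. */
static struct thing *thing_create(void)
{
	struct thing *t;
	int ret;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_thing;
	}

	t->buf = kzalloc(64, GFP_KERNEL);
	if (!t->buf) {
		ret = -ENOMEM;
		goto err_buf;
	}

	return t;

err_buf:
	kfree(t);
err_thing:
	return ERR_PTR(ret);	/* callers test with IS_ERR()/PTR_ERR() */
}

Callers check IS_ERR() on the result instead of comparing against NULL, so the function can now report the -EIO from a failed dma_set_coherent_mask() just as easily as -ENOMEM.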