Diffstat (limited to 'drivers/s390')
33 files changed, 693 insertions, 1369 deletions
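Several hunks below (most visibly in dcssblk_submit_bio()) replace open-coded alignment masks such as (bi_size & 4095) != 0 with IS_ALIGNED(x, PAGE_SIZE). The two spellings are equivalent for power-of-two alignments. The short standalone sketch below is not part of the patch; it redefines IS_ALIGNED the way the kernel defines it and assumes a 4 KiB PAGE_SIZE purely for illustration.

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
/* Same definition the kernel uses for IS_ALIGNED() (power-of-two 'a' assumed). */
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)

int main(void)
{
	unsigned long sizes[] = { 0, 512, 4095, 4096, 8192, 12289 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long sz = sizes[i];
		int old_check = (sz & 4095) != 0;           /* open-coded "not page aligned" */
		int new_check = !IS_ALIGNED(sz, PAGE_SIZE); /* readable equivalent */

		assert(old_check == new_check);
		printf("%6lu -> %s\n", sz, new_check ? "reject" : "ok");
	}
	return 0;
}

The same pattern appears in the bi_sector check of that hunk, where the eight-sector (one page of 512-byte sectors) requirement is written as IS_ALIGNED(bi_sector, 8) instead of (bi_sector & 7) != 0.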
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index edcbf77852c3..215597f73be4 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2943,41 +2943,32 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) * Requeue a request back to the block request queue * only works for block requests */ -static int _dasd_requeue_request(struct dasd_ccw_req *cqr) +static void _dasd_requeue_request(struct dasd_ccw_req *cqr) { - struct dasd_block *block = cqr->block; struct request *req; - if (!block) - return -EINVAL; /* * If the request is an ERP request there is nothing to requeue. * This will be done with the remaining original request. */ if (cqr->refers) - return 0; + return; spin_lock_irq(&cqr->dq->lock); req = (struct request *) cqr->callback_data; blk_mq_requeue_request(req, true); spin_unlock_irq(&cqr->dq->lock); - return 0; + return; } -/* - * Go through all request on the dasd_block request queue, cancel them - * on the respective dasd_device, and return them to the generic - * block layer. - */ -static int dasd_flush_block_queue(struct dasd_block *block) +static int _dasd_requests_to_flushqueue(struct dasd_block *block, + struct list_head *flush_queue) { struct dasd_ccw_req *cqr, *n; - int rc, i; - struct list_head flush_queue; unsigned long flags; + int rc, i; - INIT_LIST_HEAD(&flush_queue); - spin_lock_bh(&block->queue_lock); + spin_lock_irqsave(&block->queue_lock, flags); rc = 0; restart: list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { @@ -2992,13 +2983,32 @@ restart: * is returned from the dasd_device layer. */ cqr->callback = _dasd_wake_block_flush_cb; - for (i = 0; cqr != NULL; cqr = cqr->refers, i++) - list_move_tail(&cqr->blocklist, &flush_queue); + for (i = 0; cqr; cqr = cqr->refers, i++) + list_move_tail(&cqr->blocklist, flush_queue); if (i > 1) /* moved more than one request - need to restart */ goto restart; } - spin_unlock_bh(&block->queue_lock); + spin_unlock_irqrestore(&block->queue_lock, flags); + + return rc; +} + +/* + * Go through all request on the dasd_block request queue, cancel them + * on the respective dasd_device, and return them to the generic + * block layer. 
+ */ +static int dasd_flush_block_queue(struct dasd_block *block) +{ + struct dasd_ccw_req *cqr, *n; + struct list_head flush_queue; + unsigned long flags; + int rc; + + INIT_LIST_HEAD(&flush_queue); + rc = _dasd_requests_to_flushqueue(block, &flush_queue); + /* Now call the callback function of flushed requests */ restart_cb: list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { @@ -3626,11 +3636,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev) * so sync bdev first and then wait for our queues to become * empty */ - if (device->block) { - rc = fsync_bdev(device->block->bdev); - if (rc != 0) - goto interrupted; - } + if (device->block) + bdev_mark_dead(device->block->bdev, false); dasd_schedule_device_bh(device); rc = wait_event_interruptible(shutdown_waitq, _wait_for_empty_queues(device)); @@ -3881,75 +3888,36 @@ EXPORT_SYMBOL_GPL(dasd_generic_space_avail); */ int dasd_generic_requeue_all_requests(struct dasd_device *device) { + struct dasd_block *block = device->block; struct list_head requeue_queue; struct dasd_ccw_req *cqr, *n; - struct dasd_ccw_req *refers; int rc; - INIT_LIST_HEAD(&requeue_queue); - spin_lock_irq(get_ccwdev_lock(device->cdev)); - rc = 0; - list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { - /* Check status and move request to flush_queue */ - if (cqr->status == DASD_CQR_IN_IO) { - rc = device->discipline->term_IO(cqr); - if (rc) { - /* unable to terminate requeust */ - dev_err(&device->cdev->dev, - "Unable to terminate request %p " - "on suspend\n", cqr); - spin_unlock_irq(get_ccwdev_lock(device->cdev)); - dasd_put_device(device); - return rc; - } - } - list_move_tail(&cqr->devlist, &requeue_queue); - } - spin_unlock_irq(get_ccwdev_lock(device->cdev)); - - list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) { - wait_event(dasd_flush_wq, - (cqr->status != DASD_CQR_CLEAR_PENDING)); + if (!block) + return 0; - /* - * requeue requests to blocklayer will only work - * for block device requests - */ - if (_dasd_requeue_request(cqr)) - continue; + INIT_LIST_HEAD(&requeue_queue); + rc = _dasd_requests_to_flushqueue(block, &requeue_queue); - /* remove requests from device and block queue */ - list_del_init(&cqr->devlist); - while (cqr->refers != NULL) { - refers = cqr->refers; - /* remove the request from the block queue */ - list_del(&cqr->blocklist); - /* free the finished erp request */ - dasd_free_erp_request(cqr, cqr->memdev); - cqr = refers; + /* Now call the callback function of flushed requests */ +restart_cb: + list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) { + wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); + /* Process finished ERP request. 
*/ + if (cqr->refers) { + spin_lock_bh(&block->queue_lock); + __dasd_process_erp(block->base, cqr); + spin_unlock_bh(&block->queue_lock); + /* restart list_for_xx loop since dasd_process_erp + * might remove multiple elements + */ + goto restart_cb; } - - /* - * _dasd_requeue_request already checked for a valid - * blockdevice, no need to check again - * all erp requests (cqr->refers) have a cqr->block - * pointer copy from the original cqr - */ + _dasd_requeue_request(cqr); list_del_init(&cqr->blocklist); cqr->block->base->discipline->free_cp( cqr, (struct request *) cqr->callback_data); } - - /* - * if requests remain then they are internal request - * and go back to the device queue - */ - if (!list_empty(&requeue_queue)) { - /* move freeze_queue to start of the ccw_queue */ - spin_lock_irq(get_ccwdev_lock(device->cdev)); - list_splice_tail(&requeue_queue, &device->ccw_queue); - spin_unlock_irq(get_ccwdev_lock(device->cdev)); - } dasd_schedule_device_bh(device); return rc; } diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 9fd36c468706..89957bb7244d 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -1050,7 +1050,7 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense) dev_err(&device->cdev->dev, "An I/O request was rejected" " because writing is inhibited\n"); erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); - } else if (sense[7] & SNS7_INVALID_ON_SEC) { + } else if (sense[7] == SNS7_INVALID_ON_SEC) { dev_err(&device->cdev->dev, "An I/O request was rejected on a copy pair secondary device\n"); /* suppress dump of sense data for this error */ set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags); @@ -2441,7 +2441,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) erp->block = cqr->block; erp->magic = cqr->magic; erp->expires = cqr->expires; - erp->retries = 256; + erp->retries = device->default_retries; erp->buildclk = get_tod_clock(); erp->status = DASD_CQR_FILLED; diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 513a7e6eee63..d55862605b82 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -131,6 +131,7 @@ static int dasd_ioctl_resume(struct dasd_block *block) spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); dasd_schedule_block_bh(block); + dasd_schedule_device_bh(base); return 0; } diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 09acf3853a77..06bcb6c78909 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -412,6 +412,7 @@ removeseg: } list_del(&dev_info->lh); + dax_remove_host(dev_info->gd); kill_dax(dev_info->dax_dev); put_dax(dev_info->dax_dev); del_gendisk(dev_info->gd); @@ -707,9 +708,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char goto out; out_dax_host: + put_device(&dev_info->dev); dax_remove_host(dev_info->gd); out_dax: - put_device(&dev_info->dev); kill_dax(dev_info->dax_dev); put_dax(dev_info->dax_dev); put_dev: @@ -789,6 +790,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch } list_del(&dev_info->lh); + dax_remove_host(dev_info->gd); kill_dax(dev_info->dax_dev); put_dax(dev_info->dax_dev); del_gendisk(dev_info->gd); @@ -860,7 +862,7 @@ dcssblk_submit_bio(struct bio *bio) struct bio_vec bvec; struct bvec_iter iter; unsigned long index; - unsigned long page_addr; + void *page_addr; unsigned long source_addr; unsigned long bytes_done; @@ -868,8 
+870,8 @@ dcssblk_submit_bio(struct bio *bio) dev_info = bio->bi_bdev->bd_disk->private_data; if (dev_info == NULL) goto fail; - if ((bio->bi_iter.bi_sector & 7) != 0 || - (bio->bi_iter.bi_size & 4095) != 0) + if (!IS_ALIGNED(bio->bi_iter.bi_sector, 8) || + !IS_ALIGNED(bio->bi_iter.bi_size, PAGE_SIZE)) /* Request is not page-aligned. */ goto fail; /* verify data transfer direction */ @@ -889,18 +891,16 @@ dcssblk_submit_bio(struct bio *bio) index = (bio->bi_iter.bi_sector >> 3); bio_for_each_segment(bvec, bio, iter) { - page_addr = (unsigned long)bvec_virt(&bvec); + page_addr = bvec_virt(&bvec); source_addr = dev_info->start + (index<<12) + bytes_done; - if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0) + if (unlikely(!IS_ALIGNED((unsigned long)page_addr, PAGE_SIZE) || + !IS_ALIGNED(bvec.bv_len, PAGE_SIZE))) // More paranoia. goto fail; - if (bio_data_dir(bio) == READ) { - memcpy((void*)page_addr, (void*)source_addr, - bvec.bv_len); - } else { - memcpy((void*)source_addr, (void*)page_addr, - bvec.bv_len); - } + if (bio_data_dir(bio) == READ) + memcpy(page_addr, __va(source_addr), bvec.bv_len); + else + memcpy(__va(source_addr), page_addr, bvec.bv_len); bytes_done += bvec.bv_len; } bio_endio(bio); diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 0c1df1d5f1ac..3a9cc8a4a230 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -134,7 +134,7 @@ static void scm_request_done(struct scm_request *scmrq) if ((msb->flags & MSB_FLAG_IDA) && aidaw && IS_ALIGNED(aidaw, PAGE_SIZE)) - mempool_free(virt_to_page(aidaw), aidaw_pool); + mempool_free(virt_to_page((void *)aidaw), aidaw_pool); } spin_lock_irqsave(&list_lock, flags); diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 3c87057436d5..8b4575a0db9f 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -392,10 +392,6 @@ static void __init add_memory_merged(u16 rn) goto skip_add; start = rn2addr(first_rn); size = (unsigned long long) num * sclp.rzm; - if (start >= VMEM_MAX_PHYS) - goto skip_add; - if (start + size > VMEM_MAX_PHYS) - size = VMEM_MAX_PHYS - start; if (start >= ident_map_size) goto skip_add; if (start + size > ident_map_size) diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index f480d6c7fd39..fdc8668f3fba 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -55,6 +55,7 @@ static void __init sclp_early_facilities_detect(void) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST; if (sccb->cpuoff > 134) { sclp.has_diag318 = !!(sccb->byte_134 & 0x80); + sclp.has_diag320 = !!(sccb->byte_134 & 0x04); sclp.has_iplcc = !!(sccb->byte_134 & 0x02); } if (sccb->cpuoff > 137) { diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 4cebfaaa22b4..eb0520a9d4af 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -89,7 +89,7 @@ static void vmcp_response_free(struct vmcp_session *session) order = get_order(session->bufsize); nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT; if (session->cma_alloc) { - page = virt_to_page((unsigned long)session->response); + page = virt_to_page(session->response); cma_release(vmcp_cma, page, nr_pages); session->cma_alloc = 0; } else { diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 942c73a11ca3..bc3be0330f1d 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -3,7 +3,7 @@ * zcore module to export memory content and register sets for creating 
system * dumps on SCSI/NVMe disks (zfcp/nvme dump). * - * For more information please refer to Documentation/s390/zfcpdump.rst + * For more information please refer to Documentation/arch/s390/zfcpdump.rst * * Copyright IBM Corp. 2003, 2008 * Author(s): Michael Holzheu diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile index 22d2db690cd3..0edacd101c12 100644 --- a/drivers/s390/crypto/Makefile +++ b/drivers/s390/crypto/Makefile @@ -11,7 +11,7 @@ zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o zcrypt-objs += zcrypt_ccamisc.o zcrypt_ep11misc.o obj-$(CONFIG_ZCRYPT) += zcrypt.o # adapter drivers depend on ap.o and zcrypt.o -obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o +obj-$(CONFIG_ZCRYPT) += zcrypt_cex4.o # pkey kernel module pkey-objs := pkey_api.o diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 420120be300f..339812efe822 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Copyright IBM Corp. 2006, 2021 + * Copyright IBM Corp. 2006, 2023 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> @@ -219,6 +219,15 @@ int ap_sb_available(void) } /* + * ap_is_se_guest(): Check for SE guest with AP pass-through support. + */ +bool ap_is_se_guest(void) +{ + return is_prot_virt_guest() && ap_sb_available(); +} +EXPORT_SYMBOL(ap_is_se_guest); + +/* * ap_fetch_qci_info(): Fetch cryptographic config info * * Returns the ap configuration info fetched via PQAP(QCI). @@ -387,23 +396,6 @@ static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac, *q_ml = tapq_info.ml; *q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED; *q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED; - switch (*q_type) { - /* For CEX2 and CEX3 the available functions - * are not reflected by the facilities bits. - * Instead it is coded into the type. So here - * modify the function bits based on the type. - */ - case AP_DEVICE_TYPE_CEX2A: - case AP_DEVICE_TYPE_CEX3A: - *q_fac |= 0x08000000; - break; - case AP_DEVICE_TYPE_CEX2C: - case AP_DEVICE_TYPE_CEX3C: - *q_fac |= 0x10000000; - break; - default: - break; - } return 1; default: /* @@ -1678,8 +1670,8 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func) { int comp_type = 0; - /* < CEX2A is not supported */ - if (rawtype < AP_DEVICE_TYPE_CEX2A) { + /* < CEX4 is not supported */ + if (rawtype < AP_DEVICE_TYPE_CEX4) { AP_DBF_WARN("%s queue=%02x.%04x unsupported type %d\n", __func__, AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype); @@ -1701,7 +1693,7 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func) apinfo.cat = AP_DEVICE_TYPE_CEX8; status = ap_qact(qid, 0, &apinfo); if (status.response_code == AP_RESPONSE_NORMAL && - apinfo.cat >= AP_DEVICE_TYPE_CEX2A && + apinfo.cat >= AP_DEVICE_TYPE_CEX4 && apinfo.cat <= AP_DEVICE_TYPE_CEX8) comp_type = apinfo.cat; } diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 0d7b7eb374ad..be54b070c031 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ */ /* - * Copyright IBM Corp. 2006, 2019 + * Copyright IBM Corp. 
2006, 2023 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> @@ -67,15 +67,8 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) #define AP_RESPONSE_INVALID_DOMAIN 0x42 /* - * Known device types + * Supported AP device types */ -#define AP_DEVICE_TYPE_PCICC 3 -#define AP_DEVICE_TYPE_PCICA 4 -#define AP_DEVICE_TYPE_PCIXCC 5 -#define AP_DEVICE_TYPE_CEX2A 6 -#define AP_DEVICE_TYPE_CEX2C 7 -#define AP_DEVICE_TYPE_CEX3A 8 -#define AP_DEVICE_TYPE_CEX3C 9 #define AP_DEVICE_TYPE_CEX4 10 #define AP_DEVICE_TYPE_CEX5 11 #define AP_DEVICE_TYPE_CEX6 12 @@ -272,14 +265,6 @@ static inline void ap_release_message(struct ap_message *ap_msg) kfree_sensitive(ap_msg->private); } -/* - * Note: don't use ap_send/ap_recv after using ap_queue_message - * for the first time. Otherwise the ap message queue will get - * confused. - */ -int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen); -int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen); - enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event); enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event); @@ -289,6 +274,7 @@ void ap_flush_queue(struct ap_queue *aq); void *ap_airq_ptr(void); int ap_sb_available(void); +bool ap_is_se_guest(void); void ap_wait(enum ap_sm_wait wait); void ap_request_timeout(struct timer_list *t); void ap_bus_force_rescan(void); diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 30df83735adf..1336e632adc4 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright IBM Corp. 2016 + * Copyright IBM Corp. 2016, 2023 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> * * Adjunct processor bus, queue related code. @@ -93,51 +93,6 @@ __ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen, return ap_nqap(qid, psmid, msg, msglen); } -int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen) -{ - struct ap_queue_status status; - - status = __ap_send(qid, psmid, msg, msglen, 0); - if (status.async) - return -EPERM; - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - return 0; - case AP_RESPONSE_Q_FULL: - case AP_RESPONSE_RESET_IN_PROGRESS: - return -EBUSY; - case AP_RESPONSE_REQ_FAC_NOT_INST: - return -EINVAL; - default: /* Device is gone. 
*/ - return -ENODEV; - } -} -EXPORT_SYMBOL(ap_send); - -int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen) -{ - struct ap_queue_status status; - - if (!msg) - return -EINVAL; - status = ap_dqap(qid, psmid, msg, msglen, NULL, NULL, NULL); - if (status.async) - return -EPERM; - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - return 0; - case AP_RESPONSE_NO_PENDING_REPLY: - if (status.queue_empty) - return -ENOENT; - return -EBUSY; - case AP_RESPONSE_RESET_IN_PROGRESS: - return -EBUSY; - default: - return -ENODEV; - } -} -EXPORT_SYMBOL(ap_recv); - /* State machine definitions and helpers */ static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq) diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index e58bfd225323..6cfb6b2340c9 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -263,7 +263,9 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen, /* build a list of apqns suitable for ep11 keys with cpacf support */ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, EP11_API_V, NULL); + ZCRYPT_CEX7, + ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4, + NULL); if (rc) goto out; @@ -272,7 +274,8 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen, card = apqns[i] >> 16; dom = apqns[i] & 0xFFFF; rc = ep11_clr2keyblob(card, dom, clrkeylen * 8, - 0, clrkey, keybuf, keybuflen); + 0, clrkey, keybuf, keybuflen, + PKEY_TYPE_EP11); if (rc == 0) break; } @@ -287,10 +290,9 @@ out: /* * Find card and transform EP11 secure key into protected key. */ -static int pkey_ep11key2pkey(const u8 *key, u8 *protkey, - u32 *protkeylen, u32 *protkeytype) +static int pkey_ep11key2pkey(const u8 *key, size_t keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) { - struct ep11keyblob *kb = (struct ep11keyblob *)key; u32 nr_apqns, *apqns = NULL; u16 card, dom; int i, rc; @@ -299,7 +301,9 @@ static int pkey_ep11key2pkey(const u8 *key, u8 *protkey, /* build a list of apqns suitable for this key */ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, EP11_API_V, kb->wkvp); + ZCRYPT_CEX7, + ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4, + ep11_kb_wkvp(key, keylen)); if (rc) goto out; @@ -307,7 +311,7 @@ static int pkey_ep11key2pkey(const u8 *key, u8 *protkey, for (rc = -ENODEV, i = 0; i < nr_apqns; i++) { card = apqns[i] >> 16; dom = apqns[i] & 0xFFFF; - rc = ep11_kblob2protkey(card, dom, key, kb->head.len, + rc = ep11_kblob2protkey(card, dom, key, keylen, protkey, protkeylen, protkeytype); if (rc == 0) break; @@ -495,7 +499,7 @@ try_via_ep11: tmpbuf, &tmpbuflen); if (rc) goto failure; - rc = pkey_ep11key2pkey(tmpbuf, + rc = pkey_ep11key2pkey(tmpbuf, tmpbuflen, protkey, protkeylen, protkeytype); if (!rc) goto out; @@ -611,7 +615,7 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); if (rc) goto out; - rc = pkey_ep11key2pkey(key, + rc = pkey_ep11key2pkey(key, keylen, protkey, protkeylen, protkeytype); break; } @@ -620,7 +624,7 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1); if (rc) goto out; - rc = pkey_ep11key2pkey(key + sizeof(struct ep11kblob_header), + rc = pkey_ep11key2pkey(key, keylen, protkey, protkeylen, protkeytype); break; default: @@ -713,6 +717,11 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns, if (*keybufsize < MINEP11AESKEYBLOBSIZE) return -EINVAL; break; + case PKEY_TYPE_EP11_AES: + if (*keybufsize < (sizeof(struct ep11kblob_header) + + MINEP11AESKEYBLOBSIZE)) + return -EINVAL; + break; default: return -EINVAL; } @@ -729,9 +738,10 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns, for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { card = apqns[i].card; dom = apqns[i].domain; - if (ktype == PKEY_TYPE_EP11) { + if (ktype == PKEY_TYPE_EP11 || + ktype == PKEY_TYPE_EP11_AES) { rc = ep11_genaeskey(card, dom, ksize, kflags, - keybuf, keybufsize); + keybuf, keybufsize, ktype); } else if (ktype == PKEY_TYPE_CCA_DATA) { rc = cca_genseckey(card, dom, ksize, keybuf); *keybufsize = (rc ? 0 : SECKEYBLOBSIZE); @@ -769,6 +779,11 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns, if (*keybufsize < MINEP11AESKEYBLOBSIZE) return -EINVAL; break; + case PKEY_TYPE_EP11_AES: + if (*keybufsize < (sizeof(struct ep11kblob_header) + + MINEP11AESKEYBLOBSIZE)) + return -EINVAL; + break; default: return -EINVAL; } @@ -787,9 +802,11 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns, for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { card = apqns[i].card; dom = apqns[i].domain; - if (ktype == PKEY_TYPE_EP11) { + if (ktype == PKEY_TYPE_EP11 || + ktype == PKEY_TYPE_EP11_AES) { rc = ep11_clr2keyblob(card, dom, ksize, kflags, - clrkey, keybuf, keybufsize); + clrkey, keybuf, keybufsize, + ktype); } else if (ktype == PKEY_TYPE_CCA_DATA) { rc = cca_clr2seckey(card, dom, ksize, clrkey, keybuf); @@ -888,6 +905,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, } else if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES) { struct ep11keyblob *kb = (struct ep11keyblob *)key; + int api; rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); if (rc) @@ -895,10 +913,12 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, if (ktype) *ktype = PKEY_TYPE_EP11; if (ksize) - *ksize = kb->head.keybitlen; + *ksize = kb->head.bitlen; + api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, - ZCRYPT_CEX7, EP11_API_V, kb->wkvp); + ZCRYPT_CEX7, api, + ep11_kb_wkvp(key, keylen)); if (rc) goto out; @@ -908,6 +928,32 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, *cardnr = ((struct pkey_apqn *)_apqns)->card; *domain = ((struct pkey_apqn *)_apqns)->domain; + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES_WITH_HEADER) { + struct ep11kblob_header *kh = (struct ep11kblob_header *)key; + int api; + + rc = ep11_check_aes_key_with_hdr(debug_info, 3, + key, keylen, 1); + if (rc) + goto out; + if (ktype) + *ktype = PKEY_TYPE_EP11_AES; + if (ksize) + *ksize = kh->bitlen; + + api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; + rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, + ZCRYPT_CEX7, api, + ep11_kb_wkvp(key, keylen)); + if (rc) + goto out; + + if (flags) + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; + + *cardnr = ((struct pkey_apqn *)_apqns)->card; + *domain = ((struct pkey_apqn *)_apqns)->domain; } else { rc = -EINVAL; } @@ -949,10 +995,12 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, } } else if (hdr->type == TOKTYPE_NON_CCA) { if (hdr->version == TOKVER_EP11_AES) { - if (keylen < sizeof(struct ep11keyblob)) - return -EINVAL; if (ep11_check_aes_key(debug_info, 3, key, keylen, 1)) return -EINVAL; + } else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) { + if (ep11_check_aes_key_with_hdr(debug_info, 3, + key, keylen, 1)) + return -EINVAL; } else { return pkey_nonccatok2pkey(key, keylen, protkey, protkeylen, @@ -980,10 +1028,7 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, protkey, protkeylen, protkeytype); } else { - /* EP11 AES secure key blob */ - struct ep11keyblob *kb = (struct ep11keyblob *)key; - - rc = ep11_kblob2protkey(card, dom, key, kb->head.len, + rc = ep11_kblob2protkey(card, dom, key, keylen, protkey, protkeylen, protkeytype); } @@ -1018,7 +1063,7 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, return -EINVAL; if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { minhwtype = ZCRYPT_CEX7; - api = EP11_API_V; + api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; } rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, minhwtype, api, kb->wkvp); @@ -1034,7 +1079,7 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, return -EINVAL; if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { minhwtype = ZCRYPT_CEX7; - api = EP11_API_V; + api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; } rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, minhwtype, api, kb->wkvp); @@ -1144,11 +1189,13 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype, ktype == PKEY_TYPE_EP11_AES || ktype == PKEY_TYPE_EP11_ECC) { u8 *wkvp = NULL; + int api; if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) wkvp = cur_mkvp; + api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, EP11_API_V, wkvp); + ZCRYPT_CEX7, api, wkvp); if (rc) goto out; @@ -1243,12 +1290,14 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) rc = ep11_kblob2protkey(card, dom, key, hdr->len, - protkey, protkeylen, protkeytype); + protkey, protkeylen, + protkeytype); else if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES && is_ep11_keyblob(key)) rc = ep11_kblob2protkey(card, dom, key, hdr->len, - protkey, protkeylen, protkeytype); + protkey, protkeylen, + protkeytype); else if (hdr->type == TOKTYPE_CCA_INTERNAL && hdr->version == TOKVER_CCA_AES) rc = cca_sec2protkey(card, dom, key, protkey, @@ -1466,7 +1515,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries); if (IS_ERR(apqns)) return PTR_ERR(apqns); - kkey = kmalloc(klen, GFP_KERNEL); + kkey = kzalloc(klen, GFP_KERNEL); if (!kkey) { kfree(apqns); return -ENOMEM; @@ -1508,7 +1557,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries); if (IS_ERR(apqns)) return PTR_ERR(apqns); - kkey = kmalloc(klen, GFP_KERNEL); + kkey = kzalloc(klen, GFP_KERNEL); if (!kkey) { kfree(apqns); return -ENOMEM; @@ -2102,7 +2151,7 @@ static struct attribute_group ccacipher_attr_group = { * (i.e. off != 0 or count < key blob size) -EINVAL is returned. * This function and the sysfs attributes using it provide EP11 key blobs * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently - * 320 bytes. + * 336 bytes. */ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits, bool is_xts, char *buf, loff_t off, @@ -2120,7 +2169,9 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits, /* build a list of apqns able to generate an cipher key */ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, EP11_API_V, NULL); + ZCRYPT_CEX7, + ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4, + NULL); if (rc) return rc; @@ -2130,7 +2181,8 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits, for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { card = apqns[i] >> 16; dom = apqns[i] & 0xFFFF; - rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize); + rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize, + PKEY_TYPE_EP11_AES); if (rc == 0) break; } @@ -2140,7 +2192,8 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits, if (is_xts) { keysize = MAXEP11AESKEYBLOBSIZE; buf += MAXEP11AESKEYBLOBSIZE; - rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize); + rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize, + PKEY_TYPE_EP11_AES); if (rc == 0) return 2 * MAXEP11AESKEYBLOBSIZE; } diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index b441745b0418..0509f80622cd 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -30,13 +30,12 @@ #define AP_QUEUE_UNASSIGNED "unassigned" #define AP_QUEUE_IN_USE "in use" -#define MAX_RESET_CHECK_WAIT 200 /* Sleep max 200ms for reset check */ #define AP_RESET_INTERVAL 20 /* Reset sleep interval (20ms) */ static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable); static struct vfio_ap_queue *vfio_ap_find_queue(int apqn); static const struct vfio_device_ops vfio_ap_matrix_dev_ops; -static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q); +static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q); /** * get_update_locks_for_kvm: Acquire the locks required to dynamically update a @@ -360,6 +359,28 @@ static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib) return 0; } +static int ensure_nib_shared(unsigned long addr, struct gmap *gmap) +{ + int ret; + + /* + * The nib has to be located in shared storage since guest and + * host access it. vfio_pin_pages() will do a pin shared and + * if that fails (possibly because it's not a shared page) it + * calls export. We try to do a second pin shared here so that + * the UV gives us an error code if we try to pin a non-shared + * page. + * + * If the page is already pinned shared the UV will return a success. 
+ */ + ret = uv_pin_shared(addr); + if (ret) { + /* vfio_pin_pages() likely exported the page so let's re-import */ + gmap_convert_to_secure(gmap, addr); + } + return ret; +} + /** * vfio_ap_irq_enable - Enable Interruption for a APQN * @@ -423,6 +444,14 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q, h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK); aqic_gisa.gisc = isc; + /* NIB in non-shared storage is a rc 6 for PV guests */ + if (kvm_s390_pv_cpu_is_protected(vcpu) && + ensure_nib_shared(h_nib & PAGE_MASK, kvm->arch.gmap)) { + vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1); + status.response_code = AP_RESPONSE_INVALID_ADDRESS; + return status; + } + nisc = kvm_s390_gisc_register(kvm, isc); if (nisc < 0) { VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n", @@ -675,7 +704,7 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm, */ apqn = AP_MKQID(apid, apqi); q = vfio_ap_mdev_get_queue(matrix_mdev, apqn); - if (!q || q->reset_rc) { + if (!q || q->reset_status.response_code) { clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); break; @@ -1608,19 +1637,21 @@ static int apq_status_check(int apqn, struct ap_queue_status *status) { switch (status->response_code) { case AP_RESPONSE_NORMAL: + case AP_RESPONSE_DECONFIGURED: + return 0; case AP_RESPONSE_RESET_IN_PROGRESS: - if (status->queue_empty && !status->irq_enabled) - return 0; + case AP_RESPONSE_BUSY: return -EBUSY; - case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE: + case AP_RESPONSE_ASSOC_FAILED: /* - * If the AP queue is deconfigured, any subsequent AP command - * targeting the queue will fail with the same response code. On the - * other hand, when an AP adapter is deconfigured, the associated - * queues are reset, so let's return a value indicating the reset - * for which we're waiting completed successfully. + * These asynchronous response codes indicate a PQAP(AAPQ) + * instruction to associate a secret with the guest failed. All + * subsequent AP instructions will end with the asynchronous + * response code until the AP queue is reset; so, let's return + * a value indicating a reset needs to be performed again. 
*/ - return 0; + return -EAGAIN; default: WARN(true, "failed to verify reset of queue %02x.%04x: TAPQ rc=%u\n", @@ -1630,91 +1661,105 @@ static int apq_status_check(int apqn, struct ap_queue_status *status) } } -static int apq_reset_check(struct vfio_ap_queue *q) +#define WAIT_MSG "Waited %dms for reset of queue %02x.%04x (%u, %u, %u)" + +static void apq_reset_check(struct work_struct *reset_work) { - int ret; - int iters = MAX_RESET_CHECK_WAIT / AP_RESET_INTERVAL; + int ret = -EBUSY, elapsed = 0; struct ap_queue_status status; + struct vfio_ap_queue *q; - for (; iters > 0; iters--) { + q = container_of(reset_work, struct vfio_ap_queue, reset_work); + memcpy(&status, &q->reset_status, sizeof(status)); + while (true) { msleep(AP_RESET_INTERVAL); + elapsed += AP_RESET_INTERVAL; status = ap_tapq(q->apqn, NULL); ret = apq_status_check(q->apqn, &status); - if (ret != -EBUSY) - return ret; + if (ret == -EIO) + return; + if (ret == -EBUSY) { + pr_notice_ratelimited(WAIT_MSG, elapsed, + AP_QID_CARD(q->apqn), + AP_QID_QUEUE(q->apqn), + status.response_code, + status.queue_empty, + status.irq_enabled); + } else { + if (q->reset_status.response_code == AP_RESPONSE_RESET_IN_PROGRESS || + q->reset_status.response_code == AP_RESPONSE_BUSY || + q->reset_status.response_code == AP_RESPONSE_STATE_CHANGE_IN_PROGRESS || + ret == -EAGAIN) { + status = ap_zapq(q->apqn, 0); + memcpy(&q->reset_status, &status, sizeof(status)); + continue; + } + /* + * When an AP adapter is deconfigured, the + * associated queues are reset, so let's set the + * status response code to 0 so the queue may be + * passed through (i.e., not filtered) + */ + if (status.response_code == AP_RESPONSE_DECONFIGURED) + q->reset_status.response_code = 0; + if (q->saved_isc != VFIO_AP_ISC_INVALID) + vfio_ap_free_aqic_resources(q); + break; + } } - WARN_ONCE(iters <= 0, - "timeout verifying reset of queue %02x.%04x (%u, %u, %u)", - AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn), - status.queue_empty, status.irq_enabled, status.response_code); - return ret; } -static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q) +static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q) { struct ap_queue_status status; - int ret; if (!q) - return 0; -retry_zapq: + return; status = ap_zapq(q->apqn, 0); - q->reset_rc = status.response_code; + memcpy(&q->reset_status, &status, sizeof(status)); switch (status.response_code) { case AP_RESPONSE_NORMAL: - ret = 0; - /* if the reset has not completed, wait for it to take effect */ - if (!status.queue_empty || status.irq_enabled) - ret = apq_reset_check(q); - break; case AP_RESPONSE_RESET_IN_PROGRESS: + case AP_RESPONSE_BUSY: + case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS: /* - * There is a reset issued by another process in progress. Let's wait - * for that to complete. Since we have no idea whether it was a RAPQ or - * ZAPQ, then if it completes successfully, let's issue the ZAPQ. + * Let's verify whether the ZAPQ completed successfully on a work queue. */ - ret = apq_reset_check(q); - if (ret) - break; - goto retry_zapq; + queue_work(system_long_wq, &q->reset_work); + break; case AP_RESPONSE_DECONFIGURED: /* * When an AP adapter is deconfigured, the associated - * queues are reset, so let's return a value indicating the reset - * completed successfully. + * queues are reset, so let's set the status response code to 0 + * so the queue may be passed through (i.e., not filtered). 
*/ - ret = 0; + q->reset_status.response_code = 0; + vfio_ap_free_aqic_resources(q); break; default: WARN(true, "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n", AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn), status.response_code); - return -EIO; } - - vfio_ap_free_aqic_resources(q); - - return ret; } static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable) { - int ret, loop_cursor, rc = 0; + int ret = 0, loop_cursor; struct vfio_ap_queue *q; + hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) + vfio_ap_mdev_reset_queue(q); + hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) { - ret = vfio_ap_mdev_reset_queue(q); - /* - * Regardless whether a queue turns out to be busy, or - * is not operational, we need to continue resetting - * the remaining queues. - */ - if (ret) - rc = ret; + flush_work(&q->reset_work); + + if (q->reset_status.response_code) + ret = -EIO; } - return rc; + return ret; } static int vfio_ap_mdev_open_device(struct vfio_device *vdev) @@ -2038,6 +2083,8 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev) q->apqn = to_ap_queue(&apdev->device)->qid; q->saved_isc = VFIO_AP_ISC_INVALID; + memset(&q->reset_status, 0, sizeof(q->reset_status)); + INIT_WORK(&q->reset_work, apq_reset_check); matrix_mdev = get_update_locks_by_apqn(q->apqn); if (matrix_mdev) { @@ -2087,6 +2134,7 @@ void vfio_ap_mdev_remove_queue(struct ap_device *apdev) } vfio_ap_mdev_reset_queue(q); + flush_work(&q->reset_work); dev_set_drvdata(&apdev->device, NULL); kfree(q); release_update_locks_for_mdev(matrix_mdev); diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index 4642bbdbd1b2..88aff8b81f2f 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -133,7 +133,8 @@ struct ap_matrix_mdev { * @apqn: the APQN of the AP queue device * @saved_isc: the guest ISC registered with the GIB interface * @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable - * @reset_rc: the status response code from the last reset of the queue + * @reset_status: the status from the last reset of the queue + * @reset_work: work to wait for queue reset to complete */ struct vfio_ap_queue { struct ap_matrix_mdev *matrix_mdev; @@ -142,7 +143,8 @@ struct vfio_ap_queue { #define VFIO_AP_ISC_INVALID 0xff unsigned char saved_isc; struct hlist_node mdev_qnode; - unsigned int reset_rc; + struct ap_queue_status reset_status; + struct work_struct reset_work; }; int vfio_ap_mdev_register(void); diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index 83f692c9c197..e69de29bb2d1 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -1,227 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright IBM Corp. 
2001, 2012 - * Author(s): Robert Burroughs - * Eric Rossman (edrossma@us.ibm.com) - * - * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) - * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> - * Ralph Wuerthner <rwuerthn@de.ibm.com> - * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> - */ - -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/init.h> -#include <linux/err.h> -#include <linux/atomic.h> -#include <linux/uaccess.h> -#include <linux/mod_devicetable.h> - -#include "ap_bus.h" -#include "zcrypt_api.h" -#include "zcrypt_error.h" -#include "zcrypt_cex2a.h" -#include "zcrypt_msgtype50.h" - -#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */ -#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */ -#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE -#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */ - -#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */ -#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ - -#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus - * (max outputdatalength) + - * type80_hdr - */ -#define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg) - -#define CEX2A_CLEANUP_TIME (15 * HZ) -#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME - -MODULE_AUTHOR("IBM Corporation"); -MODULE_DESCRIPTION("CEX2A/CEX3A Cryptographic Coprocessor device driver, " \ - "Copyright IBM Corp. 2001, 2018"); -MODULE_LICENSE("GPL"); - -static struct ap_device_id zcrypt_cex2a_card_ids[] = { - { .dev_type = AP_DEVICE_TYPE_CEX2A, - .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, - { .dev_type = AP_DEVICE_TYPE_CEX3A, - .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, - { /* end of list */ }, -}; - -MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids); - -static struct ap_device_id zcrypt_cex2a_queue_ids[] = { - { .dev_type = AP_DEVICE_TYPE_CEX2A, - .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, - { .dev_type = AP_DEVICE_TYPE_CEX3A, - .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, - { /* end of list */ }, -}; - -MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids); - -/* - * Probe function for CEX2A card devices. It always accepts the AP device - * since the bus_match already checked the card type. - * @ap_dev: pointer to the AP device. 
- */ -static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev) -{ - /* - * Normalized speed ratings per crypto adapter - * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY - */ - static const int CEX2A_SPEED_IDX[] = { - 800, 1000, 2000, 900, 1200, 2400, 0, 0}; - static const int CEX3A_SPEED_IDX[] = { - 400, 500, 1000, 450, 550, 1200, 0, 0}; - - struct ap_card *ac = to_ap_card(&ap_dev->device); - struct zcrypt_card *zc; - int rc = 0; - - zc = zcrypt_card_alloc(); - if (!zc) - return -ENOMEM; - zc->card = ac; - dev_set_drvdata(&ap_dev->device, zc); - - if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) { - zc->min_mod_size = CEX2A_MIN_MOD_SIZE; - zc->max_mod_size = CEX2A_MAX_MOD_SIZE; - zc->speed_rating = CEX2A_SPEED_IDX; - zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; - zc->type_string = "CEX2A"; - zc->user_space_type = ZCRYPT_CEX2A; - } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) { - zc->min_mod_size = CEX2A_MIN_MOD_SIZE; - zc->max_mod_size = CEX2A_MAX_MOD_SIZE; - zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; - if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && - ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) { - zc->max_mod_size = CEX3A_MAX_MOD_SIZE; - zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE; - } - zc->speed_rating = CEX3A_SPEED_IDX; - zc->type_string = "CEX3A"; - zc->user_space_type = ZCRYPT_CEX3A; - } else { - zcrypt_card_free(zc); - return -ENODEV; - } - zc->online = 1; - - rc = zcrypt_card_register(zc); - if (rc) - zcrypt_card_free(zc); - - return rc; -} - -/* - * This is called to remove the CEX2A card driver information - * if an AP card device is removed. - */ -static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev) -{ - struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device); - - zcrypt_card_unregister(zc); -} - -static struct ap_driver zcrypt_cex2a_card_driver = { - .probe = zcrypt_cex2a_card_probe, - .remove = zcrypt_cex2a_card_remove, - .ids = zcrypt_cex2a_card_ids, - .flags = AP_DRIVER_FLAG_DEFAULT, -}; - -/* - * Probe function for CEX2A queue devices. It always accepts the AP device - * since the bus_match already checked the queue type. - * @ap_dev: pointer to the AP device. - */ -static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev) -{ - struct ap_queue *aq = to_ap_queue(&ap_dev->device); - struct zcrypt_queue *zq = NULL; - int rc; - - switch (ap_dev->device_type) { - case AP_DEVICE_TYPE_CEX2A: - zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE); - if (!zq) - return -ENOMEM; - break; - case AP_DEVICE_TYPE_CEX3A: - zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE); - if (!zq) - return -ENOMEM; - break; - } - if (!zq) - return -ENODEV; - zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT); - zq->queue = aq; - zq->online = 1; - atomic_set(&zq->load, 0); - ap_queue_init_state(aq); - ap_queue_init_reply(aq, &zq->reply); - aq->request_timeout = CEX2A_CLEANUP_TIME; - dev_set_drvdata(&ap_dev->device, zq); - rc = zcrypt_queue_register(zq); - if (rc) - zcrypt_queue_free(zq); - - return rc; -} - -/* - * This is called to remove the CEX2A queue driver information - * if an AP queue device is removed. 
- */ -static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev) -{ - struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device); - - zcrypt_queue_unregister(zq); -} - -static struct ap_driver zcrypt_cex2a_queue_driver = { - .probe = zcrypt_cex2a_queue_probe, - .remove = zcrypt_cex2a_queue_remove, - .ids = zcrypt_cex2a_queue_ids, - .flags = AP_DRIVER_FLAG_DEFAULT, -}; - -int __init zcrypt_cex2a_init(void) -{ - int rc; - - rc = ap_driver_register(&zcrypt_cex2a_card_driver, - THIS_MODULE, "cex2acard"); - if (rc) - return rc; - - rc = ap_driver_register(&zcrypt_cex2a_queue_driver, - THIS_MODULE, "cex2aqueue"); - if (rc) - ap_driver_unregister(&zcrypt_cex2a_card_driver); - - return rc; -} - -void __exit zcrypt_cex2a_exit(void) -{ - ap_driver_unregister(&zcrypt_cex2a_queue_driver); - ap_driver_unregister(&zcrypt_cex2a_card_driver); -} - -module_init(zcrypt_cex2a_init); -module_exit(zcrypt_cex2a_exit); diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h index 7842214d9d09..e69de29bb2d1 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.h +++ b/drivers/s390/crypto/zcrypt_cex2a.h @@ -1,134 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright IBM Corp. 2001, 2006 - * Author(s): Robert Burroughs - * Eric Rossman (edrossma@us.ibm.com) - * - * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) - * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> - */ - -#ifndef _ZCRYPT_CEX2A_H_ -#define _ZCRYPT_CEX2A_H_ - -/** - * The type 50 message family is associated with CEXxA cards. - * - * The four members of the family are described below. - * - * Note that all unsigned char arrays are right-justified and left-padded - * with zeroes. - * - * Note that all reserved fields must be zeroes. 
- */ -struct type50_hdr { - unsigned char reserved1; - unsigned char msg_type_code; /* 0x50 */ - unsigned short msg_len; - unsigned char reserved2; - unsigned char ignored; - unsigned short reserved3; -} __packed; - -#define TYPE50_TYPE_CODE 0x50 - -#define TYPE50_MEB1_FMT 0x0001 -#define TYPE50_MEB2_FMT 0x0002 -#define TYPE50_MEB3_FMT 0x0003 -#define TYPE50_CRB1_FMT 0x0011 -#define TYPE50_CRB2_FMT 0x0012 -#define TYPE50_CRB3_FMT 0x0013 - -/* Mod-Exp, with a small modulus */ -struct type50_meb1_msg { - struct type50_hdr header; - unsigned short keyblock_type; /* 0x0001 */ - unsigned char reserved[6]; - unsigned char exponent[128]; - unsigned char modulus[128]; - unsigned char message[128]; -} __packed; - -/* Mod-Exp, with a large modulus */ -struct type50_meb2_msg { - struct type50_hdr header; - unsigned short keyblock_type; /* 0x0002 */ - unsigned char reserved[6]; - unsigned char exponent[256]; - unsigned char modulus[256]; - unsigned char message[256]; -} __packed; - -/* Mod-Exp, with a larger modulus */ -struct type50_meb3_msg { - struct type50_hdr header; - unsigned short keyblock_type; /* 0x0003 */ - unsigned char reserved[6]; - unsigned char exponent[512]; - unsigned char modulus[512]; - unsigned char message[512]; -} __packed; - -/* CRT, with a small modulus */ -struct type50_crb1_msg { - struct type50_hdr header; - unsigned short keyblock_type; /* 0x0011 */ - unsigned char reserved[6]; - unsigned char p[64]; - unsigned char q[64]; - unsigned char dp[64]; - unsigned char dq[64]; - unsigned char u[64]; - unsigned char message[128]; -} __packed; - -/* CRT, with a large modulus */ -struct type50_crb2_msg { - struct type50_hdr header; - unsigned short keyblock_type; /* 0x0012 */ - unsigned char reserved[6]; - unsigned char p[128]; - unsigned char q[128]; - unsigned char dp[128]; - unsigned char dq[128]; - unsigned char u[128]; - unsigned char message[256]; -} __packed; - -/* CRT, with a larger modulus */ -struct type50_crb3_msg { - struct type50_hdr header; - unsigned short keyblock_type; /* 0x0013 */ - unsigned char reserved[6]; - unsigned char p[256]; - unsigned char q[256]; - unsigned char dp[256]; - unsigned char dq[256]; - unsigned char u[256]; - unsigned char message[512]; -} __packed; - -/** - * The type 80 response family is associated with a CEXxA cards. - * - * Note that all unsigned char arrays are right-justified and left-padded - * with zeroes. - * - * Note that all reserved fields must be zeroes. - */ - -#define TYPE80_RSP_CODE 0x80 - -struct type80_hdr { - unsigned char reserved1; - unsigned char type; /* 0x80 */ - unsigned short len; - unsigned char code; /* 0x00 */ - unsigned char reserved2[3]; - unsigned char reserved3[8]; -} __packed; - -int zcrypt_cex2a_init(void); -void zcrypt_cex2a_exit(void); - -#endif /* _ZCRYPT_CEX2A_H_ */ diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c index 251b5bd3d19c..e69de29bb2d1 100644 --- a/drivers/s390/crypto/zcrypt_cex2c.c +++ b/drivers/s390/crypto/zcrypt_cex2c.c @@ -1,421 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright IBM Corp. 
2001, 2018 - * Author(s): Robert Burroughs - * Eric Rossman (edrossma@us.ibm.com) - * - * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) - * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> - * Ralph Wuerthner <rwuerthn@de.ibm.com> - * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/err.h> -#include <linux/delay.h> -#include <linux/slab.h> -#include <linux/atomic.h> -#include <linux/uaccess.h> -#include <linux/mod_devicetable.h> - -#include "ap_bus.h" -#include "zcrypt_api.h" -#include "zcrypt_error.h" -#include "zcrypt_msgtype6.h" -#include "zcrypt_cex2c.h" -#include "zcrypt_cca_key.h" -#include "zcrypt_ccamisc.h" - -#define CEX2C_MIN_MOD_SIZE 16 /* 128 bits */ -#define CEX2C_MAX_MOD_SIZE 256 /* 2048 bits */ -#define CEX3C_MIN_MOD_SIZE 16 /* 128 bits */ -#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */ -#define CEX2C_MAX_XCRB_MESSAGE_SIZE (12 * 1024) -#define CEX2C_CLEANUP_TIME (15 * HZ) - -MODULE_AUTHOR("IBM Corporation"); -MODULE_DESCRIPTION("CEX2C/CEX3C Cryptographic Coprocessor device driver, " \ - "Copyright IBM Corp. 2001, 2018"); -MODULE_LICENSE("GPL"); - -static struct ap_device_id zcrypt_cex2c_card_ids[] = { - { .dev_type = AP_DEVICE_TYPE_CEX2C, - .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, - { .dev_type = AP_DEVICE_TYPE_CEX3C, - .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, - { /* end of list */ }, -}; - -MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_card_ids); - -static struct ap_device_id zcrypt_cex2c_queue_ids[] = { - { .dev_type = AP_DEVICE_TYPE_CEX2C, - .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, - { .dev_type = AP_DEVICE_TYPE_CEX3C, - .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, - { /* end of list */ }, -}; - -MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_queue_ids); - -/* - * CCA card additional device attributes - */ -static ssize_t cca_serialnr_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct cca_info ci; - struct ap_card *ac = to_ap_card(dev); - - memset(&ci, 0, sizeof(ci)); - - if (ap_domain_index >= 0) - cca_get_info(ac->id, ap_domain_index, &ci, zc->online); - - return sysfs_emit(buf, "%s\n", ci.serial); -} - -static struct device_attribute dev_attr_cca_serialnr = - __ATTR(serialnr, 0444, cca_serialnr_show, NULL); - -static struct attribute *cca_card_attrs[] = { - &dev_attr_cca_serialnr.attr, - NULL, -}; - -static const struct attribute_group cca_card_attr_grp = { - .attrs = cca_card_attrs, -}; - - /* - * CCA queue additional device attributes - */ -static ssize_t cca_mkvps_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct zcrypt_queue *zq = dev_get_drvdata(dev); - int n = 0; - struct cca_info ci; - static const char * const cao_state[] = { "invalid", "valid" }; - static const char * const new_state[] = { "empty", "partial", "full" }; - - memset(&ci, 0, sizeof(ci)); - - cca_get_info(AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), - &ci, zq->online); - - if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3') - n = sysfs_emit(buf, "AES NEW: %s 0x%016llx\n", - new_state[ci.new_aes_mk_state - '1'], - ci.new_aes_mkvp); - else - n = sysfs_emit(buf, "AES NEW: - -\n"); - - if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2') - n += sysfs_emit_at(buf, n, "AES CUR: %s 0x%016llx\n", - cao_state[ci.cur_aes_mk_state - '1'], - ci.cur_aes_mkvp); - else - n += sysfs_emit_at(buf, n, "AES CUR: - -\n"); - - if 
(ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2') - n += sysfs_emit_at(buf, n, "AES OLD: %s 0x%016llx\n", - cao_state[ci.old_aes_mk_state - '1'], - ci.old_aes_mkvp); - else - n += sysfs_emit_at(buf, n, "AES OLD: - -\n"); - - if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3') - n += sysfs_emit_at(buf, n, "APKA NEW: %s 0x%016llx\n", - new_state[ci.new_apka_mk_state - '1'], - ci.new_apka_mkvp); - else - n += sysfs_emit_at(buf, n, "APKA NEW: - -\n"); - - if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2') - n += sysfs_emit_at(buf, n, "APKA CUR: %s 0x%016llx\n", - cao_state[ci.cur_apka_mk_state - '1'], - ci.cur_apka_mkvp); - else - n += sysfs_emit_at(buf, n, "APKA CUR: - -\n"); - - if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2') - n += sysfs_emit_at(buf, n, "APKA OLD: %s 0x%016llx\n", - cao_state[ci.old_apka_mk_state - '1'], - ci.old_apka_mkvp); - else - n += sysfs_emit_at(buf, n, "APKA OLD: - -\n"); - - return n; -} - -static struct device_attribute dev_attr_cca_mkvps = - __ATTR(mkvps, 0444, cca_mkvps_show, NULL); - -static struct attribute *cca_queue_attrs[] = { - &dev_attr_cca_mkvps.attr, - NULL, -}; - -static const struct attribute_group cca_queue_attr_grp = { - .attrs = cca_queue_attrs, -}; - -/* - * Large random number detection function. Its sends a message to a CEX2C/CEX3C - * card to find out if large random numbers are supported. - * @ap_dev: pointer to the AP device. - * - * Returns 1 if large random numbers are supported, 0 if not and < 0 on error. - */ -static int zcrypt_cex2c_rng_supported(struct ap_queue *aq) -{ - struct ap_message ap_msg; - unsigned long psmid; - unsigned int domain; - struct { - struct type86_hdr hdr; - struct type86_fmt2_ext fmt2; - struct CPRBX cprbx; - } __packed *reply; - struct { - struct type6_hdr hdr; - struct CPRBX cprbx; - char function_code[2]; - short int rule_length; - char rule[8]; - short int verb_length; - short int key_length; - } __packed *msg; - int rc, i; - - ap_init_message(&ap_msg); - ap_msg.msg = (void *)get_zeroed_page(GFP_KERNEL); - if (!ap_msg.msg) - return -ENOMEM; - ap_msg.bufsize = PAGE_SIZE; - - rng_type6cprb_msgx(&ap_msg, 4, &domain); - - msg = ap_msg.msg; - msg->cprbx.domain = AP_QID_QUEUE(aq->qid); - - rc = ap_send(aq->qid, 0x0102030405060708UL, ap_msg.msg, ap_msg.len); - if (rc) - goto out_free; - - /* Wait for the test message to complete. */ - for (i = 0; i < 2 * HZ; i++) { - msleep(1000 / HZ); - rc = ap_recv(aq->qid, &psmid, ap_msg.msg, ap_msg.bufsize); - if (rc == 0 && psmid == 0x0102030405060708UL) - break; - } - - if (i >= 2 * HZ) { - /* Got no answer. */ - rc = -ENODEV; - goto out_free; - } - - reply = ap_msg.msg; - if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0) - rc = 1; - else - rc = 0; -out_free: - free_page((unsigned long)ap_msg.msg); - return rc; -} - -/* - * Probe function for CEX2C/CEX3C card devices. It always accepts the - * AP device since the bus_match already checked the hardware type. - * @ap_dev: pointer to the AP card device. 
- */ -static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev) -{ - /* - * Normalized speed ratings per crypto adapter - * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY - */ - static const int CEX2C_SPEED_IDX[] = { - 1000, 1400, 2400, 1100, 1500, 2600, 100, 12}; - static const int CEX3C_SPEED_IDX[] = { - 500, 700, 1400, 550, 800, 1500, 80, 10}; - - struct ap_card *ac = to_ap_card(&ap_dev->device); - struct zcrypt_card *zc; - int rc = 0; - - zc = zcrypt_card_alloc(); - if (!zc) - return -ENOMEM; - zc->card = ac; - dev_set_drvdata(&ap_dev->device, zc); - switch (ac->ap_dev.device_type) { - case AP_DEVICE_TYPE_CEX2C: - zc->user_space_type = ZCRYPT_CEX2C; - zc->type_string = "CEX2C"; - zc->speed_rating = CEX2C_SPEED_IDX; - zc->min_mod_size = CEX2C_MIN_MOD_SIZE; - zc->max_mod_size = CEX2C_MAX_MOD_SIZE; - zc->max_exp_bit_length = CEX2C_MAX_MOD_SIZE; - break; - case AP_DEVICE_TYPE_CEX3C: - zc->user_space_type = ZCRYPT_CEX3C; - zc->type_string = "CEX3C"; - zc->speed_rating = CEX3C_SPEED_IDX; - zc->min_mod_size = CEX3C_MIN_MOD_SIZE; - zc->max_mod_size = CEX3C_MAX_MOD_SIZE; - zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE; - break; - default: - zcrypt_card_free(zc); - return -ENODEV; - } - zc->online = 1; - - rc = zcrypt_card_register(zc); - if (rc) { - zcrypt_card_free(zc); - return rc; - } - - if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) { - rc = sysfs_create_group(&ap_dev->device.kobj, - &cca_card_attr_grp); - if (rc) { - zcrypt_card_unregister(zc); - zcrypt_card_free(zc); - } - } - - return rc; -} - -/* - * This is called to remove the CEX2C/CEX3C card driver information - * if an AP card device is removed. - */ -static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev) -{ - struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device); - struct ap_card *ac = to_ap_card(&ap_dev->device); - - if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) - sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp); - - zcrypt_card_unregister(zc); -} - -static struct ap_driver zcrypt_cex2c_card_driver = { - .probe = zcrypt_cex2c_card_probe, - .remove = zcrypt_cex2c_card_remove, - .ids = zcrypt_cex2c_card_ids, - .flags = AP_DRIVER_FLAG_DEFAULT, -}; - -/* - * Probe function for CEX2C/CEX3C queue devices. It always accepts the - * AP device since the bus_match already checked the hardware type. - * @ap_dev: pointer to the AP card device. - */ -static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev) -{ - struct ap_queue *aq = to_ap_queue(&ap_dev->device); - struct zcrypt_queue *zq; - int rc; - - zq = zcrypt_queue_alloc(CEX2C_MAX_XCRB_MESSAGE_SIZE); - if (!zq) - return -ENOMEM; - zq->queue = aq; - zq->online = 1; - atomic_set(&zq->load, 0); - ap_rapq(aq->qid, 0); - rc = zcrypt_cex2c_rng_supported(aq); - if (rc < 0) { - zcrypt_queue_free(zq); - return rc; - } - if (rc) - zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, - MSGTYPE06_VARIANT_DEFAULT); - else - zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, - MSGTYPE06_VARIANT_NORNG); - ap_queue_init_state(aq); - ap_queue_init_reply(aq, &zq->reply); - aq->request_timeout = CEX2C_CLEANUP_TIME; - dev_set_drvdata(&ap_dev->device, zq); - rc = zcrypt_queue_register(zq); - if (rc) { - zcrypt_queue_free(zq); - return rc; - } - - if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) { - rc = sysfs_create_group(&ap_dev->device.kobj, - &cca_queue_attr_grp); - if (rc) { - zcrypt_queue_unregister(zq); - zcrypt_queue_free(zq); - } - } - - return rc; -} - -/* - * This is called to remove the CEX2C/CEX3C queue driver information - * if an AP queue device is removed. 
- */ -static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev) -{ - struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device); - struct ap_queue *aq = to_ap_queue(&ap_dev->device); - - if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) - sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp); - - zcrypt_queue_unregister(zq); -} - -static struct ap_driver zcrypt_cex2c_queue_driver = { - .probe = zcrypt_cex2c_queue_probe, - .remove = zcrypt_cex2c_queue_remove, - .ids = zcrypt_cex2c_queue_ids, - .flags = AP_DRIVER_FLAG_DEFAULT, -}; - -int __init zcrypt_cex2c_init(void) -{ - int rc; - - rc = ap_driver_register(&zcrypt_cex2c_card_driver, - THIS_MODULE, "cex2card"); - if (rc) - return rc; - - rc = ap_driver_register(&zcrypt_cex2c_queue_driver, - THIS_MODULE, "cex2cqueue"); - if (rc) - ap_driver_unregister(&zcrypt_cex2c_card_driver); - - return rc; -} - -void zcrypt_cex2c_exit(void) -{ - ap_driver_unregister(&zcrypt_cex2c_queue_driver); - ap_driver_unregister(&zcrypt_cex2c_card_driver); -} - -module_init(zcrypt_cex2c_init); -module_exit(zcrypt_cex2c_exit); diff --git a/drivers/s390/crypto/zcrypt_cex2c.h b/drivers/s390/crypto/zcrypt_cex2c.h index 6ec405c2bec2..e69de29bb2d1 100644 --- a/drivers/s390/crypto/zcrypt_cex2c.h +++ b/drivers/s390/crypto/zcrypt_cex2c.h @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright IBM Corp. 2001, 2018 - * Author(s): Robert Burroughs - * Eric Rossman (edrossma@us.ibm.com) - * - * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) - * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> - * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> - */ - -#ifndef _ZCRYPT_CEX2C_H_ -#define _ZCRYPT_CEX2C_H_ - -int zcrypt_cex2c_init(void); -void zcrypt_cex2c_exit(void); - -#endif /* _ZCRYPT_CEX2C_H_ */ diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c index 958f5ee47f1b..0a877f9792c2 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.c +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -29,6 +29,8 @@ #define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__) #define DEBUG_ERR(...) 
ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__) +#define EP11_PINBLOB_V1_BYTES 56 + /* default iv used here */ static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff }; @@ -113,6 +115,109 @@ static void __exit card_cache_free(void) spin_unlock_bh(&card_list_lock); } +static int ep11_kb_split(const u8 *kb, size_t kblen, u32 kbver, + struct ep11kblob_header **kbhdr, size_t *kbhdrsize, + u8 **kbpl, size_t *kbplsize) +{ + struct ep11kblob_header *hdr = NULL; + size_t hdrsize, plsize = 0; + int rc = -EINVAL; + u8 *pl = NULL; + + if (kblen < sizeof(struct ep11kblob_header)) + goto out; + hdr = (struct ep11kblob_header *)kb; + + switch (kbver) { + case TOKVER_EP11_AES: + /* header overlays the payload */ + hdrsize = 0; + break; + case TOKVER_EP11_ECC_WITH_HEADER: + case TOKVER_EP11_AES_WITH_HEADER: + /* payload starts after the header */ + hdrsize = sizeof(struct ep11kblob_header); + break; + default: + goto out; + } + + plsize = kblen - hdrsize; + pl = (u8 *)kb + hdrsize; + + if (kbhdr) + *kbhdr = hdr; + if (kbhdrsize) + *kbhdrsize = hdrsize; + if (kbpl) + *kbpl = pl; + if (kbplsize) + *kbplsize = plsize; + + rc = 0; +out: + return rc; +} + +static int ep11_kb_decode(const u8 *kb, size_t kblen, + struct ep11kblob_header **kbhdr, size_t *kbhdrsize, + struct ep11keyblob **kbpl, size_t *kbplsize) +{ + struct ep11kblob_header *tmph, *hdr = NULL; + size_t hdrsize = 0, plsize = 0; + struct ep11keyblob *pl = NULL; + int rc = -EINVAL; + u8 *tmpp; + + if (kblen < sizeof(struct ep11kblob_header)) + goto out; + tmph = (struct ep11kblob_header *)kb; + + if (tmph->type != TOKTYPE_NON_CCA && + tmph->len > kblen) + goto out; + + if (ep11_kb_split(kb, kblen, tmph->version, + &hdr, &hdrsize, &tmpp, &plsize)) + goto out; + + if (plsize < sizeof(struct ep11keyblob)) + goto out; + + if (!is_ep11_keyblob(tmpp)) + goto out; + + pl = (struct ep11keyblob *)tmpp; + plsize = hdr->len - hdrsize; + + if (kbhdr) + *kbhdr = hdr; + if (kbhdrsize) + *kbhdrsize = hdrsize; + if (kbpl) + *kbpl = pl; + if (kbplsize) + *kbplsize = plsize; + + rc = 0; +out: + return rc; +} + +/* + * For valid ep11 keyblobs, returns a reference to the wrappingkey verification + * pattern. Otherwise NULL. + */ +const u8 *ep11_kb_wkvp(const u8 *keyblob, size_t keybloblen) +{ + struct ep11keyblob *kb; + + if (ep11_kb_decode(keyblob, keybloblen, NULL, NULL, &kb, NULL)) + return NULL; + return kb->wkvp; +} +EXPORT_SYMBOL(ep11_kb_wkvp); + /* * Simple check if the key blob is a valid EP11 AES key blob with header. 
*/ @@ -489,7 +594,7 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; struct ep11_urb *urb = NULL; - int api = 1, rc = -ENOMEM; + int api = EP11_API_V1, rc = -ENOMEM; /* request cprb and payload */ req = alloc_cprb(sizeof(struct ep11_info_req_pl)); @@ -664,8 +769,9 @@ EXPORT_SYMBOL(ep11_get_domain_info); */ #define KEY_ATTR_DEFAULTS 0x00200c00 -int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize) +static int _ep11_genaeskey(u16 card, u16 domain, + u32 keybitsize, u32 keygenflags, + u8 *keybuf, size_t *keybufsize) { struct keygen_req_pl { struct pl_head head; @@ -685,8 +791,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, u32 attr_bool_bits; u32 attr_val_len_type; u32 attr_val_len_value; - u8 pin_tag; - u8 pin_len; + /* followed by empty pin tag or empty pinblob tag */ } __packed * req_pl; struct keygen_rep_pl { struct pl_head head; @@ -699,10 +804,11 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, u8 data[512]; } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; + size_t req_pl_size, pinblob_size = 0; struct ep11_target_dev target; struct ep11_urb *urb = NULL; - struct ep11keyblob *kb; int api, rc = -ENOMEM; + u8 *p; switch (keybitsize) { case 128: @@ -718,12 +824,22 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, } /* request cprb and payload */ - req = alloc_cprb(sizeof(struct keygen_req_pl)); + api = (!keygenflags || keygenflags & 0x00200000) ? + EP11_API_V4 : EP11_API_V1; + if (ap_is_se_guest()) { + /* + * genkey within SE environment requires API ordinal 6 + * with empty pinblob + */ + api = EP11_API_V6; + pinblob_size = EP11_PINBLOB_V1_BYTES; + } + req_pl_size = sizeof(struct keygen_req_pl) + ASN1TAGLEN(pinblob_size); + req = alloc_cprb(req_pl_size); if (!req) goto out; req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req)); - api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1; - prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */ + prep_head(&req_pl->head, req_pl_size, api, 21); /* GenerateKey */ req_pl->var_tag = 0x04; req_pl->var_len = sizeof(u32); req_pl->keybytes_tag = 0x04; @@ -739,7 +855,10 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, req_pl->attr_bool_bits = keygenflags ? 
keygenflags : KEY_ATTR_DEFAULTS; req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */ req_pl->attr_val_len_value = keybitsize / 8; - req_pl->pin_tag = 0x04; + p = ((u8 *)req_pl) + sizeof(*req_pl); + /* pin tag */ + *p++ = 0x04; + *p++ = pinblob_size; /* reply cprb and payload */ rep = alloc_cprb(sizeof(struct keygen_rep_pl)); @@ -754,7 +873,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, target.ap_id = card; target.dom_id = domain; prep_urb(urb, &target, 1, - req, sizeof(*req) + sizeof(*req_pl), + req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); rc = zcrypt_send_ep11_cprb(urb); @@ -780,14 +899,9 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, goto out; } - /* copy key blob and set header values */ + /* copy key blob */ memcpy(keybuf, rep_pl->data, rep_pl->data_len); *keybufsize = rep_pl->data_len; - kb = (struct ep11keyblob *)keybuf; - kb->head.type = TOKTYPE_NON_CCA; - kb->head.len = rep_pl->data_len; - kb->head.version = TOKVER_EP11_AES; - kb->head.keybitlen = keybitsize; out: kfree(req); @@ -795,6 +909,43 @@ out: kfree(urb); return rc; } + +int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, + u8 *keybuf, size_t *keybufsize, u32 keybufver) +{ + struct ep11kblob_header *hdr; + size_t hdr_size, pl_size; + u8 *pl; + int rc; + + switch (keybufver) { + case TOKVER_EP11_AES: + case TOKVER_EP11_AES_WITH_HEADER: + break; + default: + return -EINVAL; + } + + rc = ep11_kb_split(keybuf, *keybufsize, keybufver, + &hdr, &hdr_size, &pl, &pl_size); + if (rc) + return rc; + + rc = _ep11_genaeskey(card, domain, keybitsize, keygenflags, + pl, &pl_size); + if (rc) + return rc; + + *keybufsize = hdr_size + pl_size; + + /* update header information */ + hdr->type = TOKTYPE_NON_CCA; + hdr->len = *keybufsize; + hdr->version = keybufver; + hdr->bitlen = keybitsize; + + return 0; +} EXPORT_SYMBOL(ep11_genaeskey); static int ep11_cryptsingle(u16 card, u16 domain, @@ -830,7 +981,7 @@ static int ep11_cryptsingle(u16 card, u16 domain, struct ep11_target_dev target; struct ep11_urb *urb = NULL; size_t req_pl_size, rep_pl_size; - int n, api = 1, rc = -ENOMEM; + int n, api = EP11_API_V1, rc = -ENOMEM; u8 *p; /* the simple asn1 coding used has length limits */ @@ -924,12 +1075,12 @@ out: return rc; } -static int ep11_unwrapkey(u16 card, u16 domain, - const u8 *kek, size_t keksize, - const u8 *enckey, size_t enckeysize, - u32 mech, const u8 *iv, - u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize) +static int _ep11_unwrapkey(u16 card, u16 domain, + const u8 *kek, size_t keksize, + const u8 *enckey, size_t enckeysize, + u32 mech, const u8 *iv, + u32 keybitsize, u32 keygenflags, + u8 *keybuf, size_t *keybufsize) { struct uw_req_pl { struct pl_head head; @@ -949,7 +1100,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, * maybe followed by iv data * followed by kek tag + kek blob * followed by empty mac tag - * followed by empty pin tag + * followed by empty pin tag or empty pinblob tag * followed by encryted key tag + bytes */ } __packed * req_pl; @@ -964,21 +1115,30 @@ static int ep11_unwrapkey(u16 card, u16 domain, u8 data[512]; } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; + size_t req_pl_size, pinblob_size = 0; struct ep11_target_dev target; struct ep11_urb *urb = NULL; - struct ep11keyblob *kb; - size_t req_pl_size; int api, rc = -ENOMEM; u8 *p; /* request cprb and payload */ + api = (!keygenflags || keygenflags & 0x00200000) ? 
+ EP11_API_V4 : EP11_API_V1; + if (ap_is_se_guest()) { + /* + * unwrap within SE environment requires API ordinal 6 + * with empty pinblob + */ + api = EP11_API_V6; + pinblob_size = EP11_PINBLOB_V1_BYTES; + } req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0) - + ASN1TAGLEN(keksize) + 4 + ASN1TAGLEN(enckeysize); + + ASN1TAGLEN(keksize) + ASN1TAGLEN(0) + + ASN1TAGLEN(pinblob_size) + ASN1TAGLEN(enckeysize); req = alloc_cprb(req_pl_size); if (!req) goto out; req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req)); - api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1; prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */ req_pl->attr_tag = 0x04; req_pl->attr_len = 7 * sizeof(u32); @@ -1003,9 +1163,10 @@ static int ep11_unwrapkey(u16 card, u16 domain, /* empty mac key tag */ *p++ = 0x04; *p++ = 0; - /* empty pin tag */ + /* pin tag */ *p++ = 0x04; - *p++ = 0; + *p++ = pinblob_size; + p += pinblob_size; /* encrypted key value tag and bytes */ p += asn1tag_write(p, 0x04, enckey, enckeysize); @@ -1048,14 +1209,9 @@ static int ep11_unwrapkey(u16 card, u16 domain, goto out; } - /* copy key blob and set header values */ + /* copy key blob */ memcpy(keybuf, rep_pl->data, rep_pl->data_len); *keybufsize = rep_pl->data_len; - kb = (struct ep11keyblob *)keybuf; - kb->head.type = TOKTYPE_NON_CCA; - kb->head.len = rep_pl->data_len; - kb->head.version = TOKVER_EP11_AES; - kb->head.keybitlen = keybitsize; out: kfree(req); @@ -1064,10 +1220,46 @@ out: return rc; } -static int ep11_wrapkey(u16 card, u16 domain, - const u8 *key, size_t keysize, - u32 mech, const u8 *iv, - u8 *databuf, size_t *datasize) +static int ep11_unwrapkey(u16 card, u16 domain, + const u8 *kek, size_t keksize, + const u8 *enckey, size_t enckeysize, + u32 mech, const u8 *iv, + u32 keybitsize, u32 keygenflags, + u8 *keybuf, size_t *keybufsize, + u8 keybufver) +{ + struct ep11kblob_header *hdr; + size_t hdr_size, pl_size; + u8 *pl; + int rc; + + rc = ep11_kb_split(keybuf, *keybufsize, keybufver, + &hdr, &hdr_size, &pl, &pl_size); + if (rc) + return rc; + + rc = _ep11_unwrapkey(card, domain, kek, keksize, enckey, enckeysize, + mech, iv, keybitsize, keygenflags, + pl, &pl_size); + if (rc) + return rc; + + *keybufsize = hdr_size + pl_size; + + /* update header information */ + hdr = (struct ep11kblob_header *)keybuf; + hdr->type = TOKTYPE_NON_CCA; + hdr->len = *keybufsize; + hdr->version = keybufver; + hdr->bitlen = keybitsize; + + return 0; +} + +static int _ep11_wrapkey(u16 card, u16 domain, + const u8 *key, size_t keysize, + u32 mech, const u8 *iv, + u8 *databuf, size_t *datasize) { struct wk_req_pl { struct pl_head head; @@ -1097,20 +1289,10 @@ static int ep11_wrapkey(u16 card, u16 domain, struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; struct ep11_urb *urb = NULL; - struct ep11keyblob *kb; size_t req_pl_size; int api, rc = -ENOMEM; - bool has_header = false; u8 *p; - /* maybe the session field holds a header with key info */ - kb = (struct ep11keyblob *)key; - if (kb->head.type == TOKTYPE_NON_CCA && - kb->head.version == TOKVER_EP11_AES) { - has_header = true; - keysize = min_t(size_t, kb->head.len, keysize); - } - /* request cprb and payload */ req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0) + ASN1TAGLEN(keysize) + 4; @@ -1120,7 +1302,8 @@ static int ep11_wrapkey(u16 card, u16 domain, if (!mech || mech == 0x80060001) req->flags |= 0x20; /* CPACF_WRAP needs special bit */ req_pl = (struct wk_req_pl *)(((u8 *)req) + sizeof(*req)); - api = (!mech || mech == 0x80060001) ? 
4 : 1; /* CKM_IBM_CPACF_WRAP */ + api = (!mech || mech == 0x80060001) ? /* CKM_IBM_CPACF_WRAP */ + EP11_API_V4 : EP11_API_V1; prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */ req_pl->var_tag = 0x04; req_pl->var_len = sizeof(u32); @@ -1135,11 +1318,6 @@ static int ep11_wrapkey(u16 card, u16 domain, } /* key blob */ p += asn1tag_write(p, 0x04, key, keysize); - /* maybe the key argument needs the head data cleaned out */ - if (has_header) { - kb = (struct ep11keyblob *)(p - keysize); - memset(&kb->head, 0, sizeof(kb->head)); - } /* empty kek tag */ *p++ = 0x04; *p++ = 0; @@ -1198,10 +1376,10 @@ out: } int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, size_t *keybufsize) + const u8 *clrkey, u8 *keybuf, size_t *keybufsize, + u32 keytype) { int rc; - struct ep11keyblob *kb; u8 encbuf[64], *kek = NULL; size_t clrkeylen, keklen, encbuflen = sizeof(encbuf); @@ -1223,17 +1401,15 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, } /* Step 1: generate AES 256 bit random kek key */ - rc = ep11_genaeskey(card, domain, 256, - 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */ - kek, &keklen); + rc = _ep11_genaeskey(card, domain, 256, + 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */ + kek, &keklen); if (rc) { DEBUG_ERR( "%s generate kek key failed, rc=%d\n", __func__, rc); goto out; } - kb = (struct ep11keyblob *)kek; - memset(&kb->head, 0, sizeof(kb->head)); /* Step 2: encrypt clear key value with the kek key */ rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen, @@ -1248,7 +1424,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, /* Step 3: import the encrypted key value as a new key */ rc = ep11_unwrapkey(card, domain, kek, keklen, encbuf, encbuflen, 0, def_iv, - keybitsize, 0, keybuf, keybufsize); + keybitsize, 0, keybuf, keybufsize, keytype); if (rc) { DEBUG_ERR( "%s importing key value as new key failed,, rc=%d\n", @@ -1262,11 +1438,12 @@ out: } EXPORT_SYMBOL(ep11_clr2keyblob); -int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen, +int ep11_kblob2protkey(u16 card, u16 dom, + const u8 *keyblob, size_t keybloblen, u8 *protkey, u32 *protkeylen, u32 *protkeytype) { - int rc = -EIO; - u8 *wkbuf = NULL; + struct ep11kblob_header *hdr; + struct ep11keyblob *key; size_t wkbuflen, keylen; struct wk_info { u16 version; @@ -1277,31 +1454,17 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen, u8 res2[8]; u8 pkey[]; } __packed * wki; - const u8 *key; - struct ep11kblob_header *hdr; + u8 *wkbuf = NULL; + int rc = -EIO; - /* key with or without header ? 
*/ - hdr = (struct ep11kblob_header *)keyblob; - if (hdr->type == TOKTYPE_NON_CCA && - (hdr->version == TOKVER_EP11_AES_WITH_HEADER || - hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && - is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) { - /* EP11 AES or ECC key with header */ - key = keyblob + sizeof(struct ep11kblob_header); - keylen = hdr->len - sizeof(struct ep11kblob_header); - } else if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_AES && - is_ep11_keyblob(keyblob)) { - /* EP11 AES key (old style) */ - key = keyblob; - keylen = hdr->len; - } else if (is_ep11_keyblob(keyblob)) { - /* raw EP11 key blob */ - key = keyblob; - keylen = keybloblen; - } else { + if (ep11_kb_decode((u8 *)keyblob, keybloblen, &hdr, NULL, &key, &keylen)) return -EINVAL; + + if (hdr->version == TOKVER_EP11_AES) { + /* wipe overlayed header */ + memset(hdr, 0, sizeof(*hdr)); } + /* !!! hdr is no longer a valid header !!! */ /* alloc temp working buffer */ wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1)); @@ -1310,8 +1473,8 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen, return -ENOMEM; /* ep11 secure key -> protected key + info */ - rc = ep11_wrapkey(card, dom, key, keylen, - 0, def_iv, wkbuf, &wkbuflen); + rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen, + 0, def_iv, wkbuf, &wkbuflen); if (rc) { DEBUG_ERR( "%s rewrapping ep11 key to pkey failed, rc=%d\n", diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h index a3eddf51242d..9d17fd5228a7 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.h +++ b/drivers/s390/crypto/zcrypt_ep11misc.h @@ -12,7 +12,9 @@ #include <asm/zcrypt.h> #include <asm/pkey.h> -#define EP11_API_V 4 /* highest known and supported EP11 API version */ +#define EP11_API_V1 1 /* min EP11 API, default if no higher api required */ +#define EP11_API_V4 4 /* supported EP11 API for the ep11misc cprbs */ +#define EP11_API_V6 6 /* min EP11 API for some cprbs in SE environment */ #define EP11_STRUCT_MAGIC 0x1234 #define EP11_BLOB_PKEY_EXTRACTABLE 0x00200000 @@ -29,14 +31,7 @@ struct ep11keyblob { union { u8 session[32]; /* only used for PKEY_TYPE_EP11: */ - struct { - u8 type; /* 0x00 (TOKTYPE_NON_CCA) */ - u8 res0; /* unused */ - u16 len; /* total length in bytes of this blob */ - u8 version; /* 0x03 (TOKVER_EP11_AES) */ - u8 res1; /* unused */ - u16 keybitlen; /* clear key bit len, 0 for unknown */ - } head; + struct ep11kblob_header head; }; u8 wkvp[16]; /* wrapping key verification pattern */ u64 attr; /* boolean key attributes */ @@ -56,6 +51,12 @@ static inline bool is_ep11_keyblob(const u8 *key) } /* + * For valid ep11 keyblobs, returns a reference to the wrappingkey verification + * pattern. Otherwise NULL. + */ +const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen); + +/* * Simple check if the key blob is a valid EP11 AES key blob with header. * If checkcpacfexport is enabled, the key is also checked for the * attributes needed to export this key for CPACF use. @@ -114,13 +115,14 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info); * Generate (random) EP11 AES secure key. */ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize); + u8 *keybuf, size_t *keybufsize, u32 keybufver); /* * Generate EP11 AES secure key with given clear key value. 
*/ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, size_t *keybufsize); + const u8 *clrkey, u8 *keybuf, size_t *keybufsize, + u32 keytype); /* * Build a list of ep11 apqns meeting the following constrains: diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index 51f8f7a463f7..2e155de8abe5 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Copyright IBM Corp. 2001, 2012 + * Copyright IBM Corp. 2001, 2023 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * @@ -28,15 +28,12 @@ /* >= CEX3A: 4096 bits */ #define CEX3A_MAX_MOD_SIZE 512 -/* CEX2A: max outputdatalength + type80_hdr */ -#define CEX2A_MAX_RESPONSE_SIZE 0x110 - /* >= CEX3A: 512 bit modulus, (max outputdatalength) + type80_hdr */ #define CEX3A_MAX_RESPONSE_SIZE 0x210 MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \ - "Copyright IBM Corp. 2001, 2012"); + "Copyright IBM Corp. 2001, 2023"); MODULE_LICENSE("GPL"); /* @@ -366,20 +363,17 @@ static int convert_type80(struct zcrypt_queue *zq, ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } - if (zq->zcard->user_space_type == ZCRYPT_CEX2A) - BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); - else - BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE); + BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE); data = reply->msg + t80h->len - outputdatalength; if (copy_to_user(outputdata, data, outputdatalength)) return -EFAULT; return 0; } -static int convert_response_cex2a(struct zcrypt_queue *zq, - struct ap_message *reply, - char __user *outputdata, - unsigned int outputdatalength) +static int convert_response(struct zcrypt_queue *zq, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) { /* Response type byte is the second byte in the response. */ unsigned char rtype = ((unsigned char *)reply->msg)[1]; @@ -414,9 +408,9 @@ static int convert_response_cex2a(struct zcrypt_queue *zq, * @msg: pointer to the AP message * @reply: pointer to the AP reply message */ -static void zcrypt_cex2a_receive(struct ap_queue *aq, - struct ap_message *msg, - struct ap_message *reply) +static void zcrypt_msgtype50_receive(struct ap_queue *aq, + struct ap_message *msg, + struct ap_message *reply) { static struct error_hdr error_reply = { .type = TYPE82_RSP_CODE, @@ -456,19 +450,18 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0); * CEXxA device to the request distributor * @mex: pointer to the modexpo request buffer */ -static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq, - struct ica_rsa_modexpo *mex, - struct ap_message *ap_msg) +static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq, + struct ica_rsa_modexpo *mex, + struct ap_message *ap_msg) { struct completion work; int rc; - ap_msg->bufsize = (zq->zcard->user_space_type == ZCRYPT_CEX2A) ? 
- MSGTYPE50_CRB2_MAX_MSG_SIZE : MSGTYPE50_CRB3_MAX_MSG_SIZE; + ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE; ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); if (!ap_msg->msg) return -ENOMEM; - ap_msg->receive = zcrypt_cex2a_receive; + ap_msg->receive = zcrypt_msgtype50_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg->private = &work; @@ -483,9 +476,9 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq, if (rc == 0) { rc = ap_msg->rc; if (rc == 0) - rc = convert_response_cex2a(zq, ap_msg, - mex->outputdata, - mex->outputdatalength); + rc = convert_response(zq, ap_msg, + mex->outputdata, + mex->outputdatalength); } else { /* Signal pending. */ ap_cancel_message(zq->queue, ap_msg); @@ -507,19 +500,18 @@ out: * CEXxA device to the request distributor * @crt: pointer to the modexpoc_crt request buffer */ -static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq, - struct ica_rsa_modexpo_crt *crt, - struct ap_message *ap_msg) +static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq, + struct ica_rsa_modexpo_crt *crt, + struct ap_message *ap_msg) { struct completion work; int rc; - ap_msg->bufsize = (zq->zcard->user_space_type == ZCRYPT_CEX2A) ? - MSGTYPE50_CRB2_MAX_MSG_SIZE : MSGTYPE50_CRB3_MAX_MSG_SIZE; + ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE; ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); if (!ap_msg->msg) return -ENOMEM; - ap_msg->receive = zcrypt_cex2a_receive; + ap_msg->receive = zcrypt_msgtype50_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg->private = &work; @@ -534,9 +526,9 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq, if (rc == 0) { rc = ap_msg->rc; if (rc == 0) - rc = convert_response_cex2a(zq, ap_msg, - crt->outputdata, - crt->outputdatalength); + rc = convert_response(zq, ap_msg, + crt->outputdata, + crt->outputdatalength); } else { /* Signal pending. */ ap_cancel_message(zq->queue, ap_msg); @@ -555,8 +547,8 @@ out: * The crypto operations for message type 50. */ static struct zcrypt_ops zcrypt_msgtype50_ops = { - .rsa_modexpo = zcrypt_cex2a_modexpo, - .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt, + .rsa_modexpo = zcrypt_msgtype50_modexpo, + .rsa_modexpo_crt = zcrypt_msgtype50_modexpo_crt, .owner = THIS_MODULE, .name = MSGTYPE50_NAME, .variant = MSGTYPE50_VARIANT_DEFAULT, diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h index eb49f06bed29..323e93b90b12 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.h +++ b/drivers/s390/crypto/zcrypt_msgtype50.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ */ /* - * Copyright IBM Corp. 2001, 2012 + * Copyright IBM Corp. 2001, 2023 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * @@ -15,7 +15,6 @@ #define MSGTYPE50_NAME "zcrypt_msgtype50" #define MSGTYPE50_VARIANT_DEFAULT 0 -#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */ #define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /* sizeof(struct type50_crb3_msg) */ #define MSGTYPE_ADJUSTMENT 0x08 /* type04 extension (not needed in type50) */ diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 67fd2ec9c5a1..3c53abbdc342 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Copyright IBM Corp. 2001, 2022 + * Copyright IBM Corp. 
2001, 2023 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * @@ -42,7 +42,7 @@ struct response_type { MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \ - "Copyright IBM Corp. 2001, 2012"); + "Copyright IBM Corp. 2001, 2023"); MODULE_LICENSE("GPL"); struct function_and_rules_block { @@ -1101,23 +1101,36 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, struct ica_xcRB *xcrb, struct ap_message *ap_msg) { - int rc; struct response_type *rtype = ap_msg->private; struct { struct type6_hdr hdr; struct CPRBX cprbx; /* ... more data blocks ... */ } __packed * msg = ap_msg->msg; - - /* - * Set the queue's reply buffer length minus 128 byte padding - * as reply limit for the card firmware. - */ - msg->hdr.fromcardlen1 = min_t(unsigned int, msg->hdr.fromcardlen1, - zq->reply.bufsize - 128); - if (msg->hdr.fromcardlen2) - msg->hdr.fromcardlen2 = - zq->reply.bufsize - msg->hdr.fromcardlen1 - 128; + unsigned int max_payload_size; + int rc, delta; + + /* calculate maximum payload for this card and msg type */ + max_payload_size = zq->reply.bufsize - sizeof(struct type86_fmt2_msg); + + /* limit each of the two from fields to the maximum payload size */ + msg->hdr.fromcardlen1 = min(msg->hdr.fromcardlen1, max_payload_size); + msg->hdr.fromcardlen2 = min(msg->hdr.fromcardlen2, max_payload_size); + + /* calculate delta if the sum of both exceeds max payload size */ + delta = msg->hdr.fromcardlen1 + msg->hdr.fromcardlen2 + - max_payload_size; + if (delta > 0) { + /* + * Sum exceeds maximum payload size, prune fromcardlen1 + * (always trust fromcardlen2) + */ + if (delta > msg->hdr.fromcardlen1) { + rc = -EINVAL; + goto out; + } + msg->hdr.fromcardlen1 -= delta; + } init_completion(&rtype->work); rc = ap_queue_message(zq->queue, ap_msg); @@ -1335,14 +1348,6 @@ out: /* * The crypto operations for a CEXxC card. */ -static struct zcrypt_ops zcrypt_msgtype6_norng_ops = { - .owner = THIS_MODULE, - .name = MSGTYPE06_NAME, - .variant = MSGTYPE06_VARIANT_NORNG, - .rsa_modexpo = zcrypt_msgtype6_modexpo, - .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt, - .send_cprb = zcrypt_msgtype6_send_cprb, -}; static struct zcrypt_ops zcrypt_msgtype6_ops = { .owner = THIS_MODULE, @@ -1365,14 +1370,12 @@ static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = { void __init zcrypt_msgtype6_init(void) { - zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops); zcrypt_msgtype_register(&zcrypt_msgtype6_ops); zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops); } void __exit zcrypt_msgtype6_exit(void) { - zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops); zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops); zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops); } diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 9c67b97faba2..74760c1a163b 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -5,12 +5,11 @@ menu "S/390 network device drivers" config LCS def_tristate m prompt "Lan Channel Station Interface" - depends on CCW && NETDEVICES && (ETHERNET || FDDI) + depends on CCW && NETDEVICES && ETHERNET help Select this option if you want to use LCS networking on IBM System z. - This device driver supports FDDI (IEEE 802.7) and Ethernet. To compile as a module, choose M. The module name is lcs. - If you do not know what it is, it's safe to choose Y. + If you do not use LCS, choose N. 
config CTCM def_tristate m diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index 9b5fccdbc7d6..6df7f377d2f9 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -36,7 +36,7 @@ static const struct smcd_ops ism_ops; static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */ /* a list for fast mapping */ static u8 max_client; -static DEFINE_SPINLOCK(clients_lock); +static DEFINE_MUTEX(clients_lock); struct ism_dev_list { struct list_head list; struct mutex mutex; /* protects ism device list */ @@ -47,14 +47,22 @@ static struct ism_dev_list ism_dev_list = { .mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex), }; +static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism) +{ + unsigned long flags; + + spin_lock_irqsave(&ism->lock, flags); + ism->subs[client->id] = client; + spin_unlock_irqrestore(&ism->lock, flags); +} + int ism_register_client(struct ism_client *client) { struct ism_dev *ism; - unsigned long flags; int i, rc = -ENOSPC; mutex_lock(&ism_dev_list.mutex); - spin_lock_irqsave(&clients_lock, flags); + mutex_lock(&clients_lock); for (i = 0; i < MAX_CLIENTS; ++i) { if (!clients[i]) { clients[i] = client; @@ -65,12 +73,14 @@ int ism_register_client(struct ism_client *client) break; } } - spin_unlock_irqrestore(&clients_lock, flags); + mutex_unlock(&clients_lock); + if (i < MAX_CLIENTS) { /* initialize with all devices that we got so far */ list_for_each_entry(ism, &ism_dev_list.list, list) { ism->priv[i] = NULL; client->add(ism); + ism_setup_forwarding(client, ism); } } mutex_unlock(&ism_dev_list.mutex); @@ -86,25 +96,32 @@ int ism_unregister_client(struct ism_client *client) int rc = 0; mutex_lock(&ism_dev_list.mutex); - spin_lock_irqsave(&clients_lock, flags); - clients[client->id] = NULL; - if (client->id + 1 == max_client) - max_client--; - spin_unlock_irqrestore(&clients_lock, flags); list_for_each_entry(ism, &ism_dev_list.list, list) { + spin_lock_irqsave(&ism->lock, flags); + /* Stop forwarding IRQs and events */ + ism->subs[client->id] = NULL; for (int i = 0; i < ISM_NR_DMBS; ++i) { if (ism->sba_client_arr[i] == client->id) { - pr_err("%s: attempt to unregister client '%s'" - "with registered dmb(s)\n", __func__, - client->name); + WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n", + __func__, client->name); rc = -EBUSY; - goto out; + goto err_reg_dmb; } } + spin_unlock_irqrestore(&ism->lock, flags); } -out: mutex_unlock(&ism_dev_list.mutex); + mutex_lock(&clients_lock); + clients[client->id] = NULL; + if (client->id + 1 == max_client) + max_client--; + mutex_unlock(&clients_lock); + return rc; + +err_reg_dmb: + spin_unlock_irqrestore(&ism->lock, flags); + mutex_unlock(&ism_dev_list.mutex); return rc; } EXPORT_SYMBOL_GPL(ism_unregister_client); @@ -328,6 +345,7 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb, struct ism_client *client) { union ism_reg_dmb cmd; + unsigned long flags; int ret; ret = ism_alloc_dmb(ism, dmb); @@ -351,7 +369,9 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb, goto out; } dmb->dmb_tok = cmd.response.dmb_tok; + spin_lock_irqsave(&ism->lock, flags); ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id; + spin_unlock_irqrestore(&ism->lock, flags); out: return ret; } @@ -360,6 +380,7 @@ EXPORT_SYMBOL_GPL(ism_register_dmb); int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb) { union ism_unreg_dmb cmd; + unsigned long flags; int ret; memset(&cmd, 0, sizeof(cmd)); @@ -368,7 +389,9 @@ int 
ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb) cmd.request.dmb_tok = dmb->dmb_tok; + spin_lock_irqsave(&ism->lock, flags); ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT; + spin_unlock_irqrestore(&ism->lock, flags); ret = ism_cmd(ism, &cmd); if (ret && ret != ISM_ERROR) @@ -491,6 +514,7 @@ static u16 ism_get_chid(struct ism_dev *ism) static void ism_handle_event(struct ism_dev *ism) { struct ism_event *entry; + struct ism_client *clt; int i; while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) { @@ -499,21 +523,21 @@ static void ism_handle_event(struct ism_dev *ism) entry = &ism->ieq->entry[ism->ieq_idx]; debug_event(ism_debug_info, 2, entry, sizeof(*entry)); - spin_lock(&clients_lock); - for (i = 0; i < max_client; ++i) - if (clients[i]) - clients[i]->handle_event(ism, entry); - spin_unlock(&clients_lock); + for (i = 0; i < max_client; ++i) { + clt = ism->subs[i]; + if (clt) + clt->handle_event(ism, entry); + } } } static irqreturn_t ism_handle_irq(int irq, void *data) { struct ism_dev *ism = data; - struct ism_client *clt; unsigned long bit, end; unsigned long *bv; u16 dmbemask; + u8 client_id; bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET]; end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET; @@ -530,8 +554,10 @@ static irqreturn_t ism_handle_irq(int irq, void *data) dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET]; ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0; barrier(); - clt = clients[ism->sba_client_arr[bit]]; - clt->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask); + client_id = ism->sba_client_arr[bit]; + if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id])) + continue; + ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask); } if (ism->sba->e) { @@ -548,20 +574,9 @@ static u64 ism_get_local_gid(struct ism_dev *ism) return ism->local_gid; } -static void ism_dev_add_work_func(struct work_struct *work) -{ - struct ism_client *client = container_of(work, struct ism_client, - add_work); - - client->add(client->tgt_ism); - atomic_dec(&client->tgt_ism->add_dev_cnt); - wake_up(&client->tgt_ism->waitq); -} - static int ism_dev_init(struct ism_dev *ism) { struct pci_dev *pdev = ism->pdev; - unsigned long flags; int i, ret; ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); @@ -594,25 +609,16 @@ static int ism_dev_init(struct ism_dev *ism) /* hardware is V2 capable */ ism_create_system_eid(); - init_waitqueue_head(&ism->waitq); - atomic_set(&ism->free_clients_cnt, 0); - atomic_set(&ism->add_dev_cnt, 0); - - wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt)); - spin_lock_irqsave(&clients_lock, flags); - for (i = 0; i < max_client; ++i) + mutex_lock(&ism_dev_list.mutex); + mutex_lock(&clients_lock); + for (i = 0; i < max_client; ++i) { if (clients[i]) { - INIT_WORK(&clients[i]->add_work, - ism_dev_add_work_func); - clients[i]->tgt_ism = ism; - atomic_inc(&ism->add_dev_cnt); - schedule_work(&clients[i]->add_work); + clients[i]->add(ism); + ism_setup_forwarding(clients[i], ism); } - spin_unlock_irqrestore(&clients_lock, flags); - - wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt)); + } + mutex_unlock(&clients_lock); - mutex_lock(&ism_dev_list.mutex); list_add(&ism->list, &ism_dev_list.list); mutex_unlock(&ism_dev_list.mutex); @@ -687,36 +693,24 @@ err_dev: return ret; } -static void ism_dev_remove_work_func(struct work_struct *work) -{ - struct ism_client *client = container_of(work, struct ism_client, - remove_work); - - client->remove(client->tgt_ism); - 
atomic_dec(&client->tgt_ism->free_clients_cnt); - wake_up(&client->tgt_ism->waitq); -} - -/* Callers must hold ism_dev_list.mutex */ static void ism_dev_exit(struct ism_dev *ism) { struct pci_dev *pdev = ism->pdev; unsigned long flags; int i; - wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt)); - spin_lock_irqsave(&clients_lock, flags); + spin_lock_irqsave(&ism->lock, flags); for (i = 0; i < max_client; ++i) - if (clients[i]) { - INIT_WORK(&clients[i]->remove_work, - ism_dev_remove_work_func); - clients[i]->tgt_ism = ism; - atomic_inc(&ism->free_clients_cnt); - schedule_work(&clients[i]->remove_work); - } - spin_unlock_irqrestore(&clients_lock, flags); + ism->subs[i] = NULL; + spin_unlock_irqrestore(&ism->lock, flags); - wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt)); + mutex_lock(&ism_dev_list.mutex); + mutex_lock(&clients_lock); + for (i = 0; i < max_client; ++i) { + if (clients[i]) + clients[i]->remove(ism); + } + mutex_unlock(&clients_lock); if (SYSTEM_EID.serial_number[0] != '0' || SYSTEM_EID.type[0] != '0') @@ -727,15 +721,14 @@ static void ism_dev_exit(struct ism_dev *ism) kfree(ism->sba_client_arr); pci_free_irq_vectors(pdev); list_del_init(&ism->list); + mutex_unlock(&ism_dev_list.mutex); } static void ism_remove(struct pci_dev *pdev) { struct ism_dev *ism = dev_get_drvdata(&pdev->dev); - mutex_lock(&ism_dev_list.mutex); ism_dev_exit(ism); - mutex_unlock(&ism_dev_list.mutex); pci_release_mem_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 9fd8e6f07a03..a1f2acd6fb8f 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -17,7 +17,6 @@ #include <linux/if.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> -#include <linux/fddidevice.h> #include <linux/inetdevice.h> #include <linux/in.h> #include <linux/igmp.h> @@ -36,10 +35,6 @@ #include "lcs.h" -#if !defined(CONFIG_ETHERNET) && !defined(CONFIG_FDDI) -#error Cannot compile lcs.c without some net devices switched on. -#endif - /* * initialization string for output */ @@ -1601,19 +1596,11 @@ lcs_startlan_auto(struct lcs_card *card) int rc; LCS_DBF_TEXT(2, trace, "strtauto"); -#ifdef CONFIG_ETHERNET card->lan_type = LCS_FRAME_TYPE_ENET; rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); if (rc == 0) return 0; -#endif -#ifdef CONFIG_FDDI - card->lan_type = LCS_FRAME_TYPE_FDDI; - rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); - if (rc == 0) - return 0; -#endif return -EIO; } @@ -1806,22 +1793,16 @@ lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) card->stats.rx_errors++; return; } - /* What kind of frame is it? */ - if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL) { - /* Control frame. */ + if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL) lcs_get_control(card, (struct lcs_cmd *) lcs_hdr); - } else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET || - lcs_hdr->type == LCS_FRAME_TYPE_TR || - lcs_hdr->type == LCS_FRAME_TYPE_FDDI) { - /* Normal network packet. */ + else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET) lcs_get_skb(card, (char *)(lcs_hdr + 1), lcs_hdr->offset - offset - sizeof(struct lcs_header)); - } else { - /* Unknown frame type. */ - ; // FIXME: error message ? - } - /* Proceed to next frame. 
*/ + else + dev_info_once(&card->dev->dev, + "Unknown frame type %d\n", + lcs_hdr->type); offset = lcs_hdr->offset; lcs_hdr->offset = LCS_ILLEGAL_OFFSET; lcs_hdr = (struct lcs_header *) (buffer->data + offset); @@ -2140,18 +2121,10 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) goto netdev_out; } switch (card->lan_type) { -#ifdef CONFIG_ETHERNET case LCS_FRAME_TYPE_ENET: card->lan_type_trans = eth_type_trans; dev = alloc_etherdev(0); break; -#endif -#ifdef CONFIG_FDDI - case LCS_FRAME_TYPE_FDDI: - card->lan_type_trans = fddi_type_trans; - dev = alloc_fddidev(0); - break; -#endif default: LCS_DBF_TEXT(3, setup, "errinit"); pr_err(" Initialization failed\n"); diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 1d195429753d..613eab729704 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -716,7 +716,6 @@ struct qeth_card_info { u16 chid; u8 ids_valid:1; /* cssid,iid,chid */ u8 dev_addr_is_registered:1; - u8 open_when_online:1; u8 promisc_mode:1; u8 use_v1_blkt:1; u8 is_vm_nic:1; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 1d5b207c2b9e..cd783290bde5 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -5373,8 +5373,6 @@ int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc, qeth_clear_ipacmd_list(card); rtnl_lock(); - card->info.open_when_online = card->dev->flags & IFF_UP; - dev_close(card->dev); netif_device_detach(card->dev); netif_carrier_off(card->dev); rtnl_unlock(); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 9f13ed170a43..75910c0bcc2b 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -2388,9 +2388,12 @@ static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok) qeth_enable_hw_features(dev); qeth_l2_enable_brport_features(card); - if (card->info.open_when_online) { - card->info.open_when_online = 0; - dev_open(dev, NULL); + if (netif_running(dev)) { + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); + qeth_l2_set_rx_mode(dev); } rtnl_unlock(); } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index af4e60d2917e..b92a32b4b114 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2018,9 +2018,11 @@ static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok) netif_device_attach(dev); qeth_enable_hw_features(dev); - if (card->info.open_when_online) { - card->info.open_when_online = 0; - dev_open(dev, NULL); + if (netif_running(dev)) { + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); } rtnl_unlock(); } diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index f21307537829..4f0d0e55f0d4 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -534,8 +534,7 @@ static void zfcp_fc_adisc_handler(void *data) /* re-init to undo drop from zfcp_fc_adisc() */ port->d_id = ntoh24(adisc_resp->adisc_port_id); - /* port is good, unblock rport without going through erp */ - zfcp_scsi_schedule_rport_register(port); + /* port is still good, nothing to do */ out: atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status); put_device(&port->dev); @@ -595,9 +594,6 @@ void zfcp_fc_link_test_work(struct work_struct *work) int retval; set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */ - get_device(&port->dev); - 
port->rport_task = RPORT_DEL; - zfcp_scsi_rport_work(&port->rport_work); /* only issue one test command at one time per port */ if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)