author     Linus Torvalds <torvalds@linux-foundation.org>   2021-02-21 13:40:06 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-02-21 13:40:06 -0800
commit     df24212a493afda0d4de42176bea10d45825e9a0
tree       72d191a100a849cf3d030e6409c52429d8537131 /drivers/s390
parent     3e10585335b7967326ca7b4118cada0d2d00a2ab
parent     2223318c2862edc7f5b282939b850b19fc934ec4
Merge tag 's390-5.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Vasily Gorbik:
- Convert to using the generic entry infrastructure.
- Add vdso time namespace support.
- Switch s390 and alpha to 64-bit ino_t, as discussed at
https://lore.kernel.org/linux-mm/YCV7QiyoweJwvN+m@osiris/
- Get rid of expensive stck (store clock) usages where possible.
Utilize cpu alternatives to patch stckf when supported.
- Make tod_clock usage less error-prone by converting it to a union and
reworking the code that uses it (see the sketch after this list).
- Machine check handler fixes and cleanups.
- Drop a couple of minor inline asm optimizations to fix the clang build.
- Default config changes, notably to make libvirt happy.
- Various changes to rework and improve the qdio code.
- Various other small fixes and improvements all over the code.
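
The union tod_clock rework gives the epoch index, the TOD value proper, and the programmable field one shared definition instead of scattered offsets into a byte array. Below is a minimal illustrative sketch of such a union with a hypothetical field layout; the real definition lives in arch/s390/include/asm/timex.h and its exact bit widths may differ.

	/* Illustrative only: a tod_clock-style union (hypothetical layout). */
	union tod_clock_example {
		__uint128_t val;		/* whole 128-bit TOD value */
		struct {
			__uint128_t ei	:  8;	/* epoch index */
			__uint128_t tod	: 64;	/* TOD clock proper */
			__uint128_t	: 40;
			__uint128_t pf	: 16;	/* programmable field */
		};
	};

	/* Callers read named members instead of computing byte offsets: */
	static inline unsigned long long example_tod_value(const union tod_clock_example *clk)
	{
		return clk->tod;	/* raw 64-bit TOD component */
	}
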
* tag 's390-5.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (68 commits)
s390/qdio: remove 'merge_pending' mechanism
s390/qdio: improve handling of PENDING buffers for QEBSM devices
s390/qdio: rework q->qdio_error indication
s390/qdio: inline qdio_kick_handler()
s390/time: remove get_tod_clock_ext()
s390/crypto: use store_tod_clock_ext()
s390/hypfs: use store_tod_clock_ext()
s390/debug: use union tod_clock
s390/kvm: use union tod_clock
s390/vdso: use union tod_clock
s390/time: convert tod_clock_base to union
s390/time: introduce new store_tod_clock_ext()
s390/time: rename store_tod_clock_ext() and use union tod_clock
s390/time: introduce union tod_clock
s390,alpha: switch to 64-bit ino_t
s390: split cleanup_sie
s390: use r13 in cleanup_sie as temp register
s390: fix kernel asce loading when sie is interrupted
s390: add stack for machine check handler
s390: use WRITE_ONCE when re-allocating async stack
...
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/char/tape_3590.c         |   4
-rw-r--r--  drivers/s390/cio/css.c                |  20
-rw-r--r--  drivers/s390/cio/device.c             |  39
-rw-r--r--  drivers/s390/cio/qdio.h               |  25
-rw-r--r--  drivers/s390/cio/qdio_debug.c         |   9
-rw-r--r--  drivers/s390/cio/qdio_main.c          | 209
-rw-r--r--  drivers/s390/cio/qdio_setup.c         |  19
-rw-r--r--  drivers/s390/cio/qdio_thinint.c       |  70
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c      |  14
-rw-r--r--  drivers/s390/crypto/zcrypt_ccamisc.c  |  15
-rw-r--r--  drivers/s390/net/qeth_core_main.c     |   7
11 files changed, 154 insertions(+), 277 deletions(-)
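
Several of the cio hunks below (css.c and device.c) stop writing DMA masks into struct device directly and instead call the dma_set_coherent_mask()/dma_set_mask() helpers, which validate the mask and report failure. The following is a minimal sketch of that pattern on a hypothetical device; the function name and the 31-bit/64-bit split mirror the subchannel case but are illustrative only.

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical helper: coherent allocations limited to 31 bits, streaming to 64. */
	static int example_setup_dma_masks(struct device *dev, u64 *streaming_mask)
	{
		int ret;

		dev->dma_mask = streaming_mask;	/* storage for the streaming mask */

		/* CCW-style structures must fit 31-bit addresses. */
		ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(31));
		if (ret)
			return ret;

		/* Streaming mappings carry no such restriction. */
		return dma_set_mask(dev, DMA_BIT_MASK(64));
	}
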
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index ecf8c5006a0e..0d484fe43d7e 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c @@ -761,7 +761,7 @@ tape_3590_done(struct tape_device *device, struct tape_request *request) * This function is called, when error recovery was successful */ static inline int -tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) +tape_3590_erp_succeeded(struct tape_device *device, struct tape_request *request) { DBF_EVENT(3, "Error Recovery successful for %s\n", tape_op_verbose[request->op]); @@ -831,7 +831,7 @@ tape_3590_erp_basic(struct tape_device *device, struct tape_request *request, case SENSE_BRA_PER: return tape_3590_erp_failed(device, request, irb, rc); case SENSE_BRA_CONT: - return tape_3590_erp_succeded(device, request); + return tape_3590_erp_succeeded(device, request); case SENSE_BRA_RE: return tape_3590_erp_retry(device, request, irb); case SENSE_BRA_DRE: diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 94c6470de635..253ab4e7a415 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -225,18 +225,23 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid, INIT_WORK(&sch->todo_work, css_sch_todo); sch->dev.release = &css_subchannel_release; + sch->dev.dma_mask = &sch->dma_mask; device_initialize(&sch->dev); /* - * The physical addresses of some the dma structures that can + * The physical addresses for some of the dma structures that can * belong to a subchannel need to fit 31 bit width (e.g. ccw). */ - sch->dev.coherent_dma_mask = DMA_BIT_MASK(31); + ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31)); + if (ret) + goto err; /* * But we don't have such restrictions imposed on the stuff that * is handled by the streaming API. */ - sch->dma_mask = DMA_BIT_MASK(64); - sch->dev.dma_mask = &sch->dma_mask; + ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64)); + if (ret) + goto err; + return sch; err: @@ -970,8 +975,11 @@ static int __init setup_css(int nr) * css->device as the device argument with the DMA API) * and are fine with 64 bit addresses. 
*/ - css->device.coherent_dma_mask = DMA_BIT_MASK(64); - css->device.dma_mask = &css->device.coherent_dma_mask; + ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64)); + if (ret) { + kfree(css); + goto out_err; + } mutex_init(&css->mutex); ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 4b0a7cbb2096..3f026021e95e 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -621,14 +621,6 @@ static const struct attribute_group *ccwdev_attr_groups[] = { NULL, }; -static int ccw_device_add(struct ccw_device *cdev) -{ - struct device *dev = &cdev->dev; - - dev->bus = &ccw_bus_type; - return device_add(dev); -} - static int match_dev_id(struct device *dev, const void *data) { struct ccw_device *cdev = to_ccwdev(dev); @@ -687,33 +679,47 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) { struct ccw_device *cdev; struct gen_pool *dma_pool; + int ret; cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); - if (!cdev) + if (!cdev) { + ret = -ENOMEM; goto err_cdev; + } cdev->private = kzalloc(sizeof(struct ccw_device_private), GFP_KERNEL | GFP_DMA); - if (!cdev->private) + if (!cdev->private) { + ret = -ENOMEM; goto err_priv; - cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask; + } + cdev->dev.dma_mask = sch->dev.dma_mask; + ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask); + if (ret) + goto err_coherent_mask; + dma_pool = cio_gp_dma_create(&cdev->dev, 1); - if (!dma_pool) + if (!dma_pool) { + ret = -ENOMEM; goto err_dma_pool; + } cdev->private->dma_pool = dma_pool; cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev, sizeof(*cdev->private->dma_area)); - if (!cdev->private->dma_area) + if (!cdev->private->dma_area) { + ret = -ENOMEM; goto err_dma_area; + } return cdev; err_dma_area: cio_gp_dma_destroy(dma_pool, &cdev->dev); err_dma_pool: +err_coherent_mask: kfree(cdev->private); err_priv: kfree(cdev); err_cdev: - return ERR_PTR(-ENOMEM); + return ERR_PTR(ret); } static void ccw_device_todo(struct work_struct *work); @@ -739,6 +745,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, cdev->ccwlock = sch->lock; cdev->dev.parent = &sch->dev; cdev->dev.release = ccw_device_release; + cdev->dev.bus = &ccw_bus_type; cdev->dev.groups = ccwdev_attr_groups; /* Do first half of device_register. */ device_initialize(&cdev->dev); @@ -840,7 +847,7 @@ static void io_subchannel_register(struct ccw_device *cdev) kobject_uevent(&sch->dev.kobj, KOBJ_ADD); } /* make it known to the system */ - ret = ccw_device_add(cdev); + ret = device_add(&cdev->dev); if (ret) { CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", cdev->private->dev_id.ssid, @@ -1052,7 +1059,7 @@ static int io_subchannel_probe(struct subchannel *sch) kobject_uevent(&sch->dev.kobj, KOBJ_ADD); } cdev = sch_get_cdev(sch); - rc = ccw_device_add(cdev); + rc = device_add(&cdev->dev); if (rc) { /* Release online reference. 
*/ put_device(&cdev->dev); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index cd2df4ff8e0e..34bf2f197c71 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -139,9 +139,6 @@ struct qdio_dev_perf_stat { unsigned int qdio_int; unsigned int pci_request_int; - unsigned int tasklet_inbound; - unsigned int tasklet_inbound_resched; - unsigned int tasklet_inbound_resched2; unsigned int tasklet_outbound; unsigned int siga_read; @@ -149,7 +146,6 @@ struct qdio_dev_perf_stat { unsigned int siga_sync; unsigned int inbound_call; - unsigned int inbound_handler; unsigned int stop_polling; unsigned int inbound_queue_full; unsigned int outbound_call; @@ -193,6 +189,8 @@ struct qdio_output_q { struct qdio_outbuf_state *sbal_state; /* timer to check for more outbound work */ struct timer_list timer; + /* tasklet to check for completions */ + struct tasklet_struct tasklet; }; /* @@ -216,13 +214,9 @@ struct qdio_q { /* number of buffers in use by the adapter */ atomic_t nr_buf_used; - /* error condition during a data transfer */ - unsigned int qdio_error; - /* last scan of the queue */ u64 timestamp; - struct tasklet_struct tasklet; struct qdio_queue_perf_stat q_stats; struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned; @@ -254,6 +248,7 @@ struct qdio_irq { struct ccw_device *cdev; struct list_head entry; /* list of thinint devices */ struct dentry *debugfs_dev; + u64 last_data_irq_time; unsigned long int_parm; struct subchannel_id schid; @@ -324,6 +319,14 @@ static inline int multicast_outbound(struct qdio_q *q) (q->nr == q->irq_ptr->nr_output_qs - 1); } +static inline void qdio_deliver_irq(struct qdio_irq *irq) +{ + if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state)) + irq->irq_poll(irq->cdev, irq->int_parm); + else + QDIO_PERF_STAT_INC(irq, int_discarded); +} + #define pci_out_supported(irq) ((irq)->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) #define is_qebsm(q) (q->irq_ptr->sch_token != 0) @@ -357,16 +360,12 @@ extern u64 last_ai_time; /* prototypes for thin interrupt */ int qdio_establish_thinint(struct qdio_irq *irq_ptr); void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); -void tiqdio_add_device(struct qdio_irq *irq_ptr); -void tiqdio_remove_device(struct qdio_irq *irq_ptr); -void tiqdio_inbound_processing(unsigned long q); int qdio_thinint_init(void); void qdio_thinint_exit(void); int test_nonshared_ind(struct qdio_irq *); /* prototypes for setup */ -void qdio_inbound_processing(unsigned long data); -void qdio_outbound_processing(unsigned long data); +void qdio_outbound_tasklet(struct tasklet_struct *t); void qdio_outbound_timer(struct timer_list *t); void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb); diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index 863d17c802ca..00384f58f218 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -105,8 +105,9 @@ static int qstat_show(struct seq_file *m, void *v) if (!q) return 0; - seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n", - q->timestamp, last_ai_time); + seq_printf(m, "Timestamp: %llx\n", q->timestamp); + seq_printf(m, "Last Data IRQ: %llx Last AI: %llx\n", + q->irq_ptr->last_data_irq_time, last_ai_time); seq_printf(m, "nr_used: %d ftc: %d\n", atomic_read(&q->nr_buf_used), q->first_to_check); if (q->is_input_q) { @@ -197,15 +198,11 @@ static char *qperf_names[] = { "Assumed adapter interrupts", "QDIO interrupts", "Requested PCIs", - "Inbound tasklet runs", - "Inbound tasklet resched", - "Inbound tasklet 
resched2", "Outbound tasklet runs", "SIGA read", "SIGA write", "SIGA sync", "Inbound calls", - "Inbound handler", "Inbound stop_polling", "Inbound queue full", "Outbound calls", diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index f9a31c7819ae..03a011619908 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -202,7 +202,7 @@ again: */ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, unsigned char *state, unsigned int count, - int auto_ack, int merge_pending) + int auto_ack) { unsigned char __state = 0; int i = 1; @@ -217,18 +217,9 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, if (__state & SLSB_OWNER_CU) goto out; - if (merge_pending && __state == SLSB_P_OUTPUT_PENDING) - __state = SLSB_P_OUTPUT_EMPTY; - for (; i < count; i++) { bufnr = next_buf(bufnr); - /* merge PENDING into EMPTY: */ - if (merge_pending && - q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING && - __state == SLSB_P_OUTPUT_EMPTY) - continue; - /* stop if next state differs from initial state: */ if (q->slsb.val[bufnr] != __state) break; @@ -242,7 +233,7 @@ out: static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state, int auto_ack) { - return get_buf_states(q, bufnr, state, 1, auto_ack, 0); + return get_buf_states(q, bufnr, state, 1, auto_ack); } /* wrap-around safe setting of slsb states, returns number of changed buffers */ @@ -420,8 +411,6 @@ static inline void account_sbals(struct qdio_q *q, unsigned int count) static void process_buffer_error(struct qdio_q *q, unsigned int start, int count) { - q->qdio_error = QDIO_ERROR_SLSB_STATE; - /* special handling for no target buffer empty */ if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q && q->sbal[start]->element[15].sflags == 0x10) { @@ -450,7 +439,8 @@ static inline void inbound_handle_work(struct qdio_q *q, unsigned int start, q->u.in.batch_count += count; } -static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start) +static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start, + unsigned int *error) { unsigned char state = 0; int count; @@ -465,7 +455,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start) * No siga sync here, as a PCI or we after a thin interrupt * already sync'ed the queues. 
*/ - count = get_buf_states(q, start, &state, count, 1, 0); + count = get_buf_states(q, start, &state, count, 1); if (!count) return 0; @@ -484,6 +474,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start) DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr, count); + *error = QDIO_ERROR_SLSB_STATE; process_buffer_error(q, start, count); inbound_handle_work(q, start, count, false); if (atomic_sub_return(count, &q->nr_buf_used) == 0) @@ -508,11 +499,6 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start) } } -static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start) -{ - return get_inbound_buffer_frontier(q, start); -} - static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start) { unsigned char state = 0; @@ -546,96 +532,23 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, WARN_ON_ONCE(phys_aob & 0xFF); } - q->sbal_state[bufnr].flags = 0; return phys_aob; } -static void qdio_kick_handler(struct qdio_q *q, unsigned int start, - unsigned int count) -{ - if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) - return; - - if (q->is_input_q) { - qperf_inc(q, inbound_handler); - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); - } else { - qperf_inc(q, outbound_handler); - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", - start, count); - } - - q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, - q->irq_ptr->int_parm); - - /* for the next time */ - q->qdio_error = 0; -} - static inline int qdio_tasklet_schedule(struct qdio_q *q) { if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) { - tasklet_schedule(&q->tasklet); + tasklet_schedule(&q->u.out.tasklet); return 0; } return -EPERM; } -static void __qdio_inbound_processing(struct qdio_q *q) -{ - unsigned int start = q->first_to_check; - int count; - - qperf_inc(q, tasklet_inbound); - - count = qdio_inbound_q_moved(q, start); - if (count == 0) - return; - - qdio_kick_handler(q, start, count); - start = add_buf(start, count); - q->first_to_check = start; - - if (!qdio_inbound_q_done(q, start)) { - /* means poll time is not yet over */ - qperf_inc(q, tasklet_inbound_resched); - if (!qdio_tasklet_schedule(q)) - return; - } - - qdio_stop_polling(q); - /* - * We need to check again to not lose initiative after - * resetting the ACK state. 
- */ - if (!qdio_inbound_q_done(q, start)) { - qperf_inc(q, tasklet_inbound_resched2); - qdio_tasklet_schedule(q); - } -} - -void qdio_inbound_processing(unsigned long data) -{ - struct qdio_q *q = (struct qdio_q *)data; - __qdio_inbound_processing(q); -} - -static void qdio_check_pending(struct qdio_q *q, unsigned int index) -{ - unsigned char state; - - if (get_buf_state(q, index, &state, 0) > 0 && - state == SLSB_P_OUTPUT_PENDING && - q->u.out.aobs[index]) { - q->u.out.sbal_state[index].flags |= - QDIO_OUTBUF_STATE_FLAG_PENDING; - q->u.out.aobs[index] = NULL; - } -} - -static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start) +static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start, + unsigned int *error) { unsigned char state = 0; + unsigned int i; int count; q->timestamp = get_tod_clock_fast(); @@ -651,13 +564,19 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start) if (!count) return 0; - count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq); + count = get_buf_states(q, start, &state, count, 0); if (!count) return 0; switch (state) { - case SLSB_P_OUTPUT_EMPTY: case SLSB_P_OUTPUT_PENDING: + /* detach the utilized QAOBs: */ + for (i = 0; i < count; i++) + q->u.out.aobs[QDIO_BUFNR(start + i)] = NULL; + + *error = QDIO_ERROR_SLSB_PENDING; + fallthrough; + case SLSB_P_OUTPUT_EMPTY: /* the adapter got it */ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count); @@ -667,6 +586,10 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start) account_sbals(q, count); return count; case SLSB_P_OUTPUT_ERROR: + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x", + q->nr, count); + + *error = QDIO_ERROR_SLSB_STATE; process_buffer_error(q, start, count); atomic_sub(count, &q->nr_buf_used); if (q->irq_ptr->perf_stat_enabled) @@ -697,26 +620,6 @@ static inline int qdio_outbound_q_done(struct qdio_q *q) return atomic_read(&q->nr_buf_used) == 0; } -static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start) -{ - int count; - - count = get_outbound_buffer_frontier(q, start); - - if (count) { - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); - - if (q->u.out.use_cq) { - unsigned int i; - - for (i = 0; i < count; i++) - qdio_check_pending(q, QDIO_BUFNR(start + i)); - } - } - - return count; -} - static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count, unsigned long aob) { @@ -760,18 +663,29 @@ retry: return cc; } -static void __qdio_outbound_processing(struct qdio_q *q) +void qdio_outbound_tasklet(struct tasklet_struct *t) { + struct qdio_output_q *out_q = from_tasklet(out_q, t, tasklet); + struct qdio_q *q = container_of(out_q, struct qdio_q, u.out); unsigned int start = q->first_to_check; + unsigned int error = 0; int count; qperf_inc(q, tasklet_outbound); WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0); - count = qdio_outbound_q_moved(q, start); + count = get_outbound_buffer_frontier(q, start, &error); if (count) { q->first_to_check = add_buf(start, count); - qdio_kick_handler(q, start, count); + + if (q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE) { + qperf_inc(q, outbound_handler); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", + start, count); + + q->handler(q->irq_ptr->cdev, error, q->nr, start, + count, q->irq_ptr->int_parm); + } } if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) && @@ -798,13 +712,6 @@ sched: qdio_tasklet_schedule(q); } -/* outbound tasklet */ -void qdio_outbound_processing(unsigned 
long data) -{ - struct qdio_q *q = (struct qdio_q *)data; - __qdio_outbound_processing(q); -} - void qdio_outbound_timer(struct timer_list *t) { struct qdio_q *q = from_timer(q, t, u.out.timer); @@ -825,19 +732,6 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq) qdio_tasklet_schedule(out); } -void tiqdio_inbound_processing(unsigned long data) -{ - struct qdio_q *q = (struct qdio_q *)data; - - if (need_siga_sync(q) && need_siga_sync_after_ai(q)) - qdio_sync_queues(q); - - /* The interrupt could be caused by a PCI request: */ - qdio_check_outbound_pci_queues(q->irq_ptr); - - __qdio_inbound_processing(q); -} - static inline void qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) { @@ -865,15 +759,8 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return; - if (irq_ptr->irq_poll) { - if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state)) - irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm); - else - QDIO_PERF_STAT_INC(irq_ptr, int_discarded); - } else { - for_each_input_queue(irq_ptr, q, i) - tasklet_schedule(&q->tasklet); - } + qdio_deliver_irq(irq_ptr); + irq_ptr->last_data_irq_time = S390_lowcore.int_clock; if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold) return; @@ -1016,12 +903,9 @@ static void qdio_shutdown_queues(struct qdio_irq *irq_ptr) struct qdio_q *q; int i; - for_each_input_queue(irq_ptr, q, i) - tasklet_kill(&q->tasklet); - for_each_output_queue(irq_ptr, q, i) { del_timer_sync(&q->u.out.timer); - tasklet_kill(&q->tasklet); + tasklet_kill(&q->u.out.tasklet); } } @@ -1059,7 +943,6 @@ int qdio_shutdown(struct ccw_device *cdev, int how) */ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); - tiqdio_remove_device(irq_ptr); qdio_shutdown_queues(irq_ptr); qdio_shutdown_debug_entries(irq_ptr); @@ -1177,7 +1060,6 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs, if (rc) goto err_queues; - INIT_LIST_HEAD(&irq_ptr->entry); cdev->private->qdio_data = irq_ptr; qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); return 0; @@ -1263,6 +1145,9 @@ int qdio_establish(struct ccw_device *cdev, !init_data->output_sbal_addr_array) return -EINVAL; + if (!init_data->irq_poll) + return -EINVAL; + mutex_lock(&irq_ptr->setup_mutex); qdio_trace_init_data(irq_ptr, init_data); qdio_setup_irq(irq_ptr, init_data); @@ -1357,9 +1242,6 @@ int qdio_activate(struct ccw_device *cdev) goto out; } - if (is_thinint_irq(irq_ptr)) - tiqdio_add_device(irq_ptr); - /* wait for subchannel to become active */ msleep(5); @@ -1557,17 +1439,16 @@ static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr, unsigned int start = q->first_to_check; int count; - count = q->is_input_q ? qdio_inbound_q_moved(q, start) : - qdio_outbound_q_moved(q, start); + *error = 0; + count = q->is_input_q ? 
get_inbound_buffer_frontier(q, start, error) : + get_outbound_buffer_frontier(q, start, error); if (count == 0) return 0; *bufnr = start; - *error = q->qdio_error; /* for the next time */ q->first_to_check = add_buf(start, count); - q->qdio_error = 0; return count; } diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index a5b2e16b7aa8..c8b9620bc688 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -259,14 +259,6 @@ static void setup_queues(struct qdio_irq *irq_ptr, setup_storage_lists(q, irq_ptr, qdio_init->input_sbal_addr_array[i], i); - - if (is_thinint_irq(irq_ptr)) { - tasklet_init(&q->tasklet, tiqdio_inbound_processing, - (unsigned long) q); - } else { - tasklet_init(&q->tasklet, qdio_inbound_processing, - (unsigned long) q); - } } for_each_output_queue(irq_ptr, q, i) { @@ -280,8 +272,7 @@ static void setup_queues(struct qdio_irq *irq_ptr, setup_storage_lists(q, irq_ptr, qdio_init->output_sbal_addr_array[i], i); - tasklet_init(&q->tasklet, qdio_outbound_processing, - (unsigned long) q); + tasklet_setup(&q->u.out.tasklet, qdio_outbound_tasklet); timer_setup(&q->u.out.timer, qdio_outbound_timer, 0); } } @@ -483,12 +474,8 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data) ccw_device_get_schid(cdev, &irq_ptr->schid); setup_queues(irq_ptr, init_data); - if (init_data->irq_poll) { - irq_ptr->irq_poll = init_data->irq_poll; - set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state); - } else { - irq_ptr->irq_poll = NULL; - } + irq_ptr->irq_poll = init_data->irq_poll; + set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state); setup_qib(irq_ptr, init_data); set_impl_params(irq_ptr, init_data->qib_param_field_format, diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 7a440e4328cd..8e09bf3a2fcd 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -66,22 +66,6 @@ static void put_indicator(u32 *addr) atomic_dec(&ind->count); } -void tiqdio_add_device(struct qdio_irq *irq_ptr) -{ - mutex_lock(&tiq_list_lock); - list_add_rcu(&irq_ptr->entry, &tiq_list); - mutex_unlock(&tiq_list_lock); -} - -void tiqdio_remove_device(struct qdio_irq *irq_ptr) -{ - mutex_lock(&tiq_list_lock); - list_del_rcu(&irq_ptr->entry); - mutex_unlock(&tiq_list_lock); - synchronize_rcu(); - INIT_LIST_HEAD(&irq_ptr->entry); -} - static inline int references_shared_dsci(struct qdio_irq *irq_ptr) { return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; @@ -106,32 +90,6 @@ static inline u32 clear_shared_ind(void) return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); } -static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) -{ - struct qdio_q *q; - int i; - - if (!references_shared_dsci(irq)) - xchg(irq->dsci, 0); - - if (irq->irq_poll) { - if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state)) - irq->irq_poll(irq->cdev, irq->int_parm); - else - QDIO_PERF_STAT_INC(irq, int_discarded); - - return; - } - - for_each_input_queue(irq, q, i) { - /* - * Call inbound processing but not directly - * since that could starve other thinint queues. 
- */ - tasklet_schedule(&q->tasklet); - } -} - /** * tiqdio_thinint_handler - thin interrupt handler for qdio * @airq: pointer to adapter interrupt descriptor @@ -139,10 +97,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) */ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating) { + u64 irq_time = S390_lowcore.int_clock; u32 si_used = clear_shared_ind(); struct qdio_irq *irq; - last_ai_time = S390_lowcore.int_clock; + last_ai_time = irq_time; inc_irq_stat(IRQIO_QAI); /* protect tiq_list entries, only changed in activate or shutdown */ @@ -153,10 +112,15 @@ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating) if (unlikely(references_shared_dsci(irq))) { if (!si_used) continue; - } else if (!*irq->dsci) - continue; + } else { + if (!*irq->dsci) + continue; - tiqdio_call_inq_handlers(irq); + xchg(irq->dsci, 0); + } + + qdio_deliver_irq(irq); + irq->last_data_irq_time = irq_time; QDIO_PERF_STAT_INC(irq, adapter_int); } @@ -208,10 +172,15 @@ int qdio_establish_thinint(struct qdio_irq *irq_ptr) DBF_HEX(&irq_ptr->dsci, sizeof(void *)); rc = set_subchannel_ind(irq_ptr, 0); - if (rc) + if (rc) { put_indicator(irq_ptr->dsci); + return rc; + } - return rc; + mutex_lock(&tiq_list_lock); + list_add_rcu(&irq_ptr->entry, &tiq_list); + mutex_unlock(&tiq_list_lock); + return 0; } void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) @@ -219,6 +188,11 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) if (!is_thinint_irq(irq_ptr)) return; + mutex_lock(&tiq_list_lock); + list_del_rcu(&irq_ptr->entry); + mutex_unlock(&tiq_list_lock); + synchronize_rcu(); + /* reset adapter interrupt indicators */ set_subchannel_ind(irq_ptr, 1); put_indicator(irq_ptr->dsci); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 10206e4498d0..52eaf51c9bb6 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -1438,6 +1438,8 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg) if (rc == -EAGAIN) tr.again_counter++; } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) + rc = -EIO; if (rc) { ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc); return rc; @@ -1481,6 +1483,8 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg) if (rc == -EAGAIN) tr.again_counter++; } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) + rc = -EIO; if (rc) { ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc); return rc; @@ -1524,6 +1528,8 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) if (rc == -EAGAIN) tr.again_counter++; } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) + rc = -EIO; if (rc) ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n", rc, xcRB.status); @@ -1568,6 +1574,8 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) if (rc == -EAGAIN) tr.again_counter++; } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) + rc = -EIO; if (rc) ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc); if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) @@ -1744,6 +1752,8 @@ static long trans_modexpo32(struct ap_perms *perms, struct file *filp, if (rc == -EAGAIN) tr.again_counter++; } while (rc == -EAGAIN && tr.again_counter < 
TRACK_AGAIN_MAX); + if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) + rc = -EIO; if (rc) return rc; return put_user(mex64.outputdatalength, @@ -1795,6 +1805,8 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp, if (rc == -EAGAIN) tr.again_counter++; } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) + rc = -EIO; if (rc) return rc; return put_user(crt64.outputdatalength, @@ -1865,6 +1877,8 @@ static long trans_xcRB32(struct ap_perms *perms, struct file *filp, if (rc == -EAGAIN) tr.again_counter++; } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) + rc = -EIO; xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; xcRB32.reply_data_length = xcRB64.reply_data_length; xcRB32.status = xcRB64.status; diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index b1046811450f..d68c0ed5e0dd 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -662,7 +662,10 @@ int cca_sec2protkey(u16 cardnr, u16 domain, __func__, (int) prepcblk->ccp_rtcode, (int) prepcblk->ccp_rscode); - rc = -EIO; + if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) + rc = -EAGAIN; + else + rc = -EIO; goto out; } if (prepcblk->ccp_rscode != 0) { @@ -1275,7 +1278,10 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, __func__, (int) prepcblk->ccp_rtcode, (int) prepcblk->ccp_rscode); - rc = -EIO; + if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) + rc = -EAGAIN; + else + rc = -EIO; goto out; } if (prepcblk->ccp_rscode != 0) { @@ -1441,7 +1447,10 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, __func__, (int) prepcblk->ccp_rtcode, (int) prepcblk->ccp_rscode); - rc = -EIO; + if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) + rc = -EAGAIN; + else + rc = -EIO; goto out; } if (prepcblk->ccp_rscode != 0) { diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 89b223885b0c..b71b8902d1c4 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -6074,14 +6074,15 @@ int qeth_poll(struct napi_struct *napi, int budget) EXPORT_SYMBOL_GPL(qeth_poll); static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, - unsigned int bidx, bool error, int budget) + unsigned int bidx, unsigned int qdio_error, + int budget) { struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx]; u8 sflags = buffer->buffer->element[15].sflags; struct qeth_card *card = queue->card; + bool error = !!qdio_error; - if (queue->bufstates && (queue->bufstates[bidx].flags & - QDIO_OUTBUF_STATE_FLAG_PENDING)) { + if (qdio_error == QDIO_ERROR_SLSB_PENDING) { WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); QETH_CARD_TEXT_(card, 5, "pel%u", bidx); |
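
The zcrypt_api.c hunks above bound the -EAGAIN retry loops: once again_counter reaches TRACK_AGAIN_MAX, a still-pending -EAGAIN is turned into -EIO so the transient code never escapes to userspace. A self-contained sketch of that retry-cap pattern, with hypothetical names, looks like this:

	#include <linux/errno.h>

	#define EXAMPLE_AGAIN_MAX	10	/* illustrative bound */

	/* Hypothetical operation that may ask to be retried. */
	int example_send_request(void *msg);

	static int example_send_with_retry(void *msg)
	{
		int again_counter = 0;
		int rc;

		do {
			rc = example_send_request(msg);
			if (rc == -EAGAIN)
				again_counter++;
		} while (rc == -EAGAIN && again_counter < EXAMPLE_AGAIN_MAX);

		/* Cap reached: report a hard error instead of the transient one. */
		if (rc == -EAGAIN && again_counter >= EXAMPLE_AGAIN_MAX)
			rc = -EIO;

		return rc;
	}
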