/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *            Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>, "
        "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

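/*
 * The do_siga_* helpers below wrap the SIGA instruction. The register
 * assignment is fixed by the instruction's interface: the function code
 * is passed in register 0, the subchannel id (or the subchannel token
 * for QEBSM) in register 1 and the queue mask(s) in registers 2 and 3.
 * The condition code is extracted from the PSW via ipm/srl.
 */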
static inline int do_siga_sync(unsigned long schid,
                               unsigned int out_mask, unsigned int in_mask,
                               unsigned int fc)
{
        register unsigned long __fc asm ("0") = fc;
        register unsigned long __schid asm ("1") = schid;
        register unsigned long out asm ("2") = out_mask;
        register unsigned long in asm ("3") = in_mask;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
        return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
                                unsigned int fc)
{
        register unsigned long __fc asm ("0") = fc;
        register unsigned long __schid asm ("1") = schid;
        register unsigned long __mask asm ("2") = mask;
        int cc;

        asm volatile(
                "       siga    0\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
        return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
                                 unsigned int *bb, unsigned int fc,
                                 unsigned long aob)
{
        register unsigned long __fc asm("0") = fc;
        register unsigned long __schid asm("1") = schid;
        register unsigned long __mask asm("2") = mask;
        register unsigned long __aob asm("3") = aob;
        int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

        asm volatile(
                "       siga    0\n"
                "0:     ipm     %0\n"
                "       srl     %0,28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask),
                  "+d" (__aob)
                : : "cc", "memory");
        *bb = ((unsigned int) __fc) >> 31;
        return cc;
}

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
        /* all done or next buffer state different */
        if (ccq == 0 || ccq == 32)
                return 0;
        /* no buffer processed */
        if (ccq == 97)
                return 1;
        /* not all buffers processed */
        if (ccq == 96)
                return 2;
        /* notify devices immediately */
        DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
        return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
                        int start, int count, int auto_ack)
{
        int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
        unsigned int ccq = 0;

        BUG_ON(!q->irq_ptr->sch_token);
        qperf_inc(q, eqbs);

        if (!q->is_input_q)
                nr += q->irq_ptr->nr_input_qs;
again:
        ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
                      auto_ack);
        rc = qdio_check_ccq(q, ccq);
        if (!rc)
                return count - tmp_count;

        if (rc == 1) {
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
                goto again;
        }

        if (rc == 2) {
                BUG_ON(tmp_count == count);
                qperf_inc(q, eqbs_partial);
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
                        tmp_count);
                /*
                 * Retry once; if that fails, bail out and process the
                 * extracted buffers before trying again.
                 */
                if (!retried++)
                        goto again;
                else
                        return count - tmp_count;
        }

        DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
        DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
                   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
        return 0;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
                        int count)
{
        unsigned int ccq = 0;
        int tmp_count = count, tmp_start = start;
        int nr = q->nr;
        int rc;

        if (!count)
                return 0;

        BUG_ON(!q->irq_ptr->sch_token);
        qperf_inc(q, sqbs);

        if (!q->is_input_q)
                nr += q->irq_ptr->nr_input_qs;
again:
        ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
        rc = qdio_check_ccq(q, ccq);
        if (!rc) {
                WARN_ON(tmp_count);
                return count - tmp_count;
        }

        if (rc == 1 || rc == 2) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
                qperf_inc(q, sqbs_partial);
                goto again;
        }

        DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
        DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
                   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
        return 0;
}

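/*
 * SLSB states encode an owner in their name: SLSB_P_* buffers are owned
 * by the program, SLSB_CU_* buffers by the control unit (the adapter).
 * Ownership is handed over by flipping the per-buffer state, which the
 * helpers below read and write either directly or via EQBS/SQBS when
 * QEBSM is available.
 */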
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
                                 unsigned char *state, unsigned int count,
                                 int auto_ack, int merge_pending)
{
        unsigned char __state = 0;
        int i;

        BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
        BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

        if (is_qebsm(q))
                return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

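        /*
         * With merge_pending set, a PENDING buffer is reported as EMPTY
         * so that PENDING and EMPTY buffers can be returned as one
         * contiguous block; the bitwise check below then only stops the
         * scan once a state no longer contains the bits of the first one.
         */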
        for (i = 0; i < count; i++) {
                if (!__state) {
                        __state = q->slsb.val[bufnr];
                        if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
                                __state = SLSB_P_OUTPUT_EMPTY;
                } else if (merge_pending) {
                        if ((q->slsb.val[bufnr] & __state) != __state)
                                break;
                } else if (q->slsb.val[bufnr] != __state)
                        break;
                bufnr = next_buf(bufnr);
        }
        *state = __state;
        return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
                                unsigned char *state, int auto_ack)
{
        return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
                                 unsigned char state, int count)
{
        int i;

        BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
        BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

        if (is_qebsm(q))
                return qdio_do_sqbs(q, state, bufnr, count);

        for (i = 0; i < count; i++) {
                xchg(&q->slsb.val[bufnr], state);
                bufnr = next_buf(bufnr);
        }
        return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
                                unsigned char state)
{
        return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q;
        int i;

        for_each_input_queue(irq_ptr, q, i)
                set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
                               QDIO_MAX_BUFFERS_PER_Q);
        for_each_output_queue(irq_ptr, q, i)
                set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
                               QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
                          unsigned int input)
{
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_SYNC;
        int cc;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
        qperf_inc(q, siga_sync);

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }

        cc = do_siga_sync(schid, output, input, fc);
        if (unlikely(cc))
                DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
        return cc;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
        if (q->is_input_q)
                return qdio_siga_sync(q, 0, q->mask);
        else
                return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
        unsigned long aob)
{
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_WRITE;
        u64 start_time = 0;
        int retries = 0, cc;
        unsigned long laob = 0;

        if (q->u.out.use_cq && aob != 0) {
                fc = QDIO_SIGA_WRITEQ;
                laob = aob;
        }

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }
again:
        WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
                (aob && fc != QDIO_SIGA_WRITEQ));
        cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

        /* hipersocket busy condition */
        if (unlikely(*busy_bit)) {
                WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
                retries++;

                if (!start_time) {
                        start_time = get_clock();
                        goto again;
                }
                if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
                        goto again;
        }
        if (retries) {
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
                              "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
                DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
        }
        return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_READ;
        int cc;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
        qperf_inc(q, siga_read);

        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }

        cc = do_siga_input(schid, q->mask, fc);
        if (unlikely(cc))
                DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
        return cc;
}

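/* convenience wrappers: sync all output queues, or all queues in both
 * directions, by passing an all-ones mask */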
#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
        /* PCI capable outbound queues will also be scanned so sync them too */
        if (pci_out_supported(q))
                qdio_siga_sync_all(q);
        else
                qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
                        unsigned char *state)
{
        if (need_siga_sync(q))
                qdio_siga_sync_q(q);
        return get_buf_states(q, bufnr, state, 1, 0, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
        if (!q->u.in.polling)
                return;

        q->u.in.polling = 0;
        qperf_inc(q, stop_polling);

        /* show the card that we are not polling anymore */
        if (is_qebsm(q)) {
                set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
                               q->u.in.ack_count);
                q->u.in.ack_count = 0;
        } else
                set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

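/*
 * Keep a histogram of scan sizes: a maximal scan (QDIO_MAX_BUFFERS_MASK
 * buffers, since a scan never covers the full ring) is counted in the
 * last bucket, every other count lands in bucket log2(count).
 */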
static inline void account_sbals(struct qdio_q *q, int count)
{
        int pos = 0;

        q->q_stats.nr_sbal_total += count;
        if (count == QDIO_MAX_BUFFERS_MASK) {
                q->q_stats.nr_sbals[7]++;
                return;
        }
        while (count >>= 1)
                pos++;
        q->q_stats.nr_sbals[pos]++;
}

static void process_buffer_error(struct qdio_q *q, int count)
{
        unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
                                        SLSB_P_OUTPUT_NOT_INIT;

        q->qdio_error |= QDIO_ERROR_SLSB_STATE;

        /* special handling for no target buffer empty */
        if ((!q->is_input_q &&
            (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
                qperf_inc(q, target_full);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
                              q->first_to_check);
                goto set;
        }

        DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
        DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
        DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
        DBF_ERROR("F14:%2x F15:%2x",
                  q->sbal[q->first_to_check]->element[14].sflags,
                  q->sbal[q->first_to_check]->element[15].sflags);

set:
        /*
         * Interrupts may be avoided as long as the error is present,
         * so change the buffer state immediately to avoid starvation.
         */
        set_buf_states(q, q->first_to_check, state, count);
}

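/*
 * Acknowledge freshly primed input buffers. Under QEBSM the ACK was
 * already set by EQBS, so only the bookkeeping is updated; otherwise
 * only the newest buffer is ACKed and all others are reset to NOT_INIT
 * so that further primed buffers raise an interrupt again.
 */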
static inline void inbound_primed(struct qdio_q *q, int count)
{
        int new;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

        /* for QEBSM the ACK was already set by EQBS */
        if (is_qebsm(q)) {
                if (!q->u.in.polling) {
                        q->u.in.polling = 1;
                        q->u.in.ack_count = count;
                        q->u.in.ack_start = q->first_to_check;
                        return;
                }

                /* delete the previous ACKs */
                set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
                               q->u.in.ack_count);
                q->u.in.ack_count = count;
                q->u.in.ack_start = q->first_to_check;
                return;
        }

        /*
         * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
         * or by the next inbound run.
         */
        new = add_buf(q->first_to_check, count - 1);
        if (q->u.in.polling) {
                /* reset the previous ACK but first set the new one */
                set_buf_state(q, new, SLSB_P_INPUT_ACK);
                set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
        } else {
                q->u.in.polling = 1;
                set_buf_state(q, new, SLSB_P_INPUT_ACK);
        }

        q->u.in.ack_start = new;
        count--;
        if (!count)
                return;
        /* need to change ALL buffers to get more interrupts */
        set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
        int count, stop;
        unsigned char state = 0;

        q->timestamp = get_clock_fast();

        /*
         * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
         * would return 0.
         */
        count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
        stop = add_buf(q->first_to_check, count);

        if (q->first_to_check == stop)
                goto out;

        /*
         * No siga-sync here: a PCI interrupt or the thin-interrupt
         * handler has already synced the queues.
         */
        count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
        if (!count)
                goto out;

        switch (state) {
        case SLSB_P_INPUT_PRIMED:
                inbound_primed(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
                if (atomic_sub(count, &q->nr_buf_used) == 0)
                        qperf_inc(q, inbound_queue_full);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals(q, count);
                break;
        case SLSB_P_INPUT_ERROR:
                process_buffer_error(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
                atomic_sub(count, &q->nr_buf_used);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals_error(q, count);
                break;
        case SLSB_CU_INPUT_EMPTY:
        case SLSB_P_INPUT_NOT_INIT:
        case SLSB_P_INPUT_ACK:
                if (q->irq_ptr->perf_stat_enabled)
                        q->q_stats.nr_sbal_nop++;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
                break;
        default:
                BUG();
        }
out:
        return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
        int bufnr;

        bufnr = get_inbound_buffer_frontier(q);

        if ((bufnr != q->last_move) || q->qdio_error) {
                q->last_move = bufnr;
                if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
                        q->u.in.timestamp = get_clock();
                return 1;
        } else
                return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
        unsigned char state = 0;

        if (!atomic_read(&q->nr_buf_used))
                return 1;

        if (need_siga_sync(q))
                qdio_siga_sync_q(q);
        get_buf_state(q, q->first_to_check, &state, 0);

        if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
                /* more work coming */
                return 0;

        if (is_thinint_irq(q->irq_ptr))
                return 1;

        /* don't poll under z/VM */
        if (MACHINE_IS_VM)
                return 1;

        /*
         * At this point we know that inbound first_to_check
         * has (probably) not moved (see qdio_inbound_processing).
         */
        if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
                              q->first_to_check);
                return 1;
        } else
                return 0;
}

static inline int contains_aobs(struct qdio_q *q)
{
        return !q->is_input_q && q->u.out.use_cq;
}

static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
                                int i, struct qaob *aob)
{
        int tmp;

        DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
                        (unsigned long) virt_to_phys(aob));
        DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
                        (unsigned long) aob->res0[0]);
        DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
                        (unsigned long) aob->res0[1]);
        DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
                        (unsigned long) aob->res0[2]);
        DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
                        (unsigned long) aob->res0[3]);
        DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
                        (unsigned long) aob->res0[4]);
        DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
                        (unsigned long) aob->res0[5]);
        DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
        DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
        DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
        DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
        DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
        DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
        DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
        for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
                DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
                                (unsigned long) aob->sba[tmp]);
                DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
                                (unsigned long) q->sbal[i]->element[tmp].addr);
                DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
                DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
                                q->sbal[i]->element[tmp].length);
        }
        DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
        for (tmp = 0; tmp < 2; ++tmp) {
                DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
                        (unsigned long) aob->res4[tmp]);
        }
        DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
        DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
}

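/*
 * For queues with asynchronous completion, a buffer in the PENDING
 * state completes later through its AOB: flag it for the upper layer
 * and drop our reference to the AOB here. An EMPTY buffer has already
 * completed synchronously and needs no AOB anymore.
 */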
static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
        unsigned char state = 0;
        int j, b = start;

        if (!contains_aobs(q))
                return;

        for (j = 0; j < count; ++j) {
                get_buf_state(q, b, &state, 0);
                if (state == SLSB_P_OUTPUT_PENDING) {
                        struct qaob *aob = q->u.out.aobs[b];
                        if (aob == NULL)
                                continue;

                        BUG_ON(q->u.out.sbal_state == NULL);
                        q->u.out.sbal_state[b].flags |=
                                QDIO_OUTBUF_STATE_FLAG_PENDING;
                        q->u.out.aobs[b] = NULL;
                } else if (state == SLSB_P_OUTPUT_EMPTY) {
                        BUG_ON(q->u.out.sbal_state == NULL);
                        q->u.out.sbal_state[b].aob = NULL;
                }
                b = next_buf(b);
        }
}

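/*
 * Set up (allocating on demand) the AOB for an output buffer and return
 * its physical address for SIGA-wt, or 0 if the queue has no completion
 * queue. The alignment check below presumably reflects a 256-byte
 * alignment requirement of the adapter for AOBs.
 */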
static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
                                        int bufnr)
{
        unsigned long phys_aob = 0;

        if (!q->use_cq)
                goto out;

        if (!q->aobs[bufnr]) {
                struct qaob *aob = qdio_allocate_aob();
                q->aobs[bufnr] = aob;
        }
        if (q->aobs[bufnr]) {
                BUG_ON(q->sbal_state == NULL);
                q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
                q->sbal_state[bufnr].aob = q->aobs[bufnr];
                q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
                phys_aob = virt_to_phys(q->aobs[bufnr]);
                BUG_ON(phys_aob & 0xFF);
        }

out:
        return phys_aob;
}

static void qdio_kick_handler(struct qdio_q *q)
{
        int start = q->first_to_kick;
        int end = q->first_to_check;
        int count;

        if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return;

        count = sub_buf(end, start);

        if (q->is_input_q) {
                qperf_inc(q, inbound_handler);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
        } else {
                qperf_inc(q, outbound_handler);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
                              start, count);
        }

        qdio_handle_aobs(q, start, count);

        q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
                   q->irq_ptr->int_parm);

        /* for the next time */
        q->first_to_kick = end;
        q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
        qperf_inc(q, tasklet_inbound);

        if (!qdio_inbound_q_moved(q))
                return;

        qdio_kick_handler(q);

        if (!qdio_inbound_q_done(q)) {
                /* means poll time is not yet over */
                qperf_inc(q, tasklet_inbound_resched);
                if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
                        tasklet_schedule(&q->tasklet);
                        return;
                }
        }

        qdio_stop_polling(q);
        /*
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
        if (!qdio_inbound_q_done(q)) {
                qperf_inc(q, tasklet_inbound_resched2);
                if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
                        tasklet_schedule(&q->tasklet);
        }
}

void qdio_inbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
        int count, stop;
        unsigned char state = 0;

        q->timestamp = get_clock_fast();

        if (need_siga_sync(q))
                if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
                    !pci_out_supported(q)) ||
                    (queue_type(q) == QDIO_IQDIO_QFMT &&
                    multicast_outbound(q)))
                        qdio_siga_sync_q(q);

        /*
         * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
         * would return 0.
         */
        count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
        stop = add_buf(q->first_to_check, count);
        if (q->first_to_check == stop)
                goto out;

        count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
        if (!count)
                goto out;

        switch (state) {
        case SLSB_P_OUTPUT_PENDING:
                BUG();
        case SLSB_P_OUTPUT_EMPTY:
                /* the adapter got it */
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
                        "out empty:%1d %02x", q->nr, count);

                atomic_sub(count, &q->nr_buf_used);
                q->first_to_check = add_buf(q->first_to_check, count);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals(q, count);

                break;
        case SLSB_P_OUTPUT_ERROR:
                process_buffer_error(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
                atomic_sub(count, &q->nr_buf_used);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals_error(q, count);
                break;
        case SLSB_CU_OUTPUT_PRIMED:
                /* the adapter has not fetched the output yet */
                if (q->irq_ptr->perf_stat_enabled)
                        q->q_stats.nr_sbal_nop++;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
                              q->nr);
                break;
        case SLSB_P_OUTPUT_NOT_INIT:
        case SLSB_P_OUTPUT_HALTED:
                break;
        default:
                BUG();
        }

out:
        return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
        return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
        int bufnr;

        bufnr = get_outbound_buffer_frontier(q);

        if ((bufnr != q->last_move) || q->qdio_error) {
                q->last_move = bufnr;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
                return 1;
        } else
                return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
        int retries = 0, cc;
        unsigned int busy_bit;

        if (!need_siga_out(q))
                return 0;

        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
        qperf_inc(q, siga_write);

        cc = qdio_siga_output(q, &busy_bit, aob);
        switch (cc) {
        case 0:
                break;
        case 2:
                if (busy_bit) {
                        while (++retries < QDIO_BUSY_BIT_RETRIES) {
                                mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
                                goto retry;
                        }
                        DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
                        cc |= QDIO_ERROR_SIGA_BUSY;
                } else
                        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
                break;
        case 1:
        case 3:
                DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
                break;
        }
        if (retries) {
                DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
                DBF_ERROR("count:%u", retries);
        }
        return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
        qperf_inc(q, tasklet_outbound);
        BUG_ON(atomic_read(&q->nr_buf_used) < 0);

        if (qdio_outbound_q_moved(q))
                qdio_kick_handler(q);

        if (queue_type(q) == QDIO_ZFCP_QFMT)
                if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
                        goto sched;

        if (q->u.out.pci_out_enabled)
                return;

        /*
         * Now we know that queue type is either qeth without pci enabled
         * or HiperSockets. Make sure the buffer switch from PRIMED to
         * EMPTY is noticed and the outbound handler is called after some
         * time.
         */
        if (qdio_outbound_q_done(q))
                del_timer(&q->u.out.timer);
        else
                if (!timer_pending(&q->u.out.timer))
                        mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
        return;

sched:
        if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
                return;
        tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;

        if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
                return;
        tasklet_schedule(&q->tasklet);
}

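/*
 * An adapter interrupt is bound to the input queues, but on devices
 * with PCI-capable output queues it may also signal outbound
 * completion, so give those output queues a chance to run.
 */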
static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
        struct qdio_q *out;
        int i;

        if (!pci_out_supported(q))
                return;

        for_each_output_queue(q->irq_ptr, out, i)
                if (!qdio_outbound_q_done(out))
                        tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
        qperf_inc(q, tasklet_inbound);
        if (need_siga_sync(q) && need_siga_sync_after_ai(q))
                qdio_sync_queues(q);

        /*
         * The interrupt could be caused by a PCI request. Check the
         * PCI capable outbound queues.
         */
        qdio_check_outbound_after_thinint(q);

        if (!qdio_inbound_q_moved(q))
                return;

        qdio_kick_handler(q);

        if (!qdio_inbound_q_done(q)) {
                qperf_inc(q, tasklet_inbound_resched);
                if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
                        tasklet_schedule(&q->tasklet);
                        return;
                }
        }

        qdio_stop_polling(q);
        /*
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
        if (!qdio_inbound_q_done(q)) {
                qperf_inc(q, tasklet_inbound_resched2);
                if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
                        tasklet_schedule(&q->tasklet);
        }
}

void tiqdio_inbound_processing(unsigned long data)
{
        struct qdio_q *q = (struct qdio_q *)data;
        __tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
                                  enum qdio_irq_states state)
{
        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

        irq_ptr->state = state;
        mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
        if (irb->esw.esw0.erw.cons) {
                DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
                DBF_ERROR_HEX(irb, 64);
                DBF_ERROR_HEX(irb->ecw, 64);
        }
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
        int i;
        struct qdio_q *q;

        if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
                return;

        for_each_input_queue(irq_ptr, q, i) {
                if (q->u.in.queue_start_poll) {
                        /* skip if polling is enabled or already in progress */
                        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
                                     &q->u.in.queue_irq_state)) {
                                qperf_inc(q, int_discarded);
                                continue;
                        }
                        q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
                                                 q->irq_ptr->int_parm);
                } else {
                        tasklet_schedule(&q->tasklet);
                }
        }

        if (!pci_out_supported(q))
                return;

        for_each_output_queue(irq_ptr, q, i) {
                if (qdio_outbound_q_done(q))
                        continue;
                if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
                        qdio_siga_sync_q(q);
                tasklet_schedule(&q->tasklet);
        }
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
                                unsigned long intparm, int cstat, int dstat)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;
        int count;

        DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
        DBF_ERROR("intp :%lx", intparm);
        DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

        if (irq_ptr->nr_input_qs) {
                q = irq_ptr->input_qs[0];
        } else if (irq_ptr->nr_output_qs) {
                q = irq_ptr->output_qs[0];
        } else {
                dump_stack();
                goto no_handler;
        }

        count = sub_buf(q->first_to_check, q->first_to_kick);
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
                   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
        /*
         * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will
         * happen. Therefore we call the LGR detection function here.
         */
        lgr_info_log();
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
                                      int dstat)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

        if (cstat)
                goto error;
        if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
                goto error;
        if (!(dstat & DEV_STAT_DEV_END))
                goto error;
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
        return;

error:
        DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
        DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        int cstat, dstat;

        if (!intparm || !irq_ptr) {
                DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
                return;
        }

        if (irq_ptr->perf_stat_enabled)
                irq_ptr->perf_stat.qdio_int++;

        if (IS_ERR(irb)) {
                switch (PTR_ERR(irb)) {
                case -EIO:
                        DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
                        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
                        wake_up(&cdev->private->wait_q);
                        return;
                default:
                        WARN_ON(1);
                        return;
                }
        }
        qdio_irq_check_sense(irq_ptr, irb);
        cstat = irb->scsw.cmd.cstat;
        dstat = irb->scsw.cmd.dstat;

        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_INACTIVE:
                qdio_establish_handle_irq(cdev, cstat, dstat);
                break;
        case QDIO_IRQ_STATE_CLEANUP:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
                break;
        case QDIO_IRQ_STATE_ESTABLISHED:
        case QDIO_IRQ_STATE_ACTIVE:
                if (cstat & SCHN_STAT_PCI) {
                        qdio_int_handler_pci(irq_ptr);
                        return;
                }
                if (cstat || dstat)
                        qdio_handle_activate_check(cdev, intparm, cstat,
                                                   dstat);
                break;
        case QDIO_IRQ_STATE_STOPPED:
                break;
        default:
                WARN_ON(1);
        }
        wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
                       struct qdio_ssqd_desc *data)
{
        if (!cdev || !cdev->private)
                return -EINVAL;

        DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
        return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;
        int i;

        for_each_input_queue(irq_ptr, q, i)
                tasklet_kill(&q->tasklet);

        for_each_output_queue(irq_ptr, q, i) {
                del_timer(&q->u.out.timer);
                tasklet_kill(&q->tasklet);
        }
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        int rc;
        unsigned long flags;

        if (!irq_ptr)
                return -ENODEV;

        BUG_ON(irqs_disabled());
        DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

        mutex_lock(&irq_ptr->setup_mutex);
        /*
         * Subchannel was already shot down. We cannot prevent being called
         * twice since cio may trigger a shutdown asynchronously.
         */
        if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
                mutex_unlock(&irq_ptr->setup_mutex);
                return 0;
        }

        /*
         * Indicate that the device is going down. Scheduling the queue
         * tasklets is forbidden from here on.
         */
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

        tiqdio_remove_input_queues(irq_ptr);
        qdio_shutdown_queues(cdev);
        qdio_shutdown_debug_entries(irq_ptr, cdev);

        /* cleanup subchannel */
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

        if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
                rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
        else
                /* default behaviour is halt */
                rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
        if (rc) {
                DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4d", rc);
                goto no_cleanup;
        }

        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        wait_event_interruptible_timeout(cdev->private->wait_q,
                irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
                irq_ptr->state == QDIO_IRQ_STATE_ERR,
                10 * HZ);
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
        qdio_shutdown_thinint(irq_ptr);

        /* restore interrupt handler */
        if ((void *)cdev->handler == (void *)qdio_int_handler)
                cdev->handler = irq_ptr->orig_handler;
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
        mutex_unlock(&irq_ptr->setup_mutex);
        if (rc)
                return rc;
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;

        if (!irq_ptr)
                return -ENODEV;

        DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
        mutex_lock(&irq_ptr->setup_mutex);

        if (irq_ptr->debug_area != NULL) {
                debug_unregister(irq_ptr->debug_area);
                irq_ptr->debug_area = NULL;
        }
        cdev->private->qdio_data = NULL;
        mutex_unlock(&irq_ptr->setup_mutex);

        qdio_release_memory(irq_ptr);
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

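/*
 * Rough call sequence for a driver using this API (qeth and zfcp are
 * the in-tree users):
 *
 *	qdio_allocate(&init_data);
 *	qdio_establish(&init_data);
 *	qdio_activate(cdev);
 *	...
 *	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	qdio_free(cdev);
 */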
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
        struct qdio_irq *irq_ptr;

        DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

        if ((init_data->no_input_qs && !init_data->input_handler) ||
            (init_data->no_output_qs && !init_data->output_handler))
                return -EINVAL;

        if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
            (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
                return -EINVAL;

        if ((!init_data->input_sbal_addr_array) ||
            (!init_data->output_sbal_addr_array))
                return -EINVAL;

        /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
        irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!irq_ptr)
                goto out_err;

        mutex_init(&irq_ptr->setup_mutex);
        qdio_allocate_dbf(init_data, irq_ptr);

        /*
         * Allocate a page for the chsc calls in qdio_establish.
         * Must be pre-allocated since a zfcp recovery will call
         * qdio_establish. In case of low memory and swap on a zfcp disk
         * we may not be able to allocate memory otherwise.
         */
        irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
        if (!irq_ptr->chsc_page)
                goto out_rel;

        /* qdr is used in ccw1.cda which is u32 */
        irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!irq_ptr->qdr)
                goto out_rel;
        WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

        if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
                             init_data->no_output_qs))
                goto out_rel;

        init_data->cdev->private->qdio_data = irq_ptr;
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
        return 0;
out_rel:
        qdio_release_memory(irq_ptr);
out_err:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

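/*
 * A HiperSockets device that offers a completion queue presents it as
 * an additional input queue; when one is found, asynchronous (SIGA-wt)
 * operation is enabled on the output queues, falling back to the
 * synchronous mode if the AOB setup fails.
 */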
static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q = irq_ptr->input_qs[0];
        int i, use_cq = 0;

        if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
                use_cq = 1;

        for_each_output_queue(irq_ptr, q, i) {
                if (use_cq) {
                        if (qdio_enable_async_operation(&q->u.out) < 0) {
                                use_cq = 0;
                                continue;
                        }
                } else
                        qdio_disable_async_operation(&q->u.out);
        }
        DBF_EVENT("use_cq:%d", use_cq);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
        struct qdio_irq *irq_ptr;
        struct ccw_device *cdev = init_data->cdev;
        unsigned long saveflags;
        int rc;

        DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        if (cdev->private->state != DEV_STATE_ONLINE)
                return -EINVAL;

        mutex_lock(&irq_ptr->setup_mutex);
        qdio_setup_irq(init_data);

        rc = qdio_establish_thinint(irq_ptr);
        if (rc) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return rc;
        }

        /* establish q */
        irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->equeue.count;
        irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

        spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
        ccw_device_set_options_mask(cdev, 0);

        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
        if (rc) {
                DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
        }
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

        if (rc) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return rc;
        }

        wait_event_interruptible_timeout(cdev->private->wait_q,
                irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
                irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

        if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
                mutex_unlock(&irq_ptr->setup_mutex);
                qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
                return -EIO;
        }

        qdio_setup_ssqd_info(irq_ptr);
        DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

        qdio_detect_hsicq(irq_ptr);

        /* qebsm is now set up if available, initialize buffer states */
        qdio_init_buf_states(irq_ptr);

        mutex_unlock(&irq_ptr->setup_mutex);
        qdio_print_subchannel_info(irq_ptr, cdev);
        qdio_setup_debug_entries(irq_ptr, cdev);
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
        struct qdio_irq *irq_ptr;
        int rc;
        unsigned long saveflags;

        DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

        irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;

        if (cdev->private->state != DEV_STATE_ONLINE)
                return -EINVAL;

        mutex_lock(&irq_ptr->setup_mutex);
        if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
                rc = -EBUSY;
                goto out;
        }

        irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->aqueue.count;
        irq_ptr->ccw.cda = 0;

        spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
        ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

        rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
                              0, DOIO_DENY_PREFETCH);
        if (rc) {
                DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
        }
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

        if (rc)
                goto out;

        if (is_thinint_irq(irq_ptr))
                tiqdio_add_input_queues(irq_ptr);

        /* wait for subchannel to become active */
        msleep(5);

        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_STOPPED:
        case QDIO_IRQ_STATE_ERR:
                rc = -EIO;
                break;
        default:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
                rc = 0;
        }
out:
        mutex_unlock(&irq_ptr->setup_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

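/*
 * Ring arithmetic on the 128-buffer queue: e.g. start 120 with count 16
 * wraps around to end 8, so buffers 120..127 and 0..7 count as
 * "in between".
 */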
1540 static inline int buf_in_between(int bufnr, int start, int count)
1541 {
1542         int end = add_buf(start, count);
1543
1544         if (end > start) {
1545                 if (bufnr >= start && bufnr < end)
1546                         return 1;
1547                 else
1548                         return 0;
1549         }
1550
1551         /* wrap-around case */
1552         if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1553             (bufnr < end))
1554                 return 1;
1555         else
1556                 return 0;
1557 }
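
/*
 * Worked example (editor's note), with QDIO_MAX_BUFFERS_PER_Q == 128:
 * for start = 120 and count = 16, add_buf() wraps around and yields
 * end = 8, so the wrap-around branch above is taken; bufnr = 125 and
 * bufnr = 3 both lie inside the window, bufnr = 50 does not.
 */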
1558
1559 /**
1560  * handle_inbound - reset processed input buffers
1561  * @q: queue containing the buffers
1562  * @callflags: flags from the upper-layer program
1563  * @bufnr: first buffer to process
1564  * @count: how many buffers are emptied
1565  */
1566 static int handle_inbound(struct qdio_q *q, unsigned int callflags,
1567                           int bufnr, int count)
1568 {
1569         int used, diff;
1570
1571         qperf_inc(q, inbound_call);
1572
1573         if (!q->u.in.polling)
1574                 goto set;
1575
1576         /* protect against stop polling setting an ACK for an emptied slsb */
1577         if (count == QDIO_MAX_BUFFERS_PER_Q) {
1578                 /* overwriting everything, just delete polling status */
1579                 q->u.in.polling = 0;
1580                 q->u.in.ack_count = 0;
1581                 goto set;
1582         } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
1583                 if (is_qebsm(q)) {
1584                         /* partial overwrite, just update ack_start */
1585                         diff = add_buf(bufnr, count);
1586                         diff = sub_buf(diff, q->u.in.ack_start);
1587                         q->u.in.ack_count -= diff;
1588                         if (q->u.in.ack_count <= 0) {
1589                                 q->u.in.polling = 0;
1590                                 q->u.in.ack_count = 0;
1591                                 goto set;
1592                         }
1593                         q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
1594                 } else {
1595                         /* the only ACK will be deleted, so stop polling */
1596                         q->u.in.polling = 0;
1597                 }
1598         }
1599
1600 set:
1601         count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
1602
1603         used = atomic_add_return(count, &q->nr_buf_used) - count;
1604         BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
1605
1606         if (need_siga_in(q))
1607                 return qdio_siga_input(q);
1608
1609         return 0;
1610 }
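
/*
 * Worked example for the QEBSM branch above (editor's note): with the
 * ACK window at ack_start = 10, ack_count = 4 and a caller emptying
 * bufnr = 8, count = 4, the overwrite reaches add_buf(8, 4) = 12, so
 * diff = sub_buf(12, 10) = 2: two acknowledged buffers were overwritten,
 * the window shrinks to ack_count = 2 and moves to ack_start = 12.
 */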
1611
1612 /**
1613  * handle_outbound - process filled outbound buffers
1614  * @q: queue containing the buffers
1615  * @callflags: flags from the upper-layer program
1616  * @bufnr: first buffer to process
1617  * @count: how many buffers are filled
1618  */
1619 static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1620                            int bufnr, int count)
1621 {
1622         unsigned char state = 0;
1623         int used, rc = 0;
1624
1625         qperf_inc(q, outbound_call);
1626
1627         count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1628         used = atomic_add_return(count, &q->nr_buf_used);
1629         BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1630
1631         if (used == QDIO_MAX_BUFFERS_PER_Q)
1632                 qperf_inc(q, outbound_queue_full);
1633
1634         if (callflags & QDIO_FLAG_PCI_OUT) {
1635                 q->u.out.pci_out_enabled = 1;
1636                 qperf_inc(q, pci_request_int);
1637         } else
1638                 q->u.out.pci_out_enabled = 0;
1639
1640         if (queue_type(q) == QDIO_IQDIO_QFMT) {
1641                 unsigned long phys_aob = 0;
1642
1643                 /* One SIGA-W per buffer required for unicast HSI */
1644                 WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
1645
1646                 phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
1647
1648                 rc = qdio_kick_outbound_q(q, phys_aob);
1649         } else if (need_siga_sync(q)) {
1650                 rc = qdio_siga_sync_q(q);
1651         } else {
1652                 /* try to fast requeue buffers */
1653                 get_buf_state(q, prev_buf(bufnr), &state, 0);
1654                 if (state != SLSB_CU_OUTPUT_PRIMED)
1655                         rc = qdio_kick_outbound_q(q, 0);
1656                 else
1657                         qperf_inc(q, fast_requeue);
1658         }
1659
1660         /* in case of SIGA errors we must process the error immediately */
1661         if (used >= q->u.out.scan_threshold || rc)
1662                 tasklet_schedule(&q->tasklet);
1663         else
1664                 /* free the SBALs in case of no further traffic */
1665                 if (!timer_pending(&q->u.out.timer))
1666                         mod_timer(&q->u.out.timer, jiffies + HZ);
1667         return rc;
1668 }
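
/*
 * Editor's note on the fast-requeue path above: if the buffer preceding
 * @bufnr is still SLSB_CU_OUTPUT_PRIMED, the adapter has not fetched past
 * that point yet and will pick up the newly primed buffers on its own,
 * so the SIGA-w can be skipped and only the fast_requeue counter is
 * incremented.
 */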
1669
1670 /**
1671  * do_QDIO - process input or output buffers
1672  * @cdev: associated ccw_device for the qdio subchannel
1673  * @callflags: input or output and special flags from the program
1674  * @q_nr: queue number
1675  * @bufnr: buffer number
1676  * @count: how many buffers to process
1677  */
1678 int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1679             int q_nr, unsigned int bufnr, unsigned int count)
1680 {
1681         struct qdio_irq *irq_ptr;
1682
1684         if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
1685                 return -EINVAL;
1686
1687         irq_ptr = cdev->private->qdio_data;
1688         if (!irq_ptr)
1689                 return -ENODEV;
1690
1691         DBF_DEV_EVENT(DBF_INFO, irq_ptr,
1692                       "do%02x b:%02x c:%02x", callflags, bufnr, count);
1693
1694         if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1695                 return -EBUSY;
1696         if (!count)
1697                 return 0;
1698         if (callflags & QDIO_FLAG_SYNC_INPUT)
1699                 return handle_inbound(irq_ptr->input_qs[q_nr],
1700                                       callflags, bufnr, count);
1701         else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
1702                 return handle_outbound(irq_ptr->output_qs[q_nr],
1703                                        callflags, bufnr, count);
1704         return -EINVAL;
1705 }
1706 EXPORT_SYMBOL_GPL(do_QDIO);
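
/*
 * Example (editor's sketch): handing 16 emptied buffers on input queue 0,
 * starting at buffer 0, back to the adapter:
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, 16);
 *	if (rc)
 *		... handle -EINVAL, -ENODEV, -EBUSY or a SIGA error ...
 */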
1707
1708 /**
1709  * qdio_start_irq - enable interrupt processing for the device
1710  * @cdev: associated ccw_device for the qdio subchannel
1711  * @nr: input queue number
1712  *
1713  * Return codes
1714  *   0 - success
1715  *   1 - irqs not started since new data is available
1716  */
1717 int qdio_start_irq(struct ccw_device *cdev, int nr)
1718 {
1719         struct qdio_q *q;
1720         struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1721
1722         if (!irq_ptr)
1723                 return -ENODEV;
1724         q = irq_ptr->input_qs[nr];
1725
1726         WARN_ON(queue_irqs_enabled(q));
1727
1728         clear_nonshared_ind(irq_ptr);
1729         qdio_stop_polling(q);
1730         clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1731
1732         /*
1733          * We need to check again so that we do not lose the
1734          * initiative after resetting the ACK state.
1735          */
1736         if (test_nonshared_ind(irq_ptr))
1737                 goto rescan;
1738         if (!qdio_inbound_q_done(q))
1739                 goto rescan;
1740         return 0;
1741
1742 rescan:
1743         if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1744                              &q->u.in.queue_irq_state))
1745                 return 0;
1746         else
1747                 return 1;
1749 }
1750 EXPORT_SYMBOL(qdio_start_irq);
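
/*
 * Example (editor's sketch): a driver leaving polling mode must honour
 * the return code; a return of 1 means new data slipped in while
 * interrupts were being re-enabled and the queue has to be scanned once
 * more before sleeping:
 *
 *	if (qdio_start_irq(cdev, 0) == 1)
 *		goto rescan;
 */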
1751
1752 /**
1753  * qdio_get_next_buffers - process input buffers
1754  * @cdev: associated ccw_device for the qdio subchannel
1755  * @nr: input queue number
1756  * @bufnr: on return, the first filled buffer number
1757  * @error: on return, non-zero if the buffers are in an error state
1758  *
1759  * Return codes
1760  *   < 0 - error
1761  *   = 0 - no new buffers found
1762  *   > 0 - number of processed buffers
1763  */
1764 int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1765                           int *error)
1766 {
1767         struct qdio_q *q;
1768         int start, end;
1769         struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1770
1771         if (!irq_ptr)
1772                 return -ENODEV;
1773         q = irq_ptr->input_qs[nr];
1774         WARN_ON(queue_irqs_enabled(q));
1775
1776         /*
1777          * Cannot rely on automatic sync after interrupt since queues may
1778          * also be examined without interrupt.
1779          */
1780         if (need_siga_sync(q))
1781                 qdio_sync_queues(q);
1782
1783         /* check the PCI-capable outbound queues. */
1784         qdio_check_outbound_after_thinint(q);
1785
1786         if (!qdio_inbound_q_moved(q))
1787                 return 0;
1788
1789         /* Note: upper-layer MUST stop processing immediately here ... */
1790         if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
1791                 return -EIO;
1792
1793         start = q->first_to_kick;
1794         end = q->first_to_check;
1795         *bufnr = start;
1796         *error = q->qdio_error;
1797
1798         /* for the next time */
1799         q->first_to_kick = end;
1800         q->qdio_error = 0;
1801         return sub_buf(end, start);
1802 }
1803 EXPORT_SYMBOL(qdio_get_next_buffers);
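
/*
 * Example (editor's sketch): a minimal polling loop built on this helper,
 * where "process_sbals" is a hypothetical driver callback and n, bufnr
 * and error are ints declared by the caller:
 *
 *	while ((n = qdio_get_next_buffers(cdev, 0, &bufnr, &error)) > 0)
 *		process_sbals(bufnr, n, error);
 *	if (n < 0)
 *		... device no longer active, stop polling ...
 */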
1804
1805 /**
1806  * qdio_stop_irq - disable interrupt processing for the device
1807  * @cdev: associated ccw_device for the qdio subchannel
1808  * @nr: input queue number
1809  *
1810  * Return codes
1811  *   0 - interrupts were already disabled
1812  *   1 - interrupts successfully disabled
1813  */
1814 int qdio_stop_irq(struct ccw_device *cdev, int nr)
1815 {
1816         struct qdio_q *q;
1817         struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1818
1819         if (!irq_ptr)
1820                 return -ENODEV;
1821         q = irq_ptr->input_qs[nr];
1822
1823         if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1824                              &q->u.in.queue_irq_state))
1825                 return 0;
1826         else
1827                 return 1;
1828 }
1829 EXPORT_SYMBOL(qdio_stop_irq);
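
/*
 * Example (editor's sketch): the counterpart to qdio_start_irq(); a
 * return of 0 tells a racing caller that the queue is already in
 * polling mode and owned by somebody else:
 *
 *	if (!qdio_stop_irq(cdev, 0))
 *		return;
 */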
1830
1831 static int __init init_QDIO(void)
1832 {
1833         int rc;
1834
1835         rc = qdio_debug_init();
1836         if (rc)
1837                 return rc;
1838         rc = qdio_setup_init();
1839         if (rc)
1840                 goto out_debug;
1841         rc = tiqdio_allocate_memory();
1842         if (rc)
1843                 goto out_cache;
1844         rc = tiqdio_register_thinints();
1845         if (rc)
1846                 goto out_ti;
1847         return 0;
1848
1849 out_ti:
1850         tiqdio_free_memory();
1851 out_cache:
1852         qdio_setup_exit();
1853 out_debug:
1854         qdio_debug_exit();
1855         return rc;
1856 }
1857
1858 static void __exit exit_QDIO(void)
1859 {
1860         tiqdio_unregister_thinints();
1861         tiqdio_free_memory();
1862         qdio_setup_exit();
1863         qdio_debug_exit();
1864 }
1865
1866 module_init(init_QDIO);
1867 module_exit(exit_QDIO);