1 /*
2  *
3  * linux/drivers/s390/cio/qdio.c
4  *
5  * Linux for S/390 QDIO base support, HiperSockets base support
6  * version 2
7  *
8  * Copyright 2000,2002 IBM Corporation
9  * Author(s):             Utz Bacher <utz.bacher@de.ibm.com>
10  * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
11  *
12  * Restriction: only 63 iqdio subchannels will have their own indicator;
13  * after that, subsequent subchannels share one indicator
14  *
15  *
16  *
17  *
18  * This program is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License as published by
20  * the Free Software Foundation; either version 2, or (at your option)
21  * any later version.
22  *
23  * This program is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
26  * GNU General Public License for more details.
27  *
28  * You should have received a copy of the GNU General Public License
29  * along with this program; if not, write to the Free Software
30  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31  */
32
33 #include <linux/module.h>
34 #include <linux/init.h>
35 #include <linux/delay.h>
36 #include <linux/slab.h>
37 #include <linux/kernel.h>
38 #include <linux/proc_fs.h>
39 #include <linux/timer.h>
40 #include <linux/mempool.h>
41 #include <linux/semaphore.h>
42
43 #include <asm/ccwdev.h>
44 #include <asm/io.h>
45 #include <asm/atomic.h>
46 #include <asm/timex.h>
47
48 #include <asm/debug.h>
49 #include <asm/s390_rdev.h>
50 #include <asm/qdio.h>
51 #include <asm/airq.h>
52
53 #include "cio.h"
54 #include "css.h"
55 #include "device.h"
56 #include "qdio.h"
57 #include "ioasm.h"
58 #include "chsc.h"
59
60 /****************** MODULE PARAMETER VARIABLES ********************/
61 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
62 MODULE_DESCRIPTION("QDIO base support version 2, " \
63                    "Copyright 2000 IBM Corporation");
64 MODULE_LICENSE("GPL");
65
66 /******************** HERE WE GO ***********************************/
67
68 static const char version[] = "QDIO base support version 2";
69
70 static int qdio_performance_stats = 0;
71 static int proc_perf_file_registration;
72 static struct qdio_perf_stats perf_stats;
73
74 static int hydra_thinints;
75 static int is_passthrough = 0;
76 static int omit_svs;
77
78 static int indicator_used[INDICATORS_PER_CACHELINE];
79 static __u32 * volatile indicators;
80 static __u32 volatile spare_indicator;
81 static atomic_t spare_indicator_usecount;
82 #define QDIO_MEMPOOL_SCSSC_ELEMENTS 2
83 static mempool_t *qdio_mempool_scssc;
84 static struct kmem_cache *qdio_q_cache;
85
86 static debug_info_t *qdio_dbf_setup;
87 static debug_info_t *qdio_dbf_sbal;
88 static debug_info_t *qdio_dbf_trace;
89 static debug_info_t *qdio_dbf_sense;
90 #ifdef CONFIG_QDIO_DEBUG
91 static debug_info_t *qdio_dbf_slsb_out;
92 static debug_info_t *qdio_dbf_slsb_in;
93 #endif /* CONFIG_QDIO_DEBUG */
94
95 /* iQDIO stuff: */
96 static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
97                                                  during a while loop */
98 static DEFINE_SPINLOCK(ttiq_list_lock);
99 static void *tiqdio_ind;
100 static void tiqdio_tl(unsigned long);
101 static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
102
103 /* not a macro, so that an atomic_read argument is evaluated only once */
104 static inline int 
105 qdio_min(int a,int b)
106 {
107         if (a<b)
108                 return a;
109         else
110                 return b;
111 }
112
113 /***************** SCRUBBER HELPER ROUTINES **********************/
114 #ifdef CONFIG_64BIT
115 static inline void qdio_perf_stat_inc(atomic64_t *count)
116 {
117         if (qdio_performance_stats)
118                 atomic64_inc(count);
119 }
120
121 static inline void qdio_perf_stat_dec(atomic64_t *count)
122 {
123         if (qdio_performance_stats)
124                 atomic64_dec(count);
125 }
126 #else /* CONFIG_64BIT */
127 static inline void qdio_perf_stat_inc(atomic_t *count)
128 {
129         if (qdio_performance_stats)
130                 atomic_inc(count);
131 }
132
133 static inline void qdio_perf_stat_dec(atomic_t *count)
134 {
135         if (qdio_performance_stats)
136                 atomic_dec(count);
137 }
138 #endif /* CONFIG_64BIT */
139
140 static inline __u64 
141 qdio_get_micros(void)
142 {
143         return (get_clock() >> 12); /* time>>12 is microseconds */
144 }
145
146 /*
147  * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve
148  * the q in any case, so that we'll not be interrupted when we are in
149  * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost
150  * always works (famous last words)
151  */
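/*
 * returns the previous use count; a non-zero result means the queue was
 * already reserved by somebody else
 */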
152 static inline int 
153 qdio_reserve_q(struct qdio_q *q)
154 {
155         return atomic_add_return(1,&q->use_count) - 1;
156 }
157
158 static inline void 
159 qdio_release_q(struct qdio_q *q)
160 {
161         atomic_dec(&q->use_count);
162 }
163
164 /* check the condition code qualifier: 0 = done, 1 = retry, -EIO = error */
165 static int
166 qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
167 {
168         char dbf_text[15];
169
170         if (ccq == 0 || ccq == 32)
171                 return 0;
172         if (ccq == 96 || ccq == 97)
173                 return 1;
174         /*notify devices immediately*/
175         sprintf(dbf_text,"%d", ccq);
176         QDIO_DBF_TEXT2(1,trace,dbf_text);
177         return -EIO;
178 }
179 /* EQBS: extract buffer states */
180 static int
181 qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
182              unsigned int *start, unsigned int *cnt)
183 {
184         struct qdio_irq *irq;
185         unsigned int tmp_cnt, q_no, ccq;
186         int rc ;
187         char dbf_text[15];
188
189         ccq = 0;
190         tmp_cnt = *cnt;
191         irq = (struct qdio_irq*)q->irq_ptr;
192         q_no = q->q_no;
193         if(!q->is_input_q)
194                 q_no += irq->no_input_qs;
195 again:
196         ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
197         rc = qdio_check_ccq(q, ccq);
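        /*
         * ccq 96 with a changed count means at least some buffer states
         * were extracted; treat this partial completion as success
         */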
198         if ((ccq == 96) && (tmp_cnt != *cnt))
199                 rc = 0;
200         if (rc == 1) {
201                 QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
202                 goto again;
203         }
204         if (rc < 0) {
205                 QDIO_DBF_TEXT2(1,trace,"eqberr");
206                 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
207                 QDIO_DBF_TEXT2(1,trace,dbf_text);
208                 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
209                                 QDIO_STATUS_LOOK_FOR_ERROR,
210                                 0, 0, 0, -1, -1, q->int_parm);
211                 return 0;
212         }
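        /* *cnt has been counted down by the number of buffers processed,
         * so return how many buffer states were actually extracted */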
213         return (tmp_cnt - *cnt);
214 }
215
216 /* SQBS: set buffer states */
217 static int
218 qdio_do_sqbs(struct qdio_q *q, unsigned char state,
219              unsigned int *start, unsigned int *cnt)
220 {
221         struct qdio_irq *irq;
222         unsigned int tmp_cnt, q_no, ccq;
223         int rc;
224         char dbf_text[15];
225
226         ccq = 0;
227         tmp_cnt = *cnt;
228         irq = (struct qdio_irq*)q->irq_ptr;
229         q_no = q->q_no;
230         if(!q->is_input_q)
231                 q_no += irq->no_input_qs;
232 again:
233         ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
234         rc = qdio_check_ccq(q, ccq);
235         if (rc == 1) {
236                 QDIO_DBF_TEXT5(1,trace,"sqAGAIN");
237                 goto again;
238         }
239         if (rc < 0) {
240                 QDIO_DBF_TEXT3(1,trace,"sqberr");
241                 sprintf(dbf_text,"%2x,%2x",tmp_cnt,*cnt);
242                 QDIO_DBF_TEXT3(1,trace,dbf_text);
243                 sprintf(dbf_text,"%d,%d",ccq,q_no);
244                 QDIO_DBF_TEXT3(1,trace,dbf_text);
245                 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
246                                 QDIO_STATUS_LOOK_FOR_ERROR,
247                                 0, 0, 0, -1, -1, q->int_parm);
248                 return 0;
249         }
250         return (tmp_cnt - *cnt);
251 }
252
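/*
 * set the state of a buffer: with QEBSM this goes through SQBS (and may
 * cover *count buffers), otherwise the SLSB entry is changed directly
 */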
253 static inline int
254 qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
255               unsigned char state, unsigned int *count)
256 {
257         volatile char *slsb;
258         struct qdio_irq *irq;
259
260         irq = (struct qdio_irq*)q->irq_ptr;
261         if (!irq->is_qebsm) {
262                 slsb = (char *)&q->slsb.acc.val[(*bufno)];
263                 xchg(slsb, state);
264                 return 1;
265         }
266         return qdio_do_sqbs(q, state, bufno, count);
267 }
268
269 #ifdef CONFIG_QDIO_DEBUG
270 static inline void
271 qdio_trace_slsb(struct qdio_q *q)
272 {
273         if (q->queue_type==QDIO_TRACE_QTYPE) {
274                 if (q->is_input_q)
275                         QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
276                                       QDIO_MAX_BUFFERS_PER_Q);
277                 else
278                         QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
279                                       QDIO_MAX_BUFFERS_PER_Q);
280         }
281 }
282 #endif
283
284 static inline int
285 set_slsb(struct qdio_q *q, unsigned int *bufno,
286          unsigned char state, unsigned int *count)
287 {
288         int rc;
289 #ifdef CONFIG_QDIO_DEBUG
290         qdio_trace_slsb(q);
291 #endif
292         rc = qdio_set_slsb(q, bufno, state, count);
293 #ifdef CONFIG_QDIO_DEBUG
294         qdio_trace_slsb(q);
295 #endif
296         return rc;
297 }
298 static inline int 
299 qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
300                unsigned int gpr3)
301 {
302         int cc;
303
304         QDIO_DBF_TEXT4(0,trace,"sigasync");
305         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
306
307         qdio_perf_stat_inc(&perf_stats.siga_syncs);
308
309         cc = do_siga_sync(q->schid, gpr2, gpr3);
310         if (cc)
311                 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
312
313         return cc;
314 }
315
316 static inline int
317 qdio_siga_sync_q(struct qdio_q *q)
318 {
319         if (q->is_input_q)
320                 return qdio_siga_sync(q, 0, q->mask);
321         return qdio_siga_sync(q, q->mask, 0);
322 }
323
324 static int
325 __do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
326 {
327        struct qdio_irq *irq;
328        unsigned int fc = 0;
329        unsigned long schid;
330
331        irq = (struct qdio_irq *) q->irq_ptr;
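       /* with QEBSM the subchannel token is passed instead of the
        * subchannel id; the 0x80 bit in the function code indicates
        * this (token addressing) to SIGA */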
332        if (!irq->is_qebsm)
333                schid = *((u32 *)&q->schid);
334        else {
335                schid = irq->sch_token;
336                fc |= 0x80;
337        }
338        return do_siga_output(schid, q->mask, busy_bit, fc);
339 }
340
341 /* 
342  * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
343  * an access exception 
344  */
345 static int
346 qdio_siga_output(struct qdio_q *q)
347 {
348         int cc;
349         __u32 busy_bit;
350         __u64 start_time=0;
351
352         qdio_perf_stat_inc(&perf_stats.siga_outs);
353
354         QDIO_DBF_TEXT4(0,trace,"sigaout");
355         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
356
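        /* on cc=2 with the busy bit set for a hipersockets queue, retry
         * for up to QDIO_BUSY_BIT_PATIENCE before giving up */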
357         for (;;) {
358                 cc = __do_siga_output(q, &busy_bit);
359 //QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
360                 if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
361                         if (!start_time) 
362                                 start_time=NOW;
363                         if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
364                                 break;
365                 } else
366                         break;
367         }
368         
369         if ((cc==2) && (busy_bit)) 
370                 cc |= QDIO_SIGA_ERROR_B_BIT_SET;
371
372         if (cc)
373                 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
374
375         return cc;
376 }
377
378 static int
379 qdio_siga_input(struct qdio_q *q)
380 {
381         int cc;
382
383         QDIO_DBF_TEXT4(0,trace,"sigain");
384         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
385
386         qdio_perf_stat_inc(&perf_stats.siga_ins);
387
388         cc = do_siga_input(q->schid, q->mask);
389         
390         if (cc)
391                 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
392
393         return cc;
394 }
395
396 /* locked by the locks in qdio_activate and qdio_cleanup */
397 static __u32 *
398 qdio_get_indicator(void)
399 {
400         int i;
401
402         for (i = 0; i < INDICATORS_PER_CACHELINE; i++)
403                 if (!indicator_used[i]) {
404                         indicator_used[i]=1;
405                         return indicators+i;
406                 }
407         atomic_inc(&spare_indicator_usecount);
408         return (__u32 * volatile) &spare_indicator;
409 }
410
411 /* locked by the locks in qdio_activate and qdio_cleanup */
412 static void 
413 qdio_put_indicator(__u32 *addr)
414 {
415         int i;
416
417         if ( (addr) && (addr!=&spare_indicator) ) {
418                 i=addr-indicators;
419                 indicator_used[i]=0;
420         }
421         if (addr == &spare_indicator)
422                 atomic_dec(&spare_indicator_usecount);
423 }
424
425 static inline void
426 tiqdio_clear_summary_bit(__u32 *location)
427 {
428         QDIO_DBF_TEXT5(0,trace,"clrsummb");
429         QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
430
431         xchg(location,0);
432 }
433
434 static inline  void
435 tiqdio_set_summary_bit(__u32 *location)
436 {
437         QDIO_DBF_TEXT5(0,trace,"setsummb");
438         QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
439
440         xchg(location,-1);
441 }
442
443 static inline void 
444 tiqdio_sched_tl(void)
445 {
446         tasklet_hi_schedule(&tiqdio_tasklet);
447 }
448
449 static void
450 qdio_mark_tiq(struct qdio_q *q)
451 {
452         unsigned long flags;
453
454         QDIO_DBF_TEXT4(0,trace,"mark iq");
455         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
456
457         spin_lock_irqsave(&ttiq_list_lock,flags);
458         if (unlikely(atomic_read(&q->is_in_shutdown)))
459                 goto out_unlock;
460
461         if (!q->is_input_q)
462                 goto out_unlock;
463
464         if ((q->list_prev) || (q->list_next)) 
465                 goto out_unlock;
466
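        /* insert q into the circular, doubly-linked list of thinint input
         * queues headed by tiq_list */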
467         if (!tiq_list) {
468                 tiq_list=q;
469                 q->list_prev=q;
470                 q->list_next=q;
471         } else {
472                 q->list_next=tiq_list;
473                 q->list_prev=tiq_list->list_prev;
474                 tiq_list->list_prev->list_next=q;
475                 tiq_list->list_prev=q;
476         }
477         spin_unlock_irqrestore(&ttiq_list_lock,flags);
478
479         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
480         tiqdio_sched_tl();
481         return;
482 out_unlock:
483         spin_unlock_irqrestore(&ttiq_list_lock,flags);
484         return;
485 }
486
487 static inline void
488 qdio_mark_q(struct qdio_q *q)
489 {
490         QDIO_DBF_TEXT4(0,trace,"mark q");
491         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
492
493         if (unlikely(atomic_read(&q->is_in_shutdown)))
494                 return;
495
496         tasklet_schedule(&q->tasklet);
497 }
498
499 static int
500 qdio_stop_polling(struct qdio_q *q)
501 {
502 #ifdef QDIO_USE_PROCESSING_STATE
503        unsigned int tmp, gsf, count = 1;
504        unsigned char state = 0;
505        struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
506
507         if (!atomic_xchg(&q->polling,0))
508                 return 1;
509
510         QDIO_DBF_TEXT4(0,trace,"stoppoll");
511         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
512
513         /* show the card that we are not polling anymore */
514         if (!q->is_input_q)
515                 return 1;
516
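       /* step back one buffer from the saved frontier (modulo the queue
        * size); that buffer was left in the PROCESSING state while polling */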
517        tmp = gsf = GET_SAVED_FRONTIER(q);
518        tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
519        set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);
520
521         /* 
522          * we don't issue this SYNC_MEMORY, as we trust Rick T and
523          * moreover will not use the PROCESSING state under VM, so
524          * q->polling was 0 anyway
525          */
526         /*SYNC_MEMORY;*/
527        if (irq->is_qebsm) {
528                count = 1;
529                qdio_do_eqbs(q, &state, &gsf, &count);
530        } else
531                state = q->slsb.acc.val[gsf];
532        if (state != SLSB_P_INPUT_PRIMED)
533                 return 1;
534         /* 
535          * set our summary bit again, as otherwise there is a
536          * small window we can miss between resetting it and
537          * checking for PRIMED state 
538          */
539         if (q->is_thinint_q)
540                 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
541         return 0;
542
543 #else /* QDIO_USE_PROCESSING_STATE */
544         return 1;
545 #endif /* QDIO_USE_PROCESSING_STATE */
546 }
547
548 /* 
549  * see the comment in do_QDIO and before qdio_reserve_q about the
550  * sophisticated locking outside of unmark_q, so that we don't need to
551  * disable the interrupts :-) 
552 */
553 static void
554 qdio_unmark_q(struct qdio_q *q)
555 {
556         unsigned long flags;
557
558         QDIO_DBF_TEXT4(0,trace,"unmark q");
559         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
560
561         if ((!q->list_prev)||(!q->list_next))
562                 return;
563
564         if ((q->is_thinint_q)&&(q->is_input_q)) {
565                 /* iQDIO */
566                 spin_lock_irqsave(&ttiq_list_lock,flags);
567                 /* in case cleanup has done this already and simultaneously
568                  * qdio_unmark_q is called from the interrupt handler, we've
569                  * got to check this again in this specific case */
570                 if ((!q->list_prev)||(!q->list_next))
571                         goto out;
572                 if (q->list_next==q) {
573                         /* q was the only interesting q */
574                         tiq_list=NULL;
575                         q->list_next=NULL;
576                         q->list_prev=NULL;
577                 } else {
578                         q->list_next->list_prev=q->list_prev;
579                         q->list_prev->list_next=q->list_next;
580                         tiq_list=q->list_next;
581                         q->list_next=NULL;
582                         q->list_prev=NULL;
583                 }
584 out:
585                 spin_unlock_irqrestore(&ttiq_list_lock,flags);
586         }
587 }
588
589 static inline unsigned long 
590 tiqdio_clear_global_summary(void)
591 {
592         unsigned long time;
593
594         QDIO_DBF_TEXT5(0,trace,"clrglobl");
595         
596         time = do_clear_global_summary();
597
598         QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long));
599
600         return time;
601 }
602
603
604 /************************* OUTBOUND ROUTINES *******************************/
605 static int
606 qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
607 {
608         struct qdio_irq *irq;
609         unsigned char state;
610         unsigned int cnt, count, ftc;
611
612         irq = (struct qdio_irq *) q->irq_ptr;
613         if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
614                 SYNC_MEMORY;
615
616         ftc = q->first_to_check;
617         count = qdio_min(atomic_read(&q->number_of_buffers_used),
618                         (QDIO_MAX_BUFFERS_PER_Q-1));
619         if (count == 0)
620                 return q->first_to_check;
621         cnt = qdio_do_eqbs(q, &state, &ftc, &count);
622         if (cnt == 0)
623                 return q->first_to_check;
624         switch (state) {
625         case SLSB_P_OUTPUT_ERROR:
626                 QDIO_DBF_TEXT3(0,trace,"outperr");
627                 atomic_sub(cnt , &q->number_of_buffers_used);
628                 if (q->qdio_error)
629                         q->error_status_flags |=
630                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
631                 q->qdio_error = SLSB_P_OUTPUT_ERROR;
632                 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
633                 q->first_to_check = ftc;
634                 break;
635         case SLSB_P_OUTPUT_EMPTY:
636                 QDIO_DBF_TEXT5(0,trace,"outpempt");
637                 atomic_sub(cnt, &q->number_of_buffers_used);
638                 q->first_to_check = ftc;
639                 break;
640         case SLSB_CU_OUTPUT_PRIMED:
641                 /* all buffers primed */
642                 QDIO_DBF_TEXT5(0,trace,"outpprim");
643                 break;
644         default:
645                 break;
646         }
647         QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
648         return q->first_to_check;
649 }
650
651 static int
652 qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
653 {
654         struct qdio_irq *irq;
655         unsigned char state;
656         int tmp, ftc, count, cnt;
657         char dbf_text[15];
658
659
660         irq = (struct qdio_irq *) q->irq_ptr;
661         ftc = q->first_to_check;
662         count = qdio_min(atomic_read(&q->number_of_buffers_used),
663                         (QDIO_MAX_BUFFERS_PER_Q-1));
664         if (count == 0)
665                  return q->first_to_check;
666         cnt = qdio_do_eqbs(q, &state, &ftc, &count);
667         if (cnt == 0)
668                  return q->first_to_check;
669         switch (state) {
670         case SLSB_P_INPUT_ERROR :
671 #ifdef CONFIG_QDIO_DEBUG
672                 QDIO_DBF_TEXT3(1,trace,"inperr");
673                 sprintf(dbf_text,"%2x,%2x",ftc,count);
674                 QDIO_DBF_TEXT3(1,trace,dbf_text);
675 #endif /* CONFIG_QDIO_DEBUG */
676                 if (q->qdio_error)
677                         q->error_status_flags |=
678                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
679                 q->qdio_error = SLSB_P_INPUT_ERROR;
680                 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
681                 atomic_sub(cnt, &q->number_of_buffers_used);
682                 q->first_to_check = ftc;
683                 break;
684         case SLSB_P_INPUT_PRIMED :
685                 QDIO_DBF_TEXT3(0,trace,"inptprim");
686                 sprintf(dbf_text,"%2x,%2x",ftc,count);
687                 QDIO_DBF_TEXT3(1,trace,dbf_text);
688                 tmp = 0;
689                 ftc = q->first_to_check;
690 #ifdef QDIO_USE_PROCESSING_STATE
691                 if (cnt > 1) {
692                         cnt -= 1;
693                         tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
694                         if (!tmp)
695                                 break;
696                 }
697                 cnt = 1;
698                 tmp += set_slsb(q, &ftc,
699                                SLSB_P_INPUT_PROCESSING, &cnt);
700                 atomic_set(&q->polling, 1);
701 #else
702                 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
703 #endif
704                 atomic_sub(tmp, &q->number_of_buffers_used);
705                 q->first_to_check = ftc;
706                 break;
707         case SLSB_CU_INPUT_EMPTY:
708         case SLSB_P_INPUT_NOT_INIT:
709         case SLSB_P_INPUT_PROCESSING:
710                 QDIO_DBF_TEXT5(0,trace,"inpnipro");
711                 break;
712         default:
713                 break;
714         }
715         QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
716         return q->first_to_check;
717 }
718
719 static int
720 qdio_get_outbound_buffer_frontier(struct qdio_q *q)
721 {
722         struct qdio_irq *irq;
723         volatile char *slsb;
724         unsigned int count = 1;
725         int first_not_to_check, f, f_mod_no;
726         char dbf_text[15];
727
728         QDIO_DBF_TEXT4(0,trace,"getobfro");
729         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
730
731         irq = (struct qdio_irq *) q->irq_ptr;
732         if (irq->is_qebsm)
733                 return qdio_qebsm_get_outbound_buffer_frontier(q);
734
735         slsb=&q->slsb.acc.val[0];
736         f_mod_no=f=q->first_to_check;
737         /* 
738          * f points to already processed elements, so f+no_used is correct...
739          * ... but: we don't check 128 buffers, as otherwise
740          * qdio_has_outbound_q_moved would return 0 
741          */
742         first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
743                                       (QDIO_MAX_BUFFERS_PER_Q-1));
744
745         if (((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) ||
746                  (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH))
747                 SYNC_MEMORY;
748
749 check_next:
750         if (f==first_not_to_check) 
751                 goto out;
752
753         switch(slsb[f_mod_no]) {
754
755         /* the adapter has not fetched the output yet */
756         case SLSB_CU_OUTPUT_PRIMED:
757                 QDIO_DBF_TEXT5(0,trace,"outpprim");
758                 break;
759
760         /* the adapter got it */
761         case SLSB_P_OUTPUT_EMPTY:
762                 atomic_dec(&q->number_of_buffers_used);
763                 f++;
764                 f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
765                 QDIO_DBF_TEXT5(0,trace,"outpempt");
766                 goto check_next;
767
768         case SLSB_P_OUTPUT_ERROR:
769                 QDIO_DBF_TEXT3(0,trace,"outperr");
770                 sprintf(dbf_text,"%x-%x-%x",f_mod_no,
771                         q->sbal[f_mod_no]->element[14].sbalf.value,
772                         q->sbal[f_mod_no]->element[15].sbalf.value);
773                 QDIO_DBF_TEXT3(1,trace,dbf_text);
774                 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
775
776                 /* kind of process the buffer */
777                 set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);
778
779                 /* 
780                  * we increment the frontier, as this buffer
781                  * was processed obviously 
782                  */
783                 atomic_dec(&q->number_of_buffers_used);
784                 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
785
786                 if (q->qdio_error)
787                         q->error_status_flags|=
788                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
789                 q->qdio_error=SLSB_P_OUTPUT_ERROR;
790                 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
791
792                 break;
793
794         /* no new buffers */
795         default:
796                 QDIO_DBF_TEXT5(0,trace,"outpni");
797         }
798 out:
799         return (q->first_to_check=f_mod_no);
800 }
801
802 /* all buffers are processed */
803 static int
804 qdio_is_outbound_q_done(struct qdio_q *q)
805 {
806         int no_used;
807 #ifdef CONFIG_QDIO_DEBUG
808         char dbf_text[15];
809 #endif
810
811         no_used=atomic_read(&q->number_of_buffers_used);
812
813 #ifdef CONFIG_QDIO_DEBUG
814         if (no_used) {
815                 sprintf(dbf_text,"oqisnt%02x",no_used);
816                 QDIO_DBF_TEXT4(0,trace,dbf_text);
817         } else {
818                 QDIO_DBF_TEXT4(0,trace,"oqisdone");
819         }
820         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
821 #endif /* CONFIG_QDIO_DEBUG */
822         return (no_used==0);
823 }
824
825 static int
826 qdio_has_outbound_q_moved(struct qdio_q *q)
827 {
828         int i;
829
830         i=qdio_get_outbound_buffer_frontier(q);
831
832         if ( (i!=GET_SAVED_FRONTIER(q)) ||
833              (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
834                 SAVE_FRONTIER(q,i);
835                 QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
836                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
837                 return 1;
838         } else {
839                 QDIO_DBF_TEXT4(0,trace,"oqhsntmv");
840                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
841                 return 0;
842         }
843 }
844
845 static void
846 qdio_kick_outbound_q(struct qdio_q *q)
847 {
848         int result;
849 #ifdef CONFIG_QDIO_DEBUG
850         char dbf_text[15];
851
852         QDIO_DBF_TEXT4(0,trace,"kickoutq");
853         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
854 #endif /* CONFIG_QDIO_DEBUG */
855
856         if (!q->siga_out)
857                 return;
858
859         /* here's the story with cc=2 and busy bit set (thanks, Rick):
860          * VM's CP could present us cc=2 and busy bit set on SIGA-write
861          * during reconfiguration of their Guest LAN (only in HIPERS mode,
862          * QDIO mode is asynchronous -- cc=2 and busy bit there will take
863          * the queues down immediately; and not being under VM we have a
864          * problem on cc=2 and busy bit set right away).
865          *
866          * Therefore qdio_siga_output will try for a short time constantly,
867          * if such a condition occurs. If it doesn't change, it will
868          * increase the busy_siga_counter and save the timestamp, and
869          * schedule the queue for later processing (via mark_q, using the
870          * queue tasklet). __qdio_outbound_processing will check out the
871          * counter. If non-zero, it will call qdio_kick_outbound_q as often
872          * as the value of the counter. This will attempt further SIGA
873          * instructions. For each successful SIGA, the counter is
874          * decreased, for failing SIGAs the counter remains the same, after
875          * all.
876          * After some time of no movement, qdio_kick_outbound_q will
877          * finally fail and reflect corresponding error codes to call
878          * the upper layer module and have it take the queues down.
879          *
880          * Note that this is a change from the original HiperSockets design
881          * (saying cc=2 and busy bit means take the queues down), but in
882  * those days Guest LAN didn't exist... excessive cc=2 with busy bit
883          * conditions will still take the queues down, but the threshold is
884          * higher due to the Guest LAN environment.
885          */
886
887
888         result=qdio_siga_output(q);
889
890         switch (result) {
891         case 0:
892                 /* went smooth this time, reset timestamp */
893 #ifdef CONFIG_QDIO_DEBUG
894                 QDIO_DBF_TEXT3(0,trace,"cc2reslv");
895                 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
896                         atomic_read(&q->busy_siga_counter));
897                 QDIO_DBF_TEXT3(0,trace,dbf_text);
898 #endif /* CONFIG_QDIO_DEBUG */
899                 q->timing.busy_start=0;
900                 break;
901         case (2|QDIO_SIGA_ERROR_B_BIT_SET):
902                 /* cc=2 and busy bit: */
903                 atomic_inc(&q->busy_siga_counter);
904
905                 /* if the last siga was successful, save
906                  * timestamp here */
907                 if (!q->timing.busy_start)
908                         q->timing.busy_start=NOW;
909
910                 /* if we're in time, don't touch error_status_flags
911                  * and siga_error */
912                 if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
913                         qdio_mark_q(q);
914                         break;
915                 }
916                 QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
917 #ifdef CONFIG_QDIO_DEBUG
918                 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
919                         atomic_read(&q->busy_siga_counter));
920                 QDIO_DBF_TEXT3(0,trace,dbf_text);
921 #endif /* CONFIG_QDIO_DEBUG */
922                 /* else fallthrough and report error */
923         default:
924                 /* for plain cc=1, 2 or 3: */
925                 if (q->siga_error)
926                         q->error_status_flags|=
927                                 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
928                 q->error_status_flags|=
929                         QDIO_STATUS_LOOK_FOR_ERROR;
930                 q->siga_error=result;
931         }
932 }
933
934 static void
935 qdio_kick_outbound_handler(struct qdio_q *q)
936 {
937         int start, end, real_end, count;
938 #ifdef CONFIG_QDIO_DEBUG
939         char dbf_text[15];
940 #endif
941
942         start = q->first_element_to_kick;
943         /* last_move_ftc was just updated */
944         real_end = GET_SAVED_FRONTIER(q);
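        /* end is the last buffer actually processed (frontier - 1 modulo
         * the queue size); count wraps around accordingly */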
945         end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
946                 (QDIO_MAX_BUFFERS_PER_Q-1);
947         count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
948                 (QDIO_MAX_BUFFERS_PER_Q-1);
949
950 #ifdef CONFIG_QDIO_DEBUG
951         QDIO_DBF_TEXT4(0,trace,"kickouth");
952         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
953
954         sprintf(dbf_text,"s=%2xc=%2x",start,count);
955         QDIO_DBF_TEXT4(0,trace,dbf_text);
956 #endif /* CONFIG_QDIO_DEBUG */
957
958         if (q->state==QDIO_IRQ_STATE_ACTIVE)
959                 q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
960                            q->error_status_flags,
961                            q->qdio_error,q->siga_error,q->q_no,start,count,
962                            q->int_parm);
963
964         /* for the next time: */
965         q->first_element_to_kick=real_end;
966         q->qdio_error=0;
967         q->siga_error=0;
968         q->error_status_flags=0;
969 }
970
971 static void
972 __qdio_outbound_processing(struct qdio_q *q)
973 {
974         int siga_attempts;
975
976         QDIO_DBF_TEXT4(0,trace,"qoutproc");
977         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
978
979         if (unlikely(qdio_reserve_q(q))) {
980                 qdio_release_q(q);
981                 qdio_perf_stat_inc(&perf_stats.outbound_tl_runs_resched);
982                 /* as we're sissies, we'll check next time */
983                 if (likely(!atomic_read(&q->is_in_shutdown))) {
984                         qdio_mark_q(q);
985                         QDIO_DBF_TEXT4(0,trace,"busy,agn");
986                 }
987                 return;
988         }
989         qdio_perf_stat_inc(&perf_stats.outbound_tl_runs);
990         qdio_perf_stat_inc(&perf_stats.tl_runs);
991
992         /* see comment in qdio_kick_outbound_q */
993         siga_attempts=atomic_read(&q->busy_siga_counter);
994         while (siga_attempts) {
995                 atomic_dec(&q->busy_siga_counter);
996                 qdio_kick_outbound_q(q);
997                 siga_attempts--;
998         }
999
1000         if (qdio_has_outbound_q_moved(q))
1001                 qdio_kick_outbound_handler(q);
1002
1003         if (q->queue_type == QDIO_ZFCP_QFMT) {
1004                 if ((!q->hydra_gives_outbound_pcis) &&
1005                     (!qdio_is_outbound_q_done(q)))
1006                         qdio_mark_q(q);
1007         }
1008         else if (((!q->is_iqdio_q) && (!q->is_pci_out)) ||
1009                  (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) {
1010                 /* 
1011                  * make sure buffer switch from PRIMED to EMPTY is noticed
1012                  * and outbound_handler is called
1013                  */
1014                 if (qdio_is_outbound_q_done(q)) {
1015                         del_timer(&q->timer);
1016                 } else {
1017                         if (!timer_pending(&q->timer))
1018                                 mod_timer(&q->timer, jiffies +
1019                                           QDIO_FORCE_CHECK_TIMEOUT);
1020                 }
1021         }
1022
1023         qdio_release_q(q);
1024 }
1025
1026 static void
1027 qdio_outbound_processing(unsigned long q)
1028 {
1029         __qdio_outbound_processing((struct qdio_q *) q);
1030 }
1031
1032 /************************* INBOUND ROUTINES *******************************/
1033
1034
1035 static int
1036 qdio_get_inbound_buffer_frontier(struct qdio_q *q)
1037 {
1038         struct qdio_irq *irq;
1039         int f,f_mod_no;
1040         volatile char *slsb;
1041         unsigned int count = 1;
1042         int first_not_to_check;
1043 #ifdef CONFIG_QDIO_DEBUG
1044         char dbf_text[15];
1045 #endif /* CONFIG_QDIO_DEBUG */
1046 #ifdef QDIO_USE_PROCESSING_STATE
1047         int last_position=-1;
1048 #endif /* QDIO_USE_PROCESSING_STATE */
1049
1050         QDIO_DBF_TEXT4(0,trace,"getibfro");
1051         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1052
1053         irq = (struct qdio_irq *) q->irq_ptr;
1054         if (irq->is_qebsm)
1055                 return qdio_qebsm_get_inbound_buffer_frontier(q);
1056
1057         slsb=&q->slsb.acc.val[0];
1058         f_mod_no=f=q->first_to_check;
1059         /* 
1060          * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
1061          * would return 0 
1062          */
1063         first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
1064                                       (QDIO_MAX_BUFFERS_PER_Q-1));
1065
1066         /* 
1067          * we don't use this one, as a PCI (or we ourselves, after a thin
1068          * interrupt) will sync the queues
1069          */
1070         /* SYNC_MEMORY;*/
1071
1072 check_next:
1073         f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
1074         if (f==first_not_to_check) 
1075                 goto out;
1076         switch (slsb[f_mod_no]) {
1077
1078         /* CU_EMPTY means frontier is reached */
1079         case SLSB_CU_INPUT_EMPTY:
1080                 QDIO_DBF_TEXT5(0,trace,"inptempt");
1081                 break;
1082
1083         /* P_PRIMED means set slsb to P_PROCESSING and move on */
1084         case SLSB_P_INPUT_PRIMED:
1085                 QDIO_DBF_TEXT5(0,trace,"inptprim");
1086
1087 #ifdef QDIO_USE_PROCESSING_STATE
1088                 /* 
1089                  * as soon as running under VM, polling the input queues will
1090                  * kill VM in terms of CP overhead 
1091                  */
1092                 if (q->siga_sync) {
1093                         set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1094                 } else {
1095                         /* set the previous buffer to NOT_INIT. The current
1096                          * buffer will be set to PROCESSING at the end of
1097                          * this function to avoid further interrupts. */
1098                         if (last_position>=0)
1099                                 set_slsb(q, &last_position,
1100                                          SLSB_P_INPUT_NOT_INIT, &count);
1101                         atomic_set(&q->polling,1);
1102                         last_position=f_mod_no;
1103                 }
1104 #else /* QDIO_USE_PROCESSING_STATE */
1105                 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1106 #endif /* QDIO_USE_PROCESSING_STATE */
1107                 /* 
1108                  * not needed, as the inbound queue will be synced on the next
1109                  * siga-r; alternatively, tiqdio_is_inbound_q_done will do the siga-s
1110                  */
1111                 /*SYNC_MEMORY;*/
1112                 f++;
1113                 atomic_dec(&q->number_of_buffers_used);
1114                 goto check_next;
1115
1116         case SLSB_P_INPUT_NOT_INIT:
1117         case SLSB_P_INPUT_PROCESSING:
1118                 QDIO_DBF_TEXT5(0,trace,"inpnipro");
1119                 break;
1120
1121         /* P_ERROR means frontier is reached, break and report error */
1122         case SLSB_P_INPUT_ERROR:
1123 #ifdef CONFIG_QDIO_DEBUG
1124                 sprintf(dbf_text,"inperr%2x",f_mod_no);
1125                 QDIO_DBF_TEXT3(1,trace,dbf_text);
1126 #endif /* CONFIG_QDIO_DEBUG */
1127                 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
1128
1129                 /* kind of process the buffer */
1130                 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1131
1132                 if (q->qdio_error)
1133                         q->error_status_flags|=
1134                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
1135                 q->qdio_error=SLSB_P_INPUT_ERROR;
1136                 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
1137
1138                 /* we increment the frontier, as this buffer
1139                  * was processed obviously */
1140                 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1141                 atomic_dec(&q->number_of_buffers_used);
1142
1143 #ifdef QDIO_USE_PROCESSING_STATE
1144                 last_position=-1;
1145 #endif /* QDIO_USE_PROCESSING_STATE */
1146
1147                 break;
1148
1149         /* everything else means frontier not changed (HALTED or so) */
1150         default: 
1151                 break;
1152         }
1153 out:
1154         q->first_to_check=f_mod_no;
1155
1156 #ifdef QDIO_USE_PROCESSING_STATE
1157         if (last_position>=0)
1158                 set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count);
1159 #endif /* QDIO_USE_PROCESSING_STATE */
1160
1161         QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
1162
1163         return q->first_to_check;
1164 }
1165
1166 static int
1167 qdio_has_inbound_q_moved(struct qdio_q *q)
1168 {
1169         int i;
1170
1171         i=qdio_get_inbound_buffer_frontier(q);
1172         if ( (i!=GET_SAVED_FRONTIER(q)) ||
1173              (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
1174                 SAVE_FRONTIER(q,i);
1175                 if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
1176                         SAVE_TIMESTAMP(q);
1177
1178                 QDIO_DBF_TEXT4(0,trace,"inhasmvd");
1179                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1180                 return 1;
1181         } else {
1182                 QDIO_DBF_TEXT4(0,trace,"inhsntmv");
1183                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1184                 return 0;
1185         }
1186 }
1187
1188 /* means, no more buffers to be filled */
1189 static int
1190 tiqdio_is_inbound_q_done(struct qdio_q *q)
1191 {
1192         int no_used;
1193         unsigned int start_buf, count;
1194         unsigned char state = 0;
1195         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1196
1197 #ifdef CONFIG_QDIO_DEBUG
1198         char dbf_text[15];
1199 #endif
1200
1201         no_used=atomic_read(&q->number_of_buffers_used);
1202
1203         /* propagate the change from 82 to 80 through VM */
1204         SYNC_MEMORY;
1205
1206 #ifdef CONFIG_QDIO_DEBUG
1207         if (no_used) {
1208                 sprintf(dbf_text,"iqisnt%02x",no_used);
1209                 QDIO_DBF_TEXT4(0,trace,dbf_text);
1210         } else {
1211                 QDIO_DBF_TEXT4(0,trace,"iniqisdo");
1212         }
1213         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1214 #endif /* CONFIG_QDIO_DEBUG */
1215
1216         if (!no_used)
1217                 return 1;
1218         if (irq->is_qebsm) {
1219                 count = 1;
1220                 start_buf = q->first_to_check;
1221                 qdio_do_eqbs(q, &state, &start_buf, &count);
1222         } else
1223                 state = q->slsb.acc.val[q->first_to_check];
1224         if (state != SLSB_P_INPUT_PRIMED)
1225                 /* 
1226                  * nothing more to do if the next buffer is not PRIMED.
1227                  * note that we did a SYNC_MEMORY before, so there
1228                  * has been a synchronization.
1229                  * we will return 0 below, as there is nothing to do
1230                  * (stop_polling is not necessary, as we have not been
1231                  * using the PROCESSING state)
1232                  */
1233                 return 0;
1234
1235         /* 
1236          * ok, the next input buffer is primed. that means that the device state
1237          * change indicator and the adapter local summary are set, so we will find
1238          * it next time.
1239          * we will return 0 below, as there is nothing to do except scheduling
1240          * ourselves for the next time. 
1241          */
1242         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1243         tiqdio_sched_tl();
1244         return 0;
1245 }
1246
1247 static int
1248 qdio_is_inbound_q_done(struct qdio_q *q)
1249 {
1250         int no_used;
1251         unsigned int start_buf, count;
1252         unsigned char state = 0;
1253         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1254
1255 #ifdef CONFIG_QDIO_DEBUG
1256         char dbf_text[15];
1257 #endif
1258
1259         no_used=atomic_read(&q->number_of_buffers_used);
1260
1261         /* 
1262          * we need that one for synchronization with the adapter, as it
1263          * does a kind of PCI avoidance 
1264          */
1265         SYNC_MEMORY;
1266
1267         if (!no_used) {
1268                 QDIO_DBF_TEXT4(0,trace,"inqisdnA");
1269                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1270                 return 1;
1271         }
1272         if (irq->is_qebsm) {
1273                 count = 1;
1274                 start_buf = q->first_to_check;
1275                 qdio_do_eqbs(q, &state, &start_buf, &count);
1276         } else
1277                 state = q->slsb.acc.val[q->first_to_check];
1278         if (state == SLSB_P_INPUT_PRIMED) {
1279                 /* we got something to do */
1280                 QDIO_DBF_TEXT4(0,trace,"inqisntA");
1281                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1282                 return 0;
1283         }
1284
1285         /* on VM, we don't poll, so the q is always done here */
1286         if (q->siga_sync)
1287                 return 1;
1288         if (q->hydra_gives_outbound_pcis)
1289                 return 1;
1290
1291         /* 
1292          * at this point we know that inbound first_to_check
1293          * has (probably) not moved (see qdio_inbound_processing) 
1294          */
1295         if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
1296 #ifdef CONFIG_QDIO_DEBUG
1297                 QDIO_DBF_TEXT4(0,trace,"inqisdon");
1298                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1299                 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
1300                 QDIO_DBF_TEXT4(0,trace,dbf_text);
1301 #endif /* CONFIG_QDIO_DEBUG */
1302                 return 1;
1303         } else {
1304 #ifdef CONFIG_QDIO_DEBUG
1305                 QDIO_DBF_TEXT4(0,trace,"inqisntd");
1306                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1307                 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
1308                 QDIO_DBF_TEXT4(0,trace,dbf_text);
1309 #endif /* CONFIG_QDIO_DEBUG */
1310                 return 0;
1311         }
1312 }
1313
1314 static void
1315 qdio_kick_inbound_handler(struct qdio_q *q)
1316 {
1317         int count, start, end, real_end, i;
1318 #ifdef CONFIG_QDIO_DEBUG
1319         char dbf_text[15];
1320 #endif
1321
1322         QDIO_DBF_TEXT4(0,trace,"kickinh");
1323         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1324
1325         start=q->first_element_to_kick;
1326         real_end=q->first_to_check;
1327         end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1328  
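        /* count the buffers from start up to and including end, wrapping
         * at QDIO_MAX_BUFFERS_PER_Q */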
1329         i=start;
1330         count=0;
1331         while (1) {
1332                 count++;
1333                 if (i==end)
1334                         break;
1335                 i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1336         }
1337
1338 #ifdef CONFIG_QDIO_DEBUG
1339         sprintf(dbf_text,"s=%2xc=%2x",start,count);
1340         QDIO_DBF_TEXT4(0,trace,dbf_text);
1341 #endif /* CONFIG_QDIO_DEBUG */
1342
1343         if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
1344                 q->handler(q->cdev,
1345                            QDIO_STATUS_INBOUND_INT|q->error_status_flags,
1346                            q->qdio_error,q->siga_error,q->q_no,start,count,
1347                            q->int_parm);
1348
1349         /* for the next time: */
1350         q->first_element_to_kick=real_end;
1351         q->qdio_error=0;
1352         q->siga_error=0;
1353         q->error_status_flags=0;
1354
1355         qdio_perf_stat_inc(&perf_stats.inbound_cnt);
1356 }
1357
1358 static void
1359 __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1360 {
1361         struct qdio_irq *irq_ptr;
1362         struct qdio_q *oq;
1363         int i;
1364
1365         QDIO_DBF_TEXT4(0,trace,"iqinproc");
1366         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1367
1368         /* 
1369          * we first want to reserve the q, so that we know we don't
1370          * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might
1371          * be set 
1372          */
1373         if (unlikely(qdio_reserve_q(q))) {
1374                 qdio_release_q(q);
1375                 qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
1376                 /* 
1377                  * as we might just be about to stop polling, we make
1378                  * sure that we check again at least once more 
1379                  */
1380                 tiqdio_sched_tl();
1381                 return;
1382         }
1383         qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs);
1384         if (unlikely(atomic_read(&q->is_in_shutdown))) {
1385                 qdio_unmark_q(q);
1386                 goto out;
1387         }
1388
1389         /* 
1390          * we reset spare_ind_was_set, when the queue does not use the
1391          * spare indicator
1392          */
1393         if (spare_ind_was_set)
1394                 spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);
1395
1396         if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
1397                 goto out;
1398         /*
1399          * q->dev_st_chg_ind is the indicator, be it shared or not.
1400          * only clear it, if indicator is non-shared
1401          */
1402         if (q->dev_st_chg_ind != &spare_indicator)
1403                 tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
1404
1405         if (q->hydra_gives_outbound_pcis) {
1406                 if (!q->siga_sync_done_on_thinints) {
1407                         SYNC_MEMORY_ALL;
1408                 } else if (!q->siga_sync_done_on_outb_tis) {
1409                         SYNC_MEMORY_ALL_OUTB;
1410                 }
1411         } else {
1412                 SYNC_MEMORY;
1413         }
1414         /*
1415          * maybe we have to do work on our outbound queues... at least
1416          * we have to check the outbound-int-capable thinint-capable
1417          * queues
1418          */
1419         if (q->hydra_gives_outbound_pcis) {
1420                 irq_ptr = (struct qdio_irq*)q->irq_ptr;
1421                 for (i=0;i<irq_ptr->no_output_qs;i++) {
1422                         oq = irq_ptr->output_qs[i];
1423                         if (!qdio_is_outbound_q_done(oq)) {
1424                                 qdio_perf_stat_dec(&perf_stats.tl_runs);
1425                                 __qdio_outbound_processing(oq);
1426                         }
1427                 }
1428         }
1429
1430         if (!qdio_has_inbound_q_moved(q))
1431                 goto out;
1432
1433         qdio_kick_inbound_handler(q);
1434         if (tiqdio_is_inbound_q_done(q))
1435                 if (!qdio_stop_polling(q)) {
1436                         /* 
1437                          * we set the flags to get into the stuff next time,
1438                          * see also comment in qdio_stop_polling 
1439                          */
1440                         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1441                         tiqdio_sched_tl();
1442                 }
1443 out:
1444         qdio_release_q(q);
1445 }
1446
1447 static void
1448 tiqdio_inbound_processing(unsigned long q)
1449 {
1450         __tiqdio_inbound_processing((struct qdio_q *) q,
1451                                     atomic_read(&spare_indicator_usecount));
1452 }
1453
1454 static void
1455 __qdio_inbound_processing(struct qdio_q *q)
1456 {
1457         int q_laps=0;
1458
1459         QDIO_DBF_TEXT4(0,trace,"qinproc");
1460         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1461
1462         if (unlikely(qdio_reserve_q(q))) {
1463                 qdio_release_q(q);
1464                 qdio_perf_stat_inc(&perf_stats.inbound_tl_runs_resched);
1465                 /* as we're sissies, we'll check next time */
1466                 if (likely(!atomic_read(&q->is_in_shutdown))) {
1467                         qdio_mark_q(q);
1468                         QDIO_DBF_TEXT4(0,trace,"busy,agn");
1469                 }
1470                 return;
1471         }
1472         qdio_perf_stat_inc(&perf_stats.inbound_tl_runs);
1473         qdio_perf_stat_inc(&perf_stats.tl_runs);
1474
1475 again:
1476         if (qdio_has_inbound_q_moved(q)) {
1477                 qdio_kick_inbound_handler(q);
1478                 if (!qdio_stop_polling(q)) {
1479                         q_laps++;
1480                         if (q_laps<QDIO_Q_LAPS) 
1481                                 goto again;
1482                 }
1483                 qdio_mark_q(q);
1484         } else {
1485                 if (!qdio_is_inbound_q_done(q)) 
1486                         /* means poll time is not yet over */
1487                         qdio_mark_q(q);
1488         }
1489
1490         qdio_release_q(q);
1491 }
1492
1493 static void
1494 qdio_inbound_processing(unsigned long q)
1495 {
1496         __qdio_inbound_processing((struct qdio_q *) q);
1497 }
1498
1499 /************************* MAIN ROUTINES *******************************/
1500
1501 #ifdef QDIO_USE_PROCESSING_STATE
1502 static int
1503 tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1504 {
1505         if (!q) {
1506                 tiqdio_sched_tl();
1507                 return 0;
1508         }
1509
1510         /* 
1511          * under VM, we have not used the PROCESSING state, so no
1512          * need to stop polling 
1513          */
1514         if (q->siga_sync)
1515                 return 2;
1516
1517         if (unlikely(qdio_reserve_q(q))) {
1518                 qdio_release_q(q);
1519                 qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
1520                 /* 
1521                  * as we might just be about to stop polling, we make
1522                  * sure that we check again at least once more 
1523                  */
1524                 
1525                 /* 
1526                  * sanity -- we'd get here without setting the
1527                  * dev st chg ind 
1528                  */
1529                 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1530                 tiqdio_sched_tl();
1531                 return 0;
1532         }
1533         if (qdio_stop_polling(q)) {
1534                 qdio_release_q(q);
1535                 return 2;
1536         }               
1537         if (q_laps<QDIO_Q_LAPS-1) {
1538                 qdio_release_q(q);
1539                 return 3;
1540         }
1541         /* 
1542          * we set the flags to get into the stuff
1543          * next time, see also comment in qdio_stop_polling 
1544          */
1545         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1546         tiqdio_sched_tl();
1547         qdio_release_q(q);
1548         return 1;
1549         
1550 }
1551 #endif /* QDIO_USE_PROCESSING_STATE */
1552
1553 static void
1554 tiqdio_inbound_checks(void)
1555 {
1556         struct qdio_q *q;
1557         int spare_ind_was_set=0;
1558 #ifdef QDIO_USE_PROCESSING_STATE
1559         int q_laps=0;
1560 #endif /* QDIO_USE_PROCESSING_STATE */
1561
1562         QDIO_DBF_TEXT4(0,trace,"iqdinbck");
1563         QDIO_DBF_TEXT5(0,trace,"iqlocsum");
1564
1565 #ifdef QDIO_USE_PROCESSING_STATE
1566 again:
1567 #endif /* QDIO_USE_PROCESSING_STATE */
1568
1569         /* when the spare indicator is used and set, save that and clear it */
1570         if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
1571                 spare_ind_was_set = 1;
1572                 tiqdio_clear_summary_bit((__u32*)&spare_indicator);
1573         }
1574
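        /* walk the circular list of thinint input queues once */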
1575         q=(struct qdio_q*)tiq_list;
1576         do {
1577                 if (!q)
1578                         break;
1579                 __tiqdio_inbound_processing(q, spare_ind_was_set);
1580                 q=(struct qdio_q*)q->list_next;
1581         } while (q!=(struct qdio_q*)tiq_list);
1582
1583 #ifdef QDIO_USE_PROCESSING_STATE
1584         q=(struct qdio_q*)tiq_list;
1585         do {
1586                 int ret;
1587
1588                 ret = tiqdio_reset_processing_state(q, q_laps);
1589                 switch (ret) {
1590                 case 0:
1591                         return;
1592                 case 1:
1593                         q_laps++;
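                        /* fall through */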
1594                 case 2:
1595                         q = (struct qdio_q*)q->list_next;
1596                         break;
1597                 default:
1598                         q_laps++;
1599                         goto again;
1600                 }
1601         } while (q!=(struct qdio_q*)tiq_list);
1602 #endif /* QDIO_USE_PROCESSING_STATE */
1603 }
1604
1605 static void
1606 tiqdio_tl(unsigned long data)
1607 {
1608         QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
1609
1610         qdio_perf_stat_inc(&perf_stats.tl_runs);
1611
1612         tiqdio_inbound_checks();
1613 }
1614
1615 /********************* GENERAL HELPER_ROUTINES ***********************/
1616
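/* free the slib page and the queue structure of every allocated queue, then
 * the qdr page and the qdio_irq page itself */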
1617 static void
1618 qdio_release_irq_memory(struct qdio_irq *irq_ptr)
1619 {
1620         int i;
1621         struct qdio_q *q;
1622
1623         for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
1624                 q = irq_ptr->input_qs[i];
1625                 if (q) {
1626                         free_page((unsigned long) q->slib);
1627                         kmem_cache_free(qdio_q_cache, q);
1628                 }
1629                 q = irq_ptr->output_qs[i];
1630                 if (q) {
1631                         free_page((unsigned long) q->slib);
1632                         kmem_cache_free(qdio_q_cache, q);
1633                 }
1634         }
1635         free_page((unsigned long) irq_ptr->qdr);
1636         free_page((unsigned long) irq_ptr);
1637 }
1638
1639 static void
1640 qdio_set_impl_params(struct qdio_irq *irq_ptr,
1641                      unsigned int qib_param_field_format,
1642                      /* pointer to 128 bytes or NULL, if no param field */
1643                      unsigned char *qib_param_field,
1644                      /* pointer to no_queues*128 words of data or NULL */
1645                      unsigned int no_input_qs,
1646                      unsigned int no_output_qs,
1647                      unsigned long *input_slib_elements,
1648                      unsigned long *output_slib_elements)
1649 {
1650         int i,j;
1651
1652         if (!irq_ptr)
1653                 return;
1654
1655         irq_ptr->qib.pfmt=qib_param_field_format;
1656         if (qib_param_field)
1657                 memcpy(irq_ptr->qib.parm,qib_param_field,
1658                        QDIO_MAX_BUFFERS_PER_Q);
1659
1660         if (input_slib_elements)
1661                 for (i=0;i<no_input_qs;i++) {
1662                         for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1663                                 irq_ptr->input_qs[i]->slib->slibe[j].parms=
1664                                         input_slib_elements[
1665                                                 i*QDIO_MAX_BUFFERS_PER_Q+j];
1666                 }
1667         if (output_slib_elements)
1668                 for (i=0;i<no_output_qs;i++) {
1669                         for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1670                                 irq_ptr->output_qs[i]->slib->slibe[j].parms=
1671                                         output_slib_elements[
1672                                                 i*QDIO_MAX_BUFFERS_PER_Q+j];
1673                 }
1674 }
1675
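/* allocate the queue structures and one slib page per queue; on failure the
 * caller releases whatever was already set up via qdio_release_irq_memory */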
1676 static int
1677 qdio_alloc_qs(struct qdio_irq *irq_ptr,
1678               int no_input_qs, int no_output_qs)
1679 {
1680         int i;
1681         struct qdio_q *q;
1682
1683         for (i = 0; i < no_input_qs; i++) {
1684                 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
1685                 if (!q)
1686                         return -ENOMEM;
1687                 memset(q, 0, sizeof(*q));
1688
1689                 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
1690                 if (!q->slib) {
1691                         kmem_cache_free(qdio_q_cache, q);
1692                         return -ENOMEM;
1693                 }
1694                 irq_ptr->input_qs[i]=q;
1695         }
1696
1697         for (i = 0; i < no_output_qs; i++) {
1698                 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
1699                 if (!q)
1700                         return -ENOMEM;
1701                 memset(q, 0, sizeof(*q));
1702
1703                 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
1704                 if (!q->slib) {
1705                         kmem_cache_free(qdio_q_cache, q);
1706                         return -ENOMEM;
1707                 }
1708                 irq_ptr->output_qs[i]=q;
1709         }
1710         return 0;
1711 }
1712
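/* initialize the previously allocated queues: hook up the SBALs, fill SL,
 * SLIB and SLSB, set up the per-queue tasklets (plus the outbound timers)
 * and store the handlers and parameters passed in by the caller */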
1713 static void
1714 qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1715              int no_input_qs, int no_output_qs,
1716              qdio_handler_t *input_handler,
1717              qdio_handler_t *output_handler,
1718              unsigned long int_parm,int q_format,
1719              unsigned long flags,
1720              void **inbound_sbals_array,
1721              void **outbound_sbals_array)
1722 {
1723         struct qdio_q *q;
1724         int i,j;
1725         char dbf_text[20]; /* see qdio_initialize */
1726         void *ptr;
1727         int available;
1728
1729         sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no);
1730         QDIO_DBF_TEXT0(0,setup,dbf_text);
1731         for (i=0;i<no_input_qs;i++) {
1732                 q=irq_ptr->input_qs[i];
1733
1734                 memset(q,0,((char*)&q->slib)-((char*)q));
1735                 sprintf(dbf_text,"in-q%4x",i);
1736                 QDIO_DBF_TEXT0(0,setup,dbf_text);
1737                 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
1738
1739                 memset(q->slib,0,PAGE_SIZE);
1740                 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
1741
1742                 available=0;
1743
1744                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1745                         q->sbal[j]=*(inbound_sbals_array++);
1746
1747                 q->queue_type=q_format;
1748                 q->int_parm=int_parm;
1749                 q->schid = irq_ptr->schid;
1750                 q->irq_ptr = irq_ptr;
1751                 q->cdev = cdev;
1752                 q->mask=1<<(31-i);
1753                 q->q_no=i;
1754                 q->is_input_q=1;
1755                 q->first_to_check=0;
1756                 q->last_move_ftc=0;
1757                 q->handler=input_handler;
1758                 q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;
1759
1760                 /* q->is_thinint_q isn't valid at this time, but
1761                  * irq_ptr->is_thinint_irq is
1762                  */
1763                 if (irq_ptr->is_thinint_irq)
1764                         tasklet_init(&q->tasklet, tiqdio_inbound_processing,
1765                                      (unsigned long) q);
1766                 else
1767                         tasklet_init(&q->tasklet, qdio_inbound_processing,
1768                                      (unsigned long) q);
1769
1770                 /* actually this is not used for inbound queues. yet. */
1771                 atomic_set(&q->busy_siga_counter,0);
1772                 q->timing.busy_start=0;
1773
1774 /*              for (j=0;j<QDIO_STATS_NUMBER;j++)
1775                         q->timing.last_transfer_times[j]=(qdio_get_micros()/
1776                                                           QDIO_STATS_NUMBER)*j;
1777                 q->timing.last_transfer_index=QDIO_STATS_NUMBER-1;
1778 */
1779
1780                 /* fill in slib */
1781                 if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba=
1782                                  (unsigned long)(q->slib);
1783                 q->slib->sla=(unsigned long)(q->sl);
1784                 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
1785
1786                 /* fill in sl */
1787                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1788                         q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
1789
1790                 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
1791                 ptr=(void*)q->sl;
1792                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1793                 ptr=(void*)&q->slsb;
1794                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1795                 ptr=(void*)q->sbal[0];
1796                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1797
1798                 /* fill in slsb */
1799                 if (!irq_ptr->is_qebsm) {
1800                         unsigned int count = 1;
1801                         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1802                                 set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
1803                 }
1804         }
1805
1806         for (i=0;i<no_output_qs;i++) {
1807                 q=irq_ptr->output_qs[i];
1808                 memset(q,0,((char*)&q->slib)-((char*)q));
1809
1810                 sprintf(dbf_text,"outq%4x",i);
1811                 QDIO_DBF_TEXT0(0,setup,dbf_text);
1812                 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
1813
1814                 memset(q->slib,0,PAGE_SIZE);
1815                 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
1816
1817                 available=0;
1818                 
1819                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1820                         q->sbal[j]=*(outbound_sbals_array++);
1821
1822                 q->queue_type=q_format;
1823                 if ((q->queue_type == QDIO_IQDIO_QFMT) &&
1824                     (no_output_qs > 1) &&
1825                     (i == no_output_qs-1))
1826                         q->queue_type = QDIO_IQDIO_QFMT_ASYNCH;
1827                 q->int_parm=int_parm;
1828                 q->is_input_q=0;
1829                 q->is_pci_out = 0;
1830                 q->schid = irq_ptr->schid;
1831                 q->cdev = cdev;
1832                 q->irq_ptr = irq_ptr;
1833                 q->mask=1<<(31-i);
1834                 q->q_no=i;
1835                 q->first_to_check=0;
1836                 q->last_move_ftc=0;
1837                 q->handler=output_handler;
1838
1839                 tasklet_init(&q->tasklet, qdio_outbound_processing,
1840                              (unsigned long) q);
1841                 setup_timer(&q->timer, qdio_outbound_processing,
1842                             (unsigned long) q);
1843
1844                 atomic_set(&q->busy_siga_counter,0);
1845                 q->timing.busy_start=0;
1846
1847                 /* fill in slib */
1848                 if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
1849                                  (unsigned long)(q->slib);
1850                 q->slib->sla=(unsigned long)(q->sl);
1851                 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
1852
1853                 /* fill in sl */
1854                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1855                         q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
1856
1857                 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
1858                 ptr=(void*)q->sl;
1859                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1860                 ptr=(void*)&q->slsb;
1861                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1862                 ptr=(void*)q->sbal[0];
1863                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1864
1865                 /* fill in slsb */
1866                 if (!irq_ptr->is_qebsm) {
1867                         unsigned int count = 1;
1868                         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1869                                 set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
1870                 }
1871         }
1872 }
1873
1874 static void
1875 qdio_fill_thresholds(struct qdio_irq *irq_ptr,
1876                      unsigned int no_input_qs,
1877                      unsigned int no_output_qs,
1878                      unsigned int min_input_threshold,
1879                      unsigned int max_input_threshold,
1880                      unsigned int min_output_threshold,
1881                      unsigned int max_output_threshold)
1882 {
1883         int i;
1884         struct qdio_q *q;
1885
1886         for (i=0;i<no_input_qs;i++) {
1887                 q=irq_ptr->input_qs[i];
1888                 q->timing.threshold=max_input_threshold;
1889 /*              for (j=0;j<QDIO_STATS_CLASSES;j++) {
1890                         q->threshold_classes[j].threshold=
1891                                 min_input_threshold+
1892                                 (max_input_threshold-min_input_threshold)/
1893                                 QDIO_STATS_CLASSES;
1894                 }
1895                 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1896         }
1897         for (i=0;i<no_output_qs;i++) {
1898                 q=irq_ptr->output_qs[i];
1899                 q->timing.threshold=max_output_threshold;
1900 /*              for (j=0;j<QDIO_STATS_CLASSES;j++) {
1901                         q->threshold_classes[j].threshold=
1902                                 min_output_threshold+
1903                                 (max_output_threshold-min_output_threshold)/
1904                                 QDIO_STATS_CLASSES;
1905                 }
1906                 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1907         }
1908 }
1909
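/* adapter interrupt (thin interrupt) handler: clear the global summary via
 * SVS unless the time delay disablement facility makes that unnecessary,
 * then scan all thin-interrupt input queues */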
1910 static void tiqdio_thinint_handler(void *ind, void *drv_data)
1911 {
1912         QDIO_DBF_TEXT4(0,trace,"thin_int");
1913
1914         qdio_perf_stat_inc(&perf_stats.thinints);
1915
1916         /* SVS only when needed:
1917          * issue SVS to benefit from iqdio interrupt avoidance
1918          * (SVS clears AISOI) */
1919         if (!omit_svs)
1920                 tiqdio_clear_global_summary();
1921
1922         tiqdio_inbound_checks();
1923 }
1924
1925 static void
1926 qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1927 {
1928         int i;
1929 #ifdef CONFIG_QDIO_DEBUG
1930         char dbf_text[15];
1931
1932         QDIO_DBF_TEXT5(0,trace,"newstate");
1933         sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
1934         QDIO_DBF_TEXT5(0,trace,dbf_text);
1935 #endif /* CONFIG_QDIO_DEBUG */
1936
1937         irq_ptr->state=state;
1938         for (i=0;i<irq_ptr->no_input_qs;i++)
1939                 irq_ptr->input_qs[i]->state=state;
1940         for (i=0;i<irq_ptr->no_output_qs;i++)
1941                 irq_ptr->output_qs[i]->state=state;
1942         mb();
1943 }
1944
1945 static void
1946 qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1947 {
1948         char dbf_text[15];
1949
1950         if (irb->esw.esw0.erw.cons) {
1951                 sprintf(dbf_text,"sens%4x",schid.sch_no);
1952                 QDIO_DBF_TEXT2(1,trace,dbf_text);
1953                 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
1954
1955                 QDIO_PRINT_WARN("sense data available on qdio channel.\n");
1956                 QDIO_HEXDUMP16(WARN,"irb: ",irb);
1957                 QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw);
1958         }
1959                 
1960 }
1961
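/* PCI interrupt: process all input queues, either directly or via their
 * tasklets, and, if the adapter signals outbound PCIs as well, also process
 * the outbound queues that are not yet done */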
1962 static void
1963 qdio_handle_pci(struct qdio_irq *irq_ptr)
1964 {
1965         int i;
1966         struct qdio_q *q;
1967
1968         qdio_perf_stat_inc(&perf_stats.pcis);
1969         for (i=0;i<irq_ptr->no_input_qs;i++) {
1970                 q=irq_ptr->input_qs[i];
1971                 if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
1972                         qdio_mark_q(q);
1973                 else {
1974                         qdio_perf_stat_dec(&perf_stats.tl_runs);
1975                         __qdio_inbound_processing(q);
1976                 }
1977         }
1978         if (!irq_ptr->hydra_gives_outbound_pcis)
1979                 return;
1980         for (i=0;i<irq_ptr->no_output_qs;i++) {
1981                 q=irq_ptr->output_qs[i];
1982                 if (qdio_is_outbound_q_done(q))
1983                         continue;
1984                 qdio_perf_stat_dec(&perf_stats.tl_runs);
1985                 if (!irq_ptr->sync_done_on_outb_pcis)
1986                         SYNC_MEMORY;
1987                 __qdio_outbound_processing(q);
1988         }
1989 }
1990
1991 static void qdio_establish_handle_irq(struct ccw_device*, int, int);
1992
1993 static void
1994 qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
1995                            int cstat, int dstat)
1996 {
1997         struct qdio_irq *irq_ptr;
1998         struct qdio_q *q;
1999         char dbf_text[15];
2000
2001         irq_ptr = cdev->private->qdio_data;
2002
2003         QDIO_DBF_TEXT2(1, trace, "ick2");
2004         sprintf(dbf_text,"%s", cdev->dev.bus_id);
2005         QDIO_DBF_TEXT2(1,trace,dbf_text);
2006         QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
2007         QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2008         QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2009         QDIO_PRINT_ERR("received check condition on activate " \
2010                        "queues on device %s (cs=x%x, ds=x%x).\n",
2011                        cdev->dev.bus_id, cstat, dstat);
2012         if (irq_ptr->no_input_qs) {
2013                 q=irq_ptr->input_qs[0];
2014         } else if (irq_ptr->no_output_qs) {
2015                 q=irq_ptr->output_qs[0];
2016         } else {
2017                 QDIO_PRINT_ERR("oops... no queue registered for device %s!?\n",
2018                                cdev->dev.bus_id);
2019                 goto omit_handler_call;
2020         }
2021         q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
2022                    QDIO_STATUS_LOOK_FOR_ERROR,
2023                    0,0,0,-1,-1,q->int_parm);
2024 omit_handler_call:
2025         qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);
2026
2027 }
2028
2029 static void
2030 qdio_call_shutdown(struct work_struct *work)
2031 {
2032         struct ccw_device_private *priv;
2033         struct ccw_device *cdev;
2034
2035         priv = container_of(work, struct ccw_device_private, kick_work);
2036         cdev = priv->cdev;
2037         qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
2038         put_device(&cdev->dev);
2039 }
2040
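/* called from qdio_handler when the common I/O layer reports -ETIMEDOUT;
 * the reaction depends on the state the subchannel was in */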
2041 static void
2042 qdio_timeout_handler(struct ccw_device *cdev)
2043 {
2044         struct qdio_irq *irq_ptr;
2045         char dbf_text[15];
2046
2047         QDIO_DBF_TEXT2(0, trace, "qtoh");
2048         sprintf(dbf_text, "%s", cdev->dev.bus_id);
2049         QDIO_DBF_TEXT2(0, trace, dbf_text);
2050
2051         irq_ptr = cdev->private->qdio_data;
2052         sprintf(dbf_text, "state:%d", irq_ptr->state);
2053         QDIO_DBF_TEXT2(0, trace, dbf_text);
2054
2055         switch (irq_ptr->state) {
2056         case QDIO_IRQ_STATE_INACTIVE:
2057                 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
2058                                irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2059                 QDIO_DBF_TEXT2(1,setup,"eq:timeo");
2060                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2061                 break;
2062         case QDIO_IRQ_STATE_CLEANUP:
2063                 QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
2064                                 "irq=0.%x.%x.\n",
2065                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2066                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2067                 break;
2068         case QDIO_IRQ_STATE_ESTABLISHED:
2069         case QDIO_IRQ_STATE_ACTIVE:
2070                 /* I/O has been terminated by common I/O layer. */
2071                 QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
2072                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2073                 QDIO_DBF_TEXT2(1, trace, "cio:term");
2074                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
2075                 if (get_device(&cdev->dev)) {
2076                         /* Can't call shutdown from interrupt context. */
2077                         PREPARE_WORK(&cdev->private->kick_work,
2078                                      qdio_call_shutdown);
2079                         queue_work(ccw_device_work, &cdev->private->kick_work);
2080                 }
2081                 break;
2082         default:
2083                 BUG();
2084         }
2085         ccw_device_set_timeout(cdev, 0);
2086         wake_up(&cdev->private->wait_q);
2087 }
2088
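/* interrupt handler installed on the ccw device while qdio owns it; the
 * original handler is restored in qdio_shutdown */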
2089 static void
2090 qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2091 {
2092         struct qdio_irq *irq_ptr;
2093         int cstat,dstat;
2094         char dbf_text[15];
2095
2096 #ifdef CONFIG_QDIO_DEBUG
2097         QDIO_DBF_TEXT4(0, trace, "qint");
2098         sprintf(dbf_text, "%s", cdev->dev.bus_id);
2099         QDIO_DBF_TEXT4(0, trace, dbf_text);
2100 #endif /* CONFIG_QDIO_DEBUG */
2101         
2102         if (!intparm) {
2103                 QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
2104                                   "handler, device %s\n", cdev->dev.bus_id);
2105                 return;
2106         }
2107
2108         irq_ptr = cdev->private->qdio_data;
2109         if (!irq_ptr) {
2110                 QDIO_DBF_TEXT2(1, trace, "uint");
2111                 sprintf(dbf_text,"%s", cdev->dev.bus_id);
2112                 QDIO_DBF_TEXT2(1,trace,dbf_text);
2113                 QDIO_PRINT_ERR("received interrupt on unused device %s!\n",
2114                                cdev->dev.bus_id);
2115                 return;
2116         }
2117
2118         if (IS_ERR(irb)) {
2119                 /* Currently running i/o is in error. */
2120                 switch (PTR_ERR(irb)) {
2121                 case -EIO:
2122                         QDIO_PRINT_ERR("i/o error on device %s\n",
2123                                        cdev->dev.bus_id);
2124                         return;
2125                 case -ETIMEDOUT:
2126                         qdio_timeout_handler(cdev);
2127                         return;
2128                 default:
2129                         QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
2130                                        PTR_ERR(irb), cdev->dev.bus_id);
2131                         return;
2132                 }
2133         }
2134
2135         qdio_irq_check_sense(irq_ptr->schid, irb);
2136
2137 #ifdef CONFIG_QDIO_DEBUG
2138         sprintf(dbf_text, "state:%d", irq_ptr->state);
2139         QDIO_DBF_TEXT4(0, trace, dbf_text);
2140 #endif /* CONFIG_QDIO_DEBUG */
2141
2142         cstat = irb->scsw.cstat;
2143         dstat = irb->scsw.dstat;
2144
2145         switch (irq_ptr->state) {
2146         case QDIO_IRQ_STATE_INACTIVE:
2147                 qdio_establish_handle_irq(cdev, cstat, dstat);
2148                 break;
2149
2150         case QDIO_IRQ_STATE_CLEANUP:
2151                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2152                 break;
2153
2154         case QDIO_IRQ_STATE_ESTABLISHED:
2155         case QDIO_IRQ_STATE_ACTIVE:
2156                 if (cstat & SCHN_STAT_PCI) {
2157                         qdio_handle_pci(irq_ptr);
2158                         break;
2159                 }
2160
2161                 if ((cstat&~SCHN_STAT_PCI)||dstat) {
2162                         qdio_handle_activate_check(cdev, intparm, cstat, dstat);
2163                         break;
2164                 }
2165         default:
2166                 QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
2167                                "device %s?!\n",
2168                                irq_ptr->state, cdev->dev.bus_id);
2169         }
2170         wake_up(&cdev->private->wait_q);
2171
2172 }
2173
2174 int
2175 qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
2176                  unsigned int queue_number)
2177 {
2178         int cc = 0;
2179         struct qdio_q *q;
2180         struct qdio_irq *irq_ptr;
2181         void *ptr;
2182 #ifdef CONFIG_QDIO_DEBUG
2183         char dbf_text[15]="SyncXXXX";
2184 #endif
2185
2186         irq_ptr = cdev->private->qdio_data;
2187         if (!irq_ptr)
2188                 return -ENODEV;
2189
2190 #ifdef CONFIG_QDIO_DEBUG
2191         *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
2192         QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
2193         *((int*)(&dbf_text[0]))=flags;
2194         *((int*)(&dbf_text[4]))=queue_number;
2195         QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
2196 #endif /* CONFIG_QDIO_DEBUG */
2197
2198         if (flags&QDIO_FLAG_SYNC_INPUT) {
2199                 q=irq_ptr->input_qs[queue_number];
2200                 if (!q)
2201                         return -EINVAL;
2202                 if (!(irq_ptr->is_qebsm))
2203                         cc = do_siga_sync(q->schid, 0, q->mask);
2204         } else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
2205                 q=irq_ptr->output_qs[queue_number];
2206                 if (!q)
2207                         return -EINVAL;
2208                 if (!(irq_ptr->is_qebsm))
2209                         cc = do_siga_sync(q->schid, q->mask, 0);
2210         } else 
2211                 return -EINVAL;
2212
2213         ptr=&cc;
2214         if (cc)
2215                 QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int));
2216
2217         return cc;
2218 }
2219
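/* issue the CHSC store-subchannel-QDIO-data command for the given
 * subchannel; on success *ssqd_area points to the response block, which the
 * caller has to return to qdio_mempool_scssc */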
2220 static int
2221 qdio_get_ssqd_information(struct subchannel_id *schid,
2222                           struct qdio_chsc_ssqd **ssqd_area)
2223 {
2224         int result;
2225
2226         QDIO_DBF_TEXT0(0, setup, "getssqd");
2227         *ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2228         if (!*ssqd_area) {
2229                 QDIO_PRINT_WARN("Could not get memory for chsc on sch x%x.\n",
2230                                 schid->sch_no);
2231                 return -ENOMEM;
2232         }
2233
2234         (*ssqd_area)->request = (struct chsc_header) {
2235                 .length = 0x0010,
2236                 .code   = 0x0024,
2237         };
2238         (*ssqd_area)->first_sch = schid->sch_no;
2239         (*ssqd_area)->last_sch = schid->sch_no;
2240         (*ssqd_area)->ssid = schid->ssid;
2241         result = chsc(*ssqd_area);
2242
2243         if (result) {
2244                 QDIO_PRINT_WARN("CHSC returned cc %i on sch 0.%x.%x.\n",
2245                                 result, schid->ssid, schid->sch_no);
2246                 goto out;
2247         }
2248
2249         if ((*ssqd_area)->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
2250                 QDIO_PRINT_WARN("CHSC response is 0x%x on sch 0.%x.%x.\n",
2251                                 (*ssqd_area)->response.code,
2252                                 schid->ssid, schid->sch_no);
2253                 goto out;
2254         }
2255         if (!((*ssqd_area)->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2256             !((*ssqd_area)->flags & CHSC_FLAG_VALIDITY) ||
2257             ((*ssqd_area)->sch != schid->sch_no)) {
2258                 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
2259                                 "using all SIGAs.\n",
2260                                 schid->ssid, schid->sch_no);
2261                 goto out;
2262         }
2263         return 0;
2264 out:
2265         return -EINVAL;
2266 }
2267
2268 int
2269 qdio_get_ssqd_pct(struct ccw_device *cdev)
2270 {
2271         struct qdio_chsc_ssqd *ssqd_area;
2272         struct subchannel_id schid;
2273         char dbf_text[15];
2274         int rc;
2275         int pct = 0;
2276
2277         QDIO_DBF_TEXT0(0, setup, "getpct");
2278         schid = ccw_device_get_subchannel_id(cdev);
2279         rc = qdio_get_ssqd_information(&schid, &ssqd_area);
2280         if (!rc)
2281                 pct = (int)ssqd_area->pct;
2282         if (rc != -ENOMEM)
2283                 mempool_free(ssqd_area, qdio_mempool_scssc);
2284         sprintf(dbf_text, "pct: %d", pct);
2285         QDIO_DBF_TEXT2(0, setup, dbf_text);
2286         return pct;
2287 }
2288 EXPORT_SYMBOL(qdio_get_ssqd_pct);
2289
2290 static void
2291 qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned long token)
2292 {
2293         struct qdio_q *q;
2294         int i;
2295         unsigned int count, start_buf;
2296         char dbf_text[15];
2297
2298         /* check if QEBSM is disabled */
2299         if (!(irq_ptr->is_qebsm) || !(irq_ptr->qdioac & 0x01)) {
2300                 irq_ptr->is_qebsm  = 0;
2301                 irq_ptr->sch_token = 0;
2302                 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2303                 QDIO_DBF_TEXT0(0,setup,"noV=V");
2304                 return;
2305         }
2306         irq_ptr->sch_token = token;
2307         /*input queue*/
2308         for (i = 0; i < irq_ptr->no_input_qs;i++) {
2309                 q = irq_ptr->input_qs[i];
2310                 count = QDIO_MAX_BUFFERS_PER_Q;
2311                 start_buf = 0;
2312                 set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
2313         }
2314         sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
2315         QDIO_DBF_TEXT0(0,setup,dbf_text);
2316         sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
2317         QDIO_DBF_TEXT0(0,setup,dbf_text);
2318         /*output queue*/
2319         for (i = 0; i < irq_ptr->no_output_qs; i++) {
2320                 q = irq_ptr->output_qs[i];
2321                 count = QDIO_MAX_BUFFERS_PER_Q;
2322                 start_buf = 0;
2323                 set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
2324         }
2325 }
2326
2327 static void
2328 qdio_get_ssqd_siga(struct qdio_irq *irq_ptr)
2329 {
2330         int rc;
2331         struct qdio_chsc_ssqd *ssqd_area;
2332
2333         QDIO_DBF_TEXT0(0,setup,"getssqd");
2334         irq_ptr->qdioac = 0;
2335         rc = qdio_get_ssqd_information(&irq_ptr->schid, &ssqd_area);
2336         if (rc) {
2337                 QDIO_PRINT_WARN("using all SIGAs for sch x%x.\n",
2338                         irq_ptr->schid.sch_no);
2339                 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2340                                   CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2341                                   CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2342                 irq_ptr->is_qebsm = 0;
2343         } else
2344                 irq_ptr->qdioac = ssqd_area->qdioac1;
2345
2346         qdio_check_subchannel_qebsm(irq_ptr, rc ? 0 : ssqd_area->sch_token);
2347         if (rc != -ENOMEM)
2348                 mempool_free(ssqd_area, qdio_mempool_scssc);
2349 }
2350
2351 static unsigned int
2352 tiqdio_check_chsc_availability(void)
2353 {
2354         char dbf_text[15];
2355
2356         if (!css_characteristics_avail)
2357                 return -EIO;
2358
2359         /* Check for bit 41. */
2360         if (!css_general_characteristics.aif) {
2361                 QDIO_PRINT_WARN("Adapter interruption facility not " \
2362                                 "installed.\n");
2363                 return -ENOENT;
2364         }
2365
2366         /* Check for bits 107 and 108. */
2367         if (!css_chsc_characteristics.scssc ||
2368             !css_chsc_characteristics.scsscf) {
2369                 QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
2370                                 "not available.\n");
2371                 return -ENOENT;
2372         }
2373
2374         /* Check for OSA/FCP thin interrupts (bit 67). */
2375         hydra_thinints = css_general_characteristics.aif_osa;
2376         sprintf(dbf_text,"hydrati%1x", hydra_thinints);
2377         QDIO_DBF_TEXT0(0,setup,dbf_text);
2378
2379 #ifdef CONFIG_64BIT
2380         /* Check for QEBSM support in general (bit 58). */
2381         is_passthrough = css_general_characteristics.qebsm;
2382 #endif
2383         sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
2384         QDIO_DBF_TEXT0(0,setup,dbf_text);
2385
2386         /* Check for the aif time delay disablement facility (bit 56). If
2387          * installed, omit SVS even under LPAR (good point by rick again) */
2388         omit_svs = css_general_characteristics.aif_tdd;
2389         sprintf(dbf_text,"omitsvs%1x", omit_svs);
2390         QDIO_DBF_TEXT0(0,setup,dbf_text);
2391         return 0;
2392 }
2393
2394
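/* tell the channel subsystem, via CHSC, where the summary indicator and the
 * device state change indicator for this thin-interrupt subchannel live;
 * with reset_to_zero set, both addresses are reset to 0 */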
2395 static unsigned int
2396 tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2397 {
2398         unsigned long real_addr_local_summary_bit;
2399         unsigned long real_addr_dev_st_chg_ind;
2400         void *ptr;
2401         char dbf_text[15];
2402
2403         unsigned int resp_code;
2404         int result;
2405
2406         struct {
2407                 struct chsc_header request;
2408                 u16 operation_code;
2409                 u16 reserved1;
2410                 u32 reserved2;
2411                 u32 reserved3;
2412                 u64 summary_indicator_addr;
2413                 u64 subchannel_indicator_addr;
2414                 u32 ks:4;
2415                 u32 kc:4;
2416                 u32 reserved4:21;
2417                 u32 isc:3;
2418                 u32 word_with_d_bit;
2419                 /* set to 0x10000000 to enable
2420                  * time delay disablement facility */
2421                 u32 reserved5;
2422                 struct subchannel_id schid;
2423                 u32 reserved6[1004];
2424                 struct chsc_header response;
2425                 u32 reserved7;
2426         } *scssc_area;
2427
2428         if (!irq_ptr->is_thinint_irq)
2429                 return -ENODEV;
2430
2431         if (reset_to_zero) {
2432                 real_addr_local_summary_bit=0;
2433                 real_addr_dev_st_chg_ind=0;
2434         } else {
2435                 real_addr_local_summary_bit=
2436                         virt_to_phys((volatile void *)tiqdio_ind);
2437                 real_addr_dev_st_chg_ind=
2438                         virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
2439         }
2440
2441         scssc_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2442         if (!scssc_area) {
2443                 QDIO_PRINT_WARN("No memory for setting indicators on " \
2444                                 "subchannel 0.%x.%x.\n",
2445                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2446                 return -ENOMEM;
2447         }
2448         scssc_area->request = (struct chsc_header) {
2449                 .length = 0x0fe0,
2450                 .code   = 0x0021,
2451         };
2452         scssc_area->operation_code = 0;
2453
2454         scssc_area->summary_indicator_addr = real_addr_local_summary_bit;
2455         scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind;
2456         scssc_area->ks = QDIO_STORAGE_KEY;
2457         scssc_area->kc = QDIO_STORAGE_KEY;
2458         scssc_area->isc = TIQDIO_THININT_ISC;
2459         scssc_area->schid = irq_ptr->schid;
2460         /* enables the time delay disablement facility. Don't care
2461          * whether it is really there (i.e. we haven't checked for
2462          * it) */
2463         if (css_general_characteristics.aif_tdd)
2464                 scssc_area->word_with_d_bit = 0x10000000;
2465         else
2466                 QDIO_PRINT_WARN("Time delay disablement facility " \
2467                                 "not available\n");
2468
2469         result = chsc(scssc_area);
2470         if (result) {
2471                 QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
2472                                 "cc=%i.\n",
2473                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
2474                 result = -EIO;
2475                 goto out;
2476         }
2477
2478         resp_code = scssc_area->response.code;
2479         if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
2480                 QDIO_PRINT_WARN("response upon setting indicators " \
2481                                 "is 0x%x.\n",resp_code);
2482                 sprintf(dbf_text,"sidR%4x",resp_code);
2483                 QDIO_DBF_TEXT1(0,trace,dbf_text);
2484                 QDIO_DBF_TEXT1(0,setup,dbf_text);
2485                 ptr=&scssc_area->response;
2486                 QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
2487                 result = -EIO;
2488                 goto out;
2489         }
2490
2491         QDIO_DBF_TEXT2(0,setup,"setscind");
2492         QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
2493                       sizeof(unsigned long));
2494         QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
2495         result = 0;
2496 out:
2497         mempool_free(scssc_area, qdio_mempool_scssc);
2498         return result;
2499
2500 }
2501
2502 static unsigned int
2503 tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2504 {
2505         unsigned int resp_code;
2506         int result;
2507         void *ptr;
2508         char dbf_text[15];
2509
2510         struct {
2511                 struct chsc_header request;
2512                 u16 operation_code;
2513                 u16 reserved1;
2514                 u32 reserved2;
2515                 u32 reserved3;
2516                 u32 reserved4[2];
2517                 u32 delay_target;
2518                 u32 reserved5[1009];
2519                 struct chsc_header response;
2520                 u32 reserved6;
2521         } *scsscf_area;
2522
2523         if (!irq_ptr->is_thinint_irq)
2524                 return -ENODEV;
2525
2526         scsscf_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2527         if (!scsscf_area) {
2528                 QDIO_PRINT_WARN("No memory for setting delay target on " \
2529                                 "subchannel 0.%x.%x.\n",
2530                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2531                 return -ENOMEM;
2532         }
2533         scsscf_area->request = (struct chsc_header) {
2534                 .length = 0x0fe0,
2535                 .code   = 0x1027,
2536         };
2537
2538         scsscf_area->delay_target = delay_target<<16;
2539
2540         result=chsc(scsscf_area);
2541         if (result) {
2542                 QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
2543                                 "cc=%i. Continuing.\n",
2544                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2545                                 result);
2546                 result = -EIO;
2547                 goto out;
2548         }
2549
2550         resp_code = scsscf_area->response.code;
2551         if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
2552                 QDIO_PRINT_WARN("response upon setting delay target " \
2553                                 "is 0x%x. Continuing.\n",resp_code);
2554                 sprintf(dbf_text,"sdtR%4x",resp_code);
2555                 QDIO_DBF_TEXT1(0,trace,dbf_text);
2556                 QDIO_DBF_TEXT1(0,setup,dbf_text);
2557                 ptr=&scsscf_area->response;
2558                 QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
2559         }
2560         QDIO_DBF_TEXT2(0,trace,"delytrgt");
2561         QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
2562         result = 0; /* not critical */
2563 out:
2564         mempool_free(scsscf_area, qdio_mempool_scssc);
2565         return result;
2566 }
2567
2568 int
2569 qdio_cleanup(struct ccw_device *cdev, int how)
2570 {
2571         struct qdio_irq *irq_ptr;
2572         char dbf_text[15];
2573         int rc;
2574
2575         irq_ptr = cdev->private->qdio_data;
2576         if (!irq_ptr)
2577                 return -ENODEV;
2578
2579         sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
2580         QDIO_DBF_TEXT1(0,trace,dbf_text);
2581         QDIO_DBF_TEXT0(0,setup,dbf_text);
2582
2583         rc = qdio_shutdown(cdev, how);
2584         if ((rc == 0) || (rc == -EINPROGRESS))
2585                 rc = qdio_free(cdev);
2586         return rc;
2587 }
2588
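/* tear down the queues on a subchannel: kill the tasklets, wait for
 * outstanding processing to finish, clear or halt the device and restore
 * the original interrupt handler */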
2589 int
2590 qdio_shutdown(struct ccw_device *cdev, int how)
2591 {
2592         struct qdio_irq *irq_ptr;
2593         int i;
2594         int result = 0;
2595         int rc;
2596         unsigned long flags;
2597         int timeout;
2598         char dbf_text[15];
2599
2600         irq_ptr = cdev->private->qdio_data;
2601         if (!irq_ptr)
2602                 return -ENODEV;
2603
2604         down(&irq_ptr->setting_up_sema);
2605
2606         sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
2607         QDIO_DBF_TEXT1(0,trace,dbf_text);
2608         QDIO_DBF_TEXT0(0,setup,dbf_text);
2609
2610         /* mark all qs as uninteresting */
2611         for (i=0;i<irq_ptr->no_input_qs;i++)
2612                 atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1);
2613
2614         for (i=0;i<irq_ptr->no_output_qs;i++)
2615                 atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1);
2616
2617         tasklet_kill(&tiqdio_tasklet);
2618
2619         for (i=0;i<irq_ptr->no_input_qs;i++) {
2620                 qdio_unmark_q(irq_ptr->input_qs[i]);
2621                 tasklet_kill(&irq_ptr->input_qs[i]->tasklet);
2622                 wait_event_interruptible_timeout(cdev->private->wait_q,
2623                                                  !atomic_read(&irq_ptr->
2624                                                               input_qs[i]->
2625                                                               use_count),
2626                                                  QDIO_NO_USE_COUNT_TIMEOUT);
2627                 if (atomic_read(&irq_ptr->input_qs[i]->use_count))
2628                         result=-EINPROGRESS;
2629         }
2630
2631         for (i=0;i<irq_ptr->no_output_qs;i++) {
2632                 tasklet_kill(&irq_ptr->output_qs[i]->tasklet);
2633                 del_timer(&irq_ptr->output_qs[i]->timer);
2634                 wait_event_interruptible_timeout(cdev->private->wait_q,
2635                                                  !atomic_read(&irq_ptr->
2636                                                               output_qs[i]->
2637                                                               use_count),
2638                                                  QDIO_NO_USE_COUNT_TIMEOUT);
2639                 if (atomic_read(&irq_ptr->output_qs[i]->use_count))
2640                         result=-EINPROGRESS;
2641         }
2642
2643         /* cleanup subchannel */
2644         spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
2645         if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) {
2646                 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
2647                 timeout=QDIO_CLEANUP_CLEAR_TIMEOUT;
2648         } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) {
2649                 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2650                 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2651         } else { /* default behaviour */
2652                 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2653                 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2654         }
2655         if (rc == -ENODEV) {
2656                 /* Device is no longer present; no need to wait. */
2657                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2658                 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2659         } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) {
2660                 /*
2661                  * Whoever put another handler there has to cope with the
2662                  * interrupt themselves. Might happen if qdio_shutdown was
2663                  * called on already shut-down queues, but this shouldn't have
2664                  * bad side effects.
2665                  */
2666                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2667                 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2668         } else if (rc == 0) {
2669                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
2670                 ccw_device_set_timeout(cdev, timeout);
2671                 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
2672
2673                 wait_event(cdev->private->wait_q,
2674                            irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
2675                            irq_ptr->state == QDIO_IRQ_STATE_ERR);
2676         } else {
2677                 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
2678                                 "device %s\n", rc, cdev->dev.bus_id);
2679                 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2680                 result = rc;
2681                 goto out;
2682         }
2683         if (irq_ptr->is_thinint_irq) {
2684                 qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
2685                 tiqdio_set_subchannel_ind(irq_ptr,1); 
2686                 /* reset adapter interrupt indicators */
2687         }
2688
2689         /* exchange int handlers, if necessary */
2690         if ((void*)cdev->handler == (void*)qdio_handler)
2691                 cdev->handler=irq_ptr->original_int_handler;
2692
2693         /* Ignore errors. */
2694         qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2695         ccw_device_set_timeout(cdev, 0);
2696 out:
2697         up(&irq_ptr->setting_up_sema);
2698         return result;
2699 }
2700
2701 int
2702 qdio_free(struct ccw_device *cdev)
2703 {
2704         struct qdio_irq *irq_ptr;
2705         char dbf_text[15];
2706
2707         irq_ptr = cdev->private->qdio_data;
2708         if (!irq_ptr)
2709                 return -ENODEV;
2710
2711         down(&irq_ptr->setting_up_sema);
2712
2713         sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
2714         QDIO_DBF_TEXT1(0,trace,dbf_text);
2715         QDIO_DBF_TEXT0(0,setup,dbf_text);
2716
2717         cdev->private->qdio_data = NULL;
2718
2719         up(&irq_ptr->setting_up_sema);
2720
2721         qdio_release_irq_memory(irq_ptr);
2722         module_put(THIS_MODULE);
2723         return 0;
2724 }
2725
2726 static void
2727 qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2728 {
2729         char dbf_text[20]; /* large enough in case a printf produces more than 8 chars */
2730
2731         sprintf(dbf_text,"qfmt:%x",init_data->q_format);
2732         QDIO_DBF_TEXT0(0,setup,dbf_text);
2733         QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
2734         sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
2735         QDIO_DBF_TEXT0(0,setup,dbf_text);
2736         QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*));
2737         QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*));
2738         QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*));
2739         sprintf(dbf_text,"miit%4x",init_data->min_input_threshold);
2740         QDIO_DBF_TEXT0(0,setup,dbf_text);
2741         sprintf(dbf_text,"mait%4x",init_data->max_input_threshold);
2742         QDIO_DBF_TEXT0(0,setup,dbf_text);
2743         sprintf(dbf_text,"miot%4x",init_data->min_output_threshold);
2744         QDIO_DBF_TEXT0(0,setup,dbf_text);
2745         sprintf(dbf_text,"maot%4x",init_data->max_output_threshold);
2746         QDIO_DBF_TEXT0(0,setup,dbf_text);
2747         sprintf(dbf_text,"niq:%4x",init_data->no_input_qs);
2748         QDIO_DBF_TEXT0(0,setup,dbf_text);
2749         sprintf(dbf_text,"noq:%4x",init_data->no_output_qs);
2750         QDIO_DBF_TEXT0(0,setup,dbf_text);
2751         QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*));
2752         QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*));
2753         QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long));
2754         QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long));
2755         QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*));
2756         QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
2757 }
2758
2759 static void
2760 qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2761 {
2762         irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
2763         irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2764
2765         irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib);
2766
2767         irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl);
2768
2769         irq_ptr->qdr->qdf0[i].slsba=
2770                 (unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]);
2771
2772         irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY;
2773         irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY;
2774         irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY;
2775         irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
2776 }
2777
2778 static void
2779 qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2780                                int j, int iqfmt)
2781 {
2782         irq_ptr->output_qs[i]->is_iqdio_q = iqfmt;
2783         irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2784
2785         irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib);
2786
2787         irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl);
2788
2789         irq_ptr->qdr->qdf0[i+j].slsba=
2790                 (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]);
2791
2792         irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY;
2793         irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY;
2794         irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY;
2795         irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY;
2796 }
2797
2798
2799 static void
2800 qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2801 {
2802         int i;
2803
2804         for (i=0;i<irq_ptr->no_input_qs;i++) {
2805                 irq_ptr->input_qs[i]->siga_sync=
2806                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2807                 irq_ptr->input_qs[i]->siga_in=
2808                         irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2809                 irq_ptr->input_qs[i]->siga_out=
2810                         irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2811                 irq_ptr->input_qs[i]->siga_sync_done_on_thinints=
2812                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2813                 irq_ptr->input_qs[i]->hydra_gives_outbound_pcis=
2814                         irq_ptr->hydra_gives_outbound_pcis;
2815                 irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis=
2816                         ((irq_ptr->qdioac&
2817                           (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2818                            CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2819                          (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2820                           CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2821
2822         }
2823 }
2824
2825 static void
2826 qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2827 {
2828         int i;
2829
2830         for (i=0;i<irq_ptr->no_output_qs;i++) {
2831                 irq_ptr->output_qs[i]->siga_sync=
2832                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2833                 irq_ptr->output_qs[i]->siga_in=
2834                         irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2835                 irq_ptr->output_qs[i]->siga_out=
2836                         irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2837                 irq_ptr->output_qs[i]->siga_sync_done_on_thinints=
2838                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2839                 irq_ptr->output_qs[i]->hydra_gives_outbound_pcis=
2840                         irq_ptr->hydra_gives_outbound_pcis;
2841                 irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis=
2842                         ((irq_ptr->qdioac&
2843                           (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2844                            CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2845                          (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2846                           CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2847
2848         }
2849 }
2850
2851 static int
2852 qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2853                                     int dstat)
2854 {
2855         char dbf_text[15];
2856         struct qdio_irq *irq_ptr;
2857
2858         irq_ptr = cdev->private->qdio_data;
2859
2860         if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
2861                 sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
2862                 QDIO_DBF_TEXT2(1,trace,dbf_text);
2863                 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2864                 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2865                 QDIO_PRINT_ERR("received check condition on establish " \
2866                                "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
2867                                irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2868                                cstat,dstat);
2869                 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
2870         }
2871         
2872         if (!(dstat & DEV_STAT_DEV_END)) {
2873                 QDIO_DBF_TEXT2(1,setup,"eq:no de");
2874                 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2875                 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2876                 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
2877                                "device end: dstat=%02x, cstat=%02x\n",
2878                                irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2879                                dstat, cstat);
2880                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2881                 return 1;
2882         }
2883
2884         if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
2885                 QDIO_DBF_TEXT2(1,setup,"eq:badio");
2886                 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2887                 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2888                 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
2889                                "the following devstat: dstat=%02x, "
2890                                "cstat=%02x\n", irq_ptr->schid.ssid,
2891                                irq_ptr->schid.sch_no, dstat, cstat);
2892                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2893                 return 1;
2894         }
2895         return 0;
2896 }
2897
2898 static void
2899 qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2900 {
2901         struct qdio_irq *irq_ptr;
2902         char dbf_text[15];
2903
2904         irq_ptr = cdev->private->qdio_data;
2905
2906         sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no);
2907         QDIO_DBF_TEXT0(0,setup,dbf_text);
2908         QDIO_DBF_TEXT0(0,trace,dbf_text);
2909
2910         if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) {
2911                 ccw_device_set_timeout(cdev, 0);
2912                 return;
2913         }
2914
2915         qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
2916         ccw_device_set_timeout(cdev, 0);
2917 }
2918
2919 int
2920 qdio_initialize(struct qdio_initialize *init_data)
2921 {
2922         int rc;
2923         char dbf_text[15];
2924
2925         sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no);
2926         QDIO_DBF_TEXT0(0,setup,dbf_text);
2927         QDIO_DBF_TEXT0(0,trace,dbf_text);
2928
2929         rc = qdio_allocate(init_data);
2930         if (rc == 0) {
2931                 rc = qdio_establish(init_data);
2932                 if (rc != 0)
2933                         qdio_free(init_data->cdev);
2934         }
2935
2936         return rc;
2937 }
2938
2939
2940 int
2941 qdio_allocate(struct qdio_initialize *init_data)
2942 {
2943         struct qdio_irq *irq_ptr;
2944         char dbf_text[15];
2945
2946         sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no);
2947         QDIO_DBF_TEXT0(0,setup,dbf_text);
2948         QDIO_DBF_TEXT0(0,trace,dbf_text);
2949         if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2950              (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2951              ((init_data->no_input_qs) && (!init_data->input_handler)) ||
2952              ((init_data->no_output_qs) && (!init_data->output_handler)) )
2953                 return -EINVAL;
2954
2955         if (!init_data->input_sbal_addr_array)
2956                 return -EINVAL;
2957
2958         if (!init_data->output_sbal_addr_array)
2959                 return -EINVAL;
2960
2961         qdio_allocate_do_dbf(init_data);
2962
2963         /* create irq */
2964         irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2965
2966         QDIO_DBF_TEXT0(0,setup,"irq_ptr:");
2967         QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
2968
2969         if (!irq_ptr) {
2970                 QDIO_PRINT_ERR("allocation of irq_ptr failed!\n");
2971                 return -ENOMEM;
2972         }
2973
2974         init_MUTEX(&irq_ptr->setting_up_sema);
2975
2976         /* QDR must be in DMA area since CCW data address is only 32 bit */
2977         irq_ptr->qdr = (struct qdr *) __get_free_page(GFP_KERNEL | GFP_DMA);
2978         if (!(irq_ptr->qdr)) {
2979                 free_page((unsigned long) irq_ptr);
2980                 QDIO_PRINT_ERR("allocation of irq_ptr->qdr failed!\n");
2981                 return -ENOMEM;
2982         }
2983         QDIO_DBF_TEXT0(0,setup,"qdr:");
2984         QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*));
2985
2986         if (qdio_alloc_qs(irq_ptr,
2987                           init_data->no_input_qs,
2988                           init_data->no_output_qs)) {
2989                 QDIO_PRINT_ERR("queue allocation failed!\n");
2990                 qdio_release_irq_memory(irq_ptr);
2991                 return -ENOMEM;
2992         }
2993
2994         init_data->cdev->private->qdio_data = irq_ptr;
2995
2996         qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);
2997
2998         return 0;
2999 }
3000
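/*
 * Fill the previously allocated qdio_irq structure from the caller's
 * qdio_initialize data: queue counts and handlers, thinint indicator,
 * QDR and QIB contents, and the equeue/aqueue CIWs used for the
 * establish and activate CCWs. Called with setting_up_sema held.
 */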
3001 static int qdio_fill_irq(struct qdio_initialize *init_data)
3002 {
3003         int i;
3004         char dbf_text[15];
3005         struct ciw *ciw;
3006         int is_iqdio;
3007         struct qdio_irq *irq_ptr;
3008
3009         irq_ptr = init_data->cdev->private->qdio_data;
3010
3011         memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr));
3012
3013         /* wipes qib.ac, required by ar7063 */
3014         memset(irq_ptr->qdr,0,sizeof(struct qdr));
3015
3016         irq_ptr->int_parm=init_data->int_parm;
3017
3018         irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
3019         irq_ptr->no_input_qs=init_data->no_input_qs;
3020         irq_ptr->no_output_qs=init_data->no_output_qs;
3021
3022         if (init_data->q_format==QDIO_IQDIO_QFMT) {
3023                 irq_ptr->is_iqdio_irq=1;
3024                 irq_ptr->is_thinint_irq=1;
3025         } else {
3026                 irq_ptr->is_iqdio_irq=0;
3027                 irq_ptr->is_thinint_irq=hydra_thinints;
3028         }
3029         sprintf(dbf_text,"is_i_t%1x%1x",
3030                 irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq);
3031         QDIO_DBF_TEXT2(0,setup,dbf_text);
3032
3033         if (irq_ptr->is_thinint_irq) {
3034                 irq_ptr->dev_st_chg_ind = qdio_get_indicator();
3035                 QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
3036                 if (!irq_ptr->dev_st_chg_ind) {
3037                         QDIO_PRINT_WARN("no indicator location available " \
3038                                         "for irq 0.%x.%x\n",
3039                                         irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
3040                         qdio_release_irq_memory(irq_ptr);
3041                         return -ENOBUFS;
3042                 }
3043         }
3044
3045         /* defaults */
3046         irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD;
3047         irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT;
3048         irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD;
3049         irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT;
3050
3051         qdio_fill_qs(irq_ptr, init_data->cdev,
3052                      init_data->no_input_qs,
3053                      init_data->no_output_qs,
3054                      init_data->input_handler,
3055                      init_data->output_handler,init_data->int_parm,
3056                      init_data->q_format,init_data->flags,
3057                      init_data->input_sbal_addr_array,
3058                      init_data->output_sbal_addr_array);
3059
3060         if (!try_module_get(THIS_MODULE)) {
3061                 QDIO_PRINT_CRIT("try_module_get() failed!\n");
3062                 qdio_release_irq_memory(irq_ptr);
3063                 return -EINVAL;
3064         }
3065
3066         qdio_fill_thresholds(irq_ptr,init_data->no_input_qs,
3067                              init_data->no_output_qs,
3068                              init_data->min_input_threshold,
3069                              init_data->max_input_threshold,
3070                              init_data->min_output_threshold,
3071                              init_data->max_output_threshold);
3072
3073         /* fill in qdr */
3074         irq_ptr->qdr->qfmt=init_data->q_format;
3075         irq_ptr->qdr->iqdcnt=init_data->no_input_qs;
3076         irq_ptr->qdr->oqdcnt=init_data->no_output_qs;
3077         irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */
3078         irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4;
3079
3080         irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib;
3081         irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
3082
3083         /* fill in qib */
3084         irq_ptr->is_qebsm = is_passthrough;
3085         if (irq_ptr->is_qebsm)
3086                 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
3087
3088         irq_ptr->qib.qfmt=init_data->q_format;
3089         if (init_data->no_input_qs)
3090                 irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
3091         if (init_data->no_output_qs)
3092                 irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib);
3093         memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8);
3094
3095         qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format,
3096                              init_data->qib_param_field,
3097                              init_data->no_input_qs,
3098                              init_data->no_output_qs,
3099                              init_data->input_slib_elements,
3100                              init_data->output_slib_elements);
3101
3102         /* first input descriptors, then output descriptors */
3103         is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0;
3104         for (i=0;i<init_data->no_input_qs;i++)
3105                 qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio);
3106
3107         for (i=0;i<init_data->no_output_qs;i++)
3108                 qdio_allocate_fill_output_desc(irq_ptr, i,
3109                                                init_data->no_input_qs,
3110                                                is_iqdio);
3111
3112         /* qdr, qib, sls, slsbs, slibs, sbales filled. */
3113
3114         /* get qdio commands */
3115         ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
3116         if (!ciw) {
3117                 QDIO_DBF_TEXT2(1,setup,"no eq");
3118                 QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. "
3119                                 "Trying to use default.\n");
3120         } else
3121                 irq_ptr->equeue = *ciw;
3122         ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
3123         if (!ciw) {
3124                 QDIO_DBF_TEXT2(1,setup,"no aq");
3125                 QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. "
3126                                 "Trying to use default.\n");
3127         } else
3128                 irq_ptr->aqueue = *ciw;
3129
3130         /* Set new interrupt handler. */
3131         irq_ptr->original_int_handler = init_data->cdev->handler;
3132         init_data->cdev->handler = qdio_handler;
3133
3134         return 0;
3135 }
3136
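/*
 * Establish the queues on the subchannel: fill the irq structure, set up
 * the thinint indicator if needed, issue the establish-queues CCW and
 * wait (up to QDIO_ESTABLISH_TIMEOUT) for the irq to reach the
 * ESTABLISHED state, then read the SSQD/SIGA characteristics.
 */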
3137 int
3138 qdio_establish(struct qdio_initialize *init_data)
3139 {
3140         struct qdio_irq *irq_ptr;
3141         unsigned long saveflags;
3142         int result, result2;
3143         struct ccw_device *cdev;
3144         char dbf_text[20];
3145
3146         cdev=init_data->cdev;
3147         irq_ptr = cdev->private->qdio_data;
3148         if (!irq_ptr)
3149                 return -EINVAL;
3150
3151         if (cdev->private->state != DEV_STATE_ONLINE)
3152                 return -EINVAL;
3153         
3154         down(&irq_ptr->setting_up_sema);
3155
3156         qdio_fill_irq(init_data);
3157
3158         /* the thinint CHSC stuff */
3159         if (irq_ptr->is_thinint_irq) {
3160
3161                 result = tiqdio_set_subchannel_ind(irq_ptr,0);
3162                 if (result) {
3163                         up(&irq_ptr->setting_up_sema);
3164                         qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3165                         return result;
3166                 }
3167                 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
3168         }
3169
3170         sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no);
3171         QDIO_DBF_TEXT0(0,setup,dbf_text);
3172         QDIO_DBF_TEXT0(0,trace,dbf_text);
3173
3174         /* establish q */
3175         irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd;
3176         irq_ptr->ccw.flags=CCW_FLAG_SLI;
3177         irq_ptr->ccw.count=irq_ptr->equeue.count;
3178         irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr);
3179
3180         spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3181
3182         ccw_device_set_options_mask(cdev, 0);
3183         result = ccw_device_start(cdev, &irq_ptr->ccw,
3184                                 QDIO_DOING_ESTABLISH, 0, 0);
3185         if (result) {
3186                 result2 = ccw_device_start(cdev, &irq_ptr->ccw,
3187                                         QDIO_DOING_ESTABLISH, 0, 0);
3188                 sprintf(dbf_text,"eq:io%4x",result);
3189                 QDIO_DBF_TEXT2(1,setup,dbf_text);
3190                 if (result2) {
3191                         sprintf(dbf_text,"eq:io%4x",result2);
3192                         QDIO_DBF_TEXT2(1,setup,dbf_text);
3193                 }
3194                 QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
3195                                 "returned %i, next try returned %i\n",
3196                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3197                                 result, result2);
3198                 result=result2;
3199                 if (result)
3200                         ccw_device_set_timeout(cdev, 0);
3201         }
3202
3203         spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3204
3205         if (result) {
3206                 up(&irq_ptr->setting_up_sema);
3207                 qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
3208                 return result;
3209         }
3210         
3211         wait_event_interruptible_timeout(cdev->private->wait_q,
3212                 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
3213                 irq_ptr->state == QDIO_IRQ_STATE_ERR,
3214                 QDIO_ESTABLISH_TIMEOUT);
3215
3216         if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)
3217                 result = 0;
3218         else {
3219                 up(&irq_ptr->setting_up_sema);
3220                 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3221                 return -EIO;
3222         }
3223
3224         qdio_get_ssqd_siga(irq_ptr);
3225         /* if this gets set once, we're running under VM and can omit SVSes */
3226         if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
3227                 omit_svs=1;
3228
3229         sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac);
3230         QDIO_DBF_TEXT2(0,setup,dbf_text);
3231
3232         sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac);
3233         QDIO_DBF_TEXT2(0,setup,dbf_text);
3234
3235         irq_ptr->hydra_gives_outbound_pcis=
3236                 irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED;
3237         irq_ptr->sync_done_on_outb_pcis=
3238                 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS;
3239
3240         qdio_initialize_set_siga_flags_input(irq_ptr);
3241         qdio_initialize_set_siga_flags_output(irq_ptr);
3242
3243         up(&irq_ptr->setting_up_sema);
3244
3245         return result;
3246         
3247 }
3248
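/*
 * Activate the previously established queues: issue the activate-queues
 * CCW, mark the input queues for (thinint) processing and, after
 * QDIO_ACTIVATE_TIMEOUT, move the irq to the ACTIVE state unless an
 * error or stop was seen in the meantime.
 */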
3249 int
3250 qdio_activate(struct ccw_device *cdev, int flags)
3251 {
3252         struct qdio_irq *irq_ptr;
3253         int i,result=0,result2;
3254         unsigned long saveflags;
3255         char dbf_text[20]; /* see qdio_initialize */
3256
3257         irq_ptr = cdev->private->qdio_data;
3258         if (!irq_ptr)
3259                 return -ENODEV;
3260
3261         if (cdev->private->state != DEV_STATE_ONLINE)
3262                 return -EINVAL;
3263
3264         down(&irq_ptr->setting_up_sema);
3265         if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) {
3266                 result=-EBUSY;
3267                 goto out;
3268         }
3269
3270         sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
3271         QDIO_DBF_TEXT2(0,setup,dbf_text);
3272         QDIO_DBF_TEXT2(0,trace,dbf_text);
3273
3274         /* activate q */
3275         irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd;
3276         irq_ptr->ccw.flags=CCW_FLAG_SLI;
3277         irq_ptr->ccw.count=irq_ptr->aqueue.count;
3278         irq_ptr->ccw.cda=QDIO_GET_ADDR(0);
3279
3280         spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3281
3282         ccw_device_set_timeout(cdev, 0);
3283         ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
3284         result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
3285                                 0, DOIO_DENY_PREFETCH);
3286         if (result) {
3287                 result2=ccw_device_start(cdev,&irq_ptr->ccw,
3288                                          QDIO_DOING_ACTIVATE,0,0);
3289                 sprintf(dbf_text,"aq:io%4x",result);
3290                 QDIO_DBF_TEXT2(1,setup,dbf_text);
3291                 if (result2) {
3292                         sprintf(dbf_text,"aq:io%4x",result2);
3293                         QDIO_DBF_TEXT2(1,setup,dbf_text);
3294                 }
3295                 QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
3296                                 "returned %i, next try returned %i\n",
3297                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3298                                 result, result2);
3299                 result=result2;
3300         }
3301
3302         spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3303         if (result)
3304                 goto out;
3305
3306         for (i=0;i<irq_ptr->no_input_qs;i++) {
3307                 if (irq_ptr->is_thinint_irq) {
3308                         /* 
3309                          * This way we know that, if we get interrupted
3310                          * by tiqdio_inbound_processing, qdio_unmark_q will
3311                          * not be called.
3312                          */
3313                         qdio_reserve_q(irq_ptr->input_qs[i]);
3314                         qdio_mark_tiq(irq_ptr->input_qs[i]);
3315                         qdio_release_q(irq_ptr->input_qs[i]);
3316                 }
3317         }
3318
3319         if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) {
3320                 for (i=0;i<irq_ptr->no_input_qs;i++) {
3321                         irq_ptr->input_qs[i]->is_input_q|=
3322                                 QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT;
3323                 }
3324         }
3325
3326         msleep(QDIO_ACTIVATE_TIMEOUT);
3327         switch (irq_ptr->state) {
3328         case QDIO_IRQ_STATE_STOPPED:
3329         case QDIO_IRQ_STATE_ERR:
3330                 up(&irq_ptr->setting_up_sema);
3331                 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3332                 down(&irq_ptr->setting_up_sema);
3333                 result = -EIO;
3334                 break;
3335         default:
3336                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
3337                 result = 0;
3338         }
3339  out:
3340         up(&irq_ptr->setting_up_sema);
3341
3342         return result;
3343 }
3344
3345 /* buffers filled forwards again to make Rick happy */
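/*
 * Mark 'count' input buffers, starting at 'qidx', as empty for the
 * adapter (SLSB_CU_INPUT_EMPTY); under QEBSM a single set_slsb() call
 * may cover several buffers, otherwise we step through them one by one.
 */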
3346 static void
3347 qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3348                         unsigned int count, struct qdio_buffer *buffers)
3349 {
3350         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3351         int tmp = 0;
3352
3353         qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3354         if (irq->is_qebsm) {
3355                 while (count) {
3356                         tmp = set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3357                         if (!tmp)
3358                                 return;
3359                 }
3360                 return;
3361         }
3362         for (;;) {
3363                 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3364                 count--;
3365                 if (!count) break;
3366                 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3367         }
3368 }
3369
3370 static void
3371 qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3372                          unsigned int count, struct qdio_buffer *buffers)
3373 {
3374         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3375         int tmp = 0;
3376
3377         qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3378         if (irq->is_qebsm) {
3379                 while (count) {
3380                         tmp = set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3381                         if (!tmp)
3382                                 return;
3383                 }
3384                 return;
3385         }
3386
3387         for (;;) {
3388                 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3389                 count--;
3390                 if (!count) break;
3391                 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3392         }
3393 }
3394
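/*
 * Inbound part of do_QDIO: account the buffers, hand them back to the
 * adapter as empty and, if the queue had no buffers in use before,
 * issue SIGA-input (when required) and mark the queue for processing.
 */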
3395 static void
3396 do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3397                        unsigned int qidx, unsigned int count,
3398                        struct qdio_buffer *buffers)
3399 {
3400         int used_elements;
3401
3402         /* This is the inbound handling of queues */
3403         used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3404         
3405         qdio_do_qdio_fill_input(q,qidx,count,buffers);
3406         
3407         if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
3408             (callflags&QDIO_FLAG_UNDER_INTERRUPT))
3409                 atomic_xchg(&q->polling,0);
3410         
3411         if (used_elements) 
3412                 return;
3413         if (callflags&QDIO_FLAG_DONT_SIGA)
3414                 return;
3415         if (q->siga_in) {
3416                 int result;
3417                 
3418                 result=qdio_siga_input(q);
3419                 if (result) {
3420                         if (q->siga_error)
3421                                 q->error_status_flags|=
3422                                         QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
3423                         q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
3424                         q->siga_error=result;
3425                 }
3426         }
3427                 
3428         qdio_mark_q(q);
3429 }
3430
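/*
 * Outbound part of do_QDIO: mark the buffers as primed and kick the
 * adapter - one SIGA per SBAL for iqdio queues; otherwise we check
 * whether the previous buffer is still primed and, if so, skip the
 * SIGA ("fast requeue").
 */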
3431 static void
3432 do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3433                         unsigned int qidx, unsigned int count,
3434                         struct qdio_buffer *buffers)
3435 {
3436         int used_elements;
3437         unsigned int cnt, start_buf;
3438         unsigned char state = 0;
3439         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3440
3441         /* This is the outbound handling of queues */
3442         qdio_do_qdio_fill_output(q,qidx,count,buffers);
3443
3444         used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3445
3446         if (callflags&QDIO_FLAG_DONT_SIGA) {
3447                 qdio_perf_stat_inc(&perf_stats.outbound_cnt);
3448                 return;
3449         }
3450         if (callflags & QDIO_FLAG_PCI_OUT)
3451                 q->is_pci_out = 1;
3452         else
3453                 q->is_pci_out = 0;
3454         if (q->is_iqdio_q) {
3455                 /* one siga for every sbal */
3456                 while (count--)
3457                         qdio_kick_outbound_q(q);
3458                         
3459                 __qdio_outbound_processing(q);
3460         } else {
3461                 /* under VM, we do a SIGA sync unconditionally */
3462                 SYNC_MEMORY;
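                /*
                 * SYNC_MEMORY expands to an "if" without braces, so the
                 * "else" below pairs with that hidden "if" (see also the
                 * comment inside the else branch).
                 */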
3463                 else {
3464                         /* 
3465                          * w/o shadow queues (else branch of
3466                          * SYNC_MEMORY :-/ ), we try to
3467                          * fast-requeue buffers 
3468                          */
3469                         if (irq->is_qebsm) {
3470                                 cnt = 1;
3471                                 start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
3472                                              (QDIO_MAX_BUFFERS_PER_Q-1));
3473                                 qdio_do_eqbs(q, &state, &start_buf, &cnt);
3474                         } else
3475                                 state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
3476                                         &(QDIO_MAX_BUFFERS_PER_Q-1) ];
3477                          if (state != SLSB_CU_OUTPUT_PRIMED) {
3478                                 qdio_kick_outbound_q(q);
3479                         } else {
3480                                 QDIO_DBF_TEXT3(0,trace, "fast-req");
3481                                 qdio_perf_stat_inc(&perf_stats.fast_reqs);
3482                         }
3483                 }
3484                 /* 
3485                  * only marking the q could take too long,
3486                  * the upper layer module could do a lot of
3487                  * traffic in that time 
3488                  */
3489                 __qdio_outbound_processing(q);
3490         }
3491
3492         qdio_perf_stat_inc(&perf_stats.outbound_cnt);
3493 }
3494
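/*
 * do_QDIO - main entry point for passing buffers to / getting buffers
 * from the adapter. callflags selects input (QDIO_FLAG_SYNC_INPUT) or
 * output (QDIO_FLAG_SYNC_OUTPUT) handling for 'count' buffers starting
 * at 'qidx' on queue 'queue_number'.
 */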
3495 /* count must be 1 in iqdio */
3496 int
3497 do_QDIO(struct ccw_device *cdev,unsigned int callflags,
3498         unsigned int queue_number, unsigned int qidx,
3499         unsigned int count,struct qdio_buffer *buffers)
3500 {
3501         struct qdio_irq *irq_ptr;
3502 #ifdef CONFIG_QDIO_DEBUG
3503         char dbf_text[20];
3504
3505         sprintf(dbf_text,"doQD%04x",cdev->private->schid.sch_no);
3506         QDIO_DBF_TEXT3(0,trace,dbf_text);
3507 #endif /* CONFIG_QDIO_DEBUG */
3508
3509         if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) ||
3510              (count>QDIO_MAX_BUFFERS_PER_Q) ||
3511              (queue_number>QDIO_MAX_QUEUES_PER_IRQ) )
3512                 return -EINVAL;
3513
3514         if (count==0)
3515                 return 0;
3516
3517         irq_ptr = cdev->private->qdio_data;
3518         if (!irq_ptr)
3519                 return -ENODEV;
3520
3521 #ifdef CONFIG_QDIO_DEBUG
3522         if (callflags&QDIO_FLAG_SYNC_INPUT)
3523                 QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
3524                               sizeof(void*));
3525         else
3526                 QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number],
3527                               sizeof(void*));
3528         sprintf(dbf_text,"flag%04x",callflags);
3529         QDIO_DBF_TEXT3(0,trace,dbf_text);
3530         sprintf(dbf_text,"qi%02xct%02x",qidx,count);
3531         QDIO_DBF_TEXT3(0,trace,dbf_text);
3532 #endif /* CONFIG_QDIO_DEBUG */
3533
3534         if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
3535                 return -EBUSY;
3536
3537         if (callflags&QDIO_FLAG_SYNC_INPUT)
3538                 do_qdio_handle_inbound(irq_ptr->input_qs[queue_number],
3539                                        callflags, qidx, count, buffers);
3540         else if (callflags&QDIO_FLAG_SYNC_OUTPUT)
3541                 do_qdio_handle_outbound(irq_ptr->output_qs[queue_number],
3542                                         callflags, qidx, count, buffers);
3543         else {
3544                 QDIO_DBF_TEXT3(1,trace,"doQD:inv");
3545                 return -EINVAL;
3546         }
3547         return 0;
3548 }
3549
3550 static int
3551 qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
3552                         int buffer_length, int *eof, void *data)
3553 {
3554         int c=0;
3555
3556         /* we are always called with buffer_length=4k, so we deliver
3557            everything on the first read */
3558         if (offset>0)
3559                 return 0;
3560
3561 #define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
3562 #ifdef CONFIG_64BIT
3563         _OUTP_IT("Number of tasklet runs (total)                  : %li\n",
3564                  (long)atomic64_read(&perf_stats.tl_runs));
3565         _OUTP_IT("Inbound tasklet runs      tried/retried         : %li/%li\n",
3566                  (long)atomic64_read(&perf_stats.inbound_tl_runs),
3567                  (long)atomic64_read(&perf_stats.inbound_tl_runs_resched));
3568         _OUTP_IT("Inbound-thin tasklet runs tried/retried         : %li/%li\n",
3569                  (long)atomic64_read(&perf_stats.inbound_thin_tl_runs),
3570                  (long)atomic64_read(&perf_stats.inbound_thin_tl_runs_resched));
3571         _OUTP_IT("Outbound tasklet runs     tried/retried         : %li/%li\n",
3572                  (long)atomic64_read(&perf_stats.outbound_tl_runs),
3573                  (long)atomic64_read(&perf_stats.outbound_tl_runs_resched));
3574         _OUTP_IT("\n");
3575         _OUTP_IT("Number of SIGA sync's issued                    : %li\n",
3576                  (long)atomic64_read(&perf_stats.siga_syncs));
3577         _OUTP_IT("Number of SIGA in's issued                      : %li\n",
3578                  (long)atomic64_read(&perf_stats.siga_ins));
3579         _OUTP_IT("Number of SIGA out's issued                     : %li\n",
3580                  (long)atomic64_read(&perf_stats.siga_outs));
3581         _OUTP_IT("Number of PCIs caught                           : %li\n",
3582                  (long)atomic64_read(&perf_stats.pcis));
3583         _OUTP_IT("Number of adapter interrupts caught             : %li\n",
3584                  (long)atomic64_read(&perf_stats.thinints));
3585         _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA)  : %li\n",
3586                  (long)atomic64_read(&perf_stats.fast_reqs));
3587         _OUTP_IT("\n");
3588         _OUTP_IT("Number of inbound transfers                     : %li\n",
3589                  (long)atomic64_read(&perf_stats.inbound_cnt));
3590         _OUTP_IT("Number of do_QDIOs outbound                     : %li\n",
3591                  (long)atomic64_read(&perf_stats.outbound_cnt));
3592 #else /* CONFIG_64BIT */
3593         _OUTP_IT("Number of tasklet runs (total)                  : %i\n",
3594                  atomic_read(&perf_stats.tl_runs));
3595         _OUTP_IT("Inbound tasklet runs      tried/retried         : %i/%i\n",
3596                  atomic_read(&perf_stats.inbound_tl_runs),
3597                  atomic_read(&perf_stats.inbound_tl_runs_resched));
3598         _OUTP_IT("Inbound-thin tasklet runs tried/retried         : %i/%i\n",
3599                  atomic_read(&perf_stats.inbound_thin_tl_runs),
3600                  atomic_read(&perf_stats.inbound_thin_tl_runs_resched));
3601         _OUTP_IT("Outbound tasklet runs     tried/retried         : %i/%i\n",
3602                  atomic_read(&perf_stats.outbound_tl_runs),
3603                  atomic_read(&perf_stats.outbound_tl_runs_resched));
3604         _OUTP_IT("\n");
3605         _OUTP_IT("Number of SIGA sync's issued                    : %i\n",
3606                  atomic_read(&perf_stats.siga_syncs));
3607         _OUTP_IT("Number of SIGA in's issued                      : %i\n",
3608                  atomic_read(&perf_stats.siga_ins));
3609         _OUTP_IT("Number of SIGA out's issued                     : %i\n",
3610                  atomic_read(&perf_stats.siga_outs));
3611         _OUTP_IT("Number of PCIs caught                           : %i\n",
3612                  atomic_read(&perf_stats.pcis));
3613         _OUTP_IT("Number of adapter interrupts caught             : %i\n",
3614                  atomic_read(&perf_stats.thinints));
3615         _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA)  : %i\n",
3616                  atomic_read(&perf_stats.fast_reqs));
3617         _OUTP_IT("\n");
3618         _OUTP_IT("Number of inbound transfers                     : %i\n",
3619                  atomic_read(&perf_stats.inbound_cnt));
3620         _OUTP_IT("Number of do_QDIOs outbound                     : %i\n",
3621                  atomic_read(&perf_stats.outbound_cnt));
3622 #endif /* CONFIG_64BIT */
3623         _OUTP_IT("\n");
3624
3625         return c;
3626 }
3627
3628 static struct proc_dir_entry *qdio_perf_proc_file;
3629
3630 static void
3631 qdio_add_procfs_entry(void)
3632 {
3633         proc_perf_file_registration=0;
3634         qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
3635                                               S_IFREG|0444,NULL);
3636         if (qdio_perf_proc_file) {
3637                 qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read;
3638         } else proc_perf_file_registration=-1;
3639
3640         if (proc_perf_file_registration)
3641                 QDIO_PRINT_WARN("was not able to register perf. " \
3642                                 "proc-file (%i).\n",
3643                                 proc_perf_file_registration);
3644 }
3645
3646 static void
3647 qdio_remove_procfs_entry(void)
3648 {
3649         if (!proc_perf_file_registration) /* means if it went ok earlier */
3650                 remove_proc_entry(QDIO_PERF,NULL);
3651 }
3652
3653 /**
3654  * attributes in sysfs
3655  *****************************************************************************/
3656
3657 static ssize_t
3658 qdio_performance_stats_show(struct bus_type *bus, char *buf)
3659 {
3660         return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
3661 }
3662
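/*
 * Writing 1 enables and 0 disables the performance statistics; when they
 * are switched off, all counters are reset.
 */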
3663 static ssize_t
3664 qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
3665 {
3666         unsigned long i;
3667         int ret;
3668
3669         ret = strict_strtoul(buf, 16, &i);
3670         if (!ret && ((i == 0) || (i == 1))) {
3671                 if (i == qdio_performance_stats)
3672                         return count;
3673                 qdio_performance_stats = i;
3674                 if (i==0) {
3675                         /* reset perf. stat. info */
3676 #ifdef CONFIG_64BIT
3677                         atomic64_set(&perf_stats.tl_runs, 0);
3678                         atomic64_set(&perf_stats.outbound_tl_runs, 0);
3679                         atomic64_set(&perf_stats.inbound_tl_runs, 0);
3680                         atomic64_set(&perf_stats.inbound_tl_runs_resched, 0);
3681                         atomic64_set(&perf_stats.inbound_thin_tl_runs, 0);
3682                         atomic64_set(&perf_stats.inbound_thin_tl_runs_resched,
3683                                      0);
3684                         atomic64_set(&perf_stats.siga_outs, 0);
3685                         atomic64_set(&perf_stats.siga_ins, 0);
3686                         atomic64_set(&perf_stats.siga_syncs, 0);
3687                         atomic64_set(&perf_stats.pcis, 0);
3688                         atomic64_set(&perf_stats.thinints, 0);
3689                         atomic64_set(&perf_stats.fast_reqs, 0);
3690                         atomic64_set(&perf_stats.outbound_cnt, 0);
3691                         atomic64_set(&perf_stats.inbound_cnt, 0);
3692 #else /* CONFIG_64BIT */
3693                         atomic_set(&perf_stats.tl_runs, 0);
3694                         atomic_set(&perf_stats.outbound_tl_runs, 0);
3695                         atomic_set(&perf_stats.inbound_tl_runs, 0);
3696                         atomic_set(&perf_stats.inbound_tl_runs_resched, 0);
3697                         atomic_set(&perf_stats.inbound_thin_tl_runs, 0);
3698                         atomic_set(&perf_stats.inbound_thin_tl_runs_resched, 0);
3699                         atomic_set(&perf_stats.siga_outs, 0);
3700                         atomic_set(&perf_stats.siga_ins, 0);
3701                         atomic_set(&perf_stats.siga_syncs, 0);
3702                         atomic_set(&perf_stats.pcis, 0);
3703                         atomic_set(&perf_stats.thinints, 0);
3704                         atomic_set(&perf_stats.fast_reqs, 0);
3705                         atomic_set(&perf_stats.outbound_cnt, 0);
3706                         atomic_set(&perf_stats.inbound_cnt, 0);
3707 #endif /* CONFIG_64BIT */
3708                 }
3709         } else {
3710                 QDIO_PRINT_ERR("QDIO performance_stats: write 0 or 1 to this file!\n");
3711                 return -EINVAL;
3712         }
3713         return count;
3714 }
3715
3716 static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show,
3717                         qdio_performance_stats_store);
3718
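/*
 * Register the adapter interrupt (thin interrupt) handler; if that fails
 * we continue without adapter interrupts (tiqdio_ind stays NULL).
 */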
3719 static void
3720 tiqdio_register_thinints(void)
3721 {
3722         char dbf_text[20];
3723
3724         tiqdio_ind =
3725                 s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL);
3726         if (IS_ERR(tiqdio_ind)) {
3727                 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind));
3728                 QDIO_DBF_TEXT0(0,setup,dbf_text);
3729                 QDIO_PRINT_ERR("failed to register adapter handler " \
3730                                "(rc=%li).\nAdapter interrupts might " \
3731                                "not work. Continuing.\n",
3732                                PTR_ERR(tiqdio_ind));
3733                 tiqdio_ind = NULL;
3734         }
3735 }
3736
3737 static void
3738 tiqdio_unregister_thinints(void)
3739 {
3740         if (tiqdio_ind)
3741                 s390_unregister_adapter_interrupt(tiqdio_ind);
3742 }
3743
3744 static int
3745 qdio_get_qdio_memory(void)
3746 {
3747         int i;
3748         indicator_used[0]=1;
3749
3750         for (i=1;i<INDICATORS_PER_CACHELINE;i++)
3751                 indicator_used[i]=0;
3752         indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
3753                              GFP_KERNEL);
3754         if (!indicators)
3755                 return -ENOMEM;
3756         return 0;
3757 }
3758
3759 static void
3760 qdio_release_qdio_memory(void)
3761 {
3762         kfree(indicators);
3763 }
3764
3765 static void
3766 qdio_unregister_dbf_views(void)
3767 {
3768         if (qdio_dbf_setup)
3769                 debug_unregister(qdio_dbf_setup);
3770         if (qdio_dbf_sbal)
3771                 debug_unregister(qdio_dbf_sbal);
3772         if (qdio_dbf_sense)
3773                 debug_unregister(qdio_dbf_sense);
3774         if (qdio_dbf_trace)
3775                 debug_unregister(qdio_dbf_trace);
3776 #ifdef CONFIG_QDIO_DEBUG
3777         if (qdio_dbf_slsb_out)
3778                 debug_unregister(qdio_dbf_slsb_out);
3779         if (qdio_dbf_slsb_in)
3780                 debug_unregister(qdio_dbf_slsb_in);
3781 #endif /* CONFIG_QDIO_DEBUG */
3782 }
3783
3784 static int
3785 qdio_register_dbf_views(void)
3786 {
3787         qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME,
3788                                       QDIO_DBF_SETUP_PAGES,
3789                                       QDIO_DBF_SETUP_NR_AREAS,
3790                                       QDIO_DBF_SETUP_LEN);
3791         if (!qdio_dbf_setup)
3792                 goto oom;
3793         debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view);
3794         debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL);
3795
3796         qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME,
3797                                      QDIO_DBF_SBAL_PAGES,
3798                                      QDIO_DBF_SBAL_NR_AREAS,
3799                                      QDIO_DBF_SBAL_LEN);
3800         if (!qdio_dbf_sbal)
3801                 goto oom;
3802
3803         debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view);
3804         debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL);
3805
3806         qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME,
3807                                       QDIO_DBF_SENSE_PAGES,
3808                                       QDIO_DBF_SENSE_NR_AREAS,
3809                                       QDIO_DBF_SENSE_LEN);
3810         if (!qdio_dbf_sense)
3811                 goto oom;
3812
3813         debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view);
3814         debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL);
3815
3816         qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME,
3817                                       QDIO_DBF_TRACE_PAGES,
3818                                       QDIO_DBF_TRACE_NR_AREAS,
3819                                       QDIO_DBF_TRACE_LEN);
3820         if (!qdio_dbf_trace)
3821                 goto oom;
3822
3823         debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
3824         debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);
3825
3826 #ifdef CONFIG_QDIO_DEBUG
3827         qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
3828                                          QDIO_DBF_SLSB_OUT_PAGES,
3829                                          QDIO_DBF_SLSB_OUT_NR_AREAS,
3830                                          QDIO_DBF_SLSB_OUT_LEN);
3831         if (!qdio_dbf_slsb_out)
3832                 goto oom;
3833         debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view);
3834         debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL);
3835
3836         qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME,
3837                                         QDIO_DBF_SLSB_IN_PAGES,
3838                                         QDIO_DBF_SLSB_IN_NR_AREAS,
3839                                         QDIO_DBF_SLSB_IN_LEN);
3840         if (!qdio_dbf_slsb_in)
3841                 goto oom;
3842         debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
3843         debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
3844 #endif /* CONFIG_QDIO_DEBUG */
3845         return 0;
3846 oom:
3847         QDIO_PRINT_ERR("not enough memory for dbf.\n");
3848         qdio_unregister_dbf_views();
3849         return -ENOMEM;
3850 }
3851
3852 static void *qdio_mempool_alloc(gfp_t gfp_mask, void *size)
3853 {
3854         return (void *) get_zeroed_page(gfp_mask|GFP_DMA);
3855 }
3856
3857 static void qdio_mempool_free(void *element, void *size)
3858 {
3859         free_page((unsigned long) element);
3860 }
3861
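/*
 * Module init: allocate the shared thinint indicators, create the qdio_q
 * slab cache and the dbf views, register the sysfs attribute, procfs
 * entry and the scssc mempool, then register the adapter interrupt
 * handler.
 */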
3862 static int __init
3863 init_QDIO(void)
3864 {
3865         int res;
3866         void *ptr;
3867
3868         printk(KERN_INFO "qdio: loading %s\n", version);
3869
3870         res=qdio_get_qdio_memory();
3871         if (res)
3872                 return res;
3873
3874         qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
3875                                          256, 0, NULL);
3876         if (!qdio_q_cache) {
3877                 qdio_release_qdio_memory();
3878                 return -ENOMEM;
3879         }
3880
3881         res = qdio_register_dbf_views();
3882         if (res) {
3883                 kmem_cache_destroy(qdio_q_cache);
3884                 qdio_release_qdio_memory();
3885                 return res;
3886         }
3887
3888         QDIO_DBF_TEXT0(0,setup,"initQDIO");
3889         res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
3890
3891         memset((void*)&perf_stats,0,sizeof(perf_stats));
3892         QDIO_DBF_TEXT0(0,setup,"perfstat");
3893         ptr=&perf_stats;
3894         QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
3895
3896         qdio_add_procfs_entry();
3897
3898         qdio_mempool_scssc = mempool_create(QDIO_MEMPOOL_SCSSC_ELEMENTS,
3899                                             qdio_mempool_alloc,
3900                                             qdio_mempool_free, NULL);
3901
3902         if (tiqdio_check_chsc_availability())
3903                 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
3904
3905         tiqdio_register_thinints();
3906
3907         return 0;
3908 }
3909
3910 static void __exit
3911 cleanup_QDIO(void)
3912 {
3913         tiqdio_unregister_thinints();
3914         qdio_remove_procfs_entry();
3915         qdio_release_qdio_memory();
3916         qdio_unregister_dbf_views();
3917         mempool_destroy(qdio_mempool_scssc);
3918         kmem_cache_destroy(qdio_q_cache);
3919         bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
3920         printk(KERN_INFO "qdio: %s: module removed\n", version);
3921 }
3922
3923 module_init(init_QDIO);
3924 module_exit(cleanup_QDIO);
3925
3926 EXPORT_SYMBOL(qdio_allocate);
3927 EXPORT_SYMBOL(qdio_establish);
3928 EXPORT_SYMBOL(qdio_initialize);
3929 EXPORT_SYMBOL(qdio_activate);
3930 EXPORT_SYMBOL(do_QDIO);
3931 EXPORT_SYMBOL(qdio_shutdown);
3932 EXPORT_SYMBOL(qdio_free);
3933 EXPORT_SYMBOL(qdio_cleanup);
3934 EXPORT_SYMBOL(qdio_synchronize);