1 /*
2  *
3  * linux/drivers/s390/cio/qdio.c
4  *
5  * Linux for S/390 QDIO base support, HiperSockets base support
6  * version 2
7  *
8  * Copyright 2000,2002 IBM Corporation
9  * Author(s):             Utz Bacher <utz.bacher@de.ibm.com>
10  * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
11  *
12  * Restriction: only 63 iqdio subchannels get their own indicator;
13  * after that, subsequent subchannels share one indicator
14  *
15  *
16  *
17  *
18  * This program is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License as published by
20  * the Free Software Foundation; either version 2, or (at your option)
21  * any later version.
22  *
23  * This program is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
26  * GNU General Public License for more details.
27  *
28  * You should have received a copy of the GNU General Public License
29  * along with this program; if not, write to the Free Software
30  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31  */
32
33 #include <linux/module.h>
34 #include <linux/init.h>
35
36 #include <linux/slab.h>
37 #include <linux/kernel.h>
38 #include <linux/proc_fs.h>
39 #include <linux/timer.h>
40 #include <linux/mempool.h>
41
42 #include <asm/ccwdev.h>
43 #include <asm/io.h>
44 #include <asm/atomic.h>
45 #include <asm/semaphore.h>
46 #include <asm/timex.h>
47
48 #include <asm/debug.h>
49 #include <asm/s390_rdev.h>
50 #include <asm/qdio.h>
51
52 #include "cio.h"
53 #include "css.h"
54 #include "device.h"
55 #include "airq.h"
56 #include "qdio.h"
57 #include "ioasm.h"
58 #include "chsc.h"
59
60 /****************** MODULE PARAMETER VARIABLES ********************/
61 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
62 MODULE_DESCRIPTION("QDIO base support version 2, " \
63                    "Copyright 2000 IBM Corporation");
64 MODULE_LICENSE("GPL");
65
66 /******************** HERE WE GO ***********************************/
67
68 static const char version[] = "QDIO base support version 2";
69
70 static int qdio_performance_stats = 0;
71 static int proc_perf_file_registration;
72 static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc;
73 static struct qdio_perf_stats perf_stats;
74
75 static int hydra_thinints;
76 static int is_passthrough = 0;
77 static int omit_svs;
78
79 static int indicator_used[INDICATORS_PER_CACHELINE];
80 static __u32 * volatile indicators;
81 static __u32 volatile spare_indicator;
82 static atomic_t spare_indicator_usecount;
83 #define QDIO_MEMPOOL_SCSSC_ELEMENTS 2
84 static mempool_t *qdio_mempool_scssc;
85
86 static debug_info_t *qdio_dbf_setup;
87 static debug_info_t *qdio_dbf_sbal;
88 static debug_info_t *qdio_dbf_trace;
89 static debug_info_t *qdio_dbf_sense;
90 #ifdef CONFIG_QDIO_DEBUG
91 static debug_info_t *qdio_dbf_slsb_out;
92 static debug_info_t *qdio_dbf_slsb_in;
93 #endif /* CONFIG_QDIO_DEBUG */
94
95 /* iQDIO stuff: */
96 static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
97                                                  during a while loop */
98 static DEFINE_SPINLOCK(ttiq_list_lock);
99 static int register_thinint_result;
100 static void tiqdio_tl(unsigned long);
101 static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
102
103 /* not a macro, as one of the arguments is atomic_read */
104 static inline int 
105 qdio_min(int a,int b)
106 {
107         if (a<b)
108                 return a;
109         else
110                 return b;
111 }
112
113 /***************** SCRUBBER HELPER ROUTINES **********************/
114
115 static inline __u64 
116 qdio_get_micros(void)
117 {
118         return (get_clock() >> 12); /* time>>12 is microseconds */
119 }
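/*
 * on z/Architecture, bit 51 of the TOD clock value corresponds to one
 * microsecond, so shifting the get_clock() result right by 12 bits
 * (63 - 51) yields a microsecond timestamp, as noted above.
 */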
120
121 /* 
122  * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve
123  * the q in any case, so that we'll not be interrupted when we are in
124  * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost
125  * always works (famous last words)
126  */
127 static inline int 
128 qdio_reserve_q(struct qdio_q *q)
129 {
130         return atomic_add_return(1,&q->use_count) - 1;
131 }
132
133 static inline void 
134 qdio_release_q(struct qdio_q *q)
135 {
136         atomic_dec(&q->use_count);
137 }
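/*
 * qdio_reserve_q/qdio_release_q implement a simple try-lock on use_count:
 * qdio_reserve_q returns the previous value, so a non-zero result means the
 * queue is already being processed and the caller backs off (see the
 * __qdio_*_processing functions below).
 */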
138
139 /* check the ccq: 0, 32 and 96 are handled as success, 97 means try again, anything else is an error */
140 static int
141 qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
142 {
143         char dbf_text[15];
144
145         if (ccq == 0 || ccq == 32 || ccq == 96)
146                 return 0;
147         if (ccq == 97)
148                 return 1;
149         /*notify devices immediately*/
150         sprintf(dbf_text,"%d", ccq);
151         QDIO_DBF_TEXT2(1,trace,dbf_text);
152         return -EIO;
153 }
154 /* EQBS: extract buffer states */
155 static int
156 qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
157              unsigned int *start, unsigned int *cnt)
158 {
159         struct qdio_irq *irq;
160         unsigned int tmp_cnt, q_no, ccq;
161         int rc ;
162         char dbf_text[15];
163
164         ccq = 0;
165         tmp_cnt = *cnt;
166         irq = (struct qdio_irq*)q->irq_ptr;
167         q_no = q->q_no;
168         if(!q->is_input_q)
169                 q_no += irq->no_input_qs;
170 again:
171         ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
172         rc = qdio_check_ccq(q, ccq);
173         if (rc == 1) {
174                 QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
175                 goto again;
176         }
177         if (rc < 0) {
178                 QDIO_DBF_TEXT2(1,trace,"eqberr");
179                 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
180                 QDIO_DBF_TEXT2(1,trace,dbf_text);
181                 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
182                                 QDIO_STATUS_LOOK_FOR_ERROR,
183                                 0, 0, 0, -1, -1, q->int_parm);
184                 return 0;
185         }
186         return (tmp_cnt - *cnt);
187 }
188
189 /* SQBS: set buffer states */
190 static int
191 qdio_do_sqbs(struct qdio_q *q, unsigned char state,
192              unsigned int *start, unsigned int *cnt)
193 {
194         struct qdio_irq *irq;
195         unsigned int tmp_cnt, q_no, ccq;
196         int rc;
197         char dbf_text[15];
198
199         ccq = 0;
200         tmp_cnt = *cnt;
201         irq = (struct qdio_irq*)q->irq_ptr;
202         q_no = q->q_no;
203         if(!q->is_input_q)
204                 q_no += irq->no_input_qs;
205 again:
206         ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
207         rc = qdio_check_ccq(q, ccq);
208         if (rc == 1) {
209                 QDIO_DBF_TEXT5(1,trace,"sqAGAIN");
210                 goto again;
211         }
212         if (rc < 0) {
213                 QDIO_DBF_TEXT3(1,trace,"sqberr");
214                 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no);
215                 QDIO_DBF_TEXT3(1,trace,dbf_text);
216                 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
217                                 QDIO_STATUS_LOOK_FOR_ERROR,
218                                 0, 0, 0, -1, -1, q->int_parm);
219                 return 0;
220         }
221         return (tmp_cnt - *cnt);
222 }
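/*
 * qdio_do_eqbs and qdio_do_sqbs both retry as long as the condition code
 * qualifier asks for it and return the number of buffers actually handled
 * (tmp_cnt - *cnt); on a hard error the queue handler is notified with
 * QDIO_STATUS_ACTIVATE_CHECK_CONDITION and 0 is returned.
 */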
223
224 static inline int
225 qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
226               unsigned char state, unsigned int *count)
227 {
228         volatile char *slsb;
229         struct qdio_irq *irq;
230
231         irq = (struct qdio_irq*)q->irq_ptr;
232         if (!irq->is_qebsm) {
233                 slsb = (char *)&q->slsb.acc.val[(*bufno)];
234                 xchg(slsb, state);
235                 return 1;
236         }
237         return qdio_do_sqbs(q, state, bufno, count);
238 }
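/*
 * without QEBSM, the SLSB entry of a single buffer is updated directly via
 * xchg (the count is ignored and 1 is returned); with QEBSM, the SQBS
 * instruction sets up to *count buffer states starting at *bufno in one go.
 */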
239
240 #ifdef CONFIG_QDIO_DEBUG
241 static inline void
242 qdio_trace_slsb(struct qdio_q *q)
243 {
244         if (q->queue_type==QDIO_TRACE_QTYPE) {
245                 if (q->is_input_q)
246                         QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
247                                       QDIO_MAX_BUFFERS_PER_Q);
248                 else
249                         QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
250                                       QDIO_MAX_BUFFERS_PER_Q);
251         }
252 }
253 #endif
254
255 static inline int
256 set_slsb(struct qdio_q *q, unsigned int *bufno,
257          unsigned char state, unsigned int *count)
258 {
259         int rc;
260 #ifdef CONFIG_QDIO_DEBUG
261         qdio_trace_slsb(q);
262 #endif
263         rc = qdio_set_slsb(q, bufno, state, count);
264 #ifdef CONFIG_QDIO_DEBUG
265         qdio_trace_slsb(q);
266 #endif
267         return rc;
268 }
269 static inline int 
270 qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
271                unsigned int gpr3)
272 {
273         int cc;
274
275         QDIO_DBF_TEXT4(0,trace,"sigasync");
276         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
277
278         if (qdio_performance_stats)
279                 perf_stats.siga_syncs++;
280
281         cc = do_siga_sync(q->schid, gpr2, gpr3);
282         if (cc)
283                 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
284
285         return cc;
286 }
287
288 static inline int
289 qdio_siga_sync_q(struct qdio_q *q)
290 {
291         if (q->is_input_q)
292                 return qdio_siga_sync(q, 0, q->mask);
293         return qdio_siga_sync(q, q->mask, 0);
294 }
295
296 static int
297 __do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
298 {
299        struct qdio_irq *irq;
300        unsigned int fc = 0;
301        unsigned long schid;
302
303        irq = (struct qdio_irq *) q->irq_ptr;
304        if (!irq->is_qebsm)
305                schid = *((u32 *)&q->schid);
306        else {
307                schid = irq->sch_token;
308                fc |= 0x80;
309        }
310        return do_siga_output(schid, q->mask, busy_bit, fc);
311 }
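/*
 * for QEBSM devices the subchannel token is passed to SIGA-w instead of the
 * subchannel id; judging from this function, bit 0x80 in the function code
 * selects that token form of the instruction.
 */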
312
313 /* 
314  * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
315  * an access exception 
316  */
317 static int
318 qdio_siga_output(struct qdio_q *q)
319 {
320         int cc;
321         __u32 busy_bit;
322         __u64 start_time=0;
323
324         if (qdio_performance_stats)
325                 perf_stats.siga_outs++;
326
327         QDIO_DBF_TEXT4(0,trace,"sigaout");
328         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
329
330         for (;;) {
331                 cc = __do_siga_output(q, &busy_bit);
332 //QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
333                 if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
334                         if (!start_time) 
335                                 start_time=NOW;
336                         if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
337                                 break;
338                 } else
339                         break;
340         }
341         
342         if ((cc==2) && (busy_bit)) 
343                 cc |= QDIO_SIGA_ERROR_B_BIT_SET;
344
345         if (cc)
346                 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
347
348         return cc;
349 }
350
351 static int
352 qdio_siga_input(struct qdio_q *q)
353 {
354         int cc;
355
356         QDIO_DBF_TEXT4(0,trace,"sigain");
357         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
358
359         if (qdio_performance_stats)
360                 perf_stats.siga_ins++;
361
362         cc = do_siga_input(q->schid, q->mask);
363         
364         if (cc)
365                 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
366
367         return cc;
368 }
369
370 /* locked by the locks in qdio_activate and qdio_cleanup */
371 static __u32 *
372 qdio_get_indicator(void)
373 {
374         int i;
375
376         for (i=1;i<INDICATORS_PER_CACHELINE;i++)
377                 if (!indicator_used[i]) {
378                         indicator_used[i]=1;
379                         return indicators+i;
380                 }
381         atomic_inc(&spare_indicator_usecount);
382         return (__u32 * volatile) &spare_indicator;
383 }
384
385 /* locked by the locks in qdio_activate and qdio_cleanup */
386 static void 
387 qdio_put_indicator(__u32 *addr)
388 {
389         int i;
390
391         if ( (addr) && (addr!=&spare_indicator) ) {
392                 i=addr-indicators;
393                 indicator_used[i]=0;
394         }
395         if (addr == &spare_indicator)
396                 atomic_dec(&spare_indicator_usecount);
397 }
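/*
 * indicator handling: the first INDICATORS_PER_CACHELINE-1 thinint queues
 * each get their own word in the indicators array (slot 0 is never handed
 * out by the loop above); all further queues share spare_indicator, which
 * is reference-counted via spare_indicator_usecount. This is presumably the
 * "only 63 subchannels get their own indicator" restriction from the file
 * header.
 */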
398
399 static inline void
400 tiqdio_clear_summary_bit(__u32 *location)
401 {
402         QDIO_DBF_TEXT5(0,trace,"clrsummb");
403         QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
404
405         xchg(location,0);
406 }
407
408 static inline  void
409 tiqdio_set_summary_bit(__u32 *location)
410 {
411         QDIO_DBF_TEXT5(0,trace,"setsummb");
412         QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
413
414         xchg(location,-1);
415 }
416
417 static inline void 
418 tiqdio_sched_tl(void)
419 {
420         tasklet_hi_schedule(&tiqdio_tasklet);
421 }
422
423 static void
424 qdio_mark_tiq(struct qdio_q *q)
425 {
426         unsigned long flags;
427
428         QDIO_DBF_TEXT4(0,trace,"mark iq");
429         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
430
431         spin_lock_irqsave(&ttiq_list_lock,flags);
432         if (unlikely(atomic_read(&q->is_in_shutdown)))
433                 goto out_unlock;
434
435         if (!q->is_input_q)
436                 goto out_unlock;
437
438         if ((q->list_prev) || (q->list_next)) 
439                 goto out_unlock;
440
441         if (!tiq_list) {
442                 tiq_list=q;
443                 q->list_prev=q;
444                 q->list_next=q;
445         } else {
446                 q->list_next=tiq_list;
447                 q->list_prev=tiq_list->list_prev;
448                 tiq_list->list_prev->list_next=q;
449                 tiq_list->list_prev=q;
450         }
451         spin_unlock_irqrestore(&ttiq_list_lock,flags);
452
453         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
454         tiqdio_sched_tl();
455         return;
456 out_unlock:
457         spin_unlock_irqrestore(&ttiq_list_lock,flags);
458         return;
459 }
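/*
 * qdio_mark_tiq links an input queue into the circular tiq_list (unless it
 * is already linked or shutting down), sets its device state change
 * indicator and schedules the thin-interrupt tasklet so the queue gets
 * processed.
 */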
460
461 static inline void
462 qdio_mark_q(struct qdio_q *q)
463 {
464         QDIO_DBF_TEXT4(0,trace,"mark q");
465         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
466
467         if (unlikely(atomic_read(&q->is_in_shutdown)))
468                 return;
469
470         tasklet_schedule(&q->tasklet);
471 }
472
473 static int
474 qdio_stop_polling(struct qdio_q *q)
475 {
476 #ifdef QDIO_USE_PROCESSING_STATE
477        unsigned int tmp, gsf, count = 1;
478        unsigned char state = 0;
479        struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
480
481         if (!atomic_xchg(&q->polling,0))
482                 return 1;
483
484         QDIO_DBF_TEXT4(0,trace,"stoppoll");
485         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
486
487         /* show the card that we are not polling anymore */
488         if (!q->is_input_q)
489                 return 1;
490
491        tmp = gsf = GET_SAVED_FRONTIER(q);
492        tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
493        set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);
494
495         /* 
496          * we don't issue this SYNC_MEMORY, as we trust Rick T and
497          * moreover will not use the PROCESSING state under VM, so
498          * q->polling was 0 anyway
499          */
500         /*SYNC_MEMORY;*/
501        if (irq->is_qebsm) {
502                count = 1;
503                qdio_do_eqbs(q, &state, &gsf, &count);
504        } else
505                state = q->slsb.acc.val[gsf];
506        if (state != SLSB_P_INPUT_PRIMED)
507                 return 1;
508         /* 
509          * set our summary bit again, as otherwise there is a
510          * small window we can miss between resetting it and
511          * checking for PRIMED state 
512          */
513         if (q->is_thinint_q)
514                 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
515         return 0;
516
517 #else /* QDIO_USE_PROCESSING_STATE */
518         return 1;
519 #endif /* QDIO_USE_PROCESSING_STATE */
520 }
521
522 /* 
523  * see the comment in do_QDIO and before qdio_reserve_q about the
524  * sophisticated locking outside of unmark_q, so that we don't need to
525  * disable the interrupts :-) 
526  */
527 static void
528 qdio_unmark_q(struct qdio_q *q)
529 {
530         unsigned long flags;
531
532         QDIO_DBF_TEXT4(0,trace,"unmark q");
533         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
534
535         if ((!q->list_prev)||(!q->list_next))
536                 return;
537
538         if ((q->is_thinint_q)&&(q->is_input_q)) {
539                 /* iQDIO */
540                 spin_lock_irqsave(&ttiq_list_lock,flags);
541                 /* in case cleanup has done this already and simultaneously
542                  * qdio_unmark_q is called from the interrupt handler, we've
543                  * got to check this in this specific case again */
544                 if ((!q->list_prev)||(!q->list_next))
545                         goto out;
546                 if (q->list_next==q) {
547                         /* q was the only interesting q */
548                         tiq_list=NULL;
549                         q->list_next=NULL;
550                         q->list_prev=NULL;
551                 } else {
552                         q->list_next->list_prev=q->list_prev;
553                         q->list_prev->list_next=q->list_next;
554                         tiq_list=q->list_next;
555                         q->list_next=NULL;
556                         q->list_prev=NULL;
557                 }
558 out:
559                 spin_unlock_irqrestore(&ttiq_list_lock,flags);
560         }
561 }
562
563 static inline unsigned long 
564 tiqdio_clear_global_summary(void)
565 {
566         unsigned long time;
567
568         QDIO_DBF_TEXT5(0,trace,"clrglobl");
569         
570         time = do_clear_global_summary();
571
572         QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long));
573
574         return time;
575 }
576
577
578 /************************* OUTBOUND ROUTINES *******************************/
579 static int
580 qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
581 {
582         struct qdio_irq *irq;
583         unsigned char state;
584         unsigned int cnt, count, ftc;
585
586         irq = (struct qdio_irq *) q->irq_ptr;
587         if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
588                 SYNC_MEMORY;
589
590         ftc = q->first_to_check;
591         count = qdio_min(atomic_read(&q->number_of_buffers_used),
592                         (QDIO_MAX_BUFFERS_PER_Q-1));
593         if (count == 0)
594                 return q->first_to_check;
595         cnt = qdio_do_eqbs(q, &state, &ftc, &count);
596         if (cnt == 0)
597                 return q->first_to_check;
598         switch (state) {
599         case SLSB_P_OUTPUT_ERROR:
600                 QDIO_DBF_TEXT3(0,trace,"outperr");
601                 atomic_sub(cnt , &q->number_of_buffers_used);
602                 if (q->qdio_error)
603                         q->error_status_flags |=
604                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
605                 q->qdio_error = SLSB_P_OUTPUT_ERROR;
606                 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
607                 q->first_to_check = ftc;
608                 break;
609         case SLSB_P_OUTPUT_EMPTY:
610                 QDIO_DBF_TEXT5(0,trace,"outpempt");
611                 atomic_sub(cnt, &q->number_of_buffers_used);
612                 q->first_to_check = ftc;
613                 break;
614         case SLSB_CU_OUTPUT_PRIMED:
615                 /* all buffers primed */
616                 QDIO_DBF_TEXT5(0,trace,"outpprim");
617                 break;
618         default:
619                 break;
620         }
621         QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
622         return q->first_to_check;
623 }
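/*
 * in both the QEBSM variant above and the non-QEBSM variant below, the
 * returned frontier (q->first_to_check) is the first buffer the adapter has
 * not given back yet; qdio_kick_outbound_handler later reports the buffers
 * from first_element_to_kick up to (but not including) this frontier to the
 * upper layer.
 */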
624
625 static int
626 qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
627 {
628         struct qdio_irq *irq;
629         unsigned char state;
630         int tmp, ftc, count, cnt;
631         char dbf_text[15];
632
633
634         irq = (struct qdio_irq *) q->irq_ptr;
635         ftc = q->first_to_check;
636         count = qdio_min(atomic_read(&q->number_of_buffers_used),
637                         (QDIO_MAX_BUFFERS_PER_Q-1));
638         if (count == 0)
639                  return q->first_to_check;
640         cnt = qdio_do_eqbs(q, &state, &ftc, &count);
641         if (cnt == 0)
642                  return q->first_to_check;
643         switch (state) {
644         case SLSB_P_INPUT_ERROR :
645 #ifdef CONFIG_QDIO_DEBUG
646                 QDIO_DBF_TEXT3(1,trace,"inperr");
647                 sprintf(dbf_text,"%2x,%2x",ftc,count);
648                 QDIO_DBF_TEXT3(1,trace,dbf_text);
649 #endif /* CONFIG_QDIO_DEBUG */
650                 if (q->qdio_error)
651                         q->error_status_flags |=
652                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
653                 q->qdio_error = SLSB_P_INPUT_ERROR;
654                 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
655                 atomic_sub(cnt, &q->number_of_buffers_used);
656                 q->first_to_check = ftc;
657                 break;
658         case SLSB_P_INPUT_PRIMED :
659                 QDIO_DBF_TEXT3(0,trace,"inptprim");
660                 sprintf(dbf_text,"%2x,%2x",ftc,count);
661                 QDIO_DBF_TEXT3(1,trace,dbf_text);
662                 tmp = 0;
663                 ftc = q->first_to_check;
664 #ifdef QDIO_USE_PROCESSING_STATE
665                 if (cnt > 1) {
666                         cnt -= 1;
667                         tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
668                         if (!tmp)
669                                 break;
670                 }
671                 cnt = 1;
672                 tmp += set_slsb(q, &ftc,
673                                SLSB_P_INPUT_PROCESSING, &cnt);
674                 atomic_set(&q->polling, 1);
675 #else
676                 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
677 #endif
678                 atomic_sub(tmp, &q->number_of_buffers_used);
679                 q->first_to_check = ftc;
680                 break;
681         case SLSB_CU_INPUT_EMPTY:
682         case SLSB_P_INPUT_NOT_INIT:
683         case SLSB_P_INPUT_PROCESSING:
684                 QDIO_DBF_TEXT5(0,trace,"inpnipro");
685                 break;
686         default:
687                 break;
688         }
689         QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
690         return q->first_to_check;
691 }
692
693 static int
694 qdio_get_outbound_buffer_frontier(struct qdio_q *q)
695 {
696         struct qdio_irq *irq;
697         volatile char *slsb;
698         unsigned int count = 1;
699         int first_not_to_check, f, f_mod_no;
700         char dbf_text[15];
701
702         QDIO_DBF_TEXT4(0,trace,"getobfro");
703         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
704
705         irq = (struct qdio_irq *) q->irq_ptr;
706         if (irq->is_qebsm)
707                 return qdio_qebsm_get_outbound_buffer_frontier(q);
708
709         slsb=&q->slsb.acc.val[0];
710         f_mod_no=f=q->first_to_check;
711         /* 
712          * f points to already processed elements, so f+no_used is correct...
713          * ... but: we don't check 128 buffers, as otherwise
714          * qdio_has_outbound_q_moved would return 0 
715          */
716         first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
717                                       (QDIO_MAX_BUFFERS_PER_Q-1));
718
719         if ((!q->is_iqdio_q)&&(!q->hydra_gives_outbound_pcis))
720                 SYNC_MEMORY;
721
722 check_next:
723         if (f==first_not_to_check) 
724                 goto out;
725
726         switch(slsb[f_mod_no]) {
727
728         /* the adapter has not fetched the output yet */
729         case SLSB_CU_OUTPUT_PRIMED:
730                 QDIO_DBF_TEXT5(0,trace,"outpprim");
731                 break;
732
733         /* the adapter got it */
734         case SLSB_P_OUTPUT_EMPTY:
735                 atomic_dec(&q->number_of_buffers_used);
736                 f++;
737                 f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
738                 QDIO_DBF_TEXT5(0,trace,"outpempt");
739                 goto check_next;
740
741         case SLSB_P_OUTPUT_ERROR:
742                 QDIO_DBF_TEXT3(0,trace,"outperr");
743                 sprintf(dbf_text,"%x-%x-%x",f_mod_no,
744                         q->sbal[f_mod_no]->element[14].sbalf.value,
745                         q->sbal[f_mod_no]->element[15].sbalf.value);
746                 QDIO_DBF_TEXT3(1,trace,dbf_text);
747                 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
748
749                 /* kind of process the buffer */
750                 set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);
751
752                 /* 
753                  * we increment the frontier, as this buffer
754                  * was obviously processed
755                  */
756                 atomic_dec(&q->number_of_buffers_used);
757                 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
758
759                 if (q->qdio_error)
760                         q->error_status_flags|=
761                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
762                 q->qdio_error=SLSB_P_OUTPUT_ERROR;
763                 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
764
765                 break;
766
767         /* no new buffers */
768         default:
769                 QDIO_DBF_TEXT5(0,trace,"outpni");
770         }
771 out:
772         return (q->first_to_check=f_mod_no);
773 }
774
775 /* all buffers are processed */
776 static int
777 qdio_is_outbound_q_done(struct qdio_q *q)
778 {
779         int no_used;
780 #ifdef CONFIG_QDIO_DEBUG
781         char dbf_text[15];
782 #endif
783
784         no_used=atomic_read(&q->number_of_buffers_used);
785
786 #ifdef CONFIG_QDIO_DEBUG
787         if (no_used) {
788                 sprintf(dbf_text,"oqisnt%02x",no_used);
789                 QDIO_DBF_TEXT4(0,trace,dbf_text);
790         } else {
791                 QDIO_DBF_TEXT4(0,trace,"oqisdone");
792         }
793         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
794 #endif /* CONFIG_QDIO_DEBUG */
795         return (no_used==0);
796 }
797
798 static int
799 qdio_has_outbound_q_moved(struct qdio_q *q)
800 {
801         int i;
802
803         i=qdio_get_outbound_buffer_frontier(q);
804
805         if ( (i!=GET_SAVED_FRONTIER(q)) ||
806              (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
807                 SAVE_FRONTIER(q,i);
808                 QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
809                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
810                 return 1;
811         } else {
812                 QDIO_DBF_TEXT4(0,trace,"oqhsntmv");
813                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
814                 return 0;
815         }
816 }
817
818 static void
819 qdio_kick_outbound_q(struct qdio_q *q)
820 {
821         int result;
822 #ifdef CONFIG_QDIO_DEBUG
823         char dbf_text[15];
824
825         QDIO_DBF_TEXT4(0,trace,"kickoutq");
826         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
827 #endif /* CONFIG_QDIO_DEBUG */
828
829         if (!q->siga_out)
830                 return;
831
832         /* here's the story with cc=2 and busy bit set (thanks, Rick):
833          * VM's CP could present us cc=2 and busy bit set on SIGA-write
834          * during reconfiguration of their Guest LAN (only in HIPERS mode,
835          * QDIO mode is asynchronous -- cc=2 and busy bit there will take
836          * the queues down immediately; and not being under VM we have a
837          * problem on cc=2 and busy bit set right away).
838          *
839          * Therefore qdio_siga_output will try for a short time constantly,
840          * if such a condition occurs. If it doesn't change, it will
841          * increase the busy_siga_counter and save the timestamp, and
842          * schedule the queue for later processing (via mark_q, using the
843          * queue tasklet). __qdio_outbound_processing will check out the
844          * counter. If non-zero, it will call qdio_kick_outbound_q as often
845          * as the value of the counter. This will attempt further SIGA
846          * instructions. For each successful SIGA, the counter is
847          * decreased, for failing SIGAs the counter remains the same, after
848          * all.
849          * After some time of no movement, qdio_kick_outbound_q will
850          * finally fail and reflect corresponding error codes to call
851          * the upper layer module and have it take the queues down.
852          *
853          * Note that this is a change from the original HiperSockets design
854          * (saying cc=2 and busy bit means take the queues down), but in
855          * these days Guest LAN didn't exist... excessive cc=2 with busy bit
856          * conditions will still take the queues down, but the threshold is
857          * higher due to the Guest LAN environment.
858          */
859
860
861         result=qdio_siga_output(q);
862
863         switch (result) {
864         case 0:
865                 /* went smooth this time, reset timestamp */
866 #ifdef CONFIG_QDIO_DEBUG
867                 QDIO_DBF_TEXT3(0,trace,"cc2reslv");
868                 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
869                         atomic_read(&q->busy_siga_counter));
870                 QDIO_DBF_TEXT3(0,trace,dbf_text);
871 #endif /* CONFIG_QDIO_DEBUG */
872                 q->timing.busy_start=0;
873                 break;
874         case (2|QDIO_SIGA_ERROR_B_BIT_SET):
875                 /* cc=2 and busy bit: */
876                 atomic_inc(&q->busy_siga_counter);
877
878                 /* if the last siga was successful, save
879                  * timestamp here */
880                 if (!q->timing.busy_start)
881                         q->timing.busy_start=NOW;
882
883                 /* if we're in time, don't touch error_status_flags
884                  * and siga_error */
885                 if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
886                         qdio_mark_q(q);
887                         break;
888                 }
889                 QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
890 #ifdef CONFIG_QDIO_DEBUG
891                 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
892                         atomic_read(&q->busy_siga_counter));
893                 QDIO_DBF_TEXT3(0,trace,dbf_text);
894 #endif /* CONFIG_QDIO_DEBUG */
895                 /* else fallthrough and report error */
896         default:
897                 /* for plain cc=1, 2 or 3: */
898                 if (q->siga_error)
899                         q->error_status_flags|=
900                                 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
901                 q->error_status_flags|=
902                         QDIO_STATUS_LOOK_FOR_ERROR;
903                 q->siga_error=result;
904         }
905 }
906
907 static void
908 qdio_kick_outbound_handler(struct qdio_q *q)
909 {
910         int start, end, real_end, count;
911 #ifdef CONFIG_QDIO_DEBUG
912         char dbf_text[15];
913 #endif
914
915         start = q->first_element_to_kick;
916         /* last_move_ftc was just updated */
917         real_end = GET_SAVED_FRONTIER(q);
918         end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
919                 (QDIO_MAX_BUFFERS_PER_Q-1);
920         count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
921                 (QDIO_MAX_BUFFERS_PER_Q-1);
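        /*
         * i.e. end is the last buffer below the saved frontier and count is
         * (end - start + 1) modulo QDIO_MAX_BUFFERS_PER_Q, so the handler is
         * told about the buffers start..end processed since the last kick.
         */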
922
923 #ifdef CONFIG_QDIO_DEBUG
924         QDIO_DBF_TEXT4(0,trace,"kickouth");
925         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
926
927         sprintf(dbf_text,"s=%2xc=%2x",start,count);
928         QDIO_DBF_TEXT4(0,trace,dbf_text);
929 #endif /* CONFIG_QDIO_DEBUG */
930
931         if (q->state==QDIO_IRQ_STATE_ACTIVE)
932                 q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
933                            q->error_status_flags,
934                            q->qdio_error,q->siga_error,q->q_no,start,count,
935                            q->int_parm);
936
937         /* for the next time: */
938         q->first_element_to_kick=real_end;
939         q->qdio_error=0;
940         q->siga_error=0;
941         q->error_status_flags=0;
942 }
943
944 static void
945 __qdio_outbound_processing(struct qdio_q *q)
946 {
947         int siga_attempts;
948
949         QDIO_DBF_TEXT4(0,trace,"qoutproc");
950         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
951
952         if (unlikely(qdio_reserve_q(q))) {
953                 qdio_release_q(q);
954                 if (qdio_performance_stats)
955                         o_p_c++;
956                 /* as we're sissies, we'll check next time */
957                 if (likely(!atomic_read(&q->is_in_shutdown))) {
958                         qdio_mark_q(q);
959                         QDIO_DBF_TEXT4(0,trace,"busy,agn");
960                 }
961                 return;
962         }
963         if (qdio_performance_stats) {
964                 o_p_nc++;
965                 perf_stats.tl_runs++;
966         }
967
968         /* see comment in qdio_kick_outbound_q */
969         siga_attempts=atomic_read(&q->busy_siga_counter);
970         while (siga_attempts) {
971                 atomic_dec(&q->busy_siga_counter);
972                 qdio_kick_outbound_q(q);
973                 siga_attempts--;
974         }
975
976         if (qdio_has_outbound_q_moved(q))
977                 qdio_kick_outbound_handler(q);
978
979         if (q->is_iqdio_q) {
980                 /* 
981                  * for asynchronous queues, we'd better check whether the sent
982                  * buffer has already switched from PRIMED to EMPTY.
983                  */
984                 if ((q->queue_type == QDIO_IQDIO_QFMT_ASYNCH) &&
985                     !qdio_is_outbound_q_done(q))
986                         qdio_mark_q(q);
987
988         } else if (!q->hydra_gives_outbound_pcis)
989                 if (!qdio_is_outbound_q_done(q))
990                         qdio_mark_q(q);
991
992         qdio_release_q(q);
993 }
994
995 static void
996 qdio_outbound_processing(struct qdio_q *q)
997 {
998         __qdio_outbound_processing(q);
999 }
1000
1001 /************************* INBOUND ROUTINES *******************************/
1002
1003
1004 static int
1005 qdio_get_inbound_buffer_frontier(struct qdio_q *q)
1006 {
1007         struct qdio_irq *irq;
1008         int f,f_mod_no;
1009         volatile char *slsb;
1010         unsigned int count = 1;
1011         int first_not_to_check;
1012 #ifdef CONFIG_QDIO_DEBUG
1013         char dbf_text[15];
1014 #endif /* CONFIG_QDIO_DEBUG */
1015 #ifdef QDIO_USE_PROCESSING_STATE
1016         int last_position=-1;
1017 #endif /* QDIO_USE_PROCESSING_STATE */
1018
1019         QDIO_DBF_TEXT4(0,trace,"getibfro");
1020         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1021
1022         irq = (struct qdio_irq *) q->irq_ptr;
1023         if (irq->is_qebsm)
1024                 return qdio_qebsm_get_inbound_buffer_frontier(q);
1025
1026         slsb=&q->slsb.acc.val[0];
1027         f_mod_no=f=q->first_to_check;
1028         /* 
1029          * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
1030          * would return 0 
1031          */
1032         first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
1033                                       (QDIO_MAX_BUFFERS_PER_Q-1));
1034
1035         /* 
1036          * we don't use this one, as after a PCI or a thin interrupt
1037          * we will sync the queues
1038          */
1039         /* SYNC_MEMORY;*/
1040
1041 check_next:
1042         f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
1043         if (f==first_not_to_check) 
1044                 goto out;
1045         switch (slsb[f_mod_no]) {
1046
1047         /* CU_EMPTY means frontier is reached */
1048         case SLSB_CU_INPUT_EMPTY:
1049                 QDIO_DBF_TEXT5(0,trace,"inptempt");
1050                 break;
1051
1052         /* P_PRIMED means set slsb to P_PROCESSING and move on */
1053         case SLSB_P_INPUT_PRIMED:
1054                 QDIO_DBF_TEXT5(0,trace,"inptprim");
1055
1056 #ifdef QDIO_USE_PROCESSING_STATE
1057                 /* 
1058                  * as soon as running under VM, polling the input queues will
1059                  * kill VM in terms of CP overhead 
1060                  */
1061                 if (q->siga_sync) {
1062                         set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1063                 } else {
1064                         /* set the previous buffer to NOT_INIT. The current
1065                          * buffer will be set to PROCESSING at the end of
1066                          * this function to avoid further interrupts. */
1067                         if (last_position>=0)
1068                                 set_slsb(q, &last_position,
1069                                          SLSB_P_INPUT_NOT_INIT, &count);
1070                         atomic_set(&q->polling,1);
1071                         last_position=f_mod_no;
1072                 }
1073 #else /* QDIO_USE_PROCESSING_STATE */
1074                 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1075 #endif /* QDIO_USE_PROCESSING_STATE */
1076                 /* 
1077                  * not needed, as the inbound queue will be synced on the next
1078                  * siga-r, resp. tiqdio_is_inbound_q_done will do the siga-s
1079                  */
1080                 /*SYNC_MEMORY;*/
1081                 f++;
1082                 atomic_dec(&q->number_of_buffers_used);
1083                 goto check_next;
1084
1085         case SLSB_P_INPUT_NOT_INIT:
1086         case SLSB_P_INPUT_PROCESSING:
1087                 QDIO_DBF_TEXT5(0,trace,"inpnipro");
1088                 break;
1089
1090         /* P_ERROR means frontier is reached, break and report error */
1091         case SLSB_P_INPUT_ERROR:
1092 #ifdef CONFIG_QDIO_DEBUG
1093                 sprintf(dbf_text,"inperr%2x",f_mod_no);
1094                 QDIO_DBF_TEXT3(1,trace,dbf_text);
1095 #endif /* CONFIG_QDIO_DEBUG */
1096                 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
1097
1098                 /* kind of process the buffer */
1099                 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1100
1101                 if (q->qdio_error)
1102                         q->error_status_flags|=
1103                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
1104                 q->qdio_error=SLSB_P_INPUT_ERROR;
1105                 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
1106
1107                 /* we increment the frontier, as this buffer
1108                  * was obviously processed */
1109                 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1110                 atomic_dec(&q->number_of_buffers_used);
1111
1112 #ifdef QDIO_USE_PROCESSING_STATE
1113                 last_position=-1;
1114 #endif /* QDIO_USE_PROCESSING_STATE */
1115
1116                 break;
1117
1118         /* everything else means frontier not changed (HALTED or so) */
1119         default: 
1120                 break;
1121         }
1122 out:
1123         q->first_to_check=f_mod_no;
1124
1125 #ifdef QDIO_USE_PROCESSING_STATE
1126         if (last_position>=0)
1127                 set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count);
1128 #endif /* QDIO_USE_PROCESSING_STATE */
1129
1130         QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
1131
1132         return q->first_to_check;
1133 }
1134
1135 static int
1136 qdio_has_inbound_q_moved(struct qdio_q *q)
1137 {
1138         int i;
1139
1140         static int old_pcis=0;
1141         static int old_thinints=0;
1142
1143         if (qdio_performance_stats) {
1144                 if ((old_pcis==perf_stats.pcis)&&
1145                     (old_thinints==perf_stats.thinints))
1146                         perf_stats.start_time_inbound=NOW;
1147                 else
1148                         old_pcis=perf_stats.pcis;
1149         }
1150
1151         i=qdio_get_inbound_buffer_frontier(q);
1152         if ( (i!=GET_SAVED_FRONTIER(q)) ||
1153              (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
1154                 SAVE_FRONTIER(q,i);
1155                 if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
1156                         SAVE_TIMESTAMP(q);
1157
1158                 QDIO_DBF_TEXT4(0,trace,"inhasmvd");
1159                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1160                 return 1;
1161         } else {
1162                 QDIO_DBF_TEXT4(0,trace,"inhsntmv");
1163                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1164                 return 0;
1165         }
1166 }
1167
1168 /* means, no more buffers to be filled */
1169 static int
1170 tiqdio_is_inbound_q_done(struct qdio_q *q)
1171 {
1172         int no_used;
1173         unsigned int start_buf, count;
1174         unsigned char state = 0;
1175         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1176
1177 #ifdef CONFIG_QDIO_DEBUG
1178         char dbf_text[15];
1179 #endif
1180
1181         no_used=atomic_read(&q->number_of_buffers_used);
1182
1183         /* propagate the change from 82 to 80 through VM */
1184         SYNC_MEMORY;
1185
1186 #ifdef CONFIG_QDIO_DEBUG
1187         if (no_used) {
1188                 sprintf(dbf_text,"iqisnt%02x",no_used);
1189                 QDIO_DBF_TEXT4(0,trace,dbf_text);
1190         } else {
1191                 QDIO_DBF_TEXT4(0,trace,"iniqisdo");
1192         }
1193         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1194 #endif /* CONFIG_QDIO_DEBUG */
1195
1196         if (!no_used)
1197                 return 1;
1198         if (!q->siga_sync && !irq->is_qebsm)
1199                 /* we'll check for more primed buffers in qdio_stop_polling */
1200                 return 0;
1201         if (irq->is_qebsm) {
1202                 count = 1;
1203                 start_buf = q->first_to_check;
1204                 qdio_do_eqbs(q, &state, &start_buf, &count);
1205         } else
1206                 state = q->slsb.acc.val[q->first_to_check];
1207         if (state != SLSB_P_INPUT_PRIMED)
1208                 /* 
1209                  * nothing more to do, if next buffer is not PRIMED.
1210                  * note that we did a SYNC_MEMORY before, so there has
1211                  * been a synchronization.
1212                  * we will return 0 below, as there is nothing to do
1213                  * (stop_polling not necessary, as we have not been
1214                  * using the PROCESSING state)
1215                  */
1216                 return 0;
1217
1218         /* 
1219          * ok, the next input buffer is primed. that means that the device state
1220          * change indicator and adapter local summary are set, so we will find
1221          * it next time.
1222          * we will return 0 below, as there is nothing to do, except scheduling
1223          * ourselves for the next time. 
1224          */
1225         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1226         tiqdio_sched_tl();
1227         return 0;
1228 }
1229
1230 static int
1231 qdio_is_inbound_q_done(struct qdio_q *q)
1232 {
1233         int no_used;
1234         unsigned int start_buf, count;
1235         unsigned char state = 0;
1236         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1237
1238 #ifdef CONFIG_QDIO_DEBUG
1239         char dbf_text[15];
1240 #endif
1241
1242         no_used=atomic_read(&q->number_of_buffers_used);
1243
1244         /* 
1245          * we need that one for synchronization with the adapter, as it
1246          * does a kind of PCI avoidance 
1247          */
1248         SYNC_MEMORY;
1249
1250         if (!no_used) {
1251                 QDIO_DBF_TEXT4(0,trace,"inqisdnA");
1252                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1254                 return 1;
1255         }
1256         if (irq->is_qebsm) {
1257                 count = 1;
1258                 start_buf = q->first_to_check;
1259                 qdio_do_eqbs(q, &state, &start_buf, &count);
1260         } else
1261                 state = q->slsb.acc.val[q->first_to_check];
1262         if (state == SLSB_P_INPUT_PRIMED) {
1263                 /* we got something to do */
1264                 QDIO_DBF_TEXT4(0,trace,"inqisntA");
1265                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1266                 return 0;
1267         }
1268
1269         /* on VM, we don't poll, so the q is always done here */
1270         if (q->siga_sync)
1271                 return 1;
1272         if (q->hydra_gives_outbound_pcis)
1273                 return 1;
1274
1275         /* 
1276          * at this point we know, that inbound first_to_check
1277          * has (probably) not moved (see qdio_inbound_processing) 
1278          */
1279         if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
1280 #ifdef CONFIG_QDIO_DEBUG
1281                 QDIO_DBF_TEXT4(0,trace,"inqisdon");
1282                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1283                 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
1284                 QDIO_DBF_TEXT4(0,trace,dbf_text);
1285 #endif /* CONFIG_QDIO_DEBUG */
1286                 return 1;
1287         } else {
1288 #ifdef CONFIG_QDIO_DEBUG
1289                 QDIO_DBF_TEXT4(0,trace,"inqisntd");
1290                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1291                 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
1292                 QDIO_DBF_TEXT4(0,trace,dbf_text);
1293 #endif /* CONFIG_QDIO_DEBUG */
1294                 return 0;
1295         }
1296 }
1297
1298 static void
1299 qdio_kick_inbound_handler(struct qdio_q *q)
1300 {
1301         int count, start, end, real_end, i;
1302 #ifdef CONFIG_QDIO_DEBUG
1303         char dbf_text[15];
1304 #endif
1305
1306         QDIO_DBF_TEXT4(0,trace,"kickinh");
1307         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1308
1309         start=q->first_element_to_kick;
1310         real_end=q->first_to_check;
1311         end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1312  
1313         i=start;
1314         count=0;
1315         while (1) {
1316                 count++;
1317                 if (i==end)
1318                         break;
1319                 i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1320         }
1321
1322 #ifdef CONFIG_QDIO_DEBUG
1323         sprintf(dbf_text,"s=%2xc=%2x",start,count);
1324         QDIO_DBF_TEXT4(0,trace,dbf_text);
1325 #endif /* CONFIG_QDIO_DEBUG */
1326
1327         if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
1328                 q->handler(q->cdev,
1329                            QDIO_STATUS_INBOUND_INT|q->error_status_flags,
1330                            q->qdio_error,q->siga_error,q->q_no,start,count,
1331                            q->int_parm);
1332
1333         /* for the next time: */
1334         q->first_element_to_kick=real_end;
1335         q->qdio_error=0;
1336         q->siga_error=0;
1337         q->error_status_flags=0;
1338
1339         if (qdio_performance_stats) {
1340                 perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
1341                 perf_stats.inbound_cnt++;
1342         }
1343 }
1344
1345 static void
1346 __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1347 {
1348         struct qdio_irq *irq_ptr;
1349         struct qdio_q *oq;
1350         int i;
1351
1352         QDIO_DBF_TEXT4(0,trace,"iqinproc");
1353         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1354
1355         /* 
1356          * we first want to reserve the q, so that we know, that we don't
1357          * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might
1358          * be set 
1359          */
1360         if (unlikely(qdio_reserve_q(q))) {
1361                 qdio_release_q(q);
1362                 if (qdio_performance_stats)
1363                         ii_p_c++;
1364                 /* 
1365                  * as we might just be about to stop polling, we make
1366                  * sure that we check again at least once more 
1367                  */
1368                 tiqdio_sched_tl();
1369                 return;
1370         }
1371         if (qdio_performance_stats)
1372                 ii_p_nc++;
1373         if (unlikely(atomic_read(&q->is_in_shutdown))) {
1374                 qdio_unmark_q(q);
1375                 goto out;
1376         }
1377
1378         /* 
1379          * we reset spare_ind_was_set, when the queue does not use the
1380          * spare indicator
1381          */
1382         if (spare_ind_was_set)
1383                 spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);
1384
1385         if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
1386                 goto out;
1387         /*
1388          * q->dev_st_chg_ind is the indicator, be it shared or not.
1389          * only clear it, if indicator is non-shared
1390          */
1391         if (!spare_ind_was_set)
1392                 tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
1393
1394         if (q->hydra_gives_outbound_pcis) {
1395                 if (!q->siga_sync_done_on_thinints) {
1396                         SYNC_MEMORY_ALL;
1397                 } else if ((!q->siga_sync_done_on_outb_tis)&&
1398                          (q->hydra_gives_outbound_pcis)) {
1399                         SYNC_MEMORY_ALL_OUTB;
1400                 }
1401         } else {
1402                 SYNC_MEMORY;
1403         }
1404         /*
1405          * maybe we have to do work on our outbound queues... at least
1406          * we have to check the outbound-int-capable thinint-capable
1407          * queues
1408          */
1409         if (q->hydra_gives_outbound_pcis) {
1410                 irq_ptr = (struct qdio_irq*)q->irq_ptr;
1411                 for (i=0;i<irq_ptr->no_output_qs;i++) {
1412                         oq = irq_ptr->output_qs[i];
1413                         if (!qdio_is_outbound_q_done(oq)) {
1414                                 if (qdio_performance_stats)
1415                                         perf_stats.tl_runs--;
1416                                 __qdio_outbound_processing(oq);
1417                         }
1418                 }
1419         }
1420
1421         if (!qdio_has_inbound_q_moved(q))
1422                 goto out;
1423
1424         qdio_kick_inbound_handler(q);
1425         if (tiqdio_is_inbound_q_done(q))
1426                 if (!qdio_stop_polling(q)) {
1427                         /* 
1428                          * we set the flags to get into the stuff next time,
1429                          * see also comment in qdio_stop_polling 
1430                          */
1431                         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1432                         tiqdio_sched_tl();
1433                 }
1434 out:
1435         qdio_release_q(q);
1436 }
1437
1438 static void
1439 tiqdio_inbound_processing(struct qdio_q *q)
1440 {
1441         __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
1442 }
1443
1444 static void
1445 __qdio_inbound_processing(struct qdio_q *q)
1446 {
1447         int q_laps=0;
1448
1449         QDIO_DBF_TEXT4(0,trace,"qinproc");
1450         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1451
1452         if (unlikely(qdio_reserve_q(q))) {
1453                 qdio_release_q(q);
1454                 if (qdio_performance_stats)
1455                         i_p_c++;
1456                 /* as we're sissies, we'll check next time */
1457                 if (likely(!atomic_read(&q->is_in_shutdown))) {
1458                         qdio_mark_q(q);
1459                         QDIO_DBF_TEXT4(0,trace,"busy,agn");
1460                 }
1461                 return;
1462         }
1463         if (qdio_performance_stats) {
1464                 i_p_nc++;
1465                 perf_stats.tl_runs++;
1466         }
1467
1468 again:
1469         if (qdio_has_inbound_q_moved(q)) {
1470                 qdio_kick_inbound_handler(q);
1471                 if (!qdio_stop_polling(q)) {
1472                         q_laps++;
1473                         if (q_laps<QDIO_Q_LAPS) 
1474                                 goto again;
1475                 }
1476                 qdio_mark_q(q);
1477         } else {
1478                 if (!qdio_is_inbound_q_done(q)) 
1479                         /* means poll time is not yet over */
1480                         qdio_mark_q(q);
1481         }
1482
1483         qdio_release_q(q);
1484 }
1485
1486 static void
1487 qdio_inbound_processing(struct qdio_q *q)
1488 {
1489         __qdio_inbound_processing(q);
1490 }
1491
1492 /************************* MAIN ROUTINES *******************************/
1493
1494 #ifdef QDIO_USE_PROCESSING_STATE
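/*
 * return codes of tiqdio_reset_processing_state, as interpreted by
 * tiqdio_inbound_checks below: 0 - tasklet rescheduled, abort the scan;
 * 1 - summary bit set again, bump q_laps and go to the next queue;
 * 2 - nothing to do for this queue, go to the next one; 3 - bump q_laps
 * and restart the scan.
 */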
1495 static int
1496 tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1497 {
1498         if (!q) {
1499                 tiqdio_sched_tl();
1500                 return 0;
1501         }
1502
1503         /* 
1504          * under VM, we have not used the PROCESSING state, so no
1505          * need to stop polling 
1506          */
1507         if (q->siga_sync)
1508                 return 2;
1509
1510         if (unlikely(qdio_reserve_q(q))) {
1511                 qdio_release_q(q);
1512                 if (qdio_performance_stats)
1513                         ii_p_c++;
1514                 /* 
1515                  * as we might just be about to stop polling, we make
1516                  * sure that we check again at least once more 
1517                  */
1518                 
1519                 /* 
1520                  * sanity -- we'd get here without setting the
1521                  * dev st chg ind 
1522                  */
1523                 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1524                 tiqdio_sched_tl();
1525                 return 0;
1526         }
1527         if (qdio_stop_polling(q)) {
1528                 qdio_release_q(q);
1529                 return 2;
1530         }               
1531         if (q_laps<QDIO_Q_LAPS-1) {
1532                 qdio_release_q(q);
1533                 return 3;
1534         }
1535         /* 
1536          * we set the flags to get into the stuff
1537          * next time, see also comment in qdio_stop_polling 
1538          */
1539         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1540         tiqdio_sched_tl();
1541         qdio_release_q(q);
1542         return 1;
1543         
1544 }
1545 #endif /* QDIO_USE_PROCESSING_STATE */
1546
1547 static void
1548 tiqdio_inbound_checks(void)
1549 {
1550         struct qdio_q *q;
1551         int spare_ind_was_set=0;
1552 #ifdef QDIO_USE_PROCESSING_STATE
1553         int q_laps=0;
1554 #endif /* QDIO_USE_PROCESSING_STATE */
1555
1556         QDIO_DBF_TEXT4(0,trace,"iqdinbck");
1557         QDIO_DBF_TEXT5(0,trace,"iqlocsum");
1558
1559 #ifdef QDIO_USE_PROCESSING_STATE
1560 again:
1561 #endif /* QDIO_USE_PROCESSING_STATE */
1562
1563         /* when the spare indicator is used and set, save that and clear it */
1564         if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
1565                 spare_ind_was_set = 1;
1566                 tiqdio_clear_summary_bit((__u32*)&spare_indicator);
1567         }
1568
1569         q=(struct qdio_q*)tiq_list;
1570         do {
1571                 if (!q)
1572                         break;
1573                 __tiqdio_inbound_processing(q, spare_ind_was_set);
1574                 q=(struct qdio_q*)q->list_next;
1575         } while (q!=(struct qdio_q*)tiq_list);
1576
1577 #ifdef QDIO_USE_PROCESSING_STATE
1578         q=(struct qdio_q*)tiq_list;
1579         do {
1580                 int ret;
1581
1582                 ret = tiqdio_reset_processing_state(q, q_laps);
1583                 switch (ret) {
1584                 case 0:
1585                         return;
1586                 case 1:
1587                         q_laps++;
1588                 case 2:
1589                         q = (struct qdio_q*)q->list_next;
1590                         break;
1591                 default:
1592                         q_laps++;
1593                         goto again;
1594                 }
1595         } while (q!=(struct qdio_q*)tiq_list);
1596 #endif /* QDIO_USE_PROCESSING_STATE */
1597 }
1598
1599 static void
1600 tiqdio_tl(unsigned long data)
1601 {
1602         QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
1603
1604         if (qdio_performance_stats)
1605                 perf_stats.tl_runs++;
1606
1607         tiqdio_inbound_checks();
1608 }
1609
1610 /********************* GENERAL HELPER_ROUTINES ***********************/
1611
1612 static void
1613 qdio_release_irq_memory(struct qdio_irq *irq_ptr)
1614 {
1615         int i;
1616
1617         for (i=0;i<QDIO_MAX_QUEUES_PER_IRQ;i++) {
1618                 if (!irq_ptr->input_qs[i])
1619                         goto next;
1620
1621                 kfree(irq_ptr->input_qs[i]->slib);
1622                 kfree(irq_ptr->input_qs[i]);
1623
1624 next:
1625                 if (!irq_ptr->output_qs[i])
1626                         continue;
1627
1628                 kfree(irq_ptr->output_qs[i]->slib);
1629                 kfree(irq_ptr->output_qs[i]);
1630
1631         }
1632         kfree(irq_ptr->qdr);
1633         free_page((unsigned long) irq_ptr);
1634 }
1635
1636 static void
1637 qdio_set_impl_params(struct qdio_irq *irq_ptr,
1638                      unsigned int qib_param_field_format,
1639                      /* pointer to 128 bytes or NULL, if no param field */
1640                      unsigned char *qib_param_field,
1641                      unsigned int no_input_qs,
1642                      unsigned int no_output_qs,
1643                      /* each a pointer to no_queues*128 words of data or NULL */
1644                      unsigned long *input_slib_elements,
1645                      unsigned long *output_slib_elements)
1646 {
1647         int i,j;
1648
1649         if (!irq_ptr)
1650                 return;
1651
1652         irq_ptr->qib.pfmt=qib_param_field_format;
1653         if (qib_param_field)
1654                 memcpy(irq_ptr->qib.parm,qib_param_field,
1655                        QDIO_MAX_BUFFERS_PER_Q);
1656
1657         if (input_slib_elements)
1658                 for (i=0;i<no_input_qs;i++) {
1659                         for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1660                                 irq_ptr->input_qs[i]->slib->slibe[j].parms=
1661                                         input_slib_elements[
1662                                                 i*QDIO_MAX_BUFFERS_PER_Q+j];
1663                 }
1664         if (output_slib_elements)
1665                 for (i=0;i<no_output_qs;i++) {
1666                         for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1667                                 irq_ptr->output_qs[i]->slib->slibe[j].parms=
1668                                         output_slib_elements[
1669                                                 i*QDIO_MAX_BUFFERS_PER_Q+j];
1670                 }
1671 }
1672
1673 static int
1674 qdio_alloc_qs(struct qdio_irq *irq_ptr,
1675               int no_input_qs, int no_output_qs)
1676 {
1677         int i;
1678         struct qdio_q *q;
1679         int result=-ENOMEM;
1680
1681         for (i=0;i<no_input_qs;i++) {
1682                 q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL);
1683
1684                 if (!q) {
1685                         QDIO_PRINT_ERR("kmalloc of q failed!\n");
1686                         goto out;
1687                 }
1688
1689                 q->slib = kmalloc(PAGE_SIZE, GFP_KERNEL);
1690                 if (!q->slib) {
1691                         QDIO_PRINT_ERR("kmalloc of slib failed!\n");
1692                         goto out;
1693                 }
1694
1695                 irq_ptr->input_qs[i]=q;
1696         }
1697
1698         for (i=0;i<no_output_qs;i++) {
1699                 q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL);
1700
1701                 if (!q) {
1702                         goto out;
1703                 }
1704
1705                 q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL);
1706                 if (!q->slib) {
1707                         QDIO_PRINT_ERR("kmalloc of slib failed!\n");
1708                         goto out;
1709                 }
1710
1711                 irq_ptr->output_qs[i]=q;
1712         }
1713
1714         result=0;
1715 out:
1716         return result;
1717 }
1718
1719 static void
1720 qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1721              int no_input_qs, int no_output_qs,
1722              qdio_handler_t *input_handler,
1723              qdio_handler_t *output_handler,
1724              unsigned long int_parm,int q_format,
1725              unsigned long flags,
1726              void **inbound_sbals_array,
1727              void **outbound_sbals_array)
1728 {
1729         struct qdio_q *q;
1730         int i,j;
1731         char dbf_text[20]; /* see qdio_initialize */
1732         void *ptr;
1733         int available;
1734
1735         sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no);
1736         QDIO_DBF_TEXT0(0,setup,dbf_text);
1737         for (i=0;i<no_input_qs;i++) {
1738                 q=irq_ptr->input_qs[i];
1739
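                /*
                 * clear the head of the queue struct; q->slib (allocated in
                 * qdio_alloc_qs) and everything after it are preserved
                 */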
1740                 memset(q,0,((char*)&q->slib)-((char*)q));
1741                 sprintf(dbf_text,"in-q%4x",i);
1742                 QDIO_DBF_TEXT0(0,setup,dbf_text);
1743                 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
1744
1745                 memset(q->slib,0,PAGE_SIZE);
1746                 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
1747
1748                 available=0;
1749
1750                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1751                         q->sbal[j]=*(inbound_sbals_array++);
1752
1753                 q->queue_type=q_format;
1754                 q->int_parm=int_parm;
1755                 q->schid = irq_ptr->schid;
1756                 q->irq_ptr = irq_ptr;
1757                 q->cdev = cdev;
1758                 q->mask=1<<(31-i);
1759                 q->q_no=i;
1760                 q->is_input_q=1;
1761                 q->first_to_check=0;
1762                 q->last_move_ftc=0;
1763                 q->handler=input_handler;
1764                 q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;
1765
1766                 q->tasklet.data=(unsigned long)q;
1767                 /* q->is_thinint_q isn't valid at this time, but
1768                  * irq_ptr->is_thinint_irq is */
1769                 q->tasklet.func=(void(*)(unsigned long))
1770                         ((irq_ptr->is_thinint_irq)?&tiqdio_inbound_processing:
1771                          &qdio_inbound_processing);
1772
1773                 /* actually this is not used for inbound queues. yet. */
1774                 atomic_set(&q->busy_siga_counter,0);
1775                 q->timing.busy_start=0;
1776
1777 /*              for (j=0;j<QDIO_STATS_NUMBER;j++)
1778                         q->timing.last_transfer_times[j]=(qdio_get_micros()/
1779                                                           QDIO_STATS_NUMBER)*j;
1780                 q->timing.last_transfer_index=QDIO_STATS_NUMBER-1;
1781 */
1782
1783                 /* fill in slib */
1784                 if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba=
1785                                  (unsigned long)(q->slib);
1786                 q->slib->sla=(unsigned long)(q->sl);
1787                 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
1788
1789                 /* fill in sl */
1790                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1791                         q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
1792
1793                 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
1794                 ptr=(void*)q->sl;
1795                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1796                 ptr=(void*)&q->slsb;
1797                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1798                 ptr=(void*)q->sbal[0];
1799                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1800
1801                 /* fill in slsb */
1802                 if (!irq_ptr->is_qebsm) {
1803                         unsigned int count = 1;
1804                         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1805                                 set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
1806                 }
1807         }
1808
1809         for (i=0;i<no_output_qs;i++) {
1810                 q=irq_ptr->output_qs[i];
1811                 memset(q,0,((char*)&q->slib)-((char*)q));
1812
1813                 sprintf(dbf_text,"outq%4x",i);
1814                 QDIO_DBF_TEXT0(0,setup,dbf_text);
1815                 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
1816
1817                 memset(q->slib,0,PAGE_SIZE);
1818                 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
1819
1820                 available=0;
1821                 
1822                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1823                         q->sbal[j]=*(outbound_sbals_array++);
1824
1825                 q->queue_type=q_format;
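                /*
                 * for IQDIO (HiperSockets) devices with more than one output
                 * queue, the last queue is marked as the asynchronous queue
                 */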
1826                 if ((q->queue_type == QDIO_IQDIO_QFMT) &&
1827                     (no_output_qs > 1) &&
1828                     (i == no_output_qs-1))
1829                         q->queue_type = QDIO_IQDIO_QFMT_ASYNCH;
1830                 q->int_parm=int_parm;
1831                 q->is_input_q=0;
1832                 q->schid = irq_ptr->schid;
1833                 q->cdev = cdev;
1834                 q->irq_ptr = irq_ptr;
1835                 q->mask=1<<(31-i);
1836                 q->q_no=i;
1837                 q->first_to_check=0;
1838                 q->last_move_ftc=0;
1839                 q->handler=output_handler;
1840
1841                 q->tasklet.data=(unsigned long)q;
1842                 q->tasklet.func=(void(*)(unsigned long))
1843                         &qdio_outbound_processing;
1844
1845                 atomic_set(&q->busy_siga_counter,0);
1846                 q->timing.busy_start=0;
1847
1848                 /* fill in slib */
1849                 if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
1850                                  (unsigned long)(q->slib);
1851                 q->slib->sla=(unsigned long)(q->sl);
1852                 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
1853
1854                 /* fill in sl */
1855                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1856                         q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
1857
1858                 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
1859                 ptr=(void*)q->sl;
1860                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1861                 ptr=(void*)&q->slsb;
1862                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1863                 ptr=(void*)q->sbal[0];
1864                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1865
1866                 /* fill in slsb */
1867                 if (!irq_ptr->is_qebsm) {
1868                         unsigned int count = 1;
1869                         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1870                                 set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
1871                 }
1872         }
1873 }
1874
1875 static void
1876 qdio_fill_thresholds(struct qdio_irq *irq_ptr,
1877                      unsigned int no_input_qs,
1878                      unsigned int no_output_qs,
1879                      unsigned int min_input_threshold,
1880                      unsigned int max_input_threshold,
1881                      unsigned int min_output_threshold,
1882                      unsigned int max_output_threshold)
1883 {
1884         int i;
1885         struct qdio_q *q;
1886
1887         for (i=0;i<no_input_qs;i++) {
1888                 q=irq_ptr->input_qs[i];
1889                 q->timing.threshold=max_input_threshold;
1890 /*              for (j=0;j<QDIO_STATS_CLASSES;j++) {
1891                         q->threshold_classes[j].threshold=
1892                                 min_input_threshold+
1893                                 (max_input_threshold-min_input_threshold)/
1894                                 QDIO_STATS_CLASSES;
1895                 }
1896                 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1897         }
1898         for (i=0;i<no_output_qs;i++) {
1899                 q=irq_ptr->output_qs[i];
1900                 q->timing.threshold=max_output_threshold;
1901 /*              for (j=0;j<QDIO_STATS_CLASSES;j++) {
1902                         q->threshold_classes[j].threshold=
1903                                 min_output_threshold+
1904                                 (max_output_threshold-min_output_threshold)/
1905                                 QDIO_STATS_CLASSES;
1906                 }
1907                 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1908         }
1909 }
1910
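/*
 * adapter (thin) interrupt handler: account the interrupt, clear the
 * global summary indicator unless SVS can be omitted, and scan all
 * thin-interrupt queues for inbound work
 */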
1911 static int
1912 tiqdio_thinint_handler(void)
1913 {
1914         QDIO_DBF_TEXT4(0,trace,"thin_int");
1915
1916         if (qdio_performance_stats) {
1917                 perf_stats.thinints++;
1918                 perf_stats.start_time_inbound=NOW;
1919         }
1920
1921         /* SVS only when needed:
1922          * issue SVS to benefit from iqdio interrupt avoidance
1923          * (SVS clears AISOI) */
1924         if (!omit_svs)
1925                 tiqdio_clear_global_summary();
1926
1927         tiqdio_inbound_checks();
1928         return 0;
1929 }
1930
1931 static void
1932 qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1933 {
1934         int i;
1935 #ifdef CONFIG_QDIO_DEBUG
1936         char dbf_text[15];
1937
1938         QDIO_DBF_TEXT5(0,trace,"newstate");
1939         sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
1940         QDIO_DBF_TEXT5(0,trace,dbf_text);
1941 #endif /* CONFIG_QDIO_DEBUG */
1942
1943         irq_ptr->state=state;
1944         for (i=0;i<irq_ptr->no_input_qs;i++)
1945                 irq_ptr->input_qs[i]->state=state;
1946         for (i=0;i<irq_ptr->no_output_qs;i++)
1947                 irq_ptr->output_qs[i]->state=state;
1948         mb();
1949 }
1950
1951 static void
1952 qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1953 {
1954         char dbf_text[15];
1955
1956         if (irb->esw.esw0.erw.cons) {
1957                 sprintf(dbf_text,"sens%4x",schid.sch_no);
1958                 QDIO_DBF_TEXT2(1,trace,dbf_text);
1959                 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
1960
1961                 QDIO_PRINT_WARN("sense data available on qdio channel.\n");
1962                 QDIO_HEXDUMP16(WARN,"irb: ",irb);
1963                 QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw);
1964         }
1965                 
1966 }
1967
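/*
 * a PCI signals new inbound work: either schedule the inbound tasklet or
 * process the input queues directly; if the adapter also raises PCIs for
 * outbound completion, drain the output queues that are not yet done
 */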
1968 static void
1969 qdio_handle_pci(struct qdio_irq *irq_ptr)
1970 {
1971         int i;
1972         struct qdio_q *q;
1973
1974         if (qdio_performance_stats) {
1975                 perf_stats.pcis++;
1976                 perf_stats.start_time_inbound=NOW;
1977         }
1978         for (i=0;i<irq_ptr->no_input_qs;i++) {
1979                 q=irq_ptr->input_qs[i];
1980                 if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
1981                         qdio_mark_q(q);
1982                 else {
1983                         if (qdio_performance_stats)
1984                                 perf_stats.tl_runs--;
1985                         __qdio_inbound_processing(q);
1986                 }
1987         }
1988         if (!irq_ptr->hydra_gives_outbound_pcis)
1989                 return;
1990         for (i=0;i<irq_ptr->no_output_qs;i++) {
1991                 q=irq_ptr->output_qs[i];
1992                 if (qdio_is_outbound_q_done(q))
1993                         continue;
1994                 if (qdio_performance_stats)
1995                         perf_stats.tl_runs--;
1996                 if (!irq_ptr->sync_done_on_outb_pcis)
1997                         SYNC_MEMORY;
1998                 __qdio_outbound_processing(q);
1999         }
2000 }
2001
2002 static void qdio_establish_handle_irq(struct ccw_device*, int, int);
2003
2004 static void
2005 qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
2006                            int cstat, int dstat)
2007 {
2008         struct qdio_irq *irq_ptr;
2009         struct qdio_q *q;
2010         char dbf_text[15];
2011
2012         irq_ptr = cdev->private->qdio_data;
2013
2014         QDIO_DBF_TEXT2(1, trace, "ick2");
2015         sprintf(dbf_text,"%s", cdev->dev.bus_id);
2016         QDIO_DBF_TEXT2(1,trace,dbf_text);
2017         QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
2018         QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2019         QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2020         QDIO_PRINT_ERR("received check condition on activate " \
2021                        "queues on device %s (cs=x%x, ds=x%x).\n",
2022                        cdev->dev.bus_id, cstat, dstat);
2023         if (irq_ptr->no_input_qs) {
2024                 q=irq_ptr->input_qs[0];
2025         } else if (irq_ptr->no_output_qs) {
2026                 q=irq_ptr->output_qs[0];
2027         } else {
2028                 QDIO_PRINT_ERR("no queue registered for device %s!\n",
2029                                cdev->dev.bus_id);
2030                 goto omit_handler_call;
2031         }
2032         q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
2033                    QDIO_STATUS_LOOK_FOR_ERROR,
2034                    0,0,0,-1,-1,q->int_parm);
2035 omit_handler_call:
2036         qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);
2037
2038 }
2039
2040 static void
2041 qdio_call_shutdown(struct work_struct *work)
2042 {
2043         struct ccw_device_private *priv;
2044         struct ccw_device *cdev;
2045
2046         priv = container_of(work, struct ccw_device_private, kick_work);
2047         cdev = priv->cdev;
2048         qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
2049         put_device(&cdev->dev);
2050 }
2051
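/*
 * called when an I/O request on the subchannel timed out; the reaction
 * depends on the current state: a timed-out establish or cleanup puts the
 * IRQ into the error state, while termination of active queues by the
 * common I/O layer triggers a deferred shutdown via the kick_work item
 */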
2052 static void
2053 qdio_timeout_handler(struct ccw_device *cdev)
2054 {
2055         struct qdio_irq *irq_ptr;
2056         char dbf_text[15];
2057
2058         QDIO_DBF_TEXT2(0, trace, "qtoh");
2059         sprintf(dbf_text, "%s", cdev->dev.bus_id);
2060         QDIO_DBF_TEXT2(0, trace, dbf_text);
2061
2062         irq_ptr = cdev->private->qdio_data;
2063         sprintf(dbf_text, "state:%d", irq_ptr->state);
2064         QDIO_DBF_TEXT2(0, trace, dbf_text);
2065
2066         switch (irq_ptr->state) {
2067         case QDIO_IRQ_STATE_INACTIVE:
2068                 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
2069                                irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2070                 QDIO_DBF_TEXT2(1,setup,"eq:timeo");
2071                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2072                 break;
2073         case QDIO_IRQ_STATE_CLEANUP:
2074                 QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
2075                                 "irq=0.%x.%x.\n",
2076                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2077                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2078                 break;
2079         case QDIO_IRQ_STATE_ESTABLISHED:
2080         case QDIO_IRQ_STATE_ACTIVE:
2081                 /* I/O has been terminated by common I/O layer. */
2082                 QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
2083                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2084                 QDIO_DBF_TEXT2(1, trace, "cio:term");
2085                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
2086                 if (get_device(&cdev->dev)) {
2087                         /* Can't call shutdown from interrupt context. */
2088                         PREPARE_WORK(&cdev->private->kick_work,
2089                                      qdio_call_shutdown);
2090                         queue_work(ccw_device_work, &cdev->private->kick_work);
2091                 }
2092                 break;
2093         default:
2094                 BUG();
2095         }
2096         ccw_device_set_timeout(cdev, 0);
2097         wake_up(&cdev->private->wait_q);
2098 }
2099
2100 static void
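/*
 * interrupt handler for qdio subchannels, installed in place of the
 * device driver's handler; checks for unsolicited interrupts and I/O
 * errors and then dispatches on the current IRQ state
 */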
2101 qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2102 {
2103         struct qdio_irq *irq_ptr;
2104         int cstat,dstat;
2105         char dbf_text[15];
2106
2107 #ifdef CONFIG_QDIO_DEBUG
2108         QDIO_DBF_TEXT4(0, trace, "qint");
2109         sprintf(dbf_text, "%s", cdev->dev.bus_id);
2110         QDIO_DBF_TEXT4(0, trace, dbf_text);
2111 #endif /* CONFIG_QDIO_DEBUG */
2112         
2113         if (!intparm) {
2114                 QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
2115                                   "handler, device %s\n", cdev->dev.bus_id);
2116                 return;
2117         }
2118
2119         irq_ptr = cdev->private->qdio_data;
2120         if (!irq_ptr) {
2121                 QDIO_DBF_TEXT2(1, trace, "uint");
2122                 sprintf(dbf_text,"%s", cdev->dev.bus_id);
2123                 QDIO_DBF_TEXT2(1,trace,dbf_text);
2124                 QDIO_PRINT_ERR("received interrupt on unused device %s!\n",
2125                                cdev->dev.bus_id);
2126                 return;
2127         }
2128
2129         if (IS_ERR(irb)) {
2130                 /* Currently running i/o is in error. */
2131                 switch (PTR_ERR(irb)) {
2132                 case -EIO:
2133                         QDIO_PRINT_ERR("i/o error on device %s\n",
2134                                        cdev->dev.bus_id);
2135                         return;
2136                 case -ETIMEDOUT:
2137                         qdio_timeout_handler(cdev);
2138                         return;
2139                 default:
2140                         QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
2141                                        PTR_ERR(irb), cdev->dev.bus_id);
2142                         return;
2143                 }
2144         }
2145
2146         qdio_irq_check_sense(irq_ptr->schid, irb);
2147
2148 #ifdef CONFIG_QDIO_DEBUG
2149         sprintf(dbf_text, "state:%d", irq_ptr->state);
2150         QDIO_DBF_TEXT4(0, trace, dbf_text);
2151 #endif /* CONFIG_QDIO_DEBUG */
2152
2153         cstat = irb->scsw.cstat;
2154         dstat = irb->scsw.dstat;
2155
2156         switch (irq_ptr->state) {
2157         case QDIO_IRQ_STATE_INACTIVE:
2158                 qdio_establish_handle_irq(cdev, cstat, dstat);
2159                 break;
2160
2161         case QDIO_IRQ_STATE_CLEANUP:
2162                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2163                 break;
2164
2165         case QDIO_IRQ_STATE_ESTABLISHED:
2166         case QDIO_IRQ_STATE_ACTIVE:
2167                 if (cstat & SCHN_STAT_PCI) {
2168                         qdio_handle_pci(irq_ptr);
2169                         break;
2170                 }
2171
2172                 if ((cstat&~SCHN_STAT_PCI)||dstat) {
2173                         qdio_handle_activate_check(cdev, intparm, cstat, dstat);
2174                         break;
2175                 }
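                /* fall through: no PCI and no error means an unexpected
                 * interrupt for an established/active queue set */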
2176         default:
2177                 QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
2178                                "device %s?!\n",
2179                                irq_ptr->state, cdev->dev.bus_id);
2180         }
2181         wake_up(&cdev->private->wait_q);
2182
2183 }
2184
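/*
 * issue a SIGA-sync for the given input or output queue to synchronize
 * the SLSB states with the adapter; skipped when the subchannel operates
 * in QEBSM mode
 */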
2185 int
2186 qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
2187                  unsigned int queue_number)
2188 {
2189         int cc = 0;
2190         struct qdio_q *q;
2191         struct qdio_irq *irq_ptr;
2192         void *ptr;
2193 #ifdef CONFIG_QDIO_DEBUG
2194         char dbf_text[15]="SyncXXXX";
2195 #endif
2196
2197         irq_ptr = cdev->private->qdio_data;
2198         if (!irq_ptr)
2199                 return -ENODEV;
2200
2201 #ifdef CONFIG_QDIO_DEBUG
2202         *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
2203         QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
2204         *((int*)(&dbf_text[0]))=flags;
2205         *((int*)(&dbf_text[4]))=queue_number;
2206         QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
2207 #endif /* CONFIG_QDIO_DEBUG */
2208
2209         if (flags&QDIO_FLAG_SYNC_INPUT) {
2210                 q=irq_ptr->input_qs[queue_number];
2211                 if (!q)
2212                         return -EINVAL;
2213                 if (!(irq_ptr->is_qebsm))
2214                         cc = do_siga_sync(q->schid, 0, q->mask);
2215         } else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
2216                 q=irq_ptr->output_qs[queue_number];
2217                 if (!q)
2218                         return -EINVAL;
2219                 if (!(irq_ptr->is_qebsm))
2220                         cc = do_siga_sync(q->schid, q->mask, 0);
2221         } else 
2222                 return -EINVAL;
2223
2224         ptr=&cc;
2225         if (cc)
2226                 QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int));
2227
2228         return cc;
2229 }
2230
2231 static void
2232 qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
2233                             unsigned long token)
2234 {
2235         struct qdio_q *q;
2236         int i;
2237         unsigned int count, start_buf;
2238         char dbf_text[15];
2239
2240         /* check if QEBSM is disabled */
2241         if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) {
2242                 irq_ptr->is_qebsm  = 0;
2243                 irq_ptr->sch_token = 0;
2244                 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2245                 QDIO_DBF_TEXT0(0,setup,"noV=V");
2246                 return;
2247         }
2248         irq_ptr->sch_token = token;
2249         /* input queue */
2250         for (i = 0; i < irq_ptr->no_input_qs;i++) {
2251                 q = irq_ptr->input_qs[i];
2252                 count = QDIO_MAX_BUFFERS_PER_Q;
2253                 start_buf = 0;
2254                 set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
2255         }
2256         sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
2257         QDIO_DBF_TEXT0(0,setup,dbf_text);
2258         sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
2259         QDIO_DBF_TEXT0(0,setup,dbf_text);
2260         /* output queue */
2261         for (i = 0; i < irq_ptr->no_output_qs; i++) {
2262                 q = irq_ptr->output_qs[i];
2263                 count = QDIO_MAX_BUFFERS_PER_Q;
2264                 start_buf = 0;
2265                 set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
2266         }
2267 }
2268
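/*
 * issue a CHSC store-subchannel-QDIO-data request to find out which SIGA
 * instructions the subchannel actually requires; on any failure fall back
 * to assuming that all SIGAs are necessary and QEBSM is unavailable
 */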
2269 static void
2270 qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
2271 {
2272         int result;
2273         unsigned char qdioac;
2274         struct {
2275                 struct chsc_header request;
2276                 u16 reserved1:10;
2277                 u16 ssid:2;
2278                 u16 fmt:4;
2279                 u16 first_sch;
2280                 u16 reserved2;
2281                 u16 last_sch;
2282                 u32 reserved3;
2283                 struct chsc_header response;
2284                 u32 reserved4;
2285                 u8  flags;
2286                 u8  reserved5;
2287                 u16 sch;
2288                 u8  qfmt;
2289                 u8  parm;
2290                 u8  qdioac1;
2291                 u8  sch_class;
2292                 u8  reserved7;
2293                 u8  icnt;
2294                 u8  reserved8;
2295                 u8  ocnt;
2296                 u8 reserved9;
2297                 u8 mbccnt;
2298                 u16 qdioac2;
2299                 u64 sch_token;
2300         } *ssqd_area;
2301
2302         QDIO_DBF_TEXT0(0,setup,"getssqd");
2303         qdioac = 0;
2304         ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2305         if (!ssqd_area) {
2306                 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
2307                                 "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
2308                 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2309                                   CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2310                                   CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2311                 irq_ptr->is_qebsm = 0;
2312                 irq_ptr->sch_token = 0;
2313                 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2314                 return;
2315         }
2316
2317         ssqd_area->request = (struct chsc_header) {
2318                 .length = 0x0010,
2319                 .code   = 0x0024,
2320         };
2321         ssqd_area->first_sch = irq_ptr->schid.sch_no;
2322         ssqd_area->last_sch = irq_ptr->schid.sch_no;
2323         ssqd_area->ssid = irq_ptr->schid.ssid;
2324         result = chsc(ssqd_area);
2325
2326         if (result) {
2327                 QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
2328                                 "SIGAs for sch 0.%x.%x.\n", result,
2329                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2330                 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2331                         CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2332                         CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2333                 irq_ptr->is_qebsm  = 0;
2334                 goto out;
2335         }
2336
2337         if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
2338                 QDIO_PRINT_WARN("response upon checking SIGA needs " \
2339                                 "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
2340                                 ssqd_area->response.code,
2341                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2342                 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2343                         CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2344                         CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2345                 irq_ptr->is_qebsm  = 0;
2346                 goto out;
2347         }
2348         if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2349             !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
2350             (ssqd_area->sch != irq_ptr->schid.sch_no)) {
2351                 QDIO_PRINT_WARN("inconsistent SSQD data for sch 0.%x.%x, " \
2352                                 "using all SIGAs.\n",
2353                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2354                 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2355                         CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2356                         CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
2357                 irq_ptr->is_qebsm  = 0;
2358                 goto out;
2359         }
2360         qdioac = ssqd_area->qdioac1;
2361 out:
2362         qdio_check_subchannel_qebsm(irq_ptr, qdioac,
2363                                     ssqd_area->sch_token);
2364         mempool_free(ssqd_area, qdio_mempool_scssc);
2365         irq_ptr->qdioac = qdioac;
2366 }
2367
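/*
 * check the css characteristics needed for thin interrupts: the adapter
 * interruption facility and the set-channel-subsystem-characteristics
 * CHSC commands; also note whether OSA/FCP thin interrupts, QEBSM and
 * aif time delay disablement are available
 */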
2368 static unsigned int
2369 tiqdio_check_chsc_availability(void)
2370 {
2371         char dbf_text[15];
2372
2373         if (!css_characteristics_avail)
2374                 return -EIO;
2375
2376         /* Check for bit 41. */
2377         if (!css_general_characteristics.aif) {
2378                 QDIO_PRINT_WARN("Adapter interruption facility not " \
2379                                 "installed.\n");
2380                 return -ENOENT;
2381         }
2382
2383         /* Check for bits 107 and 108. */
2384         if (!css_chsc_characteristics.scssc ||
2385             !css_chsc_characteristics.scsscf) {
2386                 QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
2387                                 "not available.\n");
2388                 return -ENOENT;
2389         }
2390
2391         /* Check for OSA/FCP thin interrupts (bit 67). */
2392         hydra_thinints = css_general_characteristics.aif_osa;
2393         sprintf(dbf_text,"hydrati%1x", hydra_thinints);
2394         QDIO_DBF_TEXT0(0,setup,dbf_text);
2395
2396 #ifdef CONFIG_64BIT
2397         /* Check for QEBSM support in general (bit 58). */
2398         is_passthrough = css_general_characteristics.qebsm;
2399 #endif
2400         sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
2401         QDIO_DBF_TEXT0(0,setup,dbf_text);
2402
2403         /* Check for the aif time delay disablement facility (bit 56).
2404          * If installed, omit SVS even under LPAR. */
2405         omit_svs = css_general_characteristics.aif_tdd;
2406         sprintf(dbf_text,"omitsvs%1x", omit_svs);
2407         QDIO_DBF_TEXT0(0,setup,dbf_text);
2408         return 0;
2409 }
2410
2411
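/*
 * use the set-channel-subsystem-characteristics CHSC to register (or, with
 * reset_to_zero, to clear) the summary indicator and the per-device state
 * change indicator for this thin-interrupt subchannel
 */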
2412 static unsigned int
2413 tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2414 {
2415         unsigned long real_addr_local_summary_bit;
2416         unsigned long real_addr_dev_st_chg_ind;
2417         void *ptr;
2418         char dbf_text[15];
2419
2420         unsigned int resp_code;
2421         int result;
2422
2423         struct {
2424                 struct chsc_header request;
2425                 u16 operation_code;
2426                 u16 reserved1;
2427                 u32 reserved2;
2428                 u32 reserved3;
2429                 u64 summary_indicator_addr;
2430                 u64 subchannel_indicator_addr;
2431                 u32 ks:4;
2432                 u32 kc:4;
2433                 u32 reserved4:21;
2434                 u32 isc:3;
2435                 u32 word_with_d_bit;
2436                 /* set to 0x10000000 to enable
2437                  * time delay disablement facility */
2438                 u32 reserved5;
2439                 struct subchannel_id schid;
2440                 u32 reserved6[1004];
2441                 struct chsc_header response;
2442                 u32 reserved7;
2443         } *scssc_area;
2444
2445         if (!irq_ptr->is_thinint_irq)
2446                 return -ENODEV;
2447
2448         if (reset_to_zero) {
2449                 real_addr_local_summary_bit=0;
2450                 real_addr_dev_st_chg_ind=0;
2451         } else {
2452                 real_addr_local_summary_bit=
2453                         virt_to_phys((volatile void *)indicators);
2454                 real_addr_dev_st_chg_ind=
2455                         virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
2456         }
2457
2458         scssc_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2459         if (!scssc_area) {
2460                 QDIO_PRINT_WARN("No memory for setting indicators on " \
2461                                 "subchannel 0.%x.%x.\n",
2462                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2463                 return -ENOMEM;
2464         }
2465         scssc_area->request = (struct chsc_header) {
2466                 .length = 0x0fe0,
2467                 .code   = 0x0021,
2468         };
2469         scssc_area->operation_code = 0;
2470
2471         scssc_area->summary_indicator_addr = real_addr_local_summary_bit;
2472         scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind;
2473         scssc_area->ks = QDIO_STORAGE_KEY;
2474         scssc_area->kc = QDIO_STORAGE_KEY;
2475         scssc_area->isc = TIQDIO_THININT_ISC;
2476         scssc_area->schid = irq_ptr->schid;
2477         /* enable the time delay disablement facility if the general
2478          * characteristics indicate that it is installed */
2480         if (css_general_characteristics.aif_tdd)
2481                 scssc_area->word_with_d_bit = 0x10000000;
2482         else
2483                 QDIO_PRINT_WARN("Time delay disablement facility " \
2484                                 "not available\n");
2485
2486         result = chsc(scssc_area);
2487         if (result) {
2488                 QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
2489                                 "cc=%i.\n",
2490                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
2491                 result = -EIO;
2492                 goto out;
2493         }
2494
2495         resp_code = scssc_area->response.code;
2496         if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
2497                 QDIO_PRINT_WARN("response upon setting indicators " \
2498                                 "is 0x%x.\n",resp_code);
2499                 sprintf(dbf_text,"sidR%4x",resp_code);
2500                 QDIO_DBF_TEXT1(0,trace,dbf_text);
2501                 QDIO_DBF_TEXT1(0,setup,dbf_text);
2502                 ptr=&scssc_area->response;
2503                 QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
2504                 result = -EIO;
2505                 goto out;
2506         }
2507
2508         QDIO_DBF_TEXT2(0,setup,"setscind");
2509         QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
2510                       sizeof(unsigned long));
2511         QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
2512         result = 0;
2513 out:
2514         mempool_free(scssc_area, qdio_mempool_scssc);
2515         return result;
2516
2517 }
2518
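/*
 * set the adapter interruption delay target for this thin-interrupt
 * subchannel via CHSC; failures are logged but treated as non-critical
 */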
2519 static unsigned int
2520 tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2521 {
2522         unsigned int resp_code;
2523         int result;
2524         void *ptr;
2525         char dbf_text[15];
2526
2527         struct {
2528                 struct chsc_header request;
2529                 u16 operation_code;
2530                 u16 reserved1;
2531                 u32 reserved2;
2532                 u32 reserved3;
2533                 u32 reserved4[2];
2534                 u32 delay_target;
2535                 u32 reserved5[1009];
2536                 struct chsc_header response;
2537                 u32 reserved6;
2538         } *scsscf_area;
2539
2540         if (!irq_ptr->is_thinint_irq)
2541                 return -ENODEV;
2542
2543         scsscf_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2544         if (!scsscf_area) {
2545                 QDIO_PRINT_WARN("No memory for setting delay target on " \
2546                                 "subchannel 0.%x.%x.\n",
2547                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2548                 return -ENOMEM;
2549         }
2550         scsscf_area->request = (struct chsc_header) {
2551                 .length = 0x0fe0,
2552                 .code   = 0x1027,
2553         };
2554
2555         scsscf_area->delay_target = delay_target<<16;
2556
2557         result=chsc(scsscf_area);
2558         if (result) {
2559                 QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
2560                                 "cc=%i. Continuing.\n",
2561                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2562                                 result);
2563                 result = -EIO;
2564                 goto out;
2565         }
2566
2567         resp_code = scsscf_area->response.code;
2568         if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
2569                 QDIO_PRINT_WARN("response upon setting delay target " \
2570                                 "is 0x%x. Continuing.\n",resp_code);
2571                 sprintf(dbf_text,"sdtR%4x",resp_code);
2572                 QDIO_DBF_TEXT1(0,trace,dbf_text);
2573                 QDIO_DBF_TEXT1(0,setup,dbf_text);
2574                 ptr=&scsscf_area->response;
2575                 QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
2576         }
2577         QDIO_DBF_TEXT2(0,trace,"delytrgt");
2578         QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
2579         result = 0; /* not critical */
2580 out:
2581         mempool_free(scsscf_area, qdio_mempool_scssc);
2582         return result;
2583 }
2584
2585 int
2586 qdio_cleanup(struct ccw_device *cdev, int how)
2587 {
2588         struct qdio_irq *irq_ptr;
2589         char dbf_text[15];
2590         int rc;
2591
2592         irq_ptr = cdev->private->qdio_data;
2593         if (!irq_ptr)
2594                 return -ENODEV;
2595
2596         sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
2597         QDIO_DBF_TEXT1(0,trace,dbf_text);
2598         QDIO_DBF_TEXT0(0,setup,dbf_text);
2599
2600         rc = qdio_shutdown(cdev, how);
2601         if ((rc == 0) || (rc == -EINPROGRESS))
2602                 rc = qdio_free(cdev);
2603         return rc;
2604 }
2605
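/*
 * shut the queues down: mark them as being in shutdown, kill the tasklets
 * and wait for pending processing to drain, then halt or clear the
 * subchannel, reset the thin-interrupt indicators and restore the original
 * interrupt handler
 */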
2606 int
2607 qdio_shutdown(struct ccw_device *cdev, int how)
2608 {
2609         struct qdio_irq *irq_ptr;
2610         int i;
2611         int result = 0;
2612         int rc;
2613         unsigned long flags;
2614         int timeout;
2615         char dbf_text[15];
2616
2617         irq_ptr = cdev->private->qdio_data;
2618         if (!irq_ptr)
2619                 return -ENODEV;
2620
2621         down(&irq_ptr->setting_up_sema);
2622
2623         sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
2624         QDIO_DBF_TEXT1(0,trace,dbf_text);
2625         QDIO_DBF_TEXT0(0,setup,dbf_text);
2626
2627         /* mark all qs as uninteresting */
2628         for (i=0;i<irq_ptr->no_input_qs;i++)
2629                 atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1);
2630
2631         for (i=0;i<irq_ptr->no_output_qs;i++)
2632                 atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1);
2633
2634         tasklet_kill(&tiqdio_tasklet);
2635
2636         for (i=0;i<irq_ptr->no_input_qs;i++) {
2637                 qdio_unmark_q(irq_ptr->input_qs[i]);
2638                 tasklet_kill(&irq_ptr->input_qs[i]->tasklet);
2639                 wait_event_interruptible_timeout(cdev->private->wait_q,
2640                                                  !atomic_read(&irq_ptr->
2641                                                               input_qs[i]->
2642                                                               use_count),
2643                                                  QDIO_NO_USE_COUNT_TIMEOUT);
2644                 if (atomic_read(&irq_ptr->input_qs[i]->use_count))
2645                         result=-EINPROGRESS;
2646         }
2647
2648         for (i=0;i<irq_ptr->no_output_qs;i++) {
2649                 tasklet_kill(&irq_ptr->output_qs[i]->tasklet);
2650                 wait_event_interruptible_timeout(cdev->private->wait_q,
2651                                                  !atomic_read(&irq_ptr->
2652                                                               output_qs[i]->
2653                                                               use_count),
2654                                                  QDIO_NO_USE_COUNT_TIMEOUT);
2655                 if (atomic_read(&irq_ptr->output_qs[i]->use_count))
2656                         result=-EINPROGRESS;
2657         }
2658
2659         /* cleanup subchannel */
2660         spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
2661         if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) {
2662                 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
2663                 timeout=QDIO_CLEANUP_CLEAR_TIMEOUT;
2664         } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) {
2665                 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2666                 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2667         } else { /* default behaviour */
2668                 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2669                 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2670         }
2671         if (rc == -ENODEV) {
2672                 /* No need to wait; the device is no longer present. */
2673                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2674                 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2675         } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) {
2676                 /*
2677                  * Whoever put another handler there has to cope with the
2678                  * interrupt themselves. This might happen if qdio_shutdown
2679                  * was called on already shut-down queues, but it shouldn't have
2680                  * bad side effects.
2681                  */
2682                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2683                 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2684         } else if (rc == 0) {
2685                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
2686                 ccw_device_set_timeout(cdev, timeout);
2687                 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
2688
2689                 wait_event(cdev->private->wait_q,
2690                            irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
2691                            irq_ptr->state == QDIO_IRQ_STATE_ERR);
2692         } else {
2693                 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
2694                                 "device %s\n", rc, cdev->dev.bus_id);
2695                 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2696                 result = rc;
2697                 goto out;
2698         }
2699         if (irq_ptr->is_thinint_irq) {
2700                 qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
2701                 tiqdio_set_subchannel_ind(irq_ptr,1); 
2702                 /* reset adapter interrupt indicators */
2703         }
2704
2705         /* exchange int handlers, if necessary */
2706         if ((void*)cdev->handler == (void*)qdio_handler)
2707                 cdev->handler=irq_ptr->original_int_handler;
2708
2709         /* Ignore errors. */
2710         qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2711         ccw_device_set_timeout(cdev, 0);
2712 out:
2713         up(&irq_ptr->setting_up_sema);
2714         return result;
2715 }
2716
2717 int
2718 qdio_free(struct ccw_device *cdev)
2719 {
2720         struct qdio_irq *irq_ptr;
2721         char dbf_text[15];
2722
2723         irq_ptr = cdev->private->qdio_data;
2724         if (!irq_ptr)
2725                 return -ENODEV;
2726
2727         down(&irq_ptr->setting_up_sema);
2728
2729         sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
2730         QDIO_DBF_TEXT1(0,trace,dbf_text);
2731         QDIO_DBF_TEXT0(0,setup,dbf_text);
2732
2733         cdev->private->qdio_data = NULL;
2734
2735         up(&irq_ptr->setting_up_sema);
2736
2737         qdio_release_irq_memory(irq_ptr);
2738         module_put(THIS_MODULE);
2739         return 0;
2740 }
2741
2742 static void
2743 qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2744 {
2745         char dbf_text[20]; /* large enough in case sprintf emits more than 8 chars */
2746
2747         sprintf(dbf_text,"qfmt:%x",init_data->q_format);
2748         QDIO_DBF_TEXT0(0,setup,dbf_text);
2749         QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
2750         sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
2751         QDIO_DBF_TEXT0(0,setup,dbf_text);
2752         QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*));
2753         QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*));
2754         QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*));
2755         sprintf(dbf_text,"miit%4x",init_data->min_input_threshold);
2756         QDIO_DBF_TEXT0(0,setup,dbf_text);
2757         sprintf(dbf_text,"mait%4x",init_data->max_input_threshold);
2758         QDIO_DBF_TEXT0(0,setup,dbf_text);
2759         sprintf(dbf_text,"miot%4x",init_data->min_output_threshold);
2760         QDIO_DBF_TEXT0(0,setup,dbf_text);
2761         sprintf(dbf_text,"maot%4x",init_data->max_output_threshold);
2762         QDIO_DBF_TEXT0(0,setup,dbf_text);
2763         sprintf(dbf_text,"niq:%4x",init_data->no_input_qs);
2764         QDIO_DBF_TEXT0(0,setup,dbf_text);
2765         sprintf(dbf_text,"noq:%4x",init_data->no_output_qs);
2766         QDIO_DBF_TEXT0(0,setup,dbf_text);
2767         QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*));
2768         QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*));
2769         QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long));
2770         QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long));
2771         QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*));
2772         QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
2773 }
2774
2775 static void
2776 qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2777 {
2778         irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
2779         irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2780
2781         irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib);
2782
2783         irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl);
2784
2785         irq_ptr->qdr->qdf0[i].slsba=
2786                 (unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]);
2787
2788         irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY;
2789         irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY;
2790         irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY;
2791         irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
2792 }
2793
2794 static void
2795 qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2796                                int j, int iqfmt)
2797 {
2798         irq_ptr->output_qs[i]->is_iqdio_q = iqfmt;
2799         irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2800
2801         irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib);
2802
2803         irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl);
2804
2805         irq_ptr->qdr->qdf0[i+j].slsba=
2806                 (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]);
2807
2808         irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY;
2809         irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY;
2810         irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY;
2811         irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY;
2812 }
2813
2814
2815 static void
2816 qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2817 {
2818         int i;
2819
2820         for (i=0;i<irq_ptr->no_input_qs;i++) {
2821                 irq_ptr->input_qs[i]->siga_sync=
2822                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2823                 irq_ptr->input_qs[i]->siga_in=
2824                         irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2825                 irq_ptr->input_qs[i]->siga_out=
2826                         irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2827                 irq_ptr->input_qs[i]->siga_sync_done_on_thinints=
2828                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2829                 irq_ptr->input_qs[i]->hydra_gives_outbound_pcis=
2830                         irq_ptr->hydra_gives_outbound_pcis;
2831                 irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis=
2832                         ((irq_ptr->qdioac&
2833                           (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2834                            CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2835                          (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2836                           CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2837
2838         }
2839 }
2840
2841 static void
2842 qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2843 {
2844         int i;
2845
2846         for (i=0;i<irq_ptr->no_output_qs;i++) {
2847                 irq_ptr->output_qs[i]->siga_sync=
2848                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2849                 irq_ptr->output_qs[i]->siga_in=
2850                         irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2851                 irq_ptr->output_qs[i]->siga_out=
2852                         irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2853                 irq_ptr->output_qs[i]->siga_sync_done_on_thinints=
2854                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2855                 irq_ptr->output_qs[i]->hydra_gives_outbound_pcis=
2856                         irq_ptr->hydra_gives_outbound_pcis;
2857                 irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis=
2858                         ((irq_ptr->qdioac&
2859                           (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2860                            CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2861                          (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2862                           CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2863
2864         }
2865 }
2866
2867 static int
2868 qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2869                                     int dstat)
2870 {
2871         char dbf_text[15];
2872         struct qdio_irq *irq_ptr;
2873
2874         irq_ptr = cdev->private->qdio_data;
2875
2876         if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
2877                 sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
2878                 QDIO_DBF_TEXT2(1,trace,dbf_text);
2879                 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2880                 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2881                 QDIO_PRINT_ERR("received check condition on establish " \
2882                                "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
2883                                irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2884                                cstat,dstat);
2885                 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
2886         }
2887         
2888         if (!(dstat & DEV_STAT_DEV_END)) {
2889                 QDIO_DBF_TEXT2(1,setup,"eq:no de");
2890                 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2891                 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2892                 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
2893                                "device end: dstat=%02x, cstat=%02x\n",
2894                                irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2895                                dstat, cstat);
2896                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2897                 return 1;
2898         }
2899
2900         if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
2901                 QDIO_DBF_TEXT2(1,setup,"eq:badio");
2902                 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2903                 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2904                 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
2905                                "the following devstat: dstat=%02x, "
2906                                "cstat=%02x\n", irq_ptr->schid.ssid,
2907                                irq_ptr->schid.sch_no, dstat, cstat);
2908                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2909                 return 1;
2910         }
2911         return 0;
2912 }
2913
2914 static void
2915 qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2916 {
2917         struct qdio_irq *irq_ptr;
2918         char dbf_text[15];
2919
2920         irq_ptr = cdev->private->qdio_data;
2921
2922         sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no);
2923         QDIO_DBF_TEXT0(0,setup,dbf_text);
2924         QDIO_DBF_TEXT0(0,trace,dbf_text);
2925
2926         if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) {
2927                 ccw_device_set_timeout(cdev, 0);
2928                 return;
2929         }
2930
2931         qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
2932         ccw_device_set_timeout(cdev, 0);
2933 }
2934
2935 int
2936 qdio_initialize(struct qdio_initialize *init_data)
2937 {
2938         int rc;
2939         char dbf_text[15];
2940
2941         sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no);
2942         QDIO_DBF_TEXT0(0,setup,dbf_text);
2943         QDIO_DBF_TEXT0(0,trace,dbf_text);
2944
2945         rc = qdio_allocate(init_data);
2946         if (rc == 0) {
2947                 rc = qdio_establish(init_data);
2948                 if (rc != 0)
2949                         qdio_free(init_data->cdev);
2950         }
2951
2952         return rc;
2953 }
2954
2955
2956 int
2957 qdio_allocate(struct qdio_initialize *init_data)
2958 {
2959         struct qdio_irq *irq_ptr;
2960         char dbf_text[15];
2961
2962         sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no);
2963         QDIO_DBF_TEXT0(0,setup,dbf_text);
2964         QDIO_DBF_TEXT0(0,trace,dbf_text);
2965         if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2966              (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2967              ((init_data->no_input_qs) && (!init_data->input_handler)) ||
2968              ((init_data->no_output_qs) && (!init_data->output_handler)) )
2969                 return -EINVAL;
2970
2971         if (!init_data->input_sbal_addr_array)
2972                 return -EINVAL;
2973
2974         if (!init_data->output_sbal_addr_array)
2975                 return -EINVAL;
2976
2977         qdio_allocate_do_dbf(init_data);
2978
2979         /* create irq */
2980         irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2981
2982         QDIO_DBF_TEXT0(0,setup,"irq_ptr:");
2983         QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
2984
2985         if (!irq_ptr) {
2986                 QDIO_PRINT_ERR("allocation of irq_ptr failed!\n");
2987                 return -ENOMEM;
2988         }
2989
2990         init_MUTEX(&irq_ptr->setting_up_sema);
2991
2992         /* QDR must be in DMA area since CCW data address is only 32 bit */
2993         irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
2994         if (!(irq_ptr->qdr)) {
2995                 free_page((unsigned long) irq_ptr);
2996                 QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n");
2997                 return -ENOMEM;
2998         }
2999         QDIO_DBF_TEXT0(0,setup,"qdr:");
3000         QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*));
3001
3002         if (qdio_alloc_qs(irq_ptr,
3003                           init_data->no_input_qs,
3004                           init_data->no_output_qs)) {
3005                 qdio_release_irq_memory(irq_ptr);
3006                 return -ENOMEM;
3007         }
3008
3009         init_data->cdev->private->qdio_data = irq_ptr;
3010
3011         qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);
3012
3013         return 0;
3014 }
3015
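     /*
      * Fill the previously allocated qdio_irq structure from init_data:
      * queue counts, thinint/QEBSM properties, thresholds, QDR and QIB
      * contents and the queue descriptors.  Also fetch the EQUEUE/AQUEUE
      * CIWs from the device and install qdio_handler as interrupt handler.
      */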
3016 static int qdio_fill_irq(struct qdio_initialize *init_data)
3017 {
3018         int i;
3019         char dbf_text[15];
3020         struct ciw *ciw;
3021         int is_iqdio;
3022         struct qdio_irq *irq_ptr;
3023
3024         irq_ptr = init_data->cdev->private->qdio_data;
3025
3026         memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr));
3027
3028         /* wipes qib.ac, required by ar7063 */
3029         memset(irq_ptr->qdr,0,sizeof(struct qdr));
3030
3031         irq_ptr->int_parm=init_data->int_parm;
3032
3033         irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
3034         irq_ptr->no_input_qs=init_data->no_input_qs;
3035         irq_ptr->no_output_qs=init_data->no_output_qs;
3036
3037         if (init_data->q_format==QDIO_IQDIO_QFMT) {
3038                 irq_ptr->is_iqdio_irq=1;
3039                 irq_ptr->is_thinint_irq=1;
3040         } else {
3041                 irq_ptr->is_iqdio_irq=0;
3042                 irq_ptr->is_thinint_irq=hydra_thinints;
3043         }
3044         sprintf(dbf_text,"is_i_t%1x%1x",
3045                 irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq);
3046         QDIO_DBF_TEXT2(0,setup,dbf_text);
3047
3048         if (irq_ptr->is_thinint_irq) {
3049                 irq_ptr->dev_st_chg_ind = qdio_get_indicator();
3050                 QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
3051                 if (!irq_ptr->dev_st_chg_ind) {
3052                         QDIO_PRINT_WARN("no indicator location available " \
3053                                         "for irq 0.%x.%04x\n",
3054                                         irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
3055                         qdio_release_irq_memory(irq_ptr);
3056                         return -ENOBUFS;
3057                 }
3058         }
3059
3060         /* defaults */
3061         irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD;
3062         irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT;
3063         irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD;
3064         irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT;
3065
3066         qdio_fill_qs(irq_ptr, init_data->cdev,
3067                      init_data->no_input_qs,
3068                      init_data->no_output_qs,
3069                      init_data->input_handler,
3070                      init_data->output_handler,init_data->int_parm,
3071                      init_data->q_format,init_data->flags,
3072                      init_data->input_sbal_addr_array,
3073                      init_data->output_sbal_addr_array);
3074
3075         if (!try_module_get(THIS_MODULE)) {
3076                 QDIO_PRINT_CRIT("try_module_get() failed!\n");
3077                 qdio_release_irq_memory(irq_ptr);
3078                 return -EINVAL;
3079         }
3080
3081         qdio_fill_thresholds(irq_ptr,init_data->no_input_qs,
3082                              init_data->no_output_qs,
3083                              init_data->min_input_threshold,
3084                              init_data->max_input_threshold,
3085                              init_data->min_output_threshold,
3086                              init_data->max_output_threshold);
3087
3088         /* fill in qdr */
3089         irq_ptr->qdr->qfmt=init_data->q_format;
3090         irq_ptr->qdr->iqdcnt=init_data->no_input_qs;
3091         irq_ptr->qdr->oqdcnt=init_data->no_output_qs;
3092         irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */
3093         irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4;
3094
3095         irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib;
3096         irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
3097
3098         /* fill in qib */
3099         irq_ptr->is_qebsm = is_passthrough;
3100         if (irq_ptr->is_qebsm)
3101                 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
3102
3103         irq_ptr->qib.qfmt=init_data->q_format;
3104         if (init_data->no_input_qs)
3105                 irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
3106         if (init_data->no_output_qs)
3107                 irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib);
3108         memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8);
3109
3110         qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format,
3111                              init_data->qib_param_field,
3112                              init_data->no_input_qs,
3113                              init_data->no_output_qs,
3114                              init_data->input_slib_elements,
3115                              init_data->output_slib_elements);
3116
3117         /* first input descriptors, then output descriptors */
3118         is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0;
3119         for (i=0;i<init_data->no_input_qs;i++)
3120                 qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio);
3121
3122         for (i=0;i<init_data->no_output_qs;i++)
3123                 qdio_allocate_fill_output_desc(irq_ptr, i,
3124                                                init_data->no_input_qs,
3125                                                is_iqdio);
3126
3127         /* qdr, qib, sls, slsbs, slibs, sbales filled. */
3128
3129         /* get qdio commands */
3130         ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
3131         if (!ciw) {
3132                 QDIO_DBF_TEXT2(1,setup,"no eq");
3133                 QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. "
3134                                 "Trying to use default.\n");
3135         } else
3136                 irq_ptr->equeue = *ciw;
3137         ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
3138         if (!ciw) {
3139                 QDIO_DBF_TEXT2(1,setup,"no aq");
3140                 QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. "
3141                                 "Trying to use default.\n");
3142         } else
3143                 irq_ptr->aqueue = *ciw;
3144
3145         /* Set new interrupt handler. */
3146         irq_ptr->original_int_handler = init_data->cdev->handler;
3147         init_data->cdev->handler = qdio_handler;
3148
3149         return 0;
3150 }
3151
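     /*
      * Set up the thinint indicator (for thinint subchannels), issue the
      * establish-queues CCW and wait for the interrupt handling to report
      * QDIO_IRQ_STATE_ESTABLISHED; then read the subchannel's QDIO
      * characteristics (qdioac) and derive the SIGA flags for the queues.
      */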
3152 int
3153 qdio_establish(struct qdio_initialize *init_data)
3154 {
3155         struct qdio_irq *irq_ptr;
3156         unsigned long saveflags;
3157         int result, result2;
3158         struct ccw_device *cdev;
3159         char dbf_text[20];
3160
3161         cdev=init_data->cdev;
3162         irq_ptr = cdev->private->qdio_data;
3163         if (!irq_ptr)
3164                 return -EINVAL;
3165
3166         if (cdev->private->state != DEV_STATE_ONLINE)
3167                 return -EINVAL;
3168         
3169         down(&irq_ptr->setting_up_sema);
3170
3171         qdio_fill_irq(init_data);
3172
3173         /* the thinint CHSC stuff */
3174         if (irq_ptr->is_thinint_irq) {
3175
3176                 result = tiqdio_set_subchannel_ind(irq_ptr,0);
3177                 if (result) {
3178                         up(&irq_ptr->setting_up_sema);
3179                         qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3180                         return result;
3181                 }
3182                 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
3183         }
3184
3185         sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no);
3186         QDIO_DBF_TEXT0(0,setup,dbf_text);
3187         QDIO_DBF_TEXT0(0,trace,dbf_text);
3188
3189         /* establish q */
3190         irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd;
3191         irq_ptr->ccw.flags=CCW_FLAG_SLI;
3192         irq_ptr->ccw.count=irq_ptr->equeue.count;
3193         irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr);
3194
3195         spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3196
3197         ccw_device_set_options_mask(cdev, 0);
3198         result=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
3199                                         QDIO_DOING_ESTABLISH,0, 0,
3200                                         QDIO_ESTABLISH_TIMEOUT);
3201         if (result) {
3202                 result2=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
3203                                                  QDIO_DOING_ESTABLISH,0,0,
3204                                                  QDIO_ESTABLISH_TIMEOUT);
3205                 sprintf(dbf_text,"eq:io%4x",result);
3206                 QDIO_DBF_TEXT2(1,setup,dbf_text);
3207                 if (result2) {
3208                         sprintf(dbf_text,"eq:io%4x",result2);
3209                         QDIO_DBF_TEXT2(1,setup,dbf_text);
3210                 }
3211                 QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
3212                                 "returned %i, next try returned %i\n",
3213                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3214                                 result, result2);
3215                 result=result2;
3216                 if (result)
3217                         ccw_device_set_timeout(cdev, 0);
3218         }
3219
3220         spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3221
3222         if (result) {
3223                 up(&irq_ptr->setting_up_sema);
3224                 qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
3225                 return result;
3226         }
3227         
3228         /* The timeout is already handled by ccw_device_start_timeout(). */
3229         wait_event_interruptible(cdev->private->wait_q,
3230                  irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
3231                  irq_ptr->state == QDIO_IRQ_STATE_ERR);
3232
3233         if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)
3234                 result = 0;
3235         else {
3236                 up(&irq_ptr->setting_up_sema);
3237                 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3238                 return -EIO;
3239         }
3240
3241         qdio_get_ssqd_information(irq_ptr);
3242         /* if this gets set once, we're running under VM and can omit SVSes */
3243         if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
3244                 omit_svs=1;
3245
3246         sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac);
3247         QDIO_DBF_TEXT2(0,setup,dbf_text);
3248
3249         sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac);
3250         QDIO_DBF_TEXT2(0,setup,dbf_text);
3251
3252         irq_ptr->hydra_gives_outbound_pcis=
3253                 irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED;
3254         irq_ptr->sync_done_on_outb_pcis=
3255                 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS;
3256
3257         qdio_initialize_set_siga_flags_input(irq_ptr);
3258         qdio_initialize_set_siga_flags_output(irq_ptr);
3259
3260         up(&irq_ptr->setting_up_sema);
3261
3262         return result;
3263         
3264 }
3265
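     /*
      * Issue the activate-queues CCW and, once the queues are running, mark
      * the thinint input queues and set the subchannel state to
      * QDIO_IRQ_STATE_ACTIVE.
      */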
3266 int
3267 qdio_activate(struct ccw_device *cdev, int flags)
3268 {
3269         struct qdio_irq *irq_ptr;
3270         int i,result=0,result2;
3271         unsigned long saveflags;
3272         char dbf_text[20]; /* see qdio_initialize */
3273
3274         irq_ptr = cdev->private->qdio_data;
3275         if (!irq_ptr)
3276                 return -ENODEV;
3277
3278         if (cdev->private->state != DEV_STATE_ONLINE)
3279                 return -EINVAL;
3280
3281         down(&irq_ptr->setting_up_sema);
3282         if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) {
3283                 result=-EBUSY;
3284                 goto out;
3285         }
3286
3287         sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
3288         QDIO_DBF_TEXT2(0,setup,dbf_text);
3289         QDIO_DBF_TEXT2(0,trace,dbf_text);
3290
3291         /* activate q */
3292         irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd;
3293         irq_ptr->ccw.flags=CCW_FLAG_SLI;
3294         irq_ptr->ccw.count=irq_ptr->aqueue.count;
3295         irq_ptr->ccw.cda=QDIO_GET_ADDR(0);
3296
3297         spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3298
3299         ccw_device_set_timeout(cdev, 0);
3300         ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
3301         result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
3302                                 0, DOIO_DENY_PREFETCH);
3303         if (result) {
3304                 result2=ccw_device_start(cdev,&irq_ptr->ccw,
3305                                          QDIO_DOING_ACTIVATE,0,0);
3306                 sprintf(dbf_text,"aq:io%4x",result);
3307                 QDIO_DBF_TEXT2(1,setup,dbf_text);
3308                 if (result2) {
3309                         sprintf(dbf_text,"aq:io%4x",result2);
3310                         QDIO_DBF_TEXT2(1,setup,dbf_text);
3311                 }
3312                 QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
3313                                 "returned %i, next try returned %i\n",
3314                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3315                                 result, result2);
3316                 result=result2;
3317         }
3318
3319         spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3320         if (result)
3321                 goto out;
3322
3323         for (i=0;i<irq_ptr->no_input_qs;i++) {
3324                 if (irq_ptr->is_thinint_irq) {
3325                         /*
3326                          * This way we know that, if we get interrupted by
3327                          * tiqdio_inbound_processing, qdio_unmark_q will not
3328                          * be called.
3329                          */
3330                         qdio_reserve_q(irq_ptr->input_qs[i]);
3331                         qdio_mark_tiq(irq_ptr->input_qs[i]);
3332                         qdio_release_q(irq_ptr->input_qs[i]);
3333                 }
3334         }
3335
3336         if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) {
3337                 for (i=0;i<irq_ptr->no_input_qs;i++) {
3338                         irq_ptr->input_qs[i]->is_input_q|=
3339                                 QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT;
3340                 }
3341         }
3342
3343         wait_event_interruptible_timeout(cdev->private->wait_q,
3344                                          ((irq_ptr->state ==
3345                                           QDIO_IRQ_STATE_STOPPED) ||
3346                                           (irq_ptr->state ==
3347                                            QDIO_IRQ_STATE_ERR)),
3348                                          QDIO_ACTIVATE_TIMEOUT);
3349
3350         switch (irq_ptr->state) {
3351         case QDIO_IRQ_STATE_STOPPED:
3352         case QDIO_IRQ_STATE_ERR:
3353                 up(&irq_ptr->setting_up_sema);
3354                 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3355                 down(&irq_ptr->setting_up_sema);
3356                 result = -EIO;
3357                 break;
3358         default:
3359                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
3360                 result = 0;
3361         }
3362  out:
3363         up(&irq_ptr->setting_up_sema);
3364
3365         return result;
3366 }
3367
3368 /* buffers filled forwards again to make Rick happy */
3369 static void
3370 qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3371                         unsigned int count, struct qdio_buffer *buffers)
3372 {
3373         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3374         qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3375         if (irq->is_qebsm) {
3376                 while (count)
3377                         set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3378                 return;
3379         }
3380         for (;;) {
3381                 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3382                 count--;
3383                 if (!count) break;
3384                 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3385         }
3386 }
3387
3388 static void
3389 qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3390                          unsigned int count, struct qdio_buffer *buffers)
3391 {
3392         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3393
3394         qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3395         if (irq->is_qebsm) {
3396                 while (count)
3397                         set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3398                 return;
3399         }
3400
3401         for (;;) {
3402                 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3403                 count--;
3404                 if (!count) break;
3405                 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3406         }
3407 }
3408
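     /*
      * Inbound part of do_QDIO(): hand the emptied buffers back to the
      * adapter by marking their SLSB entries input-empty; if the queue had
      * no used buffers before this call, issue a SIGA-input (unless
      * QDIO_FLAG_DONT_SIGA is set) and mark the queue for processing.
      */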
3409 static void
3410 do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3411                        unsigned int qidx, unsigned int count,
3412                        struct qdio_buffer *buffers)
3413 {
3414         int used_elements;
3415
3416         /* This is the inbound handling of queues */
3417         used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3418         
3419         qdio_do_qdio_fill_input(q,qidx,count,buffers);
3420         
3421         if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
3422             (callflags&QDIO_FLAG_UNDER_INTERRUPT))
3423                 atomic_xchg(&q->polling,0);
3424         
3425         if (used_elements) 
3426                 return;
3427         if (callflags&QDIO_FLAG_DONT_SIGA)
3428                 return;
3429         if (q->siga_in) {
3430                 int result;
3431                 
3432                 result=qdio_siga_input(q);
3433                 if (result) {
3434                         if (q->siga_error)
3435                                 q->error_status_flags|=
3436                                         QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
3437                         q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
3438                         q->siga_error=result;
3439                 }
3440         }
3441                 
3442         qdio_mark_q(q);
3443 }
3444
3445 static void
3446 do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3447                         unsigned int qidx, unsigned int count,
3448                         struct qdio_buffer *buffers)
3449 {
3450         int used_elements;
3451         unsigned int cnt, start_buf;
3452         unsigned char state = 0;
3453         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3454
3455         /* This is the outbound handling of queues */
3456         if (qdio_performance_stats)
3457                 perf_stats.start_time_outbound=NOW;
3458
3459         qdio_do_qdio_fill_output(q,qidx,count,buffers);
3460
3461         used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3462
3463         if (callflags&QDIO_FLAG_DONT_SIGA) {
3464                 if (qdio_performance_stats) {
3465                         perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
3466                         perf_stats.outbound_cnt++;
3467                 }
3468                 return;
3469         }
3470         if (q->is_iqdio_q) {
3471                 /* one siga for every sbal */
3472                 while (count--)
3473                         qdio_kick_outbound_q(q);
3474                         
3475                 __qdio_outbound_processing(q);
3476         } else {
3477                 /*
3478                  * Under VM, we do a SIGA sync unconditionally.  SYNC_MEMORY
3479                  * expands to an if statement; the else below binds to that
3480                  * if and is taken when no SIGA sync is required.
3481                  */
3482                 SYNC_MEMORY;
3483                 else {
3484                         /* No shadow queues: try to fast-requeue buffers. */
3485                         if (irq->is_qebsm) {
3486                                 cnt = 1;
3487                                 start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
3488                                              (QDIO_MAX_BUFFERS_PER_Q-1));
3489                                 qdio_do_eqbs(q, &state, &start_buf, &cnt);
3490                         } else
3491                                 state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
3492                                         &(QDIO_MAX_BUFFERS_PER_Q-1) ];
3493                         if (state != SLSB_CU_OUTPUT_PRIMED) {
3494                                 qdio_kick_outbound_q(q);
3495                         } else {
3496                                 QDIO_DBF_TEXT3(0,trace, "fast-req");
3497                                 if (qdio_performance_stats)
3498                                         perf_stats.fast_reqs++;
3499                         }
3500                 }
3501                 /*
3502                  * Only marking the queue could take too long: the upper
3503                  * layer module could generate a lot of traffic in the
3504                  * meantime.
3505                  */
3506                 __qdio_outbound_processing(q);
3507         }
3508
3509         if (qdio_performance_stats) {
3510                 perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
3511                 perf_stats.outbound_cnt++;
3512         }
3513 }
3514
3515 /* count must be 1 in iqdio */
3516 int
3517 do_QDIO(struct ccw_device *cdev,unsigned int callflags,
3518         unsigned int queue_number, unsigned int qidx,
3519         unsigned int count,struct qdio_buffer *buffers)
3520 {
3521         struct qdio_irq *irq_ptr;
3522 #ifdef CONFIG_QDIO_DEBUG
3523         char dbf_text[20];
3524
3525         sprintf(dbf_text,"doQD%04x",cdev->private->schid.sch_no);
3526         QDIO_DBF_TEXT3(0,trace,dbf_text);
3527 #endif /* CONFIG_QDIO_DEBUG */
3528
3529         if ( (qidx>=QDIO_MAX_BUFFERS_PER_Q) ||
3530              (count>QDIO_MAX_BUFFERS_PER_Q) ||
3531              (queue_number>=QDIO_MAX_QUEUES_PER_IRQ) )
3532                 return -EINVAL;
3533
3534         if (count==0)
3535                 return 0;
3536
3537         irq_ptr = cdev->private->qdio_data;
3538         if (!irq_ptr)
3539                 return -ENODEV;
3540
3541 #ifdef CONFIG_QDIO_DEBUG
3542         if (callflags&QDIO_FLAG_SYNC_INPUT)
3543                 QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
3544                               sizeof(void*));
3545         else
3546                 QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number],
3547                               sizeof(void*));
3548         sprintf(dbf_text,"flag%04x",callflags);
3549         QDIO_DBF_TEXT3(0,trace,dbf_text);
3550         sprintf(dbf_text,"qi%02xct%02x",qidx,count);
3551         QDIO_DBF_TEXT3(0,trace,dbf_text);
3552 #endif /* CONFIG_QDIO_DEBUG */
3553
3554         if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
3555                 return -EBUSY;
3556
3557         if (callflags&QDIO_FLAG_SYNC_INPUT)
3558                 do_qdio_handle_inbound(irq_ptr->input_qs[queue_number],
3559                                        callflags, qidx, count, buffers);
3560         else if (callflags&QDIO_FLAG_SYNC_OUTPUT)
3561                 do_qdio_handle_outbound(irq_ptr->output_qs[queue_number],
3562                                         callflags, qidx, count, buffers);
3563         else {
3564                 QDIO_DBF_TEXT3(1,trace,"doQD:inv");
3565                 return -EINVAL;
3566         }
3567         return 0;
3568 }
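     /*
      * A minimal usage sketch (not part of this driver): an upper-layer
      * driver handing one processed inbound buffer back to the adapter.
      * The function name, cdev, queue number and buffer index are
      * hypothetical; only do_QDIO(), its flags and QDIO_MAX_BUFFERS_PER_Q
      * come from this interface:
      *
      *   static void requeue_one_inbound(struct ccw_device *cdev, int q_nr,
      *                                   unsigned int index,
      *                                   struct qdio_buffer *bufs)
      *   {
      *           int rc;
      *
      *           rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, q_nr,
      *                        index & (QDIO_MAX_BUFFERS_PER_Q - 1), 1, bufs);
      *           if (rc)
      *                   printk(KERN_WARNING "qdio requeue failed: %d\n", rc);
      *   }
      *
      * Outbound works the same way with QDIO_FLAG_SYNC_OUTPUT; for iqdio
      * queues count must be 1 (see the comment above do_QDIO()).
      */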
3569
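     /*
      * read_proc callback for the QDIO_PERF proc entry: dump the
      * accumulated performance counters.  Everything fits into the single
      * 4k buffer handed to us, so any offset > 0 simply returns 0 (EOF).
      */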
3570 static int
3571 qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
3572                         int buffer_length, int *eof, void *data)
3573 {
3574         int c=0;
3575
3576         /* we are always called with buffer_length=4k, so we can
3577            deliver everything on the first read */
3578         if (offset>0)
3579                 return 0;
3580
3581 #define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
3582         _OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c);
3583         _OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c);
3584         _OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c);
3585         _OUTP_IT("Number of tasklet runs (total)                  : %lu\n",
3586                  perf_stats.tl_runs);
3587         _OUTP_IT("\n");
3588         _OUTP_IT("Number of SIGA sync's issued                    : %lu\n",
3589                  perf_stats.siga_syncs);
3590         _OUTP_IT("Number of SIGA in's issued                      : %lu\n",
3591                  perf_stats.siga_ins);
3592         _OUTP_IT("Number of SIGA out's issued                     : %lu\n",
3593                  perf_stats.siga_outs);
3594         _OUTP_IT("Number of PCIs caught                           : %lu\n",
3595                  perf_stats.pcis);
3596         _OUTP_IT("Number of adapter interrupts caught             : %lu\n",
3597                  perf_stats.thinints);
3598         _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA)  : %lu\n",
3599                  perf_stats.fast_reqs);
3600         _OUTP_IT("\n");
3601         _OUTP_IT("Total time of all inbound actions (us) incl. UL : %lu\n",
3602                  perf_stats.inbound_time);
3603         _OUTP_IT("Number of inbound transfers                     : %lu\n",
3604                  perf_stats.inbound_cnt);
3605         _OUTP_IT("Total time of all outbound do_QDIOs (us)        : %lu\n",
3606                  perf_stats.outbound_time);
3607         _OUTP_IT("Number of do_QDIOs outbound                     : %lu\n",
3608                  perf_stats.outbound_cnt);
3609         _OUTP_IT("\n");
3610
3611         return c;
3612 }
3613
3614 static struct proc_dir_entry *qdio_perf_proc_file;
3615
3616 static void
3617 qdio_add_procfs_entry(void)
3618 {
3619         proc_perf_file_registration=0;
3620         qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
3621                                               S_IFREG|0444,&proc_root);
3622         if (qdio_perf_proc_file) {
3623                 qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read;
3624         } else proc_perf_file_registration=-1;
3625
3626         if (proc_perf_file_registration)
3627                 QDIO_PRINT_WARN("could not register performance " \
3628                                 "proc file (%i).\n",
3629                                 proc_perf_file_registration);
3630 }
3631
3632 static void
3633 qdio_remove_procfs_entry(void)
3634 {
3635         perf_stats.tl_runs=0;
3636
3637         if (!proc_perf_file_registration) /* i.e. registration succeeded earlier */
3638                 remove_proc_entry(QDIO_PERF,&proc_root);
3639 }
3640
3641 /*
3642  * attributes in sysfs
3643  */
3644
3645 static ssize_t
3646 qdio_performance_stats_show(struct bus_type *bus, char *buf)
3647 {
3648         return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
3649 }
3650
3651 static ssize_t
3652 qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
3653 {
3654         char *tmp;
3655         int i;
3656
3657         i = simple_strtoul(buf, &tmp, 16);
3658         if ((i == 0) || (i == 1)) {
3659                 if (i == qdio_performance_stats)
3660                         return count;
3661                 qdio_performance_stats = i;
3662                 if (i==0) {
3663                         /* reset perf. stat. info */
3664                         i_p_nc = 0;
3665                         i_p_c = 0;
3666                         ii_p_nc = 0;
3667                         ii_p_c = 0;
3668                         o_p_nc = 0;
3669                         o_p_c = 0;
3670                         memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
3671                 }
3672         } else {
3673                 QDIO_PRINT_WARN("QDIO performance_stats: write 0 or 1 to this file!\n");
3674                 return -EINVAL;
3675         }
3676         return count;
3677 }
3678
3679 static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show,
3680                         qdio_performance_stats_store);
3681
3682 static void
3683 tiqdio_register_thinints(void)
3684 {
3685         char dbf_text[20];
3686         register_thinint_result=
3687                 s390_register_adapter_interrupt(&tiqdio_thinint_handler);
3688         if (register_thinint_result) {
3689                 sprintf(dbf_text,"regthn%x",(register_thinint_result&0xff));
3690                 QDIO_DBF_TEXT0(0,setup,dbf_text);
3691                 QDIO_PRINT_ERR("failed to register adapter handler " \
3692                                "(rc=%i).\nAdapter interrupts might " \
3693                                "not work. Continuing.\n",
3694                                register_thinint_result);
3695         }
3696 }
3697
3698 static void
3699 tiqdio_unregister_thinints(void)
3700 {
3701         if (!register_thinint_result)
3702                 s390_unregister_adapter_interrupt(&tiqdio_thinint_handler);
3703 }
3704
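     /*
      * Allocate the cacheline of device-state-change indicators shared by
      * all thinint subchannels and mark slot 0 as used, leaving the
      * remaining slots to be handed out by qdio_get_indicator().
      */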
3705 static int
3706 qdio_get_qdio_memory(void)
3707 {
3708         int i;
3709         indicator_used[0]=1;
3710
3711         for (i=1;i<INDICATORS_PER_CACHELINE;i++)
3712                 indicator_used[i]=0;
3713         indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
3714                                    GFP_KERNEL);
3715         if (!indicators)
3716                 return -ENOMEM;
3717         return 0;
3718 }
3719
3720 static void
3721 qdio_release_qdio_memory(void)
3722 {
3723         kfree(indicators);
3724 }
3725
3726
3727 static void
3728 qdio_unregister_dbf_views(void)
3729 {
3730         if (qdio_dbf_setup)
3731                 debug_unregister(qdio_dbf_setup);
3732         if (qdio_dbf_sbal)
3733                 debug_unregister(qdio_dbf_sbal);
3734         if (qdio_dbf_sense)
3735                 debug_unregister(qdio_dbf_sense);
3736         if (qdio_dbf_trace)
3737                 debug_unregister(qdio_dbf_trace);
3738 #ifdef CONFIG_QDIO_DEBUG
3739         if (qdio_dbf_slsb_out)
3740                 debug_unregister(qdio_dbf_slsb_out);
3741         if (qdio_dbf_slsb_in)
3742                 debug_unregister(qdio_dbf_slsb_in);
3743 #endif /* CONFIG_QDIO_DEBUG */
3744 }
3745
3746 static int
3747 qdio_register_dbf_views(void)
3748 {
3749         qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME,
3750                                       QDIO_DBF_SETUP_PAGES,
3751                                       QDIO_DBF_SETUP_NR_AREAS,
3752                                       QDIO_DBF_SETUP_LEN);
3753         if (!qdio_dbf_setup)
3754                 goto oom;
3755         debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view);
3756         debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL);
3757
3758         qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME,
3759                                      QDIO_DBF_SBAL_PAGES,
3760                                      QDIO_DBF_SBAL_NR_AREAS,
3761                                      QDIO_DBF_SBAL_LEN);
3762         if (!qdio_dbf_sbal)
3763                 goto oom;
3764
3765         debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view);
3766         debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL);
3767
3768         qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME,
3769                                       QDIO_DBF_SENSE_PAGES,
3770                                       QDIO_DBF_SENSE_NR_AREAS,
3771                                       QDIO_DBF_SENSE_LEN);
3772         if (!qdio_dbf_sense)
3773                 goto oom;
3774
3775         debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view);
3776         debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL);
3777
3778         qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME,
3779                                       QDIO_DBF_TRACE_PAGES,
3780                                       QDIO_DBF_TRACE_NR_AREAS,
3781                                       QDIO_DBF_TRACE_LEN);
3782         if (!qdio_dbf_trace)
3783                 goto oom;
3784
3785         debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
3786         debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);
3787
3788 #ifdef CONFIG_QDIO_DEBUG
3789         qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
3790                                          QDIO_DBF_SLSB_OUT_PAGES,
3791                                          QDIO_DBF_SLSB_OUT_NR_AREAS,
3792                                          QDIO_DBF_SLSB_OUT_LEN);
3793         if (!qdio_dbf_slsb_out)
3794                 goto oom;
3795         debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view);
3796         debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL);
3797
3798         qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME,
3799                                         QDIO_DBF_SLSB_IN_PAGES,
3800                                         QDIO_DBF_SLSB_IN_NR_AREAS,
3801                                         QDIO_DBF_SLSB_IN_LEN);
3802         if (!qdio_dbf_slsb_in)
3803                 goto oom;
3804         debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
3805         debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
3806 #endif /* CONFIG_QDIO_DEBUG */
3807         return 0;
3808 oom:
3809         QDIO_PRINT_ERR("not enough memory for dbf.\n");
3810         qdio_unregister_dbf_views();
3811         return -ENOMEM;
3812 }
3813
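     /*
      * mempool helpers for qdio_mempool_scssc: each element is a single
      * zeroed page, allocated with GFP_DMA like the other control blocks
      * in this driver.
      */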
3814 static void *qdio_mempool_alloc(gfp_t gfp_mask, void *size)
3815 {
3816         return (void *) get_zeroed_page(gfp_mask|GFP_DMA);
3817 }
3818
3819 static void qdio_mempool_free(void *element, void *size)
3820 {
3821         free_page((unsigned long) element);
3822 }
3823
3824 static int __init
3825 init_QDIO(void)
3826 {
3827         int res;
3828         void *ptr;
3829
3830         printk(KERN_INFO "qdio: loading %s\n",version);
3831
3832         res=qdio_get_qdio_memory();
3833         if (res)
3834                 return res;
3835
3836         res = qdio_register_dbf_views();
3837         if (res)
3838                 return res;
3839
3840         QDIO_DBF_TEXT0(0,setup,"initQDIO");
3841         res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
3842
3843         memset((void*)&perf_stats,0,sizeof(perf_stats));
3844         QDIO_DBF_TEXT0(0,setup,"perfstat");
3845         ptr=&perf_stats;
3846         QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
3847
3848         qdio_add_procfs_entry();
3849
3850         qdio_mempool_scssc = mempool_create(QDIO_MEMPOOL_SCSSC_ELEMENTS,
3851                                             qdio_mempool_alloc,
3852                                             qdio_mempool_free, NULL);
3853
3854         if (tiqdio_check_chsc_availability())
3855                 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
3856
3857         tiqdio_register_thinints();
3858
3859         return 0;
3860  }
3861
3862 static void __exit
3863 cleanup_QDIO(void)
3864 {
3865         tiqdio_unregister_thinints();
3866         qdio_remove_procfs_entry();
3867         qdio_release_qdio_memory();
3868         qdio_unregister_dbf_views();
3869         mempool_destroy(qdio_mempool_scssc);
3870         bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
3871         printk(KERN_INFO "qdio: %s: module removed\n",version);
3872 }
3873
3874 module_init(init_QDIO);
3875 module_exit(cleanup_QDIO);
3876
3877 EXPORT_SYMBOL(qdio_allocate);
3878 EXPORT_SYMBOL(qdio_establish);
3879 EXPORT_SYMBOL(qdio_initialize);
3880 EXPORT_SYMBOL(qdio_activate);
3881 EXPORT_SYMBOL(do_QDIO);
3882 EXPORT_SYMBOL(qdio_shutdown);
3883 EXPORT_SYMBOL(qdio_free);
3884 EXPORT_SYMBOL(qdio_cleanup);
3885 EXPORT_SYMBOL(qdio_synchronize);