3  * linux/drivers/s390/cio/qdio.c
 
   5  * Linux for S/390 QDIO base support, HiperSockets base support
 
   8  * Copyright 2000,2002 IBM Corporation
 
   9  * Author(s):             Utz Bacher <utz.bacher@de.ibm.com>
 
  10  * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 
  12  * Restriction: only 63 iqdio subchannels will have their own indicator;

  13  * after that, subsequent subchannels share one indicator
 
  18  * This program is free software; you can redistribute it and/or modify
 
  19  * it under the terms of the GNU General Public License as published by
 
  20  * the Free Software Foundation; either version 2, or (at your option)
 
  23  * This program is distributed in the hope that it will be useful,
 
  24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 
  25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
  26  * GNU General Public License for more details.
 
  28  * You should have received a copy of the GNU General Public License
 
  29  * along with this program; if not, write to the Free Software
 
  30  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 
  33 #include <linux/module.h>
 
  34 #include <linux/init.h>
 
  35 #include <linux/delay.h>
 
  36 #include <linux/slab.h>
 
  37 #include <linux/kernel.h>
 
  38 #include <linux/proc_fs.h>
 
  39 #include <linux/timer.h>
 
  40 #include <linux/mempool.h>
 
  42 #include <asm/ccwdev.h>
 
  44 #include <asm/atomic.h>
 
  45 #include <asm/semaphore.h>
 
  46 #include <asm/timex.h>
 
  48 #include <asm/debug.h>
 
  49 #include <asm/s390_rdev.h>
 
  60 /****************** MODULE PARAMETER VARIABLES ********************/
 
  61 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
 
  62 MODULE_DESCRIPTION("QDIO base support version 2, " \
 
  63                    "Copyright 2000 IBM Corporation");
 
  64 MODULE_LICENSE("GPL");
 
  66 /******************** HERE WE GO ***********************************/
 
  68 static const char version[] = "QDIO base support version 2";
 
  70 static int qdio_performance_stats = 0;
 
  71 static int proc_perf_file_registration;
 
  72 static struct qdio_perf_stats perf_stats;
 
  74 static int hydra_thinints;
 
  75 static int is_passthrough = 0;
 
  78 static int indicator_used[INDICATORS_PER_CACHELINE];
 
  79 static __u32 * volatile indicators;
 
  80 static __u32 volatile spare_indicator;
 
  81 static atomic_t spare_indicator_usecount;
 
  82 #define QDIO_MEMPOOL_SCSSC_ELEMENTS 2
 
  83 static mempool_t *qdio_mempool_scssc;
 
  84 static struct kmem_cache *qdio_q_cache;
 
  86 static debug_info_t *qdio_dbf_setup;
 
  87 static debug_info_t *qdio_dbf_sbal;
 
  88 static debug_info_t *qdio_dbf_trace;
 
  89 static debug_info_t *qdio_dbf_sense;
 
  90 #ifdef CONFIG_QDIO_DEBUG
 
  91 static debug_info_t *qdio_dbf_slsb_out;
 
  92 static debug_info_t *qdio_dbf_slsb_in;
 
  93 #endif /* CONFIG_QDIO_DEBUG */
 
  96 static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
 
  97                                                  during a while loop */
 
  98 static DEFINE_SPINLOCK(ttiq_list_lock);
 
  99 static void *tiqdio_ind;
 
 100 static void tiqdio_tl(unsigned long);
 
 101 static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
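
/*
 * Dispatch chain for thin interrupts, as visible further down in this
 * file: tiqdio_thinint_handler() counts the interrupt, optionally clears
 * the global summary and calls tiqdio_inbound_checks() directly, while
 * tiqdio_sched_tl() defers the same work to tiqdio_tasklet, whose handler
 * tiqdio_tl() also ends up in tiqdio_inbound_checks(). tiq_list is the
 * circular list of thin-interrupt input queues that those checks walk.
 */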
 
 103 /* not a macro, as one of the arguments is atomic_read */
 
 105 qdio_min(int a,int b)
 
 113 /***************** SCRUBBER HELPER ROUTINES **********************/
 
 115 static inline void qdio_perf_stat_inc(atomic64_t *count)
 
 117         if (qdio_performance_stats)
 
 121 static inline void qdio_perf_stat_dec(atomic64_t *count)
 
 123         if (qdio_performance_stats)
 
 126 #else /* CONFIG_64BIT */
 
 127 static inline void qdio_perf_stat_inc(atomic_t *count)
 
 129         if (qdio_performance_stats)
 
 133 static inline void qdio_perf_stat_dec(atomic_t *count)
 
 135         if (qdio_performance_stats)
 
 138 #endif /* CONFIG_64BIT */
 
 141 qdio_get_micros(void)
 
 143         return (get_clock() >> 12); /* time>>12 is microseconds */
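
/*
 * For reference, assuming the standard z/Architecture TOD clock format
 * returned by get_clock(): bit 51 of the TOD value is incremented once per
 * microsecond, so shifting right by 12 converts the TOD value to
 * microseconds. E.g. one second corresponds to a TOD value of
 * 1000000 << 12 = 0xF4240000.
 */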
 
 147  * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve
 
 148  * the q in any case, so that we'll not be interrupted when we are in
 
 149  * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost

 150  * always works (famous last words)
 
 153 qdio_reserve_q(struct qdio_q *q)
 
 155         return atomic_add_return(1,&q->use_count) - 1;
 
 159 qdio_release_q(struct qdio_q *q)
 
 161         atomic_dec(&q->use_count);
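
/*
 * A minimal sketch of the intended usage pattern (compare
 * __qdio_outbound_processing further down): a non-zero return from
 * qdio_reserve_q() means somebody else already holds the queue, so the
 * caller drops its reference again, marks the queue and lets the tasklet
 * retry later instead of processing it concurrently.
 *
 *	if (unlikely(qdio_reserve_q(q))) {
 *		qdio_release_q(q);
 *		if (likely(!atomic_read(&q->is_in_shutdown)))
 *			qdio_mark_q(q);
 *		return;
 *	}
 *	... process the queue ...
 *	qdio_release_q(q);
 */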
 
 166 qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
 
 170         if (ccq == 0 || ccq == 32)
 
 172         if (ccq == 96 || ccq == 97)
 
 174         /*notify devices immediately*/
 
 175         sprintf(dbf_text,"%d", ccq);
 
 176         QDIO_DBF_TEXT2(1,trace,dbf_text);
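
/*
 * Reading of the condition code qualifier checked above: ccq 0 and 32 mean
 * the QEBSM operation completed, 96 and 97 mean "not done yet, issue the
 * instruction again", and anything else is treated as an error that
 * qdio_do_eqbs/qdio_do_sqbs report to the queue's handler with
 * QDIO_STATUS_ACTIVATE_CHECK_CONDITION.
 */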
 
 179 /* EQBS: extract buffer states */
 
 181 qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
 
 182              unsigned int *start, unsigned int *cnt)
 
 184         struct qdio_irq *irq;
 
 185         unsigned int tmp_cnt, q_no, ccq;
 
 191         irq = (struct qdio_irq*)q->irq_ptr;
 
 194                 q_no += irq->no_input_qs;
 
 196         ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
 
 197         rc = qdio_check_ccq(q, ccq);
 
 198         if ((ccq == 96) && (tmp_cnt != *cnt))
 
 201                 QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
 
 205                 QDIO_DBF_TEXT2(1,trace,"eqberr");
 
 206                 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
 
 207                 QDIO_DBF_TEXT2(1,trace,dbf_text);
 
 208                 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
 
 209                                 QDIO_STATUS_LOOK_FOR_ERROR,
 
 210                                 0, 0, 0, -1, -1, q->int_parm);
 
 213         return (tmp_cnt - *cnt);
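
/*
 * Note on the return value above: the instruction updates *cnt to the
 * number of buffers it has not processed yet, so with tmp_cnt holding the
 * count requested on entry, (tmp_cnt - *cnt) is the number of buffers whose
 * state was actually extracted.
 */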
 
 216 /* SQBS: set buffer states */
 
 218 qdio_do_sqbs(struct qdio_q *q, unsigned char state,
 
 219              unsigned int *start, unsigned int *cnt)
 
 221         struct qdio_irq *irq;
 
 222         unsigned int tmp_cnt, q_no, ccq;
 
 228         irq = (struct qdio_irq*)q->irq_ptr;
 
 231                 q_no += irq->no_input_qs;
 
 233         ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
 
 234         rc = qdio_check_ccq(q, ccq);
 
 236                 QDIO_DBF_TEXT5(1,trace,"sqAGAIN");
 
 240                 QDIO_DBF_TEXT3(1,trace,"sqberr");
 
 241                 sprintf(dbf_text,"%2x,%2x",tmp_cnt,*cnt);
 
 242                 QDIO_DBF_TEXT3(1,trace,dbf_text);
 
 243                 sprintf(dbf_text,"%d,%d",ccq,q_no);
 
 244                 QDIO_DBF_TEXT3(1,trace,dbf_text);
 
 245                 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
 
 246                                 QDIO_STATUS_LOOK_FOR_ERROR,
 
 247                                 0, 0, 0, -1, -1, q->int_parm);
 
 250         return (tmp_cnt - *cnt);
 
 254 qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
 
 255               unsigned char state, unsigned int *count)
 
 258         struct qdio_irq *irq;
 
 260         irq = (struct qdio_irq*)q->irq_ptr;
 
 261         if (!irq->is_qebsm) {
 
 262                 slsb = (char *)&q->slsb.acc.val[(*bufno)];
 
 266         return qdio_do_sqbs(q, state, bufno, count);
 
 269 #ifdef CONFIG_QDIO_DEBUG
 
 271 qdio_trace_slsb(struct qdio_q *q)
 
 273         if (q->queue_type==QDIO_TRACE_QTYPE) {
 
 275                         QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
 
 276                                       QDIO_MAX_BUFFERS_PER_Q);
 
 278                         QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
 
 279                                       QDIO_MAX_BUFFERS_PER_Q);
 
 285 set_slsb(struct qdio_q *q, unsigned int *bufno,
 
 286          unsigned char state, unsigned int *count)
 
 289 #ifdef CONFIG_QDIO_DEBUG
 
 292         rc = qdio_set_slsb(q, bufno, state, count);
 
 293 #ifdef CONFIG_QDIO_DEBUG
 
 299 qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
 
 304         QDIO_DBF_TEXT4(0,trace,"sigasync");
 
 305         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 307         qdio_perf_stat_inc(&perf_stats.siga_syncs);
 
 309         cc = do_siga_sync(q->schid, gpr2, gpr3);
 
 311                 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
 
 317 qdio_siga_sync_q(struct qdio_q *q)
 
 320                 return qdio_siga_sync(q, 0, q->mask);
 
 321         return qdio_siga_sync(q, q->mask, 0);
 
 325 __do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
 
 327        struct qdio_irq *irq;
 
 331        irq = (struct qdio_irq *) q->irq_ptr;
 
 333                schid = *((u32 *)&q->schid);
 
 335                schid = irq->sch_token;
 
 338        return do_siga_output(schid, q->mask, busy_bit, fc);
 
 342  * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
 
 343  * an access exception 
 
 346 qdio_siga_output(struct qdio_q *q)
 
 352         qdio_perf_stat_inc(&perf_stats.siga_outs);
 
 354         QDIO_DBF_TEXT4(0,trace,"sigaout");
 
 355         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 358                 cc = __do_siga_output(q, &busy_bit);
 
 359 //QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
 
 360                 if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
 
 363                         if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
 
 369         if ((cc==2) && (busy_bit)) 
 
 370                 cc |= QDIO_SIGA_ERROR_B_BIT_SET;
 
 373                 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
 
 379 qdio_siga_input(struct qdio_q *q)
 
 383         QDIO_DBF_TEXT4(0,trace,"sigain");
 
 384         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 386         qdio_perf_stat_inc(&perf_stats.siga_ins);
 
 388         cc = do_siga_input(q->schid, q->mask);
 
 391                 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
 
 396 /* locked by the locks in qdio_activate and qdio_cleanup */
 
 398 qdio_get_indicator(void)
 
 402         for (i = 0; i < INDICATORS_PER_CACHELINE; i++)
 
 403                 if (!indicator_used[i]) {
 
 407         atomic_inc(&spare_indicator_usecount);
 
 408         return (__u32 * volatile) &spare_indicator;
 
 411 /* locked by the locks in qdio_activate and qdio_cleanup */
 
 413 qdio_put_indicator(__u32 *addr)
 
 417         if ( (addr) && (addr!=&spare_indicator) ) {
 
 421         if (addr == &spare_indicator)
 
 422                 atomic_dec(&spare_indicator_usecount);
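
/*
 * This is the mechanism behind the restriction noted in the file header:
 * each thin-interrupt queue normally gets one of the
 * INDICATORS_PER_CACHELINE private indicator words tracked in
 * indicator_used[]; once those are exhausted, qdio_get_indicator() hands
 * out the shared spare_indicator and counts its users in
 * spare_indicator_usecount, so all further subchannels share that single
 * indicator.
 */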
 
 426 tiqdio_clear_summary_bit(__u32 *location)
 
 428         QDIO_DBF_TEXT5(0,trace,"clrsummb");
 
 429         QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
 
 435 tiqdio_set_summary_bit(__u32 *location)
 
 437         QDIO_DBF_TEXT5(0,trace,"setsummb");
 
 438         QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
 
 444 tiqdio_sched_tl(void)
 
 446         tasklet_hi_schedule(&tiqdio_tasklet);
 
 450 qdio_mark_tiq(struct qdio_q *q)
 
 454         QDIO_DBF_TEXT4(0,trace,"mark iq");
 
 455         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 457         spin_lock_irqsave(&ttiq_list_lock,flags);
 
 458         if (unlikely(atomic_read(&q->is_in_shutdown)))
 
 464         if ((q->list_prev) || (q->list_next)) 
 
 472                 q->list_next=tiq_list;
 
 473                 q->list_prev=tiq_list->list_prev;
 
 474                 tiq_list->list_prev->list_next=q;
 
 475                 tiq_list->list_prev=q;
 
 477         spin_unlock_irqrestore(&ttiq_list_lock,flags);
 
 479         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
 
 483         spin_unlock_irqrestore(&ttiq_list_lock,flags);
 
 488 qdio_mark_q(struct qdio_q *q)
 
 490         QDIO_DBF_TEXT4(0,trace,"mark q");
 
 491         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 493         if (unlikely(atomic_read(&q->is_in_shutdown)))
 
 496         tasklet_schedule(&q->tasklet);
 
 500 qdio_stop_polling(struct qdio_q *q)
 
 502 #ifdef QDIO_USE_PROCESSING_STATE
 
 503        unsigned int tmp, gsf, count = 1;
 
 504        unsigned char state = 0;
 
 505        struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
 
 507         if (!atomic_xchg(&q->polling,0))
 
 510         QDIO_DBF_TEXT4(0,trace,"stoppoll");
 
 511         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 513         /* show the card that we are not polling anymore */
 
 517        tmp = gsf = GET_SAVED_FRONTIER(q);
 
 518        tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
 
 519        set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);
 
 522          * we don't issue this SYNC_MEMORY, as we trust Rick T and
 
 523          * moreover will not use the PROCESSING state under VM, so
 
 524          * q->polling was 0 anyway
 
 529                qdio_do_eqbs(q, &state, &gsf, &count);
 
 531                state = q->slsb.acc.val[gsf];
 
 532        if (state != SLSB_P_INPUT_PRIMED)
 
 535          * set our summary bit again, as otherwise there is a
 
 536          * small window we can miss between resetting it and
 
 537          * checking for PRIMED state 
 
 540                 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
 
 543 #else /* QDIO_USE_PROCESSING_STATE */
 
 545 #endif /* QDIO_USE_PROCESSING_STATE */
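
/*
 * Background for the PROCESSING state handling above (a sketch of the
 * idea): while polling, the most recently delivered inbound buffer is left
 * in SLSB_P_INPUT_PROCESSING so the adapter does not raise further
 * interrupts. qdio_stop_polling() puts that buffer (the one just before the
 * saved frontier) back to NOT_INIT and then re-reads the state at the
 * frontier; if a buffer became PRIMED in the meantime, the summary bit is
 * set again and 0 is returned, so the caller does not lose data that
 * arrived in the window between resetting and checking.
 */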
 
 549  * see the comment in do_QDIO and before qdio_reserve_q about the
 
 550  * sophisticated locking outside of unmark_q, so that we don't need to
 
 551  * disable the interrupts :-) 
 
 554 qdio_unmark_q(struct qdio_q *q)
 
 558         QDIO_DBF_TEXT4(0,trace,"unmark q");
 
 559         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 561         if ((!q->list_prev)||(!q->list_next))
 
 564         if ((q->is_thinint_q)&&(q->is_input_q)) {
 
 566                 spin_lock_irqsave(&ttiq_list_lock,flags);
 
 567                 /* in case cleanup has done this already and simultaneously

 568                  * qdio_unmark_q is called from the interrupt handler, we've

 569                  * got to check this again in this specific case */
 
 570                 if ((!q->list_prev)||(!q->list_next))
 
 572                 if (q->list_next==q) {
 
 573                         /* q was the only interesting q */
 
 578                         q->list_next->list_prev=q->list_prev;
 
 579                         q->list_prev->list_next=q->list_next;
 
 580                         tiq_list=q->list_next;
 
 585                 spin_unlock_irqrestore(&ttiq_list_lock,flags);
 
 589 static inline unsigned long 
 
 590 tiqdio_clear_global_summary(void)
 
 594         QDIO_DBF_TEXT5(0,trace,"clrglobl");
 
 596         time = do_clear_global_summary();
 
 598         QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long));
 
 604 /************************* OUTBOUND ROUTINES *******************************/
 
 606 qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
 
 608         struct qdio_irq *irq;
 
 610         unsigned int cnt, count, ftc;
 
 612         irq = (struct qdio_irq *) q->irq_ptr;
 
 613         if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
 
 616         ftc = q->first_to_check;
 
 617         count = qdio_min(atomic_read(&q->number_of_buffers_used),
 
 618                         (QDIO_MAX_BUFFERS_PER_Q-1));
 
 620                 return q->first_to_check;
 
 621         cnt = qdio_do_eqbs(q, &state, &ftc, &count);
 
 623                 return q->first_to_check;
 
 625         case SLSB_P_OUTPUT_ERROR:
 
 626                 QDIO_DBF_TEXT3(0,trace,"outperr");
 
 627                 atomic_sub(cnt , &q->number_of_buffers_used);
 
 629                         q->error_status_flags |=
 
 630                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
 
 631                 q->qdio_error = SLSB_P_OUTPUT_ERROR;
 
 632                 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
 
 633                 q->first_to_check = ftc;
 
 635         case SLSB_P_OUTPUT_EMPTY:
 
 636                 QDIO_DBF_TEXT5(0,trace,"outpempt");
 
 637                 atomic_sub(cnt, &q->number_of_buffers_used);
 
 638                 q->first_to_check = ftc;
 
 640         case SLSB_CU_OUTPUT_PRIMED:
 
 641                 /* all buffers primed */
 
 642                 QDIO_DBF_TEXT5(0,trace,"outpprim");
 
 647         QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
 
 648         return q->first_to_check;
 
 652 qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
 
 654         struct qdio_irq *irq;
 
 656         int tmp, ftc, count, cnt;
 
 660         irq = (struct qdio_irq *) q->irq_ptr;
 
 661         ftc = q->first_to_check;
 
 662         count = qdio_min(atomic_read(&q->number_of_buffers_used),
 
 663                         (QDIO_MAX_BUFFERS_PER_Q-1));
 
 665                  return q->first_to_check;
 
 666         cnt = qdio_do_eqbs(q, &state, &ftc, &count);
 
 668                  return q->first_to_check;
 
 670         case SLSB_P_INPUT_ERROR :
 
 671 #ifdef CONFIG_QDIO_DEBUG
 
 672                 QDIO_DBF_TEXT3(1,trace,"inperr");
 
 673                 sprintf(dbf_text,"%2x,%2x",ftc,count);
 
 674                 QDIO_DBF_TEXT3(1,trace,dbf_text);
 
 675 #endif /* CONFIG_QDIO_DEBUG */
 
 677                         q->error_status_flags |=
 
 678                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
 
 679                 q->qdio_error = SLSB_P_INPUT_ERROR;
 
 680                 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
 
 681                 atomic_sub(cnt, &q->number_of_buffers_used);
 
 682                 q->first_to_check = ftc;
 
 684         case SLSB_P_INPUT_PRIMED :
 
 685                 QDIO_DBF_TEXT3(0,trace,"inptprim");
 
 686                 sprintf(dbf_text,"%2x,%2x",ftc,count);
 
 687                 QDIO_DBF_TEXT3(1,trace,dbf_text);
 
 689                 ftc = q->first_to_check;
 
 690 #ifdef QDIO_USE_PROCESSING_STATE
 
 693                         tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
 
 698                 tmp += set_slsb(q, &ftc,
 
 699                                SLSB_P_INPUT_PROCESSING, &cnt);
 
 700                 atomic_set(&q->polling, 1);
 
 702                 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
 
 704                 atomic_sub(tmp, &q->number_of_buffers_used);
 
 705                 q->first_to_check = ftc;
 
 707         case SLSB_CU_INPUT_EMPTY:
 
 708         case SLSB_P_INPUT_NOT_INIT:
 
 709         case SLSB_P_INPUT_PROCESSING:
 
 710                 QDIO_DBF_TEXT5(0,trace,"inpnipro");
 
 715         QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
 
 716         return q->first_to_check;
 
 720 qdio_get_outbound_buffer_frontier(struct qdio_q *q)
 
 722         struct qdio_irq *irq;
 
 724         unsigned int count = 1;
 
 725         int first_not_to_check, f, f_mod_no;
 
 728         QDIO_DBF_TEXT4(0,trace,"getobfro");
 
 729         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 731         irq = (struct qdio_irq *) q->irq_ptr;
 
 733                 return qdio_qebsm_get_outbound_buffer_frontier(q);
 
 735         slsb=&q->slsb.acc.val[0];
 
 736         f_mod_no=f=q->first_to_check;
 
 738          * f points to already processed elements, so f+no_used is correct...
 
 739          * ... but: we don't check 128 buffers, as otherwise
 
 740          * qdio_has_outbound_q_moved would return 0 
 
 742         first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
 
 743                                       (QDIO_MAX_BUFFERS_PER_Q-1));
 
 745         if (((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) ||
 
 746                  (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH))
 
 750         if (f==first_not_to_check) 
 
 753         switch(slsb[f_mod_no]) {
 
 755         /* the adapter has not fetched the output yet */
 
 756         case SLSB_CU_OUTPUT_PRIMED:
 
 757                 QDIO_DBF_TEXT5(0,trace,"outpprim");
 
 760         /* the adapter got it */
 
 761         case SLSB_P_OUTPUT_EMPTY:
 
 762                 atomic_dec(&q->number_of_buffers_used);
 
 764                 f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
 
 765                 QDIO_DBF_TEXT5(0,trace,"outpempt");
 
 768         case SLSB_P_OUTPUT_ERROR:
 
 769                 QDIO_DBF_TEXT3(0,trace,"outperr");
 
 770                 sprintf(dbf_text,"%x-%x-%x",f_mod_no,
 
 771                         q->sbal[f_mod_no]->element[14].sbalf.value,
 
 772                         q->sbal[f_mod_no]->element[15].sbalf.value);
 
 773                 QDIO_DBF_TEXT3(1,trace,dbf_text);
 
 774                 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
 
 776                 /* kind of process the buffer */
 
 777                 set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);
 
 780                  * we increment the frontier, as this buffer

 781                  * was obviously processed
 
 783                 atomic_dec(&q->number_of_buffers_used);
 
 784                 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
 
 787                         q->error_status_flags|=
 
 788                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
 
 789                 q->qdio_error=SLSB_P_OUTPUT_ERROR;
 
 790                 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
 
 796                 QDIO_DBF_TEXT5(0,trace,"outpni");
 
 799         return (q->first_to_check=f_mod_no);
 
 802 /* all buffers are processed */
 
 804 qdio_is_outbound_q_done(struct qdio_q *q)
 
 807 #ifdef CONFIG_QDIO_DEBUG
 
 811         no_used=atomic_read(&q->number_of_buffers_used);
 
 813 #ifdef CONFIG_QDIO_DEBUG
 
 815                 sprintf(dbf_text,"oqisnt%02x",no_used);
 
 816                 QDIO_DBF_TEXT4(0,trace,dbf_text);
 
 818                 QDIO_DBF_TEXT4(0,trace,"oqisdone");
 
 820         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 821 #endif /* CONFIG_QDIO_DEBUG */
 
 826 qdio_has_outbound_q_moved(struct qdio_q *q)
 
 830         i=qdio_get_outbound_buffer_frontier(q);
 
 832         if ( (i!=GET_SAVED_FRONTIER(q)) ||
 
 833              (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
 
 835                 QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
 
 836                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 839                 QDIO_DBF_TEXT4(0,trace,"oqhsntmv");
 
 840                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 846 qdio_kick_outbound_q(struct qdio_q *q)
 
 849 #ifdef CONFIG_QDIO_DEBUG
 
 852         QDIO_DBF_TEXT4(0,trace,"kickoutq");
 
 853         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 854 #endif /* CONFIG_QDIO_DEBUG */
 
 859         /* here's the story with cc=2 and busy bit set (thanks, Rick):
 
 860          * VM's CP could present us cc=2 and busy bit set on SIGA-write
 
 861          * during reconfiguration of their Guest LAN (only in HIPERS mode,
 
 862          * QDIO mode is asynchronous -- cc=2 and busy bit there will take
 
 863          * the queues down immediately; and not being under VM we have a
 
 864          * problem on cc=2 and busy bit set right away).
 
 866          * Therefore qdio_siga_output will try for a short time constantly,
 
 867          * if such a condition occurs. If it doesn't change, it will
 
 868          * increase the busy_siga_counter and save the timestamp, and
 
 869          * schedule the queue for later processing (via mark_q, using the
 
 870          * queue tasklet). __qdio_outbound_processing will check out the
 
 871          * counter. If non-zero, it will call qdio_kick_outbound_q as often
 
 872          * as the value of the counter. This will attempt further SIGA
 
 873          * instructions. For each successful SIGA, the counter is
 
 874          * decreased, for failing SIGAs the counter remains the same, after
 
 876          * After some time of no movement, qdio_kick_outbound_q will
 
 877          * finally fail and reflect corresponding error codes to call
 
 878          * the upper layer module and have it take the queues down.
 
 880          * Note that this is a change from the original HiperSockets design
 
 881          * (saying cc=2 and busy bit means take the queues down), but in
 
 882  * those days Guest LAN didn't exist... excessive cc=2 with busy bit
 
 883          * conditions will still take the queues down, but the threshold is
 
 884          * higher due to the Guest LAN environment.
 
 888         result=qdio_siga_output(q);
 
 892                 /* went smoothly this time, reset timestamp */
 
 893 #ifdef CONFIG_QDIO_DEBUG
 
 894                 QDIO_DBF_TEXT3(0,trace,"cc2reslv");
 
 895                 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
 
 896                         atomic_read(&q->busy_siga_counter));
 
 897                 QDIO_DBF_TEXT3(0,trace,dbf_text);
 
 898 #endif /* CONFIG_QDIO_DEBUG */
 
 899                 q->timing.busy_start=0;
 
 901         case (2|QDIO_SIGA_ERROR_B_BIT_SET):
 
 902                 /* cc=2 and busy bit: */
 
 903                 atomic_inc(&q->busy_siga_counter);
 
 905                 /* if the last siga was successful, save
 
 907                 if (!q->timing.busy_start)
 
 908                         q->timing.busy_start=NOW;
 
 910                 /* if we're in time, don't touch error_status_flags
 
 912                 if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
 
 916                 QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
 
 917 #ifdef CONFIG_QDIO_DEBUG
 
 918                 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
 
 919                         atomic_read(&q->busy_siga_counter));
 
 920                 QDIO_DBF_TEXT3(0,trace,dbf_text);
 
 921 #endif /* CONFIG_QDIO_DEBUG */
 
 922                 /* else fallthrough and report error */
 
 924                 /* for plain cc=1, 2 or 3: */
 
 926                         q->error_status_flags|=
 
 927                                 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
 
 928                 q->error_status_flags|=
 
 929                         QDIO_STATUS_LOOK_FOR_ERROR;
 
 930                 q->siga_error=result;
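
/*
 * Summary of the switch above: a successful SIGA resets timing.busy_start;
 * cc=2 with the busy bit set bumps busy_siga_counter and is only reported
 * as an error once more than QDIO_BUSY_BIT_GIVE_UP has elapsed since
 * busy_start; any other bad cc is recorded in siga_error and
 * error_status_flags, which qdio_kick_outbound_handler later passes on to
 * the upper-layer handler.
 */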
 
 935 qdio_kick_outbound_handler(struct qdio_q *q)
 
 937         int start, end, real_end, count;
 
 938 #ifdef CONFIG_QDIO_DEBUG
 
 942         start = q->first_element_to_kick;
 
 943         /* last_move_ftc was just updated */
 
 944         real_end = GET_SAVED_FRONTIER(q);
 
 945         end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
 
 946                 (QDIO_MAX_BUFFERS_PER_Q-1);
 
 947         count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
 
 948                 (QDIO_MAX_BUFFERS_PER_Q-1);
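
        /*
         * Worked example, assuming QDIO_MAX_BUFFERS_PER_Q == 128: with
         * start == 120 and real_end == 5, end becomes (5 + 127) & 127 == 4
         * (the last buffer before the saved frontier) and count becomes
         * (4 + 129 - 120) & 127 == 13, i.e. buffers 120..127 and 0..4 are
         * reported to the handler in one call.
         */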
 
 950 #ifdef CONFIG_QDIO_DEBUG
 
 951         QDIO_DBF_TEXT4(0,trace,"kickouth");
 
 952         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 954         sprintf(dbf_text,"s=%2xc=%2x",start,count);
 
 955         QDIO_DBF_TEXT4(0,trace,dbf_text);
 
 956 #endif /* CONFIG_QDIO_DEBUG */
 
 958         if (q->state==QDIO_IRQ_STATE_ACTIVE)
 
 959                 q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
 
 960                            q->error_status_flags,
 
 961                            q->qdio_error,q->siga_error,q->q_no,start,count,
 
 964         /* for the next time: */
 
 965         q->first_element_to_kick=real_end;
 
 968         q->error_status_flags=0;
 
 972 __qdio_outbound_processing(struct qdio_q *q)
 
 976         QDIO_DBF_TEXT4(0,trace,"qoutproc");
 
 977         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 979         if (unlikely(qdio_reserve_q(q))) {
 
 981                 qdio_perf_stat_inc(&perf_stats.outbound_tl_runs_resched);
 
 982                 /* as we're sissies, we'll check next time */
 
 983                 if (likely(!atomic_read(&q->is_in_shutdown))) {
 
 985                         QDIO_DBF_TEXT4(0,trace,"busy,agn");
 
 989         qdio_perf_stat_inc(&perf_stats.outbound_tl_runs);
 
 990         qdio_perf_stat_inc(&perf_stats.tl_runs);
 
 992         /* see comment in qdio_kick_outbound_q */
 
 993         siga_attempts=atomic_read(&q->busy_siga_counter);
 
 994         while (siga_attempts) {
 
 995                 atomic_dec(&q->busy_siga_counter);
 
 996                 qdio_kick_outbound_q(q);
 
1000         if (qdio_has_outbound_q_moved(q))
 
1001                 qdio_kick_outbound_handler(q);
 
1003         if (q->queue_type == QDIO_ZFCP_QFMT) {
 
1004                 if ((!q->hydra_gives_outbound_pcis) &&
 
1005                     (!qdio_is_outbound_q_done(q)))
 
1008         else if (((!q->is_iqdio_q) && (!q->is_pci_out)) ||
 
1009                  (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) {
 
1011                  * make sure buffer switch from PRIMED to EMPTY is noticed
 
1012                  * and outbound_handler is called
 
1014                 if (qdio_is_outbound_q_done(q)) {
 
1015                         del_timer(&q->timer);
 
1017                         if (!timer_pending(&q->timer))
 
1018                                 mod_timer(&q->timer, jiffies +
 
1019                                           QDIO_FORCE_CHECK_TIMEOUT);
 
1027 qdio_outbound_processing(unsigned long q)
 
1029         __qdio_outbound_processing((struct qdio_q *) q);
 
1032 /************************* INBOUND ROUTINES *******************************/
 
1036 qdio_get_inbound_buffer_frontier(struct qdio_q *q)
 
1038         struct qdio_irq *irq;
 
1040         volatile char *slsb;
 
1041         unsigned int count = 1;
 
1042         int first_not_to_check;
 
1043 #ifdef CONFIG_QDIO_DEBUG
 
1045 #endif /* CONFIG_QDIO_DEBUG */
 
1046 #ifdef QDIO_USE_PROCESSING_STATE
 
1047         int last_position=-1;
 
1048 #endif /* QDIO_USE_PROCESSING_STATE */
 
1050         QDIO_DBF_TEXT4(0,trace,"getibfro");
 
1051         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
1053         irq = (struct qdio_irq *) q->irq_ptr;
 
1055                 return qdio_qebsm_get_inbound_buffer_frontier(q);
 
1057         slsb=&q->slsb.acc.val[0];
 
1058         f_mod_no=f=q->first_to_check;
 
1060          * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
 
1063         first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
 
1064                                       (QDIO_MAX_BUFFERS_PER_Q-1));
 
1067          * we don't use this one, as a PCI (or we, after a thin interrupt)

1068          * will sync the queues
 
1073         f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
 
1074         if (f==first_not_to_check) 
 
1076         switch (slsb[f_mod_no]) {
 
1078         /* CU_EMPTY means frontier is reached */
 
1079         case SLSB_CU_INPUT_EMPTY:
 
1080                 QDIO_DBF_TEXT5(0,trace,"inptempt");
 
1083         /* P_PRIMED means set slsb to P_PROCESSING and move on */
 
1084         case SLSB_P_INPUT_PRIMED:
 
1085                 QDIO_DBF_TEXT5(0,trace,"inptprim");
 
1087 #ifdef QDIO_USE_PROCESSING_STATE
 
1089                  * as soon as we are running under VM, polling the input queues will

1090                  * kill VM in terms of CP overhead
 
1093                         set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
 
1095                         /* set the previous buffer to NOT_INIT. The current
 
1096                          * buffer will be set to PROCESSING at the end of
 
1097                          * this function to avoid further interrupts. */
 
1098                         if (last_position>=0)
 
1099                                 set_slsb(q, &last_position,
 
1100                                          SLSB_P_INPUT_NOT_INIT, &count);
 
1101                         atomic_set(&q->polling,1);
 
1102                         last_position=f_mod_no;
 
1104 #else /* QDIO_USE_PROCESSING_STATE */
 
1105                 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
 
1106 #endif /* QDIO_USE_PROCESSING_STATE */
 
1108                  * not needed, as the inbound queue will be synced on the next
 
1109                  * siga-r, resp. tiqdio_is_inbound_q_done will do the siga-s
 
1113                 atomic_dec(&q->number_of_buffers_used);
 
1116         case SLSB_P_INPUT_NOT_INIT:
 
1117         case SLSB_P_INPUT_PROCESSING:
 
1118                 QDIO_DBF_TEXT5(0,trace,"inpnipro");
 
1121         /* P_ERROR means frontier is reached, break and report error */
 
1122         case SLSB_P_INPUT_ERROR:
 
1123 #ifdef CONFIG_QDIO_DEBUG
 
1124                 sprintf(dbf_text,"inperr%2x",f_mod_no);
 
1125                 QDIO_DBF_TEXT3(1,trace,dbf_text);
 
1126 #endif /* CONFIG_QDIO_DEBUG */
 
1127                 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
 
1129                 /* kind of process the buffer */
 
1130                 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
 
1133                         q->error_status_flags|=
 
1134                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
 
1135                 q->qdio_error=SLSB_P_INPUT_ERROR;
 
1136                 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
 
1138                 /* we increment the frontier, as this buffer

1139                  * was obviously processed */
 
1140                 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
 
1141                 atomic_dec(&q->number_of_buffers_used);
 
1143 #ifdef QDIO_USE_PROCESSING_STATE
 
1145 #endif /* QDIO_USE_PROCESSING_STATE */
 
1149         /* everything else means frontier not changed (HALTED or so) */
 
1154         q->first_to_check=f_mod_no;
 
1156 #ifdef QDIO_USE_PROCESSING_STATE
 
1157         if (last_position>=0)
 
1158                 set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count);
 
1159 #endif /* QDIO_USE_PROCESSING_STATE */
 
1161         QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
 
1163         return q->first_to_check;
 
1167 qdio_has_inbound_q_moved(struct qdio_q *q)
 
1171         i=qdio_get_inbound_buffer_frontier(q);
 
1172         if ( (i!=GET_SAVED_FRONTIER(q)) ||
 
1173              (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
 
1175                 if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
 
1178                 QDIO_DBF_TEXT4(0,trace,"inhasmvd");
 
1179                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
1182                 QDIO_DBF_TEXT4(0,trace,"inhsntmv");
 
1183                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
1188 /* means no more buffers to be filled */
 
1190 tiqdio_is_inbound_q_done(struct qdio_q *q)
 
1193         unsigned int start_buf, count;
 
1194         unsigned char state = 0;
 
1195         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
 
1197 #ifdef CONFIG_QDIO_DEBUG
 
1201         no_used=atomic_read(&q->number_of_buffers_used);
 
1203         /* propagate the change from 82 to 80 through VM */
 
1206 #ifdef CONFIG_QDIO_DEBUG
 
1208                 sprintf(dbf_text,"iqisnt%02x",no_used);
 
1209                 QDIO_DBF_TEXT4(0,trace,dbf_text);
 
1211                 QDIO_DBF_TEXT4(0,trace,"iniqisdo");
 
1213         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
1214 #endif /* CONFIG_QDIO_DEBUG */
 
1218         if (irq->is_qebsm) {
 
1220                 start_buf = q->first_to_check;
 
1221                 qdio_do_eqbs(q, &state, &start_buf, &count);
 
1223                 state = q->slsb.acc.val[q->first_to_check];
 
1224         if (state != SLSB_P_INPUT_PRIMED)
 
1226                  * nothing more to do if the next buffer is not PRIMED.

1227                  * note that we did a SYNC_MEMORY before, so there

1228                  * has been a synchronization.
 
1229                  * we will return 0 below, as there is nothing to do
 
1230                  * (stop_polling not necessary, as we have not been
 
1231                  * using the PROCESSING state 
 
1236          * ok, the next input buffer is primed. that means that the device state
 
1237          * change indicator and adapter local summary are set, so we will find
 
1239          * we will return 0 below, as there is nothing to do, except scheduling
 
1240          * ourselves for the next time. 
 
1242         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
 
1248 qdio_is_inbound_q_done(struct qdio_q *q)
 
1251         unsigned int start_buf, count;
 
1252         unsigned char state = 0;
 
1253         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
 
1255 #ifdef CONFIG_QDIO_DEBUG
 
1259         no_used=atomic_read(&q->number_of_buffers_used);
 
1262          * we need that one for synchronization with the adapter, as it
 
1263          * does a kind of PCI avoidance 
 
1268                 QDIO_DBF_TEXT4(0,trace,"inqisdnA");
 
1269                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
1272         if (irq->is_qebsm) {
 
1274                 start_buf = q->first_to_check;
 
1275                 qdio_do_eqbs(q, &state, &start_buf, &count);
 
1277                 state = q->slsb.acc.val[q->first_to_check];
 
1278         if (state == SLSB_P_INPUT_PRIMED) {
 
1279                 /* we got something to do */
 
1280                 QDIO_DBF_TEXT4(0,trace,"inqisntA");
 
1281                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
1285         /* on VM, we don't poll, so the q is always done here */
 
1288         if (q->hydra_gives_outbound_pcis)
 
1292          * at this point we know that inbound first_to_check
 
1293          * has (probably) not moved (see qdio_inbound_processing) 
 
1295         if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
 
1296 #ifdef CONFIG_QDIO_DEBUG
 
1297                 QDIO_DBF_TEXT4(0,trace,"inqisdon");
 
1298                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
1299                 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
 
1300                 QDIO_DBF_TEXT4(0,trace,dbf_text);
 
1301 #endif /* CONFIG_QDIO_DEBUG */
 
1304 #ifdef CONFIG_QDIO_DEBUG
 
1305                 QDIO_DBF_TEXT4(0,trace,"inqisntd");
 
1306                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
1307                 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
 
1308                 QDIO_DBF_TEXT4(0,trace,dbf_text);
 
1309 #endif /* CONFIG_QDIO_DEBUG */
 
1315 qdio_kick_inbound_handler(struct qdio_q *q)
 
1317         int count, start, end, real_end, i;
 
1318 #ifdef CONFIG_QDIO_DEBUG
 
1322         QDIO_DBF_TEXT4(0,trace,"kickinh");
 
1323         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
1325         start=q->first_element_to_kick;
 
1326         real_end=q->first_to_check;
 
1327         end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);
 
1335                 i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
 
1338 #ifdef CONFIG_QDIO_DEBUG
 
1339         sprintf(dbf_text,"s=%2xc=%2x",start,count);
 
1340         QDIO_DBF_TEXT4(0,trace,dbf_text);
 
1341 #endif /* CONFIG_QDIO_DEBUG */
 
1343         if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
 
1345                            QDIO_STATUS_INBOUND_INT|q->error_status_flags,
 
1346                            q->qdio_error,q->siga_error,q->q_no,start,count,
 
1349         /* for the next time: */
 
1350         q->first_element_to_kick=real_end;
 
1353         q->error_status_flags=0;
 
1355         qdio_perf_stat_inc(&perf_stats.inbound_cnt);
 
1359 __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
 
1361         struct qdio_irq *irq_ptr;
 
1365         QDIO_DBF_TEXT4(0,trace,"iqinproc");
 
1366         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
1369          * we first want to reserve the q, so that we know that we don't
 
1370          * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might
 
1373         if (unlikely(qdio_reserve_q(q))) {
 
1375                 qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
 
1377                  * as we might just be about to stop polling, we make
 
1378                  * sure that we check again at least once more 
 
1383         qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs);
 
1384         if (unlikely(atomic_read(&q->is_in_shutdown))) {
 
1390          * we reset spare_ind_was_set when the queue does not use the
 
1393         if (spare_ind_was_set)
 
1394                 spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);
 
1396         if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
 
1399          * q->dev_st_chg_ind is the indicator, be it shared or not.
 
1400          * only clear it, if indicator is non-shared
 
1402         if (!spare_ind_was_set)
 
1403                 tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
 
1405         if (q->hydra_gives_outbound_pcis) {
 
1406                 if (!q->siga_sync_done_on_thinints) {
 
1408                 } else if (!q->siga_sync_done_on_outb_tis) {
 
1409                         SYNC_MEMORY_ALL_OUTB;
 
1415          * maybe we have to do work on our outbound queues... at least
 
1416          * we have to check the outbound-int-capable thinint-capable
 
1419         if (q->hydra_gives_outbound_pcis) {
 
1420                 irq_ptr = (struct qdio_irq*)q->irq_ptr;
 
1421                 for (i=0;i<irq_ptr->no_output_qs;i++) {
 
1422                         oq = irq_ptr->output_qs[i];
 
1423                         if (!qdio_is_outbound_q_done(oq)) {
 
1424                                 qdio_perf_stat_dec(&perf_stats.tl_runs);
 
1425                                 __qdio_outbound_processing(oq);
 
1430         if (!qdio_has_inbound_q_moved(q))
 
1433         qdio_kick_inbound_handler(q);
 
1434         if (tiqdio_is_inbound_q_done(q))
 
1435                 if (!qdio_stop_polling(q)) {
 
1437                          * we set the flags to get into the stuff next time,
 
1438                          * see also comment in qdio_stop_polling 
 
1440                         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
 
1448 tiqdio_inbound_processing(unsigned long q)
 
1450         __tiqdio_inbound_processing((struct qdio_q *) q,
 
1451                                     atomic_read(&spare_indicator_usecount));
 
1455 __qdio_inbound_processing(struct qdio_q *q)
 
1459         QDIO_DBF_TEXT4(0,trace,"qinproc");
 
1460         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
1462         if (unlikely(qdio_reserve_q(q))) {
 
1464                 qdio_perf_stat_inc(&perf_stats.inbound_tl_runs_resched);
 
1465                 /* as we're sissies, we'll check next time */
 
1466                 if (likely(!atomic_read(&q->is_in_shutdown))) {
 
1468                         QDIO_DBF_TEXT4(0,trace,"busy,agn");
 
1472         qdio_perf_stat_inc(&perf_stats.inbound_tl_runs);
 
1473         qdio_perf_stat_inc(&perf_stats.tl_runs);
 
1476         if (qdio_has_inbound_q_moved(q)) {
 
1477                 qdio_kick_inbound_handler(q);
 
1478                 if (!qdio_stop_polling(q)) {
 
1480                         if (q_laps<QDIO_Q_LAPS) 
 
1485                 if (!qdio_is_inbound_q_done(q)) 
 
1486                         /* means poll time is not yet over */
 
1494 qdio_inbound_processing(unsigned long q)
 
1496         __qdio_inbound_processing((struct qdio_q *) q);
 
1499 /************************* MAIN ROUTINES *******************************/
 
1501 #ifdef QDIO_USE_PROCESSING_STATE
 
1503 tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
 
1511          * under VM, we have not used the PROCESSING state, so no
 
1512          * need to stop polling 
 
1517         if (unlikely(qdio_reserve_q(q))) {
 
1519                 qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
 
1521                  * as we might just be about to stop polling, we make
 
1522                  * sure that we check again at least once more 
 
1526                  * sanity -- we'd get here without setting the
 
1529                 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
 
1533         if (qdio_stop_polling(q)) {
 
1537         if (q_laps<QDIO_Q_LAPS-1) {
 
1542          * we set the flags to get into the stuff
 
1543          * next time, see also comment in qdio_stop_polling 
 
1545         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
 
1551 #endif /* QDIO_USE_PROCESSING_STATE */
 
1554 tiqdio_inbound_checks(void)
 
1557         int spare_ind_was_set=0;
 
1558 #ifdef QDIO_USE_PROCESSING_STATE
 
1560 #endif /* QDIO_USE_PROCESSING_STATE */
 
1562         QDIO_DBF_TEXT4(0,trace,"iqdinbck");
 
1563         QDIO_DBF_TEXT5(0,trace,"iqlocsum");
 
1565 #ifdef QDIO_USE_PROCESSING_STATE
 
1567 #endif /* QDIO_USE_PROCESSING_STATE */
 
1569         /* when the spare indicator is used and set, save that and clear it */
 
1570         if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
 
1571                 spare_ind_was_set = 1;
 
1572                 tiqdio_clear_summary_bit((__u32*)&spare_indicator);
 
1575         q=(struct qdio_q*)tiq_list;
 
1579                 __tiqdio_inbound_processing(q, spare_ind_was_set);
 
1580                 q=(struct qdio_q*)q->list_next;
 
1581         } while (q!=(struct qdio_q*)tiq_list);
 
1583 #ifdef QDIO_USE_PROCESSING_STATE
 
1584         q=(struct qdio_q*)tiq_list;
 
1588                 ret = tiqdio_reset_processing_state(q, q_laps);
 
1595                         q = (struct qdio_q*)q->list_next;
 
1601         } while (q!=(struct qdio_q*)tiq_list);
 
1602 #endif /* QDIO_USE_PROCESSING_STATE */
 
1606 tiqdio_tl(unsigned long data)
 
1608         QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
 
1610         qdio_perf_stat_inc(&perf_stats.tl_runs);
 
1612         tiqdio_inbound_checks();
 
1615 /********************* GENERAL HELPER_ROUTINES ***********************/
 
1618 qdio_release_irq_memory(struct qdio_irq *irq_ptr)
 
1623         for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
 
1624                 q = irq_ptr->input_qs[i];
 
1626                         free_page((unsigned long) q->slib);
 
1627                         kmem_cache_free(qdio_q_cache, q);
 
1629                 q = irq_ptr->output_qs[i];
 
1631                         free_page((unsigned long) q->slib);
 
1632                         kmem_cache_free(qdio_q_cache, q);
 
1635         free_page((unsigned long) irq_ptr->qdr);
 
1636         free_page((unsigned long) irq_ptr);
 
1640 qdio_set_impl_params(struct qdio_irq *irq_ptr,
 
1641                      unsigned int qib_param_field_format,
 
1642                      /* pointer to 128 bytes or NULL, if no param field */
 
1643                      unsigned char *qib_param_field,
 
1644                      /* pointer to no_queues*128 words of data or NULL */
 
1645                      unsigned int no_input_qs,
 
1646                      unsigned int no_output_qs,
 
1647                      unsigned long *input_slib_elements,
 
1648                      unsigned long *output_slib_elements)
 
1655         irq_ptr->qib.pfmt=qib_param_field_format;
 
1656         if (qib_param_field)
 
1657                 memcpy(irq_ptr->qib.parm,qib_param_field,
 
1658                        QDIO_MAX_BUFFERS_PER_Q);
 
1660         if (input_slib_elements)
 
1661                 for (i=0;i<no_input_qs;i++) {
 
1662                         for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
 
1663                                 irq_ptr->input_qs[i]->slib->slibe[j].parms=
 
1664                                         input_slib_elements[
 
1665                                                 i*QDIO_MAX_BUFFERS_PER_Q+j];
 
1667         if (output_slib_elements)
 
1668                 for (i=0;i<no_output_qs;i++) {
 
1669                         for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
 
1670                                 irq_ptr->output_qs[i]->slib->slibe[j].parms=
 
1671                                         output_slib_elements[
 
1672                                                 i*QDIO_MAX_BUFFERS_PER_Q+j];
 
1677 qdio_alloc_qs(struct qdio_irq *irq_ptr,
 
1678               int no_input_qs, int no_output_qs)
 
1683         for (i = 0; i < no_input_qs; i++) {
 
1684                 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
 
1687                 memset(q, 0, sizeof(*q));
 
1689                 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
 
1691                         kmem_cache_free(qdio_q_cache, q);
 
1694                 irq_ptr->input_qs[i]=q;
 
1697         for (i = 0; i < no_output_qs; i++) {
 
1698                 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
 
1701                 memset(q, 0, sizeof(*q));
 
1703                 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
 
1705                         kmem_cache_free(qdio_q_cache, q);
 
1708                 irq_ptr->output_qs[i]=q;
 
1714 qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
 
1715              int no_input_qs, int no_output_qs,
 
1716              qdio_handler_t *input_handler,
 
1717              qdio_handler_t *output_handler,
 
1718              unsigned long int_parm,int q_format,
 
1719              unsigned long flags,
 
1720              void **inbound_sbals_array,
 
1721              void **outbound_sbals_array)
 
1725         char dbf_text[20]; /* see qdio_initialize */
 
1729         sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no);
 
1730         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
1731         for (i=0;i<no_input_qs;i++) {
 
1732                 q=irq_ptr->input_qs[i];
 
1734                 memset(q,0,((char*)&q->slib)-((char*)q));
 
1735                 sprintf(dbf_text,"in-q%4x",i);
 
1736                 QDIO_DBF_TEXT0(0,setup,dbf_text);
 
1737                 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
 
1739                 memset(q->slib,0,PAGE_SIZE);
 
1740                 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
 
1744                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
 
1745                         q->sbal[j]=*(inbound_sbals_array++);
 
1747                 q->queue_type=q_format;
 
1748                 q->int_parm=int_parm;
 
1749                 q->schid = irq_ptr->schid;
 
1750                 q->irq_ptr = irq_ptr;
 
1755                 q->first_to_check=0;
 
1757                 q->handler=input_handler;
 
1758                 q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;
 
1760                 /* q->is_thinint_q isn't valid at this time, but
 
1761                  * irq_ptr->is_thinint_irq is
 
1763                 if (irq_ptr->is_thinint_irq)
 
1764                         tasklet_init(&q->tasklet, tiqdio_inbound_processing,
 
1767                         tasklet_init(&q->tasklet, qdio_inbound_processing,
 
1770                 /* actually this is not used for inbound queues. yet. */
 
1771                 atomic_set(&q->busy_siga_counter,0);
 
1772                 q->timing.busy_start=0;
 
1774 /*              for (j=0;j<QDIO_STATS_NUMBER;j++)
 
1775                         q->timing.last_transfer_times[j]=(qdio_get_micros()/
 
1776                                                           QDIO_STATS_NUMBER)*j;
 
1777                 q->timing.last_transfer_index=QDIO_STATS_NUMBER-1;
 
1781                 if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba=
 
1782                                  (unsigned long)(q->slib);
 
1783                 q->slib->sla=(unsigned long)(q->sl);
 
1784                 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
 
1787                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
 
1788                         q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
 
1790                 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
 
1792                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
 
1793                 ptr=(void*)&q->slsb;
 
1794                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
 
1795                 ptr=(void*)q->sbal[0];
 
1796                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
 
1799                 if (!irq_ptr->is_qebsm) {
 
1800                         unsigned int count = 1;
 
1801                         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
 
1802                                 set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
 
1806         for (i=0;i<no_output_qs;i++) {
 
1807                 q=irq_ptr->output_qs[i];
 
1808                 memset(q,0,((char*)&q->slib)-((char*)q));
 
1810                 sprintf(dbf_text,"outq%4x",i);
 
1811                 QDIO_DBF_TEXT0(0,setup,dbf_text);
 
1812                 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
 
1814                 memset(q->slib,0,PAGE_SIZE);
 
1815                 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
 
1819                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
 
1820                         q->sbal[j]=*(outbound_sbals_array++);
 
1822                 q->queue_type=q_format;
 
1823                 if ((q->queue_type == QDIO_IQDIO_QFMT) &&
 
1824                     (no_output_qs > 1) &&
 
1825                     (i == no_output_qs-1))
 
1826                         q->queue_type = QDIO_IQDIO_QFMT_ASYNCH;
 
1827                 q->int_parm=int_parm;
 
1830                 q->schid = irq_ptr->schid;
 
1832                 q->irq_ptr = irq_ptr;
 
1835                 q->first_to_check=0;
 
1837                 q->handler=output_handler;
 
1839                 tasklet_init(&q->tasklet, qdio_outbound_processing,
 
1841                 setup_timer(&q->timer, qdio_outbound_processing,
 
1844                 atomic_set(&q->busy_siga_counter,0);
 
1845                 q->timing.busy_start=0;
 
1848                 if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
 
1849                                  (unsigned long)(q->slib);
 
1850                 q->slib->sla=(unsigned long)(q->sl);
 
1851                 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
 
1854                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
 
1855                         q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
 
1857                 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
 
1859                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
 
1860                 ptr=(void*)&q->slsb;
 
1861                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
 
1862                 ptr=(void*)q->sbal[0];
 
1863                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
 
1866                 if (!irq_ptr->is_qebsm) {
 
1867                         unsigned int count = 1;
 
1868                         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
 
1869                                 set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
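
/*
 * Per-queue memory layout as set up above: every queue gets one page for
 * its slib, with the sl placed in the second half of that page;
 * sl->element[j].sbal points at the caller-provided SBALs, and slib->sla
 * and slib->slsba record the addresses of the sl and of the SLSB state
 * array. Unless QEBSM is in use, all SLSB entries start out as NOT_INIT.
 */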
 
1875 qdio_fill_thresholds(struct qdio_irq *irq_ptr,
 
1876                      unsigned int no_input_qs,
 
1877                      unsigned int no_output_qs,
 
1878                      unsigned int min_input_threshold,
 
1879                      unsigned int max_input_threshold,
 
1880                      unsigned int min_output_threshold,
 
1881                      unsigned int max_output_threshold)
 
1886         for (i=0;i<no_input_qs;i++) {
 
1887                 q=irq_ptr->input_qs[i];
 
1888                 q->timing.threshold=max_input_threshold;
 
1889 /*              for (j=0;j<QDIO_STATS_CLASSES;j++) {
 
1890                         q->threshold_classes[j].threshold=
 
1891                                 min_input_threshold+
 
1892                                 (max_input_threshold-min_input_threshold)/
 
1895                 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
 
1897         for (i=0;i<no_output_qs;i++) {
 
1898                 q=irq_ptr->output_qs[i];
 
1899                 q->timing.threshold=max_output_threshold;
 
1900 /*              for (j=0;j<QDIO_STATS_CLASSES;j++) {
 
1901                         q->threshold_classes[j].threshold=
 
1902                                 min_output_threshold+
 
1903                                 (max_output_threshold-min_output_threshold)/
 
1906                 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
 
1910 static void tiqdio_thinint_handler(void *ind, void *drv_data)
 
1912         QDIO_DBF_TEXT4(0,trace,"thin_int");
 
1914         qdio_perf_stat_inc(&perf_stats.thinints);
 
1916         /* SVS only when needed:
 
1917          * issue SVS to benefit from iqdio interrupt avoidance
 
1918          * (SVS clears AISOI)*/
 
1920                 tiqdio_clear_global_summary();
 
1922         tiqdio_inbound_checks();
 
1926 qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
 
1929 #ifdef CONFIG_QDIO_DEBUG
 
1932         QDIO_DBF_TEXT5(0,trace,"newstate");
 
1933         sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
 
1934         QDIO_DBF_TEXT5(0,trace,dbf_text);
 
1935 #endif /* CONFIG_QDIO_DEBUG */
 
1937         irq_ptr->state=state;
 
1938         for (i=0;i<irq_ptr->no_input_qs;i++)
 
1939                 irq_ptr->input_qs[i]->state=state;
 
1940         for (i=0;i<irq_ptr->no_output_qs;i++)
 
1941                 irq_ptr->output_qs[i]->state=state;
 
1946 qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
 
1950         if (irb->esw.esw0.erw.cons) {
 
1951                 sprintf(dbf_text,"sens%4x",schid.sch_no);
 
1952                 QDIO_DBF_TEXT2(1,trace,dbf_text);
 
1953                 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
 
1955                 QDIO_PRINT_WARN("sense data available on qdio channel.\n");
 
1956                 QDIO_HEXDUMP16(WARN,"irb: ",irb);
 
1957                 QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw);
 
1963 qdio_handle_pci(struct qdio_irq *irq_ptr)
 
1968         qdio_perf_stat_inc(&perf_stats.pcis);
 
1969         for (i=0;i<irq_ptr->no_input_qs;i++) {
 
1970                 q=irq_ptr->input_qs[i];
 
1971                 if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
1972                         qdio_mark_q(q);
1973                 else {
1974                         qdio_perf_stat_dec(&perf_stats.tl_runs);
1975                         __qdio_inbound_processing(q);
1976                 }
1977         }
1978         if (!irq_ptr->hydra_gives_outbound_pcis)
1979                 return;
1980         for (i=0;i<irq_ptr->no_output_qs;i++) {
1981                 q=irq_ptr->output_qs[i];
1982                 if (qdio_is_outbound_q_done(q))
1983                         continue;
1984                 qdio_perf_stat_dec(&perf_stats.tl_runs);
1985                 if (!irq_ptr->sync_done_on_outb_pcis)
1986                         SYNC_MEMORY;
1987                 __qdio_outbound_processing(q);
1988         }
 
1991 static void qdio_establish_handle_irq(struct ccw_device*, int, int);
 
1994 qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
 
1995                            int cstat, int dstat)
 
1997         struct qdio_irq *irq_ptr;
 
2001         irq_ptr = cdev->private->qdio_data;
 
2003         QDIO_DBF_TEXT2(1, trace, "ick2");
 
2004         sprintf(dbf_text,"%s", cdev->dev.bus_id);
 
2005         QDIO_DBF_TEXT2(1,trace,dbf_text);
 
2006         QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
 
2007         QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
 
2008         QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
 
2009         QDIO_PRINT_ERR("received check condition on activate " \
 
2010                        "queues on device %s (cs=x%x, ds=x%x).\n",
 
2011                        cdev->dev.bus_id, cstat, dstat);
 
2012         if (irq_ptr->no_input_qs) {
 
2013                 q=irq_ptr->input_qs[0];
 
2014         } else if (irq_ptr->no_output_qs) {
 
2015                 q=irq_ptr->output_qs[0];
2016         } else {
2017                 QDIO_PRINT_ERR("oops... no queue registered for device %s!?\n",
2018                                cdev->dev.bus_id);
2019                 goto omit_handler_call;
2020         }
 
2021         q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
 
2022                    QDIO_STATUS_LOOK_FOR_ERROR,
 
2023                    0,0,0,-1,-1,q->int_parm);
 
2025         qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);
 
2030 qdio_call_shutdown(struct work_struct *work)
 
2032         struct ccw_device_private *priv;
 
2033         struct ccw_device *cdev;
 
2035         priv = container_of(work, struct ccw_device_private, kick_work);
 
2037         qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 
2038         put_device(&cdev->dev);
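/*
 * Timeout handling for the establish/cleanup ccw requests: translate the
 * current IRQ state into an error state and, for a terminated ACTIVATE,
 * defer the shutdown to the ccw_device workqueue because it cannot be done
 * in interrupt context (see qdio_call_shutdown above).
 */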
 
2042 qdio_timeout_handler(struct ccw_device *cdev)
 
2044         struct qdio_irq *irq_ptr;
 
2047         QDIO_DBF_TEXT2(0, trace, "qtoh");
 
2048         sprintf(dbf_text, "%s", cdev->dev.bus_id);
 
2049         QDIO_DBF_TEXT2(0, trace, dbf_text);
 
2051         irq_ptr = cdev->private->qdio_data;
 
2052         sprintf(dbf_text, "state:%d", irq_ptr->state);
 
2053         QDIO_DBF_TEXT2(0, trace, dbf_text);
 
2055         switch (irq_ptr->state) {
 
2056         case QDIO_IRQ_STATE_INACTIVE:
 
2057                 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
 
2058                                irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
 
2059                 QDIO_DBF_TEXT2(1,setup,"eq:timeo");
 
2060                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
 
2062         case QDIO_IRQ_STATE_CLEANUP:
 
2063                 QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
2064                                 "irq 0.%x.%x.\n",
2065                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
 
2066                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
 
2068         case QDIO_IRQ_STATE_ESTABLISHED:
 
2069         case QDIO_IRQ_STATE_ACTIVE:
 
2070                 /* I/O has been terminated by common I/O layer. */
 
2071                 QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
 
2072                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
 
2073                 QDIO_DBF_TEXT2(1, trace, "cio:term");
 
2074                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 
2075                 if (get_device(&cdev->dev)) {
 
2076                         /* Can't call shutdown from interrupt context. */
 
2077                         PREPARE_WORK(&cdev->private->kick_work,
 
2078                                      qdio_call_shutdown);
 
2079                         queue_work(ccw_device_work, &cdev->private->kick_work);
 
2085         ccw_device_set_timeout(cdev, 0);
 
2086         wake_up(&cdev->private->wait_q);
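/*
 * Interrupt handler installed on the ccw device while QDIO owns it: validate
 * the interrupt, record sense data, and dispatch on the current IRQ state
 * (establish completion, cleanup completion, or PCI/check handling while
 * the queues are active).
 */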
 
2090 qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 
2092         struct qdio_irq *irq_ptr;
 
2096 #ifdef CONFIG_QDIO_DEBUG
 
2097         QDIO_DBF_TEXT4(0, trace, "qint");
 
2098         sprintf(dbf_text, "%s", cdev->dev.bus_id);
 
2099         QDIO_DBF_TEXT4(0, trace, dbf_text);
 
2100 #endif /* CONFIG_QDIO_DEBUG */
 
2102         if (!intparm) {
2103                 QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
2104                                   "handler, device %s\n", cdev->dev.bus_id);
2105                 return;
2106         }
 
2108         irq_ptr = cdev->private->qdio_data;
2109         if (!irq_ptr) {
2110                 QDIO_DBF_TEXT2(1, trace, "uint");
 
2111                 sprintf(dbf_text,"%s", cdev->dev.bus_id);
 
2112                 QDIO_DBF_TEXT2(1,trace,dbf_text);
 
2113                 QDIO_PRINT_ERR("received interrupt on unused device %s!\n",
2114                                cdev->dev.bus_id);
2115                 return;
2116         }
2117 
2118         if (IS_ERR(irb)) {
2119                 /* Currently running i/o is in error. */
2120                 switch (PTR_ERR(irb)) {
2121                 case -EIO:
2122                         QDIO_PRINT_ERR("i/o error on device %s\n",
2123                                        cdev->dev.bus_id);
2124                         return;
2125                 case -ETIMEDOUT:
2126                         qdio_timeout_handler(cdev);
2127                         return;
2128                 default:
2129                         QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
2130                                        PTR_ERR(irb), cdev->dev.bus_id);
2131                         return;
2132                 }
2133         }
 
2135         qdio_irq_check_sense(irq_ptr->schid, irb);
 
2137 #ifdef CONFIG_QDIO_DEBUG
 
2138         sprintf(dbf_text, "state:%d", irq_ptr->state);
 
2139         QDIO_DBF_TEXT4(0, trace, dbf_text);
 
2140 #endif /* CONFIG_QDIO_DEBUG */
 
2142         cstat = irb->scsw.cstat;
 
2143         dstat = irb->scsw.dstat;
 
2145         switch (irq_ptr->state) {
 
2146         case QDIO_IRQ_STATE_INACTIVE:
 
2147                 qdio_establish_handle_irq(cdev, cstat, dstat);
 
2150         case QDIO_IRQ_STATE_CLEANUP:
 
2151                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
 
2154         case QDIO_IRQ_STATE_ESTABLISHED:
 
2155         case QDIO_IRQ_STATE_ACTIVE:
 
2156                 if (cstat & SCHN_STAT_PCI) {
 
2157                         qdio_handle_pci(irq_ptr);
 
2161                 if ((cstat&~SCHN_STAT_PCI)||dstat) {
 
2162                         qdio_handle_activate_check(cdev, intparm, cstat, dstat);
 
2166                 QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
 
2168                                irq_ptr->state, cdev->dev.bus_id);
 
2170         wake_up(&cdev->private->wait_q);
 
2175 qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
 
2176                  unsigned int queue_number)
 
2180         struct qdio_irq *irq_ptr;
 
2182 #ifdef CONFIG_QDIO_DEBUG
 
2183         char dbf_text[15]="SyncXXXX";
 
2186         irq_ptr = cdev->private->qdio_data;
 
2190 #ifdef CONFIG_QDIO_DEBUG
 
2191         *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
 
2192         QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
 
2193         *((int*)(&dbf_text[0]))=flags;
 
2194         *((int*)(&dbf_text[4]))=queue_number;
 
2195         QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
 
2196 #endif /* CONFIG_QDIO_DEBUG */
 
2198         if (flags&QDIO_FLAG_SYNC_INPUT) {
 
2199                 q=irq_ptr->input_qs[queue_number];
 
2202                 if (!(irq_ptr->is_qebsm))
 
2203                         cc = do_siga_sync(q->schid, 0, q->mask);
 
2204         } else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
 
2205                 q=irq_ptr->output_qs[queue_number];
 
2208                 if (!(irq_ptr->is_qebsm))
 
2209                         cc = do_siga_sync(q->schid, q->mask, 0);
 
2215                 QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int));
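/*
 * Evaluate the QEBSM-related part of the SSQD response: if QEBSM cannot be
 * used, clear the token and the QIB rflags bit; otherwise store the
 * subchannel token and (re)initialize the SLSB entries of all queues.
 */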
 
2221 qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
 
2222                             unsigned long token)
 
2226         unsigned int count, start_buf;
 
2229         /* check if QEBSM is disabled */
 
2230         if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) {
 
2231                 irq_ptr->is_qebsm  = 0;
 
2232                 irq_ptr->sch_token = 0;
 
2233                 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
 
2234                 QDIO_DBF_TEXT0(0,setup,"noV=V");
 
2237         irq_ptr->sch_token = token;
 
2239         for (i = 0; i < irq_ptr->no_input_qs;i++) {
 
2240                 q = irq_ptr->input_qs[i];
 
2241                 count = QDIO_MAX_BUFFERS_PER_Q;
 
2243                 set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
 
2245         sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
 
2246         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2247         sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
 
2248         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2250         for (i = 0; i < irq_ptr->no_output_qs; i++) {
 
2251                 q = irq_ptr->output_qs[i];
 
2252                 count = QDIO_MAX_BUFFERS_PER_Q;
 
2254                 set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
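/*
 * Query the subchannel's QDIO characteristics (qdioac flags, QEBSM token)
 * with a CHSC store-subchannel-QDIO-data request. Any allocation, condition
 * code or response error falls back to the safe assumption that all SIGA
 * instructions are required and QEBSM is unavailable.
 */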
 
2259 qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
 
2262         unsigned char qdioac;
 
2264                 struct chsc_header request;
 
2272                 struct chsc_header response;
 
2291         QDIO_DBF_TEXT0(0,setup,"getssqd");
 
2293         ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
 
2295                 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
 
2296                                 "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
 
2297                 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
 
2298                                   CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
 
2299                                   CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
 
2300                 irq_ptr->is_qebsm = 0;
 
2301                 irq_ptr->sch_token = 0;
 
2302                 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
 
2306         ssqd_area->request = (struct chsc_header) {
 
2310         ssqd_area->first_sch = irq_ptr->schid.sch_no;
 
2311         ssqd_area->last_sch = irq_ptr->schid.sch_no;
 
2312         ssqd_area->ssid = irq_ptr->schid.ssid;
 
2313         result = chsc(ssqd_area);
 
2316                 QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
 
2317                                 "SIGAs for sch 0.%x.%x.\n", result,
 
2318                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
 
2319                 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
 
2320                         CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
 
2321                         CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
 
2322                 irq_ptr->is_qebsm  = 0;
 
2326         if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
 
2327                 QDIO_PRINT_WARN("response upon checking SIGA needs " \
 
2328                                 "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
 
2329                                 ssqd_area->response.code,
 
2330                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
 
2331                 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
 
2332                         CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
 
2333                         CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
 
2334                 irq_ptr->is_qebsm  = 0;
 
2337         if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
 
2338             !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
 
2339             (ssqd_area->sch != irq_ptr->schid.sch_no)) {
 
2340                 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
 
2341                                 "using all SIGAs.\n",
 
2342                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
 
2343                 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
 
2344                         CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
 
2345                         CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
 
2346                 irq_ptr->is_qebsm  = 0;
 
2349         qdioac = ssqd_area->qdioac1;
 
2351         qdio_check_subchannel_qebsm(irq_ptr, qdioac,
 
2352                                     ssqd_area->sch_token);
 
2353         mempool_free(ssqd_area, qdio_mempool_scssc);
 
2354         irq_ptr->qdioac = qdioac;
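/*
 * Probe the channel-subsystem characteristics this driver relies on:
 * adapter interruptions (bit 41), the set-channel-subsystem-characteristics
 * CHSC commands (bits 107/108), OSA/FCP thin interrupts (bit 67), QEBSM
 * (bit 58) and the aif time delay disablement facility (bit 56).
 */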
 
2358 tiqdio_check_chsc_availability(void)
 
2362         if (!css_characteristics_avail)
 
2365         /* Check for bit 41. */
 
2366         if (!css_general_characteristics.aif) {
 
2367                 QDIO_PRINT_WARN("Adapter interruption facility not " \
 
2372         /* Check for bits 107 and 108. */
 
2373         if (!css_chsc_characteristics.scssc ||
 
2374             !css_chsc_characteristics.scsscf) {
 
2375                 QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
 
2376                                 "not available.\n");
 
2380         /* Check for OSA/FCP thin interrupts (bit 67). */
 
2381         hydra_thinints = css_general_characteristics.aif_osa;
 
2382         sprintf(dbf_text,"hydrati%1x", hydra_thinints);
 
2383         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2386         /* Check for QEBSM support in general (bit 58). */
 
2387         is_passthrough = css_general_characteristics.qebsm;
 
2389         sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
 
2390         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2392         /* Check for the aif time delay disablement facility (bit 56). If it is
2393          * installed, we can omit SVS even under LPAR (good point by rick again). */
 
2394         omit_svs = css_general_characteristics.aif_tdd;
 
2395         sprintf(dbf_text,"omitsvs%1x", omit_svs);
 
2396         QDIO_DBF_TEXT0(0,setup,dbf_text);
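/*
 * Register (or, with reset_to_zero, deregister) the global summary bit and
 * this subchannel's device state change indicator for adapter interrupts
 * via a CHSC set-channel-subsystem-characteristics request.
 */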
 
2402 tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
 
2404         unsigned long real_addr_local_summary_bit;
 
2405         unsigned long real_addr_dev_st_chg_ind;
 
2409         unsigned int resp_code;
 
2413                 struct chsc_header request;
 
2418                 u64 summary_indicator_addr;
 
2419                 u64 subchannel_indicator_addr;
 
2424                 u32 word_with_d_bit;
 
2425                 /* set to 0x10000000 to enable
 
2426                  * time delay disablement facility */
 
2428                 struct subchannel_id schid;
 
2429                 u32 reserved6[1004];
 
2430                 struct chsc_header response;
 
2434         if (!irq_ptr->is_thinint_irq)
 
2437         if (reset_to_zero) {
2438                 real_addr_local_summary_bit=0;
2439                 real_addr_dev_st_chg_ind=0;
2440         } else {
2441                 real_addr_local_summary_bit=
2442                         virt_to_phys((volatile void *)tiqdio_ind);
2443                 real_addr_dev_st_chg_ind=
2444                         virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
2445         }
 
2447         scssc_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
 
2449                 QDIO_PRINT_WARN("No memory for setting indicators on " \
 
2450                                 "subchannel 0.%x.%x.\n",
 
2451                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
 
2454         scssc_area->request = (struct chsc_header) {
 
2458         scssc_area->operation_code = 0;
 
2460         scssc_area->summary_indicator_addr = real_addr_local_summary_bit;
 
2461         scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind;
 
2462         scssc_area->ks = QDIO_STORAGE_KEY;
 
2463         scssc_area->kc = QDIO_STORAGE_KEY;
 
2464         scssc_area->isc = TIQDIO_THININT_ISC;
 
2465         scssc_area->schid = irq_ptr->schid;
 
2466         /* enables the time delay disablement facility. Don't care
2467          * whether it is really there (i.e. we haven't checked for
2468          * it). */
 
2469         if (css_general_characteristics.aif_tdd)
 
2470                 scssc_area->word_with_d_bit = 0x10000000;
 
2472                 QDIO_PRINT_WARN("Time delay disablement facility " \
 
2475         result = chsc(scssc_area);
 
2477                 QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
 
2479                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
 
2484         resp_code = scssc_area->response.code;
 
2485         if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
 
2486                 QDIO_PRINT_WARN("response upon setting indicators " \
 
2487                                 "is 0x%x.\n",resp_code);
 
2488                 sprintf(dbf_text,"sidR%4x",resp_code);
 
2489                 QDIO_DBF_TEXT1(0,trace,dbf_text);
 
2490                 QDIO_DBF_TEXT1(0,setup,dbf_text);
 
2491                 ptr=&scssc_area->response;
 
2492                 QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
 
2497         QDIO_DBF_TEXT2(0,setup,"setscind");
 
2498         QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
 
2499                       sizeof(unsigned long));
 
2500         QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
 
2503         mempool_free(scssc_area, qdio_mempool_scssc);
 
2509 tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
 
2511         unsigned int resp_code;
 
2517                 struct chsc_header request;
 
2524                 u32 reserved5[1009];
 
2525                 struct chsc_header response;
 
2529         if (!irq_ptr->is_thinint_irq)
 
2532         scsscf_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
 
2534                 QDIO_PRINT_WARN("No memory for setting delay target on " \
 
2535                                 "subchannel 0.%x.%x.\n",
 
2536                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
 
2539         scsscf_area->request = (struct chsc_header) {
 
2544         scsscf_area->delay_target = delay_target<<16;
 
2546         result=chsc(scsscf_area);
 
2548                 QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
 
2549                                 "cc=%i. Continuing.\n",
 
2550                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
 
2556         resp_code = scsscf_area->response.code;
 
2557         if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
 
2558                 QDIO_PRINT_WARN("response upon setting delay target " \
 
2559                                 "is 0x%x. Continuing.\n",resp_code);
 
2560                 sprintf(dbf_text,"sdtR%4x",resp_code);
 
2561                 QDIO_DBF_TEXT1(0,trace,dbf_text);
 
2562                 QDIO_DBF_TEXT1(0,setup,dbf_text);
 
2563                 ptr=&scsscf_area->response;
 
2564                 QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
 
2566         QDIO_DBF_TEXT2(0,trace,"delytrgt");
 
2567         QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
 
2568         result = 0; /* not critical */
 
2570         mempool_free(scsscf_area, qdio_mempool_scssc);
 
2575 qdio_cleanup(struct ccw_device *cdev, int how)
 
2577         struct qdio_irq *irq_ptr;
 
2581         irq_ptr = cdev->private->qdio_data;
 
2585         sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
 
2586         QDIO_DBF_TEXT1(0,trace,dbf_text);
 
2587         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2589         rc = qdio_shutdown(cdev, how);
 
2590         if ((rc == 0) || (rc == -EINPROGRESS))
 
2591                 rc = qdio_free(cdev);
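/*
 * Shut the queues down: mark them as being in shutdown, kill the tasklets,
 * wait for outstanding users, then halt or clear the subchannel (depending
 * on the QDIO_FLAG_CLEANUP_USING_* flags), release the thin-interrupt
 * indicator and restore the original interrupt handler.
 */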
 
2596 qdio_shutdown(struct ccw_device *cdev, int how)
 
2598         struct qdio_irq *irq_ptr;
 
2602         unsigned long flags;
 
2606         irq_ptr = cdev->private->qdio_data;
 
2610         down(&irq_ptr->setting_up_sema);
 
2612         sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
 
2613         QDIO_DBF_TEXT1(0,trace,dbf_text);
 
2614         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2616         /* mark all qs as uninteresting */
 
2617         for (i=0;i<irq_ptr->no_input_qs;i++)
 
2618                 atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1);
 
2620         for (i=0;i<irq_ptr->no_output_qs;i++)
 
2621                 atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1);
 
2623         tasklet_kill(&tiqdio_tasklet);
 
2625         for (i=0;i<irq_ptr->no_input_qs;i++) {
 
2626                 qdio_unmark_q(irq_ptr->input_qs[i]);
 
2627                 tasklet_kill(&irq_ptr->input_qs[i]->tasklet);
 
2628                 wait_event_interruptible_timeout(cdev->private->wait_q,
 
2629                                                  !atomic_read(&irq_ptr->
 
2632                                                  QDIO_NO_USE_COUNT_TIMEOUT);
 
2633                 if (atomic_read(&irq_ptr->input_qs[i]->use_count))
 
2634                         result=-EINPROGRESS;
 
2637         for (i=0;i<irq_ptr->no_output_qs;i++) {
 
2638                 tasklet_kill(&irq_ptr->output_qs[i]->tasklet);
 
2639                 del_timer(&irq_ptr->output_qs[i]->timer);
 
2640                 wait_event_interruptible_timeout(cdev->private->wait_q,
 
2641                                                  !atomic_read(&irq_ptr->
 
2644                                                  QDIO_NO_USE_COUNT_TIMEOUT);
 
2645                 if (atomic_read(&irq_ptr->output_qs[i]->use_count))
 
2646                         result=-EINPROGRESS;
 
2649         /* cleanup subchannel */
 
2650         spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
 
2651         if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) {
 
2652                 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
 
2653                 timeout=QDIO_CLEANUP_CLEAR_TIMEOUT;
 
2654         } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) {
 
2655                 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
 
2656                 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
 
2657         } else { /* default behaviour */
 
2658                 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
 
2659                 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
 
2661         if (rc == -ENODEV) {
 
2662                 /* No need to wait for device no longer present. */
 
2663                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
 
2664                 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 
2665         } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) {
 
2666                 /*
2667                  * Whoever put another handler there has to cope with the
2668                  * interrupt themselves. Might happen if qdio_shutdown was
2669                  * called on already-shutdown queues, but this shouldn't have
2670                  * bad practical effects.
2671                  */
 
2672                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
 
2673                 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 
2674         } else if (rc == 0) {
 
2675                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
 
2676                 ccw_device_set_timeout(cdev, timeout);
 
2677                 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
 
2679                 wait_event(cdev->private->wait_q,
 
2680                            irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
 
2681                            irq_ptr->state == QDIO_IRQ_STATE_ERR);
 
2683                 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
2684                                 "device %s\n", rc, cdev->dev.bus_id);
 
2685                 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 
2689         if (irq_ptr->is_thinint_irq) {
 
2690                 qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
 
2691                 tiqdio_set_subchannel_ind(irq_ptr,1); 
 
2692                 /* reset adapter interrupt indicators */
 
2695         /* exchange int handlers, if necessary */
 
2696         if ((void*)cdev->handler == (void*)qdio_handler)
 
2697                 cdev->handler=irq_ptr->original_int_handler;
 
2699         /* Ignore errors. */
 
2700         qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
 
2701         ccw_device_set_timeout(cdev, 0);
 
2703         up(&irq_ptr->setting_up_sema);
 
2708 qdio_free(struct ccw_device *cdev)
 
2710         struct qdio_irq *irq_ptr;
 
2713         irq_ptr = cdev->private->qdio_data;
 
2717         down(&irq_ptr->setting_up_sema);
 
2719         sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
 
2720         QDIO_DBF_TEXT1(0,trace,dbf_text);
 
2721         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2723         cdev->private->qdio_data = NULL;
 
2725         up(&irq_ptr->setting_up_sema);
 
2727         qdio_release_irq_memory(irq_ptr);
 
2728         module_put(THIS_MODULE);
 
2733 qdio_allocate_do_dbf(struct qdio_initialize *init_data)
 
2735         char dbf_text[20]; /* if a printf printed out more than 8 chars */
 
2737         sprintf(dbf_text,"qfmt:%x",init_data->q_format);
 
2738         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2739         QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
 
2740         sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
 
2741         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2742         QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*));
 
2743         QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*));
 
2744         QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*));
 
2745         sprintf(dbf_text,"miit%4x",init_data->min_input_threshold);
 
2746         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2747         sprintf(dbf_text,"mait%4x",init_data->max_input_threshold);
 
2748         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2749         sprintf(dbf_text,"miot%4x",init_data->min_output_threshold);
 
2750         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2751         sprintf(dbf_text,"maot%4x",init_data->max_output_threshold);
 
2752         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2753         sprintf(dbf_text,"niq:%4x",init_data->no_input_qs);
 
2754         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2755         sprintf(dbf_text,"noq:%4x",init_data->no_output_qs);
 
2756         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2757         QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*));
 
2758         QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*));
 
2759         QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long));
 
2760         QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long));
 
2761         QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*));
 
2762         QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
 
2766 qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
 
2768         irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
 
2769         irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
 
2771         irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib);
 
2773         irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl);
 
2775         irq_ptr->qdr->qdf0[i].slsba=
 
2776                 (unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]);
 
2778         irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY;
 
2779         irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY;
 
2780         irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY;
 
2781         irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
 
2785 qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
 
2788         irq_ptr->output_qs[i]->is_iqdio_q = iqfmt;
 
2789         irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
 
2791         irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib);
 
2793         irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl);
 
2795         irq_ptr->qdr->qdf0[i+j].slsba=
 
2796                 (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]);
 
2798         irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY;
 
2799         irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY;
 
2800         irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY;
 
2801         irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY;
 
2806 qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
 
2810         for (i=0;i<irq_ptr->no_input_qs;i++) {
 
2811                 irq_ptr->input_qs[i]->siga_sync=
 
2812                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
 
2813                 irq_ptr->input_qs[i]->siga_in=
 
2814                         irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
 
2815                 irq_ptr->input_qs[i]->siga_out=
 
2816                         irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
 
2817                 irq_ptr->input_qs[i]->siga_sync_done_on_thinints=
 
2818                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
 
2819                 irq_ptr->input_qs[i]->hydra_gives_outbound_pcis=
 
2820                         irq_ptr->hydra_gives_outbound_pcis;
 
2821                 irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis=
 
2823                           (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
 
2824                            CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
 
2825                          (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
 
2826                           CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
 
2832 qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
 
2836         for (i=0;i<irq_ptr->no_output_qs;i++) {
 
2837                 irq_ptr->output_qs[i]->siga_sync=
 
2838                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
 
2839                 irq_ptr->output_qs[i]->siga_in=
 
2840                         irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
 
2841                 irq_ptr->output_qs[i]->siga_out=
 
2842                         irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
 
2843                 irq_ptr->output_qs[i]->siga_sync_done_on_thinints=
 
2844                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
 
2845                 irq_ptr->output_qs[i]->hydra_gives_outbound_pcis=
 
2846                         irq_ptr->hydra_gives_outbound_pcis;
 
2847                 irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis=
 
2849                           (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
 
2850                            CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
 
2851                          (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
 
2852                           CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
 
2858 qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
 
2862         struct qdio_irq *irq_ptr;
 
2864         irq_ptr = cdev->private->qdio_data;
 
2866         if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
 
2867                 sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
 
2868                 QDIO_DBF_TEXT2(1,trace,dbf_text);
 
2869                 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
 
2870                 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
 
2871                 QDIO_PRINT_ERR("received check condition on establish " \
 
2872                                "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
 
2873                                irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
 
2875                 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
 
2878         if (!(dstat & DEV_STAT_DEV_END)) {
 
2879                 QDIO_DBF_TEXT2(1,setup,"eq:no de");
 
2880                 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
 
2881                 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
 
2882                 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
 
2883                                "device end: dstat=%02x, cstat=%02x\n",
 
2884                                irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
 
2886                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
 
2890         if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
 
2891                 QDIO_DBF_TEXT2(1,setup,"eq:badio");
 
2892                 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
 
2893                 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
 
2894                 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
 
2895                                "the following devstat: dstat=%02x, "
 
2896                                "cstat=%02x\n", irq_ptr->schid.ssid,
 
2897                                irq_ptr->schid.sch_no, dstat, cstat);
 
2898                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
 
2905 qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
 
2907         struct qdio_irq *irq_ptr;
 
2910         irq_ptr = cdev->private->qdio_data;
 
2912         sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no);
 
2913         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2914         QDIO_DBF_TEXT0(0,trace,dbf_text);
 
2916         if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) {
 
2917                 ccw_device_set_timeout(cdev, 0);
 
2921         qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
 
2922         ccw_device_set_timeout(cdev, 0);
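/*
 * Convenience wrapper: qdio_allocate() followed by qdio_establish(); the
 * allocated memory is freed again if establishing the queues fails.
 *
 * Hypothetical use by an upper-layer driver (sketch only, not part of this
 * file; field values and handler names are placeholders, error handling
 * omitted):
 *
 *	struct qdio_initialize init_data = {
 *		.cdev           = cdev,
 *		.no_input_qs    = 1,
 *		.no_output_qs   = 1,
 *		.input_handler  = my_inbound_handler,
 *		.output_handler = my_outbound_handler,
 *		... q_format, thresholds, SBAL address arrays, ...
 *	};
 *	qdio_initialize(&init_data);
 *	qdio_activate(cdev, 0);
 *	... do_QDIO() as traffic flows ...
 *	qdio_cleanup(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 */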
 
2926 qdio_initialize(struct qdio_initialize *init_data)
 
2931         sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no);
 
2932         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2933         QDIO_DBF_TEXT0(0,trace,dbf_text);
 
2935         rc = qdio_allocate(init_data);
 
2937                 rc = qdio_establish(init_data);
 
2939                         qdio_free(init_data->cdev);
 
2947 qdio_allocate(struct qdio_initialize *init_data)
 
2949         struct qdio_irq *irq_ptr;
 
2952         sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no);
 
2953         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
2954         QDIO_DBF_TEXT0(0,trace,dbf_text);
 
2955         if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
 
2956              (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
 
2957              ((init_data->no_input_qs) && (!init_data->input_handler)) ||
 
2958              ((init_data->no_output_qs) && (!init_data->output_handler)) )
 
2961         if (!init_data->input_sbal_addr_array)
 
2964         if (!init_data->output_sbal_addr_array)
 
2967         qdio_allocate_do_dbf(init_data);
 
2970         irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 
2972         QDIO_DBF_TEXT0(0,setup,"irq_ptr:");
 
2973         QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
 
2976                 QDIO_PRINT_ERR("allocation of irq_ptr failed!\n");
 
2980         init_MUTEX(&irq_ptr->setting_up_sema);
 
2982         /* QDR must be in DMA area since CCW data address is only 32 bit */
 
2983         irq_ptr->qdr = (struct qdr *) __get_free_page(GFP_KERNEL | GFP_DMA);
 
2984         if (!(irq_ptr->qdr)) {
 
2985                 free_page((unsigned long) irq_ptr);
 
2986                 QDIO_PRINT_ERR("allocation of irq_ptr->qdr failed!\n");
 
2989         QDIO_DBF_TEXT0(0,setup,"qdr:");
 
2990         QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*));
 
2992         if (qdio_alloc_qs(irq_ptr,
 
2993                           init_data->no_input_qs,
 
2994                           init_data->no_output_qs)) {
 
2995                 QDIO_PRINT_ERR("queue allocation failed!\n");
 
2996                 qdio_release_irq_memory(irq_ptr);
 
3000         init_data->cdev->private->qdio_data = irq_ptr;
 
3002         qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);
 
3007 static int qdio_fill_irq(struct qdio_initialize *init_data)
 
3013         struct qdio_irq *irq_ptr;
 
3015         irq_ptr = init_data->cdev->private->qdio_data;
 
3017         memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr));
 
3019         /* wipes qib.ac, required by ar7063 */
 
3020         memset(irq_ptr->qdr,0,sizeof(struct qdr));
 
3022         irq_ptr->int_parm=init_data->int_parm;
 
3024         irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
 
3025         irq_ptr->no_input_qs=init_data->no_input_qs;
 
3026         irq_ptr->no_output_qs=init_data->no_output_qs;
 
3028         if (init_data->q_format==QDIO_IQDIO_QFMT) {
3029                 irq_ptr->is_iqdio_irq=1;
3030                 irq_ptr->is_thinint_irq=1;
3031         } else {
3032                 irq_ptr->is_iqdio_irq=0;
3033                 irq_ptr->is_thinint_irq=hydra_thinints;
3034         }
 
3035         sprintf(dbf_text,"is_i_t%1x%1x",
 
3036                 irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq);
 
3037         QDIO_DBF_TEXT2(0,setup,dbf_text);
 
3039         if (irq_ptr->is_thinint_irq) {
 
3040                 irq_ptr->dev_st_chg_ind = qdio_get_indicator();
 
3041                 QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
 
3042                 if (!irq_ptr->dev_st_chg_ind) {
 
3043                         QDIO_PRINT_WARN("no indicator location available " \
 
3044                                         "for irq 0.%x.%x\n",
 
3045                                         irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
 
3046                         qdio_release_irq_memory(irq_ptr);
 
3052         irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD;
 
3053         irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT;
 
3054         irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD;
 
3055         irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT;
 
3057         qdio_fill_qs(irq_ptr, init_data->cdev,
 
3058                      init_data->no_input_qs,
 
3059                      init_data->no_output_qs,
 
3060                      init_data->input_handler,
 
3061                      init_data->output_handler,init_data->int_parm,
 
3062                      init_data->q_format,init_data->flags,
 
3063                      init_data->input_sbal_addr_array,
 
3064                      init_data->output_sbal_addr_array);
 
3066         if (!try_module_get(THIS_MODULE)) {
 
3067                 QDIO_PRINT_CRIT("try_module_get() failed!\n");
 
3068                 qdio_release_irq_memory(irq_ptr);
 
3072         qdio_fill_thresholds(irq_ptr,init_data->no_input_qs,
 
3073                              init_data->no_output_qs,
 
3074                              init_data->min_input_threshold,
 
3075                              init_data->max_input_threshold,
 
3076                              init_data->min_output_threshold,
 
3077                              init_data->max_output_threshold);
 
3080         irq_ptr->qdr->qfmt=init_data->q_format;
 
3081         irq_ptr->qdr->iqdcnt=init_data->no_input_qs;
 
3082         irq_ptr->qdr->oqdcnt=init_data->no_output_qs;
 
3083         irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */
 
3084         irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4;
 
3086         irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib;
 
3087         irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
 
3090         irq_ptr->is_qebsm = is_passthrough;
 
3091         if (irq_ptr->is_qebsm)
 
3092                 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
 
3094         irq_ptr->qib.qfmt=init_data->q_format;
 
3095         if (init_data->no_input_qs)
 
3096                 irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
 
3097         if (init_data->no_output_qs)
 
3098                 irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib);
 
3099         memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8);
 
3101         qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format,
 
3102                              init_data->qib_param_field,
 
3103                              init_data->no_input_qs,
 
3104                              init_data->no_output_qs,
 
3105                              init_data->input_slib_elements,
 
3106                              init_data->output_slib_elements);
 
3108         /* first input descriptors, then output descriptors */
 
3109         is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0;
 
3110         for (i=0;i<init_data->no_input_qs;i++)
 
3111                 qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio);
 
3113         for (i=0;i<init_data->no_output_qs;i++)
 
3114                 qdio_allocate_fill_output_desc(irq_ptr, i,
 
3115                                                init_data->no_input_qs,
 
3118         /* qdr, qib, sls, slsbs, slibs, sbales filled. */
 
3120         /* get qdio commands */
 
3121         ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
3122         if (!ciw) {
3123                 QDIO_DBF_TEXT2(1,setup,"no eq");
3124                 QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. "
3125                                 "Trying to use default.\n");
3126         } else
3127                 irq_ptr->equeue = *ciw;
3128         ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
3129         if (!ciw) {
3130                 QDIO_DBF_TEXT2(1,setup,"no aq");
3131                 QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. "
3132                                 "Trying to use default.\n");
3133         } else
3134                 irq_ptr->aqueue = *ciw;
 
3136         /* Set new interrupt handler. */
 
3137         irq_ptr->original_int_handler = init_data->cdev->handler;
 
3138         init_data->cdev->handler = qdio_handler;
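/*
 * Establish the queues: fill the qdio_irq structure and the QDR, set up
 * thin-interrupt indicators if applicable, start the establish ccw and wait
 * for the ESTABLISHED state, then read the SSQD information and derive the
 * per-queue SIGA flags.
 */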
 
3144 qdio_establish(struct qdio_initialize *init_data)
 
3146         struct qdio_irq *irq_ptr;
 
3147         unsigned long saveflags;
 
3148         int result, result2;
 
3149         struct ccw_device *cdev;
 
3152         cdev=init_data->cdev;
 
3153         irq_ptr = cdev->private->qdio_data;
 
3157         if (cdev->private->state != DEV_STATE_ONLINE)
 
3160         down(&irq_ptr->setting_up_sema);
 
3162         qdio_fill_irq(init_data);
 
3164         /* the thinint CHSC stuff */
 
3165         if (irq_ptr->is_thinint_irq) {
 
3167                 result = tiqdio_set_subchannel_ind(irq_ptr,0);
 
3169                         up(&irq_ptr->setting_up_sema);
 
3170                         qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 
3173                 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
 
3176         sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no);
 
3177         QDIO_DBF_TEXT0(0,setup,dbf_text);
 
3178         QDIO_DBF_TEXT0(0,trace,dbf_text);
 
3181         irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd;
 
3182         irq_ptr->ccw.flags=CCW_FLAG_SLI;
 
3183         irq_ptr->ccw.count=irq_ptr->equeue.count;
 
3184         irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr);
 
3186         spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
 
3188         ccw_device_set_options_mask(cdev, 0);
 
3189         result = ccw_device_start(cdev, &irq_ptr->ccw,
 
3190                                 QDIO_DOING_ESTABLISH, 0, 0);
 
3192                 result2 = ccw_device_start(cdev, &irq_ptr->ccw,
 
3193                                         QDIO_DOING_ESTABLISH, 0, 0);
 
3194                 sprintf(dbf_text,"eq:io%4x",result);
 
3195                 QDIO_DBF_TEXT2(1,setup,dbf_text);
 
3197                         sprintf(dbf_text,"eq:io%4x",result);
 
3198                         QDIO_DBF_TEXT2(1,setup,dbf_text);
 
3200                 QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
 
3201                                 "returned %i, next try returned %i\n",
 
3202                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
 
3206                         ccw_device_set_timeout(cdev, 0);
 
3209         spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
 
3212                 up(&irq_ptr->setting_up_sema);
 
3213                 qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
 
3217         wait_event_interruptible_timeout(cdev->private->wait_q,
 
3218                 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
 
3219                 irq_ptr->state == QDIO_IRQ_STATE_ERR,
 
3220                 QDIO_ESTABLISH_TIMEOUT);
 
3222         if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)
 
3225                 up(&irq_ptr->setting_up_sema);
 
3226                 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 
3230         qdio_get_ssqd_information(irq_ptr);
 
3231         /* if this gets set once, we're running under VM and can omit SVSes */
 
3232         if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
 
3235         sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac);
 
3236         QDIO_DBF_TEXT2(0,setup,dbf_text);
 
3238         sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac);
 
3239         QDIO_DBF_TEXT2(0,setup,dbf_text);
 
3241         irq_ptr->hydra_gives_outbound_pcis=
 
3242                 irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED;
 
3243         irq_ptr->sync_done_on_outb_pcis=
 
3244                 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS;
 
3246         qdio_initialize_set_siga_flags_input(irq_ptr);
 
3247         qdio_initialize_set_siga_flags_output(irq_ptr);
 
3249         up(&irq_ptr->setting_up_sema);
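/*
 * Activate the previously established queues by starting the activate ccw;
 * afterwards each thin-interrupt input queue is reserved, marked via
 * qdio_mark_tiq and released again.
 */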
 
3256 qdio_activate(struct ccw_device *cdev, int flags)
 
3258         struct qdio_irq *irq_ptr;
 
3259         int i,result=0,result2;
 
3260         unsigned long saveflags;
 
3261         char dbf_text[20]; /* see qdio_initialize */
 
3263         irq_ptr = cdev->private->qdio_data;
 
3267         if (cdev->private->state != DEV_STATE_ONLINE)
 
3270         down(&irq_ptr->setting_up_sema);
 
3271         if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) {
 
3276         sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
 
3277         QDIO_DBF_TEXT2(0,setup,dbf_text);
 
3278         QDIO_DBF_TEXT2(0,trace,dbf_text);
 
3281         irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd;
 
3282         irq_ptr->ccw.flags=CCW_FLAG_SLI;
 
3283         irq_ptr->ccw.count=irq_ptr->aqueue.count;
 
3284         irq_ptr->ccw.cda=QDIO_GET_ADDR(0);
 
3286         spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
 
3288         ccw_device_set_timeout(cdev, 0);
 
3289         ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
 
3290         result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
 
3291                                 0, DOIO_DENY_PREFETCH);
 
3293                 result2=ccw_device_start(cdev,&irq_ptr->ccw,
 
3294                                          QDIO_DOING_ACTIVATE,0,0);
 
3295                 sprintf(dbf_text,"aq:io%4x",result);
 
3296                 QDIO_DBF_TEXT2(1,setup,dbf_text);
 
3298                         sprintf(dbf_text,"aq:io%4x",result);
 
3299                         QDIO_DBF_TEXT2(1,setup,dbf_text);
 
3301                 QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
 
3302                                 "returned %i, next try returned %i\n",
 
3303                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
 
3308         spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
 
3312         for (i=0;i<irq_ptr->no_input_qs;i++) {
 
3313                 if (irq_ptr->is_thinint_irq) {
 
3314                         /*
3315                          * That way we know that, if we get interrupted
3316                          * by tiqdio_inbound_processing, qdio_unmark_q
3317                          * will not be called.
3318                          */
 
3319                         qdio_reserve_q(irq_ptr->input_qs[i]);
 
3320                         qdio_mark_tiq(irq_ptr->input_qs[i]);
 
3321                         qdio_release_q(irq_ptr->input_qs[i]);
 
3325         if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) {
 
3326                 for (i=0;i<irq_ptr->no_input_qs;i++) {
 
3327                         irq_ptr->input_qs[i]->is_input_q|=
 
3328                                 QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT;
 
3332         msleep(QDIO_ACTIVATE_TIMEOUT);
 
3333         switch (irq_ptr->state) {
 
3334         case QDIO_IRQ_STATE_STOPPED:
 
3335         case QDIO_IRQ_STATE_ERR:
 
3336                 up(&irq_ptr->setting_up_sema);
 
3337                 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 
3338                 down(&irq_ptr->setting_up_sema);
 
3342                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
 
3346         up(&irq_ptr->setting_up_sema);
 
3351 /* buffers filled forwards again to make Rick happy */
 
3353 qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
 
3354                         unsigned int count, struct qdio_buffer *buffers)
 
3356         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
 
3359         qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
 
3360         if (irq->is_qebsm) {
 
3362                         tmp = set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
 
3369                 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
 
3372                 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
 
3377 qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
 
3378                          unsigned int count, struct qdio_buffer *buffers)
 
3380         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
 
3383         qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
 
3384         if (irq->is_qebsm) {
 
3386                         tmp = set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
 
3394                 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
 
3397                 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
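/*
 * Inbound part of do_QDIO: account the buffers handed back by the upper
 * layer, mark them empty again for the adapter, stop polling once the queue
 * has been refilled completely from interrupt context, and, when the device
 * requires it and the caller did not pass QDIO_FLAG_DONT_SIGA, issue SIGA-in.
 */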
 
3402 do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
 
3403                        unsigned int qidx, unsigned int count,
 
3404                        struct qdio_buffer *buffers)
 
3408         /* This is the inbound handling of queues */
 
3409         used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
 
3411         qdio_do_qdio_fill_input(q,qidx,count,buffers);
 
3413         if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
 
3414             (callflags&QDIO_FLAG_UNDER_INTERRUPT))
 
3415                 atomic_xchg(&q->polling,0);
 
3419         if (callflags&QDIO_FLAG_DONT_SIGA)
 
3424                 result=qdio_siga_input(q);
 
3427                                 q->error_status_flags|=
 
3428                                         QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
 
3429                         q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
 
3430                         q->siga_error=result;
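/*
 * Outbound part of do_QDIO: mark the buffers as primed and account them.
 * For iqdio queues a SIGA is issued per SBAL; otherwise the code tries a
 * fast requeue (no new SIGA) when the buffer preceding the newly primed
 * ones is still primed, and falls back to the regular outbound processing.
 */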
 
3438 do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
 
3439                         unsigned int qidx, unsigned int count,
 
3440                         struct qdio_buffer *buffers)
 
3443         unsigned int cnt, start_buf;
 
3444         unsigned char state = 0;
 
3445         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
 
3447         /* This is the outbound handling of queues */
 
3448         qdio_do_qdio_fill_output(q,qidx,count,buffers);
 
3450         used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
 
3452         if (callflags&QDIO_FLAG_DONT_SIGA) {
 
3453                 qdio_perf_stat_inc(&perf_stats.outbound_cnt);
 
3456         if (callflags & QDIO_FLAG_PCI_OUT)
 
3460         if (q->is_iqdio_q) {
 
3461                 /* one siga for every sbal */
 
3463                         qdio_kick_outbound_q(q);
 
3465                 __qdio_outbound_processing(q);
 
3467                 /* under VM, we do a SIGA sync unconditionally */
 
3470                         /*
3471                          * w/o shadow queues (else branch of
3472                          * SYNC_MEMORY :-/ ), we try to
3473                          * fast-requeue buffers
3474                          */
 
3475                         if (irq->is_qebsm) {
 
3477                                 start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
 
3478                                              (QDIO_MAX_BUFFERS_PER_Q-1));
 
3479                                 qdio_do_eqbs(q, &state, &start_buf, &cnt);
 
3481                                 state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
 
3482                                         &(QDIO_MAX_BUFFERS_PER_Q-1) ];
 
3483                          if (state != SLSB_CU_OUTPUT_PRIMED) {
 
3484                                 qdio_kick_outbound_q(q);
 
3486                                 QDIO_DBF_TEXT3(0,trace, "fast-req");
 
3487                                 qdio_perf_stat_inc(&perf_stats.fast_reqs);
 
3491                  * Only marking the q could take too long;
3492                  * the upper layer module could do a lot of
3493                  * traffic in that time.
 
3495                 __qdio_outbound_processing(q);
 
3498         qdio_perf_stat_inc(&perf_stats.outbound_cnt);
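/*
 * do_QDIO: entry point for upper-layer drivers, used both to return emptied
 * inbound buffers to the adapter (QDIO_FLAG_SYNC_INPUT) and to hand filled
 * outbound buffers to it (QDIO_FLAG_SYNC_OUTPUT). A hypothetical call
 * returning one inbound buffer on queue 0 could look like this (sketch
 * only; first_element and buffers are the caller's own variables):
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, first_element, 1, buffers);
 *	if (rc)
 *		... handle the error ...
 */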
 
3501 /* count must be 1 in iqdio */
 
3503 do_QDIO(struct ccw_device *cdev,unsigned int callflags,
 
3504         unsigned int queue_number, unsigned int qidx,
 
3505         unsigned int count,struct qdio_buffer *buffers)
 
3507         struct qdio_irq *irq_ptr;
 
3508 #ifdef CONFIG_QDIO_DEBUG
 
3511         sprintf(dbf_text,"doQD%04x",cdev->private->schid.sch_no);
 
3512         QDIO_DBF_TEXT3(0,trace,dbf_text);
 
3513 #endif /* CONFIG_QDIO_DEBUG */
 
3515         if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) ||
 
3516              (count>QDIO_MAX_BUFFERS_PER_Q) ||
 
3517              (queue_number>QDIO_MAX_QUEUES_PER_IRQ) )
 
3523         irq_ptr = cdev->private->qdio_data;
 
3527 #ifdef CONFIG_QDIO_DEBUG
 
3528         if (callflags&QDIO_FLAG_SYNC_INPUT)
 
3529                 QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
 
3532                 QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number],
 
3534         sprintf(dbf_text,"flag%04x",callflags);
 
3535         QDIO_DBF_TEXT3(0,trace,dbf_text);
 
3536         sprintf(dbf_text,"qi%02xct%02x",qidx,count);
 
3537         QDIO_DBF_TEXT3(0,trace,dbf_text);
 
3538 #endif /* CONFIG_QDIO_DEBUG */
 
3540         if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
 
3543         if (callflags&QDIO_FLAG_SYNC_INPUT)
 
3544                 do_qdio_handle_inbound(irq_ptr->input_qs[queue_number],
 
3545                                        callflags, qidx, count, buffers);
 
3546         else if (callflags&QDIO_FLAG_SYNC_OUTPUT)
 
3547                 do_qdio_handle_outbound(irq_ptr->output_qs[queue_number],
 
3548                                         callflags, qidx, count, buffers);
 
3550                 QDIO_DBF_TEXT3(1,trace,"doQD:inv");
 
3557 qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
 
3558                         int buffer_length, int *eof, void *data)
 
3562         /* we are always called with buffer_length=4k, so we
3563            deliver everything on the first read */
 
3567 #define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
 
        _OUTP_IT("Number of tasklet runs (total)                  : %li\n",
                 (long)atomic64_read(&perf_stats.tl_runs));
        _OUTP_IT("Inbound tasklet runs      tried/retried         : %li/%li\n",
                 (long)atomic64_read(&perf_stats.inbound_tl_runs),
                 (long)atomic64_read(&perf_stats.inbound_tl_runs_resched));
        _OUTP_IT("Inbound-thin tasklet runs tried/retried         : %li/%li\n",
                 (long)atomic64_read(&perf_stats.inbound_thin_tl_runs),
                 (long)atomic64_read(&perf_stats.inbound_thin_tl_runs_resched));
        _OUTP_IT("Outbound tasklet runs     tried/retried         : %li/%li\n",
                 (long)atomic64_read(&perf_stats.outbound_tl_runs),
                 (long)atomic64_read(&perf_stats.outbound_tl_runs_resched));

        _OUTP_IT("Number of SIGA sync's issued                    : %li\n",
                 (long)atomic64_read(&perf_stats.siga_syncs));
        _OUTP_IT("Number of SIGA in's issued                      : %li\n",
                 (long)atomic64_read(&perf_stats.siga_ins));
        _OUTP_IT("Number of SIGA out's issued                     : %li\n",
                 (long)atomic64_read(&perf_stats.siga_outs));
        _OUTP_IT("Number of PCIs caught                           : %li\n",
                 (long)atomic64_read(&perf_stats.pcis));
        _OUTP_IT("Number of adapter interrupts caught             : %li\n",
                 (long)atomic64_read(&perf_stats.thinints));
        _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA)  : %li\n",
                 (long)atomic64_read(&perf_stats.fast_reqs));

        _OUTP_IT("Number of inbound transfers                     : %li\n",
                 (long)atomic64_read(&perf_stats.inbound_cnt));
        _OUTP_IT("Number of do_QDIOs outbound                     : %li\n",
                 (long)atomic64_read(&perf_stats.outbound_cnt));
#else /* CONFIG_64BIT */
        _OUTP_IT("Number of tasklet runs (total)                  : %i\n",
                 atomic_read(&perf_stats.tl_runs));
        _OUTP_IT("Inbound tasklet runs      tried/retried         : %i/%i\n",
                 atomic_read(&perf_stats.inbound_tl_runs),
                 atomic_read(&perf_stats.inbound_tl_runs_resched));
        _OUTP_IT("Inbound-thin tasklet runs tried/retried         : %i/%i\n",
                 atomic_read(&perf_stats.inbound_thin_tl_runs),
                 atomic_read(&perf_stats.inbound_thin_tl_runs_resched));
        _OUTP_IT("Outbound tasklet runs     tried/retried         : %i/%i\n",
                 atomic_read(&perf_stats.outbound_tl_runs),
                 atomic_read(&perf_stats.outbound_tl_runs_resched));

        _OUTP_IT("Number of SIGA sync's issued                    : %i\n",
                 atomic_read(&perf_stats.siga_syncs));
        _OUTP_IT("Number of SIGA in's issued                      : %i\n",
                 atomic_read(&perf_stats.siga_ins));
        _OUTP_IT("Number of SIGA out's issued                     : %i\n",
                 atomic_read(&perf_stats.siga_outs));
        _OUTP_IT("Number of PCIs caught                           : %i\n",
                 atomic_read(&perf_stats.pcis));
        _OUTP_IT("Number of adapter interrupts caught             : %i\n",
                 atomic_read(&perf_stats.thinints));
        _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA)  : %i\n",
                 atomic_read(&perf_stats.fast_reqs));

        _OUTP_IT("Number of inbound transfers                     : %i\n",
                 atomic_read(&perf_stats.inbound_cnt));
        _OUTP_IT("Number of do_QDIOs outbound                     : %i\n",
                 atomic_read(&perf_stats.outbound_cnt));
#endif /* CONFIG_64BIT */

        return c;
}
 
static struct proc_dir_entry *qdio_perf_proc_file;
 
static void
qdio_add_procfs_entry(void)
{
        proc_perf_file_registration = 0;
        qdio_perf_proc_file = create_proc_entry(QDIO_PERF,
                                                S_IFREG|0444, &proc_root);
        if (qdio_perf_proc_file) {
                qdio_perf_proc_file->read_proc = &qdio_perf_procfile_read;
        } else
                proc_perf_file_registration = -1;

        if (proc_perf_file_registration)
                QDIO_PRINT_WARN("was not able to register perf. "
                                "proc-file (%i).\n",
                                proc_perf_file_registration);
}

static void
qdio_remove_procfs_entry(void)
{
        if (!proc_perf_file_registration) /* i.e. registration succeeded earlier */
                remove_proc_entry(QDIO_PERF, &proc_root);
}
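
/*
 * Usage sketch (illustrative only): with statistics enabled, the perf_stats
 * counters can be read through the proc file registered above, e.g.
 *
 *      cat /proc/qdio_perf
 *
 * assuming QDIO_PERF expands to "qdio_perf"; the actual name is defined in
 * the qdio header.
 */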
 
/*****************************************************************************
 * attributes in sysfs
 *****************************************************************************/
 
static ssize_t
qdio_performance_stats_show(struct bus_type *bus, char *buf)
{
        return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
}
 
static ssize_t
qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
{
        unsigned long i;
        char *tmp;

        i = simple_strtoul(buf, &tmp, 16);
        if ((i == 0) || (i == 1)) {
                if (i == qdio_performance_stats)
                        return count;
                qdio_performance_stats = i;
                if (i == 0) {
                        /* reset perf. stat. info */
#ifdef CONFIG_64BIT
                        atomic64_set(&perf_stats.tl_runs, 0);
                        atomic64_set(&perf_stats.outbound_tl_runs, 0);
                        atomic64_set(&perf_stats.inbound_tl_runs, 0);
                        atomic64_set(&perf_stats.inbound_tl_runs_resched, 0);
                        atomic64_set(&perf_stats.inbound_thin_tl_runs, 0);
                        atomic64_set(&perf_stats.inbound_thin_tl_runs_resched,
                                     0);
                        atomic64_set(&perf_stats.siga_outs, 0);
                        atomic64_set(&perf_stats.siga_ins, 0);
                        atomic64_set(&perf_stats.siga_syncs, 0);
                        atomic64_set(&perf_stats.pcis, 0);
                        atomic64_set(&perf_stats.thinints, 0);
                        atomic64_set(&perf_stats.fast_reqs, 0);
                        atomic64_set(&perf_stats.outbound_cnt, 0);
                        atomic64_set(&perf_stats.inbound_cnt, 0);
#else /* CONFIG_64BIT */
                        atomic_set(&perf_stats.tl_runs, 0);
                        atomic_set(&perf_stats.outbound_tl_runs, 0);
                        atomic_set(&perf_stats.inbound_tl_runs, 0);
                        atomic_set(&perf_stats.inbound_tl_runs_resched, 0);
                        atomic_set(&perf_stats.inbound_thin_tl_runs, 0);
                        atomic_set(&perf_stats.inbound_thin_tl_runs_resched, 0);
                        atomic_set(&perf_stats.siga_outs, 0);
                        atomic_set(&perf_stats.siga_ins, 0);
                        atomic_set(&perf_stats.siga_syncs, 0);
                        atomic_set(&perf_stats.pcis, 0);
                        atomic_set(&perf_stats.thinints, 0);
                        atomic_set(&perf_stats.fast_reqs, 0);
                        atomic_set(&perf_stats.outbound_cnt, 0);
                        atomic_set(&perf_stats.inbound_cnt, 0);
#endif /* CONFIG_64BIT */
                }
        } else {
                QDIO_PRINT_ERR("QDIO performance_stats: write 0 or 1 to this file!\n");
                return -EINVAL;
        }
        return count;
}
 
static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show,
                qdio_performance_stats_store);
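
/*
 * Usage sketch (illustrative only): init_QDIO() below creates this attribute
 * on the ccw bus, so collection can be toggled from user space via the usual
 * sysfs bus-attribute path, e.g.
 *
 *      echo 1 > /sys/bus/ccw/qdio_performance_stats    # enable
 *      echo 0 > /sys/bus/ccw/qdio_performance_stats    # disable
 */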
 
static void
tiqdio_register_thinints(void)
{
        char dbf_text[20];

        tiqdio_ind =
                s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL);
        if (IS_ERR(tiqdio_ind)) {
                sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind));
                QDIO_DBF_TEXT0(0,setup,dbf_text);
                QDIO_PRINT_ERR("failed to register adapter handler "
                               "(rc=%li).\nAdapter interrupts might "
                               "not work. Continuing.\n",
                               PTR_ERR(tiqdio_ind));
                tiqdio_ind = NULL;
        }
}
 
static void
tiqdio_unregister_thinints(void)
{
        if (tiqdio_ind)
                s390_unregister_adapter_interrupt(tiqdio_ind);
}
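
/*
 * Note (illustrative summary): tiqdio_ind is the adapter-interrupt indicator
 * returned by s390_register_adapter_interrupt(); tiqdio_thinint_handler() is
 * driven by adapter (thin) interrupts for the thin-interrupt input queues.
 * If the registration failed, tiqdio_ind stays NULL and adapter interrupts
 * are simply not used.
 */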
 
static int
qdio_get_qdio_memory(void)
{
        int i;

        /* mark slot 0 as used so it is never handed out as an indicator */
        indicator_used[0] = 1;
        for (i = 1; i < INDICATORS_PER_CACHELINE; i++)
                indicator_used[i] = 0;
        indicators = kzalloc(sizeof(__u32) * INDICATORS_PER_CACHELINE,
                             GFP_KERNEL);
        if (!indicators)
                return -ENOMEM;
        return 0;
}
 
static void
qdio_release_qdio_memory(void)
{
        kfree(indicators);
}
 
static void
qdio_unregister_dbf_views(void)
{
        if (qdio_dbf_setup)
                debug_unregister(qdio_dbf_setup);
        if (qdio_dbf_sbal)
                debug_unregister(qdio_dbf_sbal);
        if (qdio_dbf_sense)
                debug_unregister(qdio_dbf_sense);
        if (qdio_dbf_trace)
                debug_unregister(qdio_dbf_trace);
#ifdef CONFIG_QDIO_DEBUG
        if (qdio_dbf_slsb_out)
                debug_unregister(qdio_dbf_slsb_out);
        if (qdio_dbf_slsb_in)
                debug_unregister(qdio_dbf_slsb_in);
#endif /* CONFIG_QDIO_DEBUG */
}
 
static int
qdio_register_dbf_views(void)
{
        qdio_dbf_setup = debug_register(QDIO_DBF_SETUP_NAME,
                                        QDIO_DBF_SETUP_PAGES,
                                        QDIO_DBF_SETUP_NR_AREAS,
                                        QDIO_DBF_SETUP_LEN);
        if (!qdio_dbf_setup)
                goto oom;
        debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
        debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL);

        qdio_dbf_sbal = debug_register(QDIO_DBF_SBAL_NAME,
                                       QDIO_DBF_SBAL_PAGES,
                                       QDIO_DBF_SBAL_NR_AREAS,
                                       QDIO_DBF_SBAL_LEN);
        if (!qdio_dbf_sbal)
                goto oom;
        debug_register_view(qdio_dbf_sbal, &debug_hex_ascii_view);
        debug_set_level(qdio_dbf_sbal, QDIO_DBF_SBAL_LEVEL);

        qdio_dbf_sense = debug_register(QDIO_DBF_SENSE_NAME,
                                        QDIO_DBF_SENSE_PAGES,
                                        QDIO_DBF_SENSE_NR_AREAS,
                                        QDIO_DBF_SENSE_LEN);
        if (!qdio_dbf_sense)
                goto oom;
        debug_register_view(qdio_dbf_sense, &debug_hex_ascii_view);
        debug_set_level(qdio_dbf_sense, QDIO_DBF_SENSE_LEVEL);

        qdio_dbf_trace = debug_register(QDIO_DBF_TRACE_NAME,
                                        QDIO_DBF_TRACE_PAGES,
                                        QDIO_DBF_TRACE_NR_AREAS,
                                        QDIO_DBF_TRACE_LEN);
        if (!qdio_dbf_trace)
                goto oom;
        debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view);
        debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL);

#ifdef CONFIG_QDIO_DEBUG
        qdio_dbf_slsb_out = debug_register(QDIO_DBF_SLSB_OUT_NAME,
                                           QDIO_DBF_SLSB_OUT_PAGES,
                                           QDIO_DBF_SLSB_OUT_NR_AREAS,
                                           QDIO_DBF_SLSB_OUT_LEN);
        if (!qdio_dbf_slsb_out)
                goto oom;
        debug_register_view(qdio_dbf_slsb_out, &debug_hex_ascii_view);
        debug_set_level(qdio_dbf_slsb_out, QDIO_DBF_SLSB_OUT_LEVEL);

        qdio_dbf_slsb_in = debug_register(QDIO_DBF_SLSB_IN_NAME,
                                          QDIO_DBF_SLSB_IN_PAGES,
                                          QDIO_DBF_SLSB_IN_NR_AREAS,
                                          QDIO_DBF_SLSB_IN_LEN);
        if (!qdio_dbf_slsb_in)
                goto oom;
        debug_register_view(qdio_dbf_slsb_in, &debug_hex_ascii_view);
        debug_set_level(qdio_dbf_slsb_in, QDIO_DBF_SLSB_IN_LEVEL);
#endif /* CONFIG_QDIO_DEBUG */
        return 0;
oom:
        QDIO_PRINT_ERR("not enough memory for dbf.\n");
        qdio_unregister_dbf_views();
        return -ENOMEM;
}
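
/*
 * Note (illustrative summary): the entries registered above use the s390
 * debug feature (s390dbf); depending on the kernel version they appear
 * under /sys/kernel/debug/s390dbf/ or /proc/s390dbf/, one directory per
 * debug_register() call, named after the QDIO_DBF_*_NAME macros.
 */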
 
static void *qdio_mempool_alloc(gfp_t gfp_mask, void *size)
{
        return (void *) get_zeroed_page(gfp_mask | GFP_DMA);
}

static void qdio_mempool_free(void *element, void *size)
{
        free_page((unsigned long) element);
}
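
/*
 * Note (illustrative summary): these two helpers back qdio_mempool_scssc,
 * created in init_QDIO() with QDIO_MEMPOOL_SCSSC_ELEMENTS elements. Each
 * element is a zeroed page taken from GFP_DMA, which on s390 keeps the
 * CHSC request block 31-bit addressable (below 2GB).
 */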
 
static int __init
init_QDIO(void)
{
        int res;
        void *ptr;

        printk("qdio: loading %s\n", version);

        res = qdio_get_qdio_memory();
        if (res)
                return res;

        qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
                                         256, 0, NULL);
        if (!qdio_q_cache) {
                qdio_release_qdio_memory();
                return -ENOMEM;
        }

        res = qdio_register_dbf_views();
        if (res) {
                kmem_cache_destroy(qdio_q_cache);
                qdio_release_qdio_memory();
                return res;
        }

        QDIO_DBF_TEXT0(0,setup,"initQDIO");
        res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);

        memset((void*)&perf_stats, 0, sizeof(perf_stats));
        QDIO_DBF_TEXT0(0,setup,"perfstat");
        ptr = &perf_stats;
        QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));

        qdio_add_procfs_entry();

        qdio_mempool_scssc = mempool_create(QDIO_MEMPOOL_SCSSC_ELEMENTS,
                                            qdio_mempool_alloc,
                                            qdio_mempool_free, NULL);

        if (tiqdio_check_chsc_availability())
                QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");

        tiqdio_register_thinints();

        return 0;
}
 
static void __exit
cleanup_QDIO(void)
{
        tiqdio_unregister_thinints();
        qdio_remove_procfs_entry();
        qdio_release_qdio_memory();
        qdio_unregister_dbf_views();
        mempool_destroy(qdio_mempool_scssc);
        kmem_cache_destroy(qdio_q_cache);
        bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
        printk("qdio: %s: module removed\n", version);
}
 
module_init(init_QDIO);
module_exit(cleanup_QDIO);

EXPORT_SYMBOL(qdio_allocate);
EXPORT_SYMBOL(qdio_establish);
EXPORT_SYMBOL(qdio_initialize);
EXPORT_SYMBOL(qdio_activate);
EXPORT_SYMBOL(do_QDIO);
EXPORT_SYMBOL(qdio_shutdown);
EXPORT_SYMBOL(qdio_free);
EXPORT_SYMBOL(qdio_cleanup);
EXPORT_SYMBOL(qdio_synchronize);
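
/*
 * Rough call order of the exported interface as seen from an upper-layer
 * driver (illustrative summary only):
 *
 *      qdio_initialize()   - allocate and establish the queues
 *                            (or qdio_allocate() + qdio_establish())
 *      qdio_activate()     - start queue processing on the subchannel
 *      do_QDIO()           - hand SBALs to / reclaim SBALs from the adapter
 *      qdio_synchronize()  - issue a SIGA-sync for a queue
 *      qdio_cleanup()      - shut down and free the queues
 *                            (or qdio_shutdown() + qdio_free())
 */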