/*
 * linux/drivers/s390/cio/qdio.c
 *
 * Linux for S/390 QDIO base support, Hipersocket base support
 *
 * Copyright 2000,2002 IBM Corporation
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cohuck@de.ibm.com>
 *
 * Restriction: only 63 iqdio subchannels will have their own indicator;
 * after that, subsequent subchannels share one indicator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/timer.h>

#include <asm/ccwdev.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/timex.h>

#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "airq.h"
#include "qdio.h"
#include "ioasm.h"
#include "chsc.h"

#define VERSION_QDIO_C "$Revision: 1.101 $"
/****************** MODULE PARAMETER VARIABLES ********************/
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
MODULE_DESCRIPTION("QDIO base support version 2, " \
		   "Copyright 2000 IBM Corporation");
MODULE_LICENSE("GPL");

/******************** HERE WE GO ***********************************/

static const char version[] = "QDIO base support version 2 ("
	VERSION_QDIO_C "/" VERSION_QDIO_H "/" VERSION_CIO_QDIO_H ")";
#ifdef QDIO_PERFORMANCE_STATS
static int proc_perf_file_registration;
static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc;
static struct qdio_perf_stats perf_stats;
#endif /* QDIO_PERFORMANCE_STATS */

static int hydra_thinints;
static int omit_svs;

static int indicator_used[INDICATORS_PER_CACHELINE];
static __u32 * volatile indicators;
static __u32 volatile spare_indicator;
static atomic_t spare_indicator_usecount;

static debug_info_t *qdio_dbf_setup;
static debug_info_t *qdio_dbf_sbal;
static debug_info_t *qdio_dbf_trace;
static debug_info_t *qdio_dbf_sense;
#ifdef CONFIG_QDIO_DEBUG
static debug_info_t *qdio_dbf_slsb_out;
static debug_info_t *qdio_dbf_slsb_in;
#endif /* CONFIG_QDIO_DEBUG */

/* iQDIO stuff: */
static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
						 during a while loop */
static DEFINE_SPINLOCK(ttiq_list_lock);
static int register_thinint_result;
static void tiqdio_tl(unsigned long);
static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
/* not a macro, as one of the arguments is atomic_read */
static inline int
qdio_min(int a, int b)
{
	return (a < b) ? a : b;
}
/***************** SCRUBBER HELPER ROUTINES **********************/

static inline volatile __u64
qdio_get_micros(void)
{
	return (get_clock() >> 10); /* time>>12 is microseconds */
}
/*
 * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve
 * the q in any case, so that we'll not be interrupted when we are in
 * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost
 * always works (last famous words)
 */
static inline int
qdio_reserve_q(struct qdio_q *q)
{
	return atomic_add_return(1,&q->use_count) - 1;
}

static inline void
qdio_release_q(struct qdio_q *q)
{
	atomic_dec(&q->use_count);
}

static volatile inline void
qdio_set_slsb(volatile char *slsb, unsigned char value)
{
	xchg((char*)slsb,value);
}
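/*
 * Background note: the SLSB is the per-buffer state byte array shared with
 * the adapter. Using xchg here makes the state update an atomic, serializing
 * store, so the adapter never observes a half-written state. Typical use,
 * with the SLSB_* states coming from asm/qdio.h:
 *
 *	qdio_set_slsb(&q->slsb.acc.val[bufno], SLSB_P_INPUT_NOT_INIT);
 */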
static inline int
qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
	       unsigned int gpr3)
{
	int cc;

	QDIO_DBF_TEXT4(0,trace,"sigasync");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.siga_syncs++;
#endif /* QDIO_PERFORMANCE_STATS */

	cc = do_siga_sync(q->irq, gpr2, gpr3);
	if (cc)
		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));

	return cc;
}

static inline int
qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	return qdio_siga_sync(q, q->mask, 0);
}
/*
 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
 * an access exception
 */
static inline int
qdio_siga_output(struct qdio_q *q)
{
	int cc;
	__u32 busy_bit;
	__u64 start_time=0;

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.siga_outs++;
#endif /* QDIO_PERFORMANCE_STATS */

	QDIO_DBF_TEXT4(0,trace,"sigaout");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	for (;;) {
		cc = do_siga_output(q->irq, q->mask, &busy_bit);
		//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
		if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
			if (!start_time)
				start_time=NOW;
			if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
				break;
		} else
			break;
	}

	if ((cc==2) && (busy_bit))
		cc |= QDIO_SIGA_ERROR_B_BIT_SET;

	if (cc)
		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));

	return cc;
}
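/*
 * Note on the retry loop above: it busy-waits on cc=2 with the busy bit
 * only for HiperSockets queues (is_iqdio_q), and only for up to
 * QDIO_BUSY_BIT_PATIENCE. Anything that stays busy longer is handed over
 * to the asynchronous busy_siga_counter machinery in qdio_kick_outbound_q
 * below, so we never spin indefinitely in interrupt or tasklet context.
 */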
static inline int
qdio_siga_input(struct qdio_q *q)
{
	int cc;

	QDIO_DBF_TEXT4(0,trace,"sigain");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.siga_ins++;
#endif /* QDIO_PERFORMANCE_STATS */

	cc = do_siga_input(q->irq, q->mask);

	if (cc)
		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));

	return cc;
}
/* locked by the locks in qdio_activate and qdio_cleanup */
static __u32 volatile *
qdio_get_indicator(void)
{
	int i;

	for (i=1;i<INDICATORS_PER_CACHELINE;i++)
		if (!indicator_used[i]) {
			indicator_used[i]=1;
			return indicators+i;
		}
	atomic_inc(&spare_indicator_usecount);
	return (__u32 * volatile) &spare_indicator;
}

/* locked by the locks in qdio_activate and qdio_cleanup */
static void
qdio_put_indicator(__u32 *addr)
{
	int i;

	if ( (addr) && (addr!=&spare_indicator) ) {
		i=addr-indicators;
		indicator_used[i]=0;
	}
	if (addr == &spare_indicator)
		atomic_dec(&spare_indicator_usecount);
}
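/*
 * Indicator scheme: "indicators" is one cache line of __u32 device state
 * change indicators. Slot 0 is never handed out (the search starts at 1),
 * so INDICATORS_PER_CACHELINE-1 thinint subchannels get a private
 * indicator -- the 63 mentioned in the header comment. Every further
 * subchannel falls back to the shared spare_indicator, which is why its
 * use count is tracked in spare_indicator_usecount.
 */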
static inline volatile void
tiqdio_clear_summary_bit(__u32 *location)
{
	QDIO_DBF_TEXT5(0,trace,"clrsummb");
	QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));

	xchg(location,0);
}

static inline volatile void
tiqdio_set_summary_bit(__u32 *location)
{
	QDIO_DBF_TEXT5(0,trace,"setsummb");
	QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));

	xchg(location,-1);
}

static inline void
tiqdio_sched_tl(void)
{
	tasklet_hi_schedule(&tiqdio_tasklet);
}
static inline void
qdio_mark_tiq(struct qdio_q *q)
{
	unsigned long flags;

	QDIO_DBF_TEXT4(0,trace,"mark iq");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	spin_lock_irqsave(&ttiq_list_lock,flags);
	if (unlikely(atomic_read(&q->is_in_shutdown)))
		goto out_unlock;

	if (!q->is_input_q)
		goto out_unlock;

	if ((q->list_prev) || (q->list_next))
		goto out_unlock;

	if (!tiq_list) {
		tiq_list=q;
		q->list_prev=q;
		q->list_next=q;
	} else {
		q->list_next=tiq_list;
		q->list_prev=tiq_list->list_prev;
		tiq_list->list_prev->list_next=q;
		tiq_list->list_prev=q;
	}
	spin_unlock_irqrestore(&ttiq_list_lock,flags);

	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	tiqdio_sched_tl();
	return;
out_unlock:
	spin_unlock_irqrestore(&ttiq_list_lock,flags);
}
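/*
 * tiq_list is a circular doubly-linked list threaded through the
 * list_prev/list_next members of the queues themselves; a queue with both
 * pointers NULL is not enqueued, which is exactly what the
 * (list_prev || list_next) check above relies on.
 */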
static inline void
qdio_mark_q(struct qdio_q *q)
{
	QDIO_DBF_TEXT4(0,trace,"mark q");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	if (unlikely(atomic_read(&q->is_in_shutdown)))
		return;

	tasklet_schedule(&q->tasklet);
}
static inline int
qdio_stop_polling(struct qdio_q *q)
{
#ifdef QDIO_USE_PROCESSING_STATE
	unsigned int gsf;

	if (!atomic_swap(&q->polling,0))
		return 1;

	QDIO_DBF_TEXT4(0,trace,"stoppoll");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	/* show the card that we are not polling anymore */
	if (!q->is_input_q)
		return 1;

	gsf=GET_SAVED_FRONTIER(q);
	set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)&
				  (QDIO_MAX_BUFFERS_PER_Q-1)],
		 SLSB_P_INPUT_NOT_INIT);
	/*
	 * we don't issue this SYNC_MEMORY, as we trust Rick T and
	 * moreover will not use the PROCESSING state under VM, so
	 * q->polling was 0 anyway
	 */
	/*SYNC_MEMORY;*/
	if (q->slsb.acc.val[gsf]!=SLSB_P_INPUT_PRIMED)
		return 1;
	/*
	 * set our summary bit again, as otherwise there is a
	 * small window we can miss between resetting it and
	 * checking for PRIMED state
	 */
	if (q->is_thinint_q)
		tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	return 0;

#else /* QDIO_USE_PROCESSING_STATE */
	return 1;
#endif /* QDIO_USE_PROCESSING_STATE */
}
/*
 * see the comment in do_QDIO and before qdio_reserve_q about the
 * sophisticated locking outside of unmark_q, so that we don't need to
 * disable the interrupts :-)
 */
static inline void
qdio_unmark_q(struct qdio_q *q)
{
	unsigned long flags;

	QDIO_DBF_TEXT4(0,trace,"unmark q");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	if ((!q->list_prev)||(!q->list_next))
		return;

	if ((q->is_thinint_q)&&(q->is_input_q)) {
		/* iQDIO */
		spin_lock_irqsave(&ttiq_list_lock,flags);
		/* in case cleanup has done this already and simultaneously
		 * qdio_unmark_q is called from the interrupt handler, we've
		 * got to check this in this specific case again */
		if ((!q->list_prev)||(!q->list_next))
			goto out;
		if (q->list_next==q) {
			/* q was the only interesting q */
			tiq_list=NULL;
			q->list_next=NULL;
			q->list_prev=NULL;
		} else {
			q->list_next->list_prev=q->list_prev;
			q->list_prev->list_next=q->list_next;
			tiq_list=q->list_next;
			q->list_next=NULL;
			q->list_prev=NULL;
		}
out:
		spin_unlock_irqrestore(&ttiq_list_lock,flags);
	}
}
static inline unsigned long
tiqdio_clear_global_summary(void)
{
	unsigned long time;

	QDIO_DBF_TEXT5(0,trace,"clrglobl");

	time = do_clear_global_summary();

	QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long));

	return time;
}
/************************* OUTBOUND ROUTINES *******************************/

static inline int
qdio_get_outbound_buffer_frontier(struct qdio_q *q)
{
	int f,f_mod_no;
	volatile char *slsb;
	int first_not_to_check;
	char dbf_text[15];

	QDIO_DBF_TEXT4(0,trace,"getobfro");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	slsb=&q->slsb.acc.val[0];
	f_mod_no=f=q->first_to_check;
	/*
	 * f points to already processed elements, so f+no_used is correct...
	 * ... but: we don't check 128 buffers, as otherwise
	 * qdio_has_outbound_q_moved would return 0
	 */
	first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
				      (QDIO_MAX_BUFFERS_PER_Q-1));

	if ((!q->is_iqdio_q)&&(!q->hydra_gives_outbound_pcis))
		SYNC_MEMORY;

check_next:
	if (f==first_not_to_check)
		goto out;

	switch(slsb[f_mod_no]) {

	/* the adapter has not fetched the output yet */
	case SLSB_CU_OUTPUT_PRIMED:
		QDIO_DBF_TEXT5(0,trace,"outpprim");
		break;

	/* the adapter got it */
	case SLSB_P_OUTPUT_EMPTY:
		atomic_dec(&q->number_of_buffers_used);
		f++;
		f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
		QDIO_DBF_TEXT5(0,trace,"outpempt");
		goto check_next;

	case SLSB_P_OUTPUT_ERROR:
		QDIO_DBF_TEXT3(0,trace,"outperr");
		sprintf(dbf_text,"%x-%x-%x",f_mod_no,
			q->sbal[f_mod_no]->element[14].sbalf.value,
			q->sbal[f_mod_no]->element[15].sbalf.value);
		QDIO_DBF_TEXT3(1,trace,dbf_text);
		QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);

		/* kind of process the buffer */
		set_slsb(&q->slsb.acc.val[f_mod_no], SLSB_P_OUTPUT_NOT_INIT);

		/*
		 * we increment the frontier, as this buffer
		 * was processed obviously
		 */
		atomic_dec(&q->number_of_buffers_used);
		f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);

		if (q->qdio_error)
			q->error_status_flags|=
				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
		q->qdio_error=SLSB_P_OUTPUT_ERROR;
		q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;

		break;

	/* no new buffers */
	default:
		QDIO_DBF_TEXT5(0,trace,"outpni");
	}
out:
	return (q->first_to_check=f_mod_no);
}
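/*
 * All frontier arithmetic above relies on QDIO_MAX_BUFFERS_PER_Q being a
 * power of two (128), so masking with QDIO_MAX_BUFFERS_PER_Q-1 is a cheap
 * modulo for the ring: e.g. (127+1)&127 wraps back to buffer 0.
 */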
/* all buffers are processed */
static inline int
qdio_is_outbound_q_done(struct qdio_q *q)
{
	int no_used;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	no_used=atomic_read(&q->number_of_buffers_used);

#ifdef CONFIG_QDIO_DEBUG
	if (no_used) {
		sprintf(dbf_text,"oqisnt%02x",no_used);
		QDIO_DBF_TEXT4(0,trace,dbf_text);
	} else {
		QDIO_DBF_TEXT4(0,trace,"oqisdone");
	}
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
#endif /* CONFIG_QDIO_DEBUG */
	return (no_used==0);
}
static inline int
qdio_has_outbound_q_moved(struct qdio_q *q)
{
	int i;

	i=qdio_get_outbound_buffer_frontier(q);

	if ( (i!=GET_SAVED_FRONTIER(q)) ||
	     (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
		SAVE_FRONTIER(q,i);
		QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		return 1;
	} else {
		QDIO_DBF_TEXT4(0,trace,"oqhsntmv");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		return 0;
	}
}
static inline void
qdio_kick_outbound_q(struct qdio_q *q)
{
	int result;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];

	QDIO_DBF_TEXT4(0,trace,"kickoutq");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
#endif /* CONFIG_QDIO_DEBUG */

	if (!q->siga_out)
		return;

	/* here's the story with cc=2 and busy bit set (thanks, Rick):
	 * VM's CP could present us cc=2 and busy bit set on SIGA-write
	 * during reconfiguration of their Guest LAN (only in HIPERS mode,
	 * QDIO mode is asynchronous -- cc=2 and busy bit there will take
	 * the queues down immediately; and not being under VM we have a
	 * problem on cc=2 and busy bit set right away).
	 *
	 * Therefore qdio_siga_output will try for a short time constantly,
	 * if such a condition occurs. If it doesn't change, it will
	 * increase the busy_siga_counter and save the timestamp, and
	 * schedule the queue for later processing (via mark_q, using the
	 * queue tasklet). __qdio_outbound_processing will check out the
	 * counter. If non-zero, it will call qdio_kick_outbound_q as often
	 * as the value of the counter. This will attempt further SIGA
	 * instructions. For each successful SIGA, the counter is
	 * decreased, for failing SIGAs the counter remains the same, after
	 * all.
	 * After some time of no movement, qdio_kick_outbound_q will
	 * finally fail and reflect corresponding error codes to call
	 * the upper layer module and have it take the queues down.
	 *
	 * Note that this is a change from the original HiperSockets design
	 * (saying cc=2 and busy bit means take the queues down), but in
	 * these days Guest LAN didn't exist... excessive cc=2 with busy bit
	 * conditions will still take the queues down, but the threshold is
	 * higher due to the Guest LAN environment.
	 */

	result=qdio_siga_output(q);

	switch (result) {
	case 0:
		/* went smooth this time, reset timestamp */
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT3(0,trace,"cc2reslv");
		sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
			atomic_read(&q->busy_siga_counter));
		QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		q->timing.busy_start=0;
		break;
	case (2|QDIO_SIGA_ERROR_B_BIT_SET):
		/* cc=2 and busy bit: */
		atomic_inc(&q->busy_siga_counter);

		/* if the last siga was successful, save
		 * timestamp here */
		if (!q->timing.busy_start)
			q->timing.busy_start=NOW;

		/* if we're in time, don't touch error_status_flags
		 * and siga_error */
		if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
			qdio_mark_q(q);
			break;
		}
		QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
#ifdef CONFIG_QDIO_DEBUG
		sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
			atomic_read(&q->busy_siga_counter));
		QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		/* else fallthrough and report error */
	default:
		/* for plain cc=1, 2 or 3: */
		if (q->siga_error)
			q->error_status_flags|=
				QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
		q->error_status_flags|=
			QDIO_STATUS_LOOK_FOR_ERROR;
		q->siga_error=result;
	}
}
static inline void
qdio_kick_outbound_handler(struct qdio_q *q)
{
	int start, end, real_end, count;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	start = q->first_element_to_kick;
	/* last_move_ftc was just updated */
	real_end = GET_SAVED_FRONTIER(q);
	end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
		(QDIO_MAX_BUFFERS_PER_Q-1);
	count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
		(QDIO_MAX_BUFFERS_PER_Q-1);

#ifdef CONFIG_QDIO_DEBUG
	QDIO_DBF_TEXT4(0,trace,"kickouth");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	sprintf(dbf_text,"s=%2xc=%2x",start,count);
	QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (q->state==QDIO_IRQ_STATE_ACTIVE)
		q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
			   q->error_status_flags,
			   q->qdio_error,q->siga_error,q->q_no,start,count,
			   q->int_parm);

	/* for the next time: */
	q->first_element_to_kick=real_end;
	q->qdio_error=0;
	q->siga_error=0;
	q->error_status_flags=0;
}
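/*
 * end is the last buffer of the processed range (the frontier minus one,
 * mod 128), so count is an inclusive distance. Worked example: with
 * start=10 and real_end=13, end=(13+127)&127=12 and
 * count=(12+128+1-10)&127=3 buffers reported to the handler.
 */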
static inline void
__qdio_outbound_processing(struct qdio_q *q)
{
	int siga_attempts;

	QDIO_DBF_TEXT4(0,trace,"qoutproc");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
#ifdef QDIO_PERFORMANCE_STATS
		o_p_c++;
#endif /* QDIO_PERFORMANCE_STATS */
		/* as we're sissies, we'll check next time */
		if (likely(!atomic_read(&q->is_in_shutdown))) {
			qdio_mark_q(q);
			QDIO_DBF_TEXT4(0,trace,"busy,agn");
		}
		return;
	}
#ifdef QDIO_PERFORMANCE_STATS
	o_p_nc++;
	perf_stats.tl_runs++;
#endif /* QDIO_PERFORMANCE_STATS */

	/* see comment in qdio_kick_outbound_q */
	siga_attempts=atomic_read(&q->busy_siga_counter);
	while (siga_attempts) {
		atomic_dec(&q->busy_siga_counter);
		qdio_kick_outbound_q(q);
		siga_attempts--;
	}

	if (qdio_has_outbound_q_moved(q))
		qdio_kick_outbound_handler(q);

	if (q->is_iqdio_q) {
		/*
		 * for asynchronous queues, we better check, if the fill
		 * level is too high. for synchronous queues, the fill
		 * level will never be that high.
		 */
		if (atomic_read(&q->number_of_buffers_used)>
		    IQDIO_FILL_LEVEL_TO_POLL)
			qdio_mark_q(q);
	} else if (!q->hydra_gives_outbound_pcis)
		if (!qdio_is_outbound_q_done(q))
			qdio_mark_q(q);

	qdio_release_q(q);
}

static void
qdio_outbound_processing(struct qdio_q *q)
{
	__qdio_outbound_processing(q);
}
/************************* INBOUND ROUTINES *******************************/

static inline int
qdio_get_inbound_buffer_frontier(struct qdio_q *q)
{
	int f,f_mod_no;
	volatile char *slsb;
	int first_not_to_check;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif /* CONFIG_QDIO_DEBUG */
#ifdef QDIO_USE_PROCESSING_STATE
	int last_position=-1;
#endif /* QDIO_USE_PROCESSING_STATE */

	QDIO_DBF_TEXT4(0,trace,"getibfro");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	slsb=&q->slsb.acc.val[0];
	f_mod_no=f=q->first_to_check;
	/*
	 * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
	 * would return 0
	 */
	first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
				      (QDIO_MAX_BUFFERS_PER_Q-1));

	/*
	 * we don't use this one, as a PCI or we after a thin interrupt
	 * will sync the queues
	 */
	/* SYNC_MEMORY;*/

check_next:
	f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
	if (f==first_not_to_check)
		goto out;
	switch (slsb[f_mod_no]) {

	/* CU_EMPTY means frontier is reached */
	case SLSB_CU_INPUT_EMPTY:
		QDIO_DBF_TEXT5(0,trace,"inptempt");
		break;

	/* P_PRIMED means set slsb to P_PROCESSING and move on */
	case SLSB_P_INPUT_PRIMED:
		QDIO_DBF_TEXT5(0,trace,"inptprim");

#ifdef QDIO_USE_PROCESSING_STATE
		/*
		 * as soon as running under VM, polling the input queues will
		 * kill VM in terms of CP overhead
		 */
		if (q->siga_sync) {
			set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
		} else {
			/* set the previous buffer to NOT_INIT. The current
			 * buffer will be set to PROCESSING at the end of
			 * this function to avoid further interrupts. */
			if (last_position>=0)
				set_slsb(&slsb[last_position],
					 SLSB_P_INPUT_NOT_INIT);
			atomic_set(&q->polling,1);
			last_position=f_mod_no;
		}
#else /* QDIO_USE_PROCESSING_STATE */
		set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
#endif /* QDIO_USE_PROCESSING_STATE */
		/*
		 * not needed, as the inbound queue will be synced on the next
		 * siga-r, resp. tiqdio_is_inbound_q_done will do the siga-s
		 */
		f++;
		atomic_dec(&q->number_of_buffers_used);
		goto check_next;

	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_PROCESSING:
		QDIO_DBF_TEXT5(0,trace,"inpnipro");
		break;

	/* P_ERROR means frontier is reached, break and report error */
	case SLSB_P_INPUT_ERROR:
#ifdef CONFIG_QDIO_DEBUG
		sprintf(dbf_text,"inperr%2x",f_mod_no);
		QDIO_DBF_TEXT3(1,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);

		/* kind of process the buffer */
		set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);

		if (q->qdio_error)
			q->error_status_flags|=
				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
		q->qdio_error=SLSB_P_INPUT_ERROR;
		q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;

		/* we increment the frontier, as this buffer
		 * was processed obviously */
		f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
		atomic_dec(&q->number_of_buffers_used);

#ifdef QDIO_USE_PROCESSING_STATE
		last_position=-1;
#endif /* QDIO_USE_PROCESSING_STATE */

		break;

	/* everything else means frontier not changed (HALTED or so) */
	default:
		break;
	}
out:
	q->first_to_check=f_mod_no;

#ifdef QDIO_USE_PROCESSING_STATE
	if (last_position>=0)
		set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING);
#endif /* QDIO_USE_PROCESSING_STATE */

	QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));

	return q->first_to_check;
}
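/*
 * The PROCESSING-state trick above: instead of returning every polled
 * buffer to NOT_INIT right away, the most recently seen buffer is left in
 * SLSB_P_INPUT_PROCESSING so the adapter keeps its hands off it and does
 * not raise another interrupt while we are still polling;
 * qdio_stop_polling undoes this again. Under VM (q->siga_sync set) the
 * trick is skipped, since polling there just burns CP cycles.
 */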
static inline int
qdio_has_inbound_q_moved(struct qdio_q *q)
{
	int i;

#ifdef QDIO_PERFORMANCE_STATS
	static int old_pcis=0;
	static int old_thinints=0;

	if ((old_pcis==perf_stats.pcis)&&(old_thinints==perf_stats.thinints))
		perf_stats.start_time_inbound=NOW;
	else
		old_pcis=perf_stats.pcis;
#endif /* QDIO_PERFORMANCE_STATS */

	i=qdio_get_inbound_buffer_frontier(q);
	if ( (i!=GET_SAVED_FRONTIER(q)) ||
	     (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
		SAVE_FRONTIER(q,i);
		if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
			SAVE_TIMESTAMP(q);

		QDIO_DBF_TEXT4(0,trace,"inhasmvd");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		return 1;
	} else {
		QDIO_DBF_TEXT4(0,trace,"inhsntmv");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		return 0;
	}
}
/* means, no more buffers to be filled */
static inline int
tiqdio_is_inbound_q_done(struct qdio_q *q)
{
	int no_used;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	no_used=atomic_read(&q->number_of_buffers_used);

	/* propagate the change from 82 to 80 through VM */
	SYNC_MEMORY;

#ifdef CONFIG_QDIO_DEBUG
	if (no_used) {
		sprintf(dbf_text,"iqisnt%02x",no_used);
		QDIO_DBF_TEXT4(0,trace,dbf_text);
	} else {
		QDIO_DBF_TEXT4(0,trace,"iniqisdo");
	}
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
#endif /* CONFIG_QDIO_DEBUG */

	if (!no_used)
		return 1;

	/* we'll check for more primed buffers in qeth_stop_polling */
	if (!q->siga_sync)
		return 0;

	if (q->slsb.acc.val[q->first_to_check]!=SLSB_P_INPUT_PRIMED)
		/*
		 * nothing more to do, if next buffer is not PRIMED.
		 * note that we did a SYNC_MEMORY before, so that there
		 * has been a synchronization.
		 * we will return 0 below, as there is nothing to do
		 * (stop_polling not necessary, as we have not been
		 * using the PROCESSING state)
		 */
		return 0;

	/*
	 * ok, the next input buffer is primed. that means, that device state
	 * change indicator and adapter local summary are set, so we will find
	 * it next time.
	 * we will return 0 below, as there is nothing to do, except scheduling
	 * ourselves for the next time.
	 */
	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	tiqdio_sched_tl();
	return 0;
}
static inline int
qdio_is_inbound_q_done(struct qdio_q *q)
{
	int no_used;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	no_used=atomic_read(&q->number_of_buffers_used);

	/*
	 * we need that one for synchronization with the adapter, as it
	 * does a kind of PCI avoidance
	 */
	SYNC_MEMORY;

	if (!no_used) {
		QDIO_DBF_TEXT4(0,trace,"inqisdnA");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		return 1;
	}

	if (q->slsb.acc.val[q->first_to_check]==SLSB_P_INPUT_PRIMED) {
		/* we got something to do */
		QDIO_DBF_TEXT4(0,trace,"inqisntA");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		return 0;
	}

	/* on VM, we don't poll, so the q is always done here */
	if (q->siga_sync)
		return 1;
	if (q->hydra_gives_outbound_pcis)
		return 1;

	/*
	 * at this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing)
	 */
	if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT4(0,trace,"inqisdon");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
		QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		return 1;
	} else {
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT4(0,trace,"inqisntd");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
		QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		return 0;
	}
}
static inline void
qdio_kick_inbound_handler(struct qdio_q *q)
{
	int count, start, end, real_end, i;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	QDIO_DBF_TEXT4(0,trace,"kickinh");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	start=q->first_element_to_kick;
	real_end=q->first_to_check;
	end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);

	i=start;
	count=0;
	while (1) {
		count++;
		if (i==end)
			break;
		i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
	}

#ifdef CONFIG_QDIO_DEBUG
	sprintf(dbf_text,"s=%2xc=%2x",start,count);
	QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
		q->handler(q->cdev,
			   QDIO_STATUS_INBOUND_INT|q->error_status_flags,
			   q->qdio_error,q->siga_error,q->q_no,start,count,
			   q->int_parm);

	/* for the next time: */
	q->first_element_to_kick=real_end;
	q->qdio_error=0;
	q->siga_error=0;
	q->error_status_flags=0;

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
	perf_stats.inbound_cnt++;
#endif /* QDIO_PERFORMANCE_STATS */
}
static inline void
__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
{
	struct qdio_irq *irq_ptr;
	struct qdio_q *oq;
	int i;

	QDIO_DBF_TEXT4(0,trace,"iqinproc");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	/*
	 * we first want to reserve the q, so that we know, that we don't
	 * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might
	 * be set
	 */
	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
#ifdef QDIO_PERFORMANCE_STATS
		ii_p_c++;
#endif /* QDIO_PERFORMANCE_STATS */
		/*
		 * as we might just be about to stop polling, we make
		 * sure that we check again at least once more
		 */
		tiqdio_sched_tl();
		return;
	}
#ifdef QDIO_PERFORMANCE_STATS
	ii_p_nc++;
#endif /* QDIO_PERFORMANCE_STATS */
	if (unlikely(atomic_read(&q->is_in_shutdown))) {
		qdio_unmark_q(q);
		goto out;
	}

	/*
	 * we reset spare_ind_was_set, when the queue does not use the
	 * spare indicator
	 */
	if (spare_ind_was_set)
		spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);

	if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
		goto out;
	/*
	 * q->dev_st_chg_ind is the indicator, be it shared or not.
	 * only clear it, if indicator is non-shared
	 */
	if (!spare_ind_was_set)
		tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);

	if (q->hydra_gives_outbound_pcis) {
		if (!q->siga_sync_done_on_thinints) {
			SYNC_MEMORY_ALL;
		} else if ((!q->siga_sync_done_on_outb_tis)&&
			   (q->hydra_gives_outbound_pcis)) {
			SYNC_MEMORY_ALL_OUTB;
		}
	} else
		SYNC_MEMORY;
	/*
	 * maybe we have to do work on our outbound queues... at least
	 * we have to check the outbound-int-capable thinint-capable
	 * queues
	 */
	if (q->hydra_gives_outbound_pcis) {
		irq_ptr = (struct qdio_irq*)q->irq_ptr;
		for (i=0;i<irq_ptr->no_output_qs;i++) {
			oq = irq_ptr->output_qs[i];
#ifdef QDIO_PERFORMANCE_STATS
			perf_stats.tl_runs--;
#endif /* QDIO_PERFORMANCE_STATS */
			if (!qdio_is_outbound_q_done(oq))
				__qdio_outbound_processing(oq);
		}
	}

	if (!qdio_has_inbound_q_moved(q))
		goto out;

	qdio_kick_inbound_handler(q);
	if (tiqdio_is_inbound_q_done(q))
		if (!qdio_stop_polling(q)) {
			/*
			 * we set the flags to get into the stuff next time,
			 * see also comment in qdio_stop_polling
			 */
			tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
			tiqdio_sched_tl();
		}
out:
	qdio_release_q(q);
}
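/*
 * Shared vs. private indicators: once more than 63 thinint subchannels
 * exist, queues share spare_indicator. A shared indicator must not be
 * cleared on behalf of a single queue, since another queue's data may
 * still be pending behind it -- that is the reason for the
 * spare_ind_was_set handling above.
 */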
static void
tiqdio_inbound_processing(struct qdio_q *q)
{
	__tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
}
static inline void
__qdio_inbound_processing(struct qdio_q *q)
{
	int q_laps=0;

	QDIO_DBF_TEXT4(0,trace,"qinproc");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
#ifdef QDIO_PERFORMANCE_STATS
		i_p_c++;
#endif /* QDIO_PERFORMANCE_STATS */
		/* as we're sissies, we'll check next time */
		if (likely(!atomic_read(&q->is_in_shutdown))) {
			qdio_mark_q(q);
			QDIO_DBF_TEXT4(0,trace,"busy,agn");
		}
		return;
	}
#ifdef QDIO_PERFORMANCE_STATS
	i_p_nc++;
	perf_stats.tl_runs++;
#endif /* QDIO_PERFORMANCE_STATS */

again:
	if (qdio_has_inbound_q_moved(q)) {
		qdio_kick_inbound_handler(q);
		if (!qdio_stop_polling(q)) {
			q_laps++;
			if (q_laps<QDIO_Q_LAPS)
				goto again;
		}
	}

	if (!qdio_is_inbound_q_done(q))
		/* means poll time is not yet over */
		qdio_mark_q(q);

	qdio_release_q(q);
}

static void
qdio_inbound_processing(struct qdio_q *q)
{
	__qdio_inbound_processing(q);
}
/************************* MAIN ROUTINES *******************************/

#ifdef QDIO_USE_PROCESSING_STATE
static inline int
tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
{
	if (!q)
		return 0;

	/*
	 * under VM, we have not used the PROCESSING state, so no
	 * need to stop polling
	 */
	if (q->siga_sync)
		return 2;

	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
#ifdef QDIO_PERFORMANCE_STATS
		ii_p_c++;
#endif /* QDIO_PERFORMANCE_STATS */
		/*
		 * as we might just be about to stop polling, we make
		 * sure that we check again at least once more
		 */

		/*
		 * sanity -- we'd get here without setting the
		 * dev st chg ind
		 */
		tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
		tiqdio_sched_tl();
		return 0;
	}
	if (qdio_stop_polling(q)) {
		qdio_release_q(q);
		return 2;
	}
	if (q_laps<QDIO_Q_LAPS-1) {
		qdio_release_q(q);
		return 3;
	}
	/*
	 * we set the flags to get into the stuff
	 * next time, see also comment in qdio_stop_polling
	 */
	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	tiqdio_sched_tl();
	qdio_release_q(q);
	return 1;
}
#endif /* QDIO_USE_PROCESSING_STATE */
static inline void
tiqdio_inbound_checks(void)
{
	struct qdio_q *q;
	int spare_ind_was_set=0;
#ifdef QDIO_USE_PROCESSING_STATE
	int q_laps=0;
#endif /* QDIO_USE_PROCESSING_STATE */

	QDIO_DBF_TEXT4(0,trace,"iqdinbck");
	QDIO_DBF_TEXT5(0,trace,"iqlocsum");

#ifdef QDIO_USE_PROCESSING_STATE
again:
#endif /* QDIO_USE_PROCESSING_STATE */

	/* when the spare indicator is used and set, save that and clear it */
	if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
		spare_ind_was_set = 1;
		tiqdio_clear_summary_bit((__u32*)&spare_indicator);
	}

	q=(struct qdio_q*)tiq_list;
	do {
		if (!q)
			break;
		__tiqdio_inbound_processing(q, spare_ind_was_set);
		q=(struct qdio_q*)q->list_next;
	} while (q!=(struct qdio_q*)tiq_list);

#ifdef QDIO_USE_PROCESSING_STATE
	q=(struct qdio_q*)tiq_list;
	do {
		int ret;

		ret = tiqdio_reset_processing_state(q, q_laps);
		switch (ret) {
		case 0:
			return;
		case 1:
			q_laps++;
		case 2:
			q = (struct qdio_q*)q->list_next;
			break;
		default:
			q_laps++;
			goto again;
		}
	} while (q!=(struct qdio_q*)tiq_list);
#endif /* QDIO_USE_PROCESSING_STATE */
}
static void
tiqdio_tl(unsigned long data)
{
	QDIO_DBF_TEXT4(0,trace,"iqdio_tl");

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.tl_runs++;
#endif /* QDIO_PERFORMANCE_STATS */

	tiqdio_inbound_checks();
}
/********************* GENERAL HELPER_ROUTINES ***********************/

static void
qdio_release_irq_memory(struct qdio_irq *irq_ptr)
{
	int i;

	for (i=0;i<QDIO_MAX_QUEUES_PER_IRQ;i++) {
		if (!irq_ptr->input_qs[i])
			goto next;

		if (irq_ptr->input_qs[i]->slib)
			kfree(irq_ptr->input_qs[i]->slib);
		kfree(irq_ptr->input_qs[i]);

next:
		if (!irq_ptr->output_qs[i])
			continue;

		if (irq_ptr->output_qs[i]->slib)
			kfree(irq_ptr->output_qs[i]->slib);
		kfree(irq_ptr->output_qs[i]);
	}
	kfree(irq_ptr->qdr);
	kfree(irq_ptr);
}
static void
qdio_set_impl_params(struct qdio_irq *irq_ptr,
		     unsigned int qib_param_field_format,
		     /* pointer to 128 bytes or NULL, if no param field */
		     unsigned char *qib_param_field,
		     /* pointer to no_queues*128 words of data or NULL */
		     unsigned int no_input_qs,
		     unsigned int no_output_qs,
		     unsigned long *input_slib_elements,
		     unsigned long *output_slib_elements)
{
	int i,j;

	if (!irq_ptr)
		return;

	irq_ptr->qib.pfmt=qib_param_field_format;
	if (qib_param_field)
		memcpy(irq_ptr->qib.parm,qib_param_field,
		       QDIO_MAX_BUFFERS_PER_Q);

	if (input_slib_elements)
		for (i=0;i<no_input_qs;i++) {
			for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
				irq_ptr->input_qs[i]->slib->slibe[j].parms=
					input_slib_elements[
						i*QDIO_MAX_BUFFERS_PER_Q+j];
		}
	if (output_slib_elements)
		for (i=0;i<no_output_qs;i++) {
			for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
				irq_ptr->output_qs[i]->slib->slibe[j].parms=
					output_slib_elements[
						i*QDIO_MAX_BUFFERS_PER_Q+j];
		}
}
static int
qdio_alloc_qs(struct qdio_irq *irq_ptr,
	      int no_input_qs, int no_output_qs)
{
	int i;
	struct qdio_q *q;
	int result=-ENOMEM;

	for (i=0;i<no_input_qs;i++) {
		q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL);

		if (!q) {
			QDIO_PRINT_ERR("kmalloc of q failed!\n");
			goto out;
		}

		memset(q,0,sizeof(struct qdio_q));

		q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL);
		if (!q->slib) {
			QDIO_PRINT_ERR("kmalloc of slib failed!\n");
			goto out;
		}

		irq_ptr->input_qs[i]=q;
	}

	for (i=0;i<no_output_qs;i++) {
		q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL);

		if (!q) {
			QDIO_PRINT_ERR("kmalloc of q failed!\n");
			goto out;
		}

		memset(q,0,sizeof(struct qdio_q));

		q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL);
		if (!q->slib) {
			QDIO_PRINT_ERR("kmalloc of slib failed!\n");
			goto out;
		}

		irq_ptr->output_qs[i]=q;
	}

	result=0;
out:
	return result;
}
static void
qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
	     int no_input_qs, int no_output_qs,
	     qdio_handler_t *input_handler,
	     qdio_handler_t *output_handler,
	     unsigned long int_parm,int q_format,
	     unsigned long flags,
	     void **inbound_sbals_array,
	     void **outbound_sbals_array)
{
	struct qdio_q *q;
	int i,j;
	char dbf_text[20]; /* see qdio_initialize */
	void *ptr;

	sprintf(dbf_text,"qfqs%4x",cdev->private->irq);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	for (i=0;i<no_input_qs;i++) {
		q=irq_ptr->input_qs[i];

		memset(q,0,((char*)&q->slib)-((char*)q));
		sprintf(dbf_text,"in-q%4x",i);
		QDIO_DBF_TEXT0(0,setup,dbf_text);
		QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));

		memset(q->slib,0,PAGE_SIZE);
		q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);

		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sbal[j]=*(inbound_sbals_array++);

		q->queue_type=q_format;
		q->int_parm=int_parm;
		q->irq=irq_ptr->irq;
		q->irq_ptr = irq_ptr;
		q->cdev = cdev;
		q->mask=1<<(31-i);
		q->q_no=i;
		q->is_input_q=1;
		q->first_to_check=0;
		q->last_move_ftc=0;
		q->handler=input_handler;
		q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;

		q->tasklet.data=(unsigned long)q;
		/* q->is_thinint_q isn't valid at this time, but
		 * irq_ptr->is_thinint_irq is */
		q->tasklet.func=(void(*)(unsigned long))
			((irq_ptr->is_thinint_irq)?&tiqdio_inbound_processing:
			 &qdio_inbound_processing);

		/* actually this is not used for inbound queues. yet. */
		atomic_set(&q->busy_siga_counter,0);
		q->timing.busy_start=0;

/*		for (j=0;j<QDIO_STATS_NUMBER;j++)
			q->timing.last_transfer_times[j]=(qdio_get_micros()/
							  QDIO_STATS_NUMBER)*j;
		q->timing.last_transfer_index=QDIO_STATS_NUMBER-1;
*/

		/* fill in slib */
		if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba=
				 (unsigned long)(q->slib);
		q->slib->sla=(unsigned long)(q->sl);
		q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);

		/* fill in sl */
		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);

		QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
		ptr=(void*)q->sl;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)&q->slsb;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)q->sbal[0];
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));

		/* fill in slsb */
		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
			set_slsb(&q->slsb.acc.val[j],
				 SLSB_P_INPUT_NOT_INIT);
/*			q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
		}
	}

	for (i=0;i<no_output_qs;i++) {
		q=irq_ptr->output_qs[i];
		memset(q,0,((char*)&q->slib)-((char*)q));

		sprintf(dbf_text,"outq%4x",i);
		QDIO_DBF_TEXT0(0,setup,dbf_text);
		QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));

		memset(q->slib,0,PAGE_SIZE);
		q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);

		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sbal[j]=*(outbound_sbals_array++);

		q->queue_type=q_format;
		q->int_parm=int_parm;
		q->is_input_q=0;
		q->irq=irq_ptr->irq;
		q->cdev = cdev;
		q->irq_ptr = irq_ptr;
		q->mask=1<<(31-i);
		q->q_no=i;
		q->first_to_check=0;
		q->last_move_ftc=0;
		q->handler=output_handler;

		q->tasklet.data=(unsigned long)q;
		q->tasklet.func=(void(*)(unsigned long))
			&qdio_outbound_processing;

		atomic_set(&q->busy_siga_counter,0);
		q->timing.busy_start=0;

		/* fill in slib */
		if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
				 (unsigned long)(q->slib);
		q->slib->sla=(unsigned long)(q->sl);
		q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);

		/* fill in sl */
		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);

		QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
		ptr=(void*)q->sl;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)&q->slsb;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)q->sbal[0];
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));

		/* fill in slsb */
		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
			set_slsb(&q->slsb.acc.val[j],
				 SLSB_P_OUTPUT_NOT_INIT);
/*			q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
		}
	}
}
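/*
 * Memory layout per queue, as set up above: one kmalloc'd page holds the
 * SLIB (storage list information block) in its first half and the SL
 * (storage list of 128 SBAL addresses) in its second half. The SLSB state
 * bytes live inside struct qdio_q itself (slsba points there), while the
 * SBALs are supplied by the upper-layer driver via the sbal address arrays.
 */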
static void
qdio_fill_thresholds(struct qdio_irq *irq_ptr,
		     unsigned int no_input_qs,
		     unsigned int no_output_qs,
		     unsigned int min_input_threshold,
		     unsigned int max_input_threshold,
		     unsigned int min_output_threshold,
		     unsigned int max_output_threshold)
{
	int i;
	struct qdio_q *q;

	for (i=0;i<no_input_qs;i++) {
		q=irq_ptr->input_qs[i];
		q->timing.threshold=max_input_threshold;
/*		for (j=0;j<QDIO_STATS_CLASSES;j++) {
			q->threshold_classes[j].threshold=
				min_input_threshold+
				(max_input_threshold-min_input_threshold)/
				QDIO_STATS_CLASSES;
		}
		qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
	}
	for (i=0;i<no_output_qs;i++) {
		q=irq_ptr->output_qs[i];
		q->timing.threshold=max_output_threshold;
/*		for (j=0;j<QDIO_STATS_CLASSES;j++) {
			q->threshold_classes[j].threshold=
				min_output_threshold+
				(max_output_threshold-min_output_threshold)/
				QDIO_STATS_CLASSES;
		}
		qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
	}
}
static int
tiqdio_thinint_handler(void)
{
	QDIO_DBF_TEXT4(0,trace,"thin_int");

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.thinints++;
	perf_stats.start_time_inbound=NOW;
#endif /* QDIO_PERFORMANCE_STATS */

	/* SVS only when needed:
	 * issue SVS to benefit from iqdio interrupt avoidance
	 * (SVS clears AISOI) */
	if (!omit_svs)
		tiqdio_clear_global_summary();

	tiqdio_inbound_checks();
	return 0;
}
static void
qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
{
	int i;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];

	QDIO_DBF_TEXT5(0,trace,"newstate");
	sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state);
	QDIO_DBF_TEXT5(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	irq_ptr->state=state;
	for (i=0;i<irq_ptr->no_input_qs;i++)
		irq_ptr->input_qs[i]->state=state;
	for (i=0;i<irq_ptr->no_output_qs;i++)
		irq_ptr->output_qs[i]->state=state;
	mb();
}
static inline void
qdio_irq_check_sense(int irq, struct irb *irb)
{
	char dbf_text[15];

	if (irb->esw.esw0.erw.cons) {
		sprintf(dbf_text,"sens%4x",irq);
		QDIO_DBF_TEXT2(1,trace,dbf_text);
		QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);

		QDIO_PRINT_WARN("sense data available on qdio channel.\n");
		HEXDUMP16(WARN,"irb: ",irb);
		HEXDUMP16(WARN,"sense data: ",irb->ecw);
	}
}
static inline void
qdio_handle_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.pcis++;
	perf_stats.start_time_inbound=NOW;
#endif /* QDIO_PERFORMANCE_STATS */
	for (i=0;i<irq_ptr->no_input_qs;i++) {
		q=irq_ptr->input_qs[i];
		if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
			qdio_mark_q(q);
		else {
#ifdef QDIO_PERFORMANCE_STATS
			perf_stats.tl_runs--;
#endif /* QDIO_PERFORMANCE_STATS */
			__qdio_inbound_processing(q);
		}
	}
	if (!irq_ptr->hydra_gives_outbound_pcis)
		return;
	for (i=0;i<irq_ptr->no_output_qs;i++) {
		q=irq_ptr->output_qs[i];
#ifdef QDIO_PERFORMANCE_STATS
		perf_stats.tl_runs--;
#endif /* QDIO_PERFORMANCE_STATS */
		if (qdio_is_outbound_q_done(q))
			continue;
		if (!irq_ptr->sync_done_on_outb_pcis)
			SYNC_MEMORY;
		__qdio_outbound_processing(q);
	}
}
static void qdio_establish_handle_irq(struct ccw_device*, int, int);

static inline void
qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
			   int cstat, int dstat)
{
	struct qdio_irq *irq_ptr;
	struct qdio_q *q;
	char dbf_text[15];

	irq_ptr = cdev->private->qdio_data;

	QDIO_DBF_TEXT2(1, trace, "ick2");
	sprintf(dbf_text,"%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT2(1,trace,dbf_text);
	QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
	QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
	QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
	QDIO_PRINT_ERR("received check condition on activate " \
		       "queues on device %s (cs=x%x, ds=x%x).\n",
		       cdev->dev.bus_id, cstat, dstat);
	if (irq_ptr->no_input_qs) {
		q=irq_ptr->input_qs[0];
	} else if (irq_ptr->no_output_qs) {
		q=irq_ptr->output_qs[0];
	} else {
		QDIO_PRINT_ERR("oops... no queue registered for device %s!?\n",
			       cdev->dev.bus_id);
		goto omit_handler_call;
	}
	q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
		   QDIO_STATUS_LOOK_FOR_ERROR,
		   0,0,0,-1,-1,q->int_parm);
omit_handler_call:
	qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);
}
static void
qdio_call_shutdown(void *data)
{
	struct ccw_device *cdev;

	cdev = (struct ccw_device *)data;
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	put_device(&cdev->dev);
}
static void
qdio_timeout_handler(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];

	QDIO_DBF_TEXT2(0, trace, "qtoh");
	sprintf(dbf_text, "%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT2(0, trace, dbf_text);

	irq_ptr = cdev->private->qdio_data;
	sprintf(dbf_text, "state:%d", irq_ptr->state);
	QDIO_DBF_TEXT2(0, trace, dbf_text);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n",
			       irq_ptr->irq);
		QDIO_DBF_TEXT2(1,setup,"eq:timeo");
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n",
				irq_ptr->irq);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		/* I/O has been terminated by common I/O layer. */
		QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n",
				irq_ptr->irq);
		QDIO_DBF_TEXT2(1, trace, "cio:term");
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
		if (get_device(&cdev->dev)) {
			/* Can't call shutdown from interrupt context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     qdio_call_shutdown, (void *)cdev);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		break;
	default:
		BUG();
	}
	ccw_device_set_timeout(cdev, 0);
	wake_up(&cdev->private->wait_q);
}
static void
qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct qdio_irq *irq_ptr;
	int cstat,dstat;
	char dbf_text[15];

#ifdef CONFIG_QDIO_DEBUG
	QDIO_DBF_TEXT4(0, trace, "qint");
	sprintf(dbf_text, "%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (!intparm) {
		QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
			       "handler, device %s\n", cdev->dev.bus_id);
		return;
	}

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr) {
		QDIO_DBF_TEXT2(1, trace, "uint");
		sprintf(dbf_text,"%s", cdev->dev.bus_id);
		QDIO_DBF_TEXT2(1,trace,dbf_text);
		QDIO_PRINT_ERR("received interrupt on unused device %s!\n",
			       cdev->dev.bus_id);
		return;
	}

	if (IS_ERR(irb)) {
		/* Currently running i/o is in error. */
		switch (PTR_ERR(irb)) {
		case -EIO:
			QDIO_PRINT_ERR("i/o error on device %s\n",
				       cdev->dev.bus_id);
			return;
		case -ETIMEDOUT:
			qdio_timeout_handler(cdev);
			return;
		default:
			QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
				       PTR_ERR(irb), cdev->dev.bus_id);
			return;
		}
	}

	qdio_irq_check_sense(irq_ptr->irq, irb);

#ifdef CONFIG_QDIO_DEBUG
	sprintf(dbf_text, "state:%d", irq_ptr->state);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	cstat = irb->scsw.cstat;
	dstat = irb->scsw.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_handle_pci(irq_ptr);
			break;
		}

		if ((cstat&~SCHN_STAT_PCI)||dstat) {
			qdio_handle_activate_check(cdev, intparm, cstat, dstat);
			break;
		}
		break;
	default:
		QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
			       "device %s\n",
			       irq_ptr->state, cdev->dev.bus_id);
	}
	wake_up(&cdev->private->wait_q);
}
int
qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
		 unsigned int queue_number)
{
	int cc;
	struct qdio_q *q;
	struct qdio_irq *irq_ptr;
	void *ptr;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15]="SyncXXXX";
#endif

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

#ifdef CONFIG_QDIO_DEBUG
	*((int*)(&dbf_text[4])) = irq_ptr->irq;
	QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
	*((int*)(&dbf_text[0]))=flags;
	*((int*)(&dbf_text[4]))=queue_number;
	QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
#endif /* CONFIG_QDIO_DEBUG */

	if (flags&QDIO_FLAG_SYNC_INPUT) {
		q=irq_ptr->input_qs[queue_number];
		if (!q)
			return -EINVAL;
		cc = do_siga_sync(q->irq, 0, q->mask);
	} else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
		q=irq_ptr->output_qs[queue_number];
		if (!q)
			return -EINVAL;
		cc = do_siga_sync(q->irq, q->mask, 0);
	} else
		return -EINVAL;

	ptr=&cc;
	if (cc)
		QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int));

	return cc;
}
static unsigned char
qdio_check_siga_needs(int sch)
{
	int result;
	unsigned char qdioac;

	struct {
		struct chsc_header request;
		u16 reserved1;
		u16 first_sch;
		u16 reserved2;
		u16 last_sch;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8  flags;
		u8  reserved5;
		u16 sch;
		u8  qfmt;
		u8  reserved6;
		u8  qdioac;
		u8  sch_class;
		u8  reserved7;
		u8  icnt;
		u8  reserved8;
		u8  ocnt;
	} *ssqd_area;

	ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!ssqd_area) {
		QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
				"SIGAs for sch x%x.\n", sch);
		return CHSC_FLAG_SIGA_INPUT_NECESSARY |
			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
	}
	ssqd_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0024,
	};

	ssqd_area->first_sch = sch;
	ssqd_area->last_sch = sch;

	result=chsc(ssqd_area);

	if (result) {
		QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
				"SIGAs for sch x%x.\n",
				result,sch);
		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
		goto out;
	}

	if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
		QDIO_PRINT_WARN("response upon checking SIGA needs " \
				"is 0x%x. Using all SIGAs for sch x%x.\n",
				ssqd_area->response.code, sch);
		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
		goto out;
	}
	if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
	    !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
	    (ssqd_area->sch != sch)) {
		QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \
				"using all SIGAs.\n",sch);
		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
		goto out;
	}

	qdioac = ssqd_area->qdioac;
out:
	free_page ((unsigned long) ssqd_area);
	return qdioac;
}
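/*
 * qdioac describes which SIGA instructions the subchannel actually needs
 * (CHSC_FLAG_SIGA_{INPUT,OUTPUT,SYNC}_NECESSARY). Note that these are bit
 * flags, so the fallback paths above must combine them with bitwise | --
 * logical || would collapse "all flags set" to the value 1. Every failure
 * path falls back to "all SIGAs necessary", which is always safe, merely
 * slower.
 */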
static int
tiqdio_check_chsc_availability(void)
{
	char dbf_text[15];

	if (!css_characteristics_avail)
		return -EIO;

	/* Check for bit 41. */
	if (!css_general_characteristics.aif) {
		QDIO_PRINT_WARN("Adapter interruption facility not " \
				"installed.\n");
		return -ENOENT;
	}

	/* Check for bits 107 and 108. */
	if (!css_chsc_characteristics.scssc ||
	    !css_chsc_characteristics.scsscf) {
		QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
				"not available.\n");
		return -ENOENT;
	}

	/* Check for OSA/FCP thin interrupts (bit 67). */
	hydra_thinints = css_general_characteristics.aif_osa;
	sprintf(dbf_text,"hydrati%1x", hydra_thinints);
	QDIO_DBF_TEXT0(0,setup,dbf_text);

	/* Check for aif time delay disablement fac (bit 56). If installed,
	 * omit svs even under lpar (good point by rick again) */
	omit_svs = css_general_characteristics.aif_tdd;
	sprintf(dbf_text,"omitsvs%1x", omit_svs);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	return 0;
}
static int
tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
{
	unsigned long real_addr_local_summary_bit;
	unsigned long real_addr_dev_st_chg_ind;
	void *ptr;
	char dbf_text[15];

	unsigned int resp_code;
	int result;

	struct {
		struct chsc_header request;
		u16 operation_code;
		u16 reserved1;
		u32 reserved2;
		u32 reserved3;
		u64 summary_indicator_addr;
		u64 subchannel_indicator_addr;
		u32 ks:4;
		u32 kc:4;
		u32 reserved4:21;
		u32 isc:3;
		u32 word_with_d_bit;
		/* set to 0x10000000 to enable
		 * time delay disablement facility */
		u32 reserved5;
		u32 subsystem_id;
		u32 reserved6[1004];
		struct chsc_header response;
		u32 reserved7;
	} *scssc_area;

	if (!irq_ptr->is_thinint_irq)
		return -ENODEV;

	if (reset_to_zero) {
		real_addr_local_summary_bit=0;
		real_addr_dev_st_chg_ind=0;
	} else {
		real_addr_local_summary_bit=
			virt_to_phys((volatile void *)indicators);
		real_addr_dev_st_chg_ind=
			virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
	}

	scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scssc_area) {
		QDIO_PRINT_WARN("No memory for setting indicators on " \
				"subchannel x%x.\n", irq_ptr->irq);
		return -ENOMEM;
	}
	scssc_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code   = 0x0021,
	};
	scssc_area->operation_code = 0;

	scssc_area->summary_indicator_addr = real_addr_local_summary_bit;
	scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind;
	scssc_area->ks = QDIO_STORAGE_KEY;
	scssc_area->kc = QDIO_STORAGE_KEY;
	scssc_area->isc = TIQDIO_THININT_ISC;
	scssc_area->subsystem_id = (1<<16) + irq_ptr->irq;
	/* enables the time delay disablement facility. Don't care
	 * whether it is really there (i.e. we haven't checked for
	 * it) */
	if (css_general_characteristics.aif_tdd)
		scssc_area->word_with_d_bit = 0x10000000;
	else
		QDIO_PRINT_WARN("Time delay disablement facility " \
				"not available\n");

	result = chsc(scssc_area);
	if (result) {
		QDIO_PRINT_WARN("could not set indicators on irq x%x, " \
				"cc=%i.\n",irq_ptr->irq,result);
		result = -EIO;
		goto out;
	}

	resp_code = scssc_area->response.code;
	if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
		QDIO_PRINT_WARN("response upon setting indicators " \
				"is 0x%x.\n",resp_code);
		sprintf(dbf_text,"sidR%4x",resp_code);
		QDIO_DBF_TEXT1(0,trace,dbf_text);
		QDIO_DBF_TEXT1(0,setup,dbf_text);
		ptr=&scssc_area->response;
		QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
		result = -EIO;
		goto out;
	}

	QDIO_DBF_TEXT2(0,setup,"setscind");
	QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
		      sizeof(unsigned long));
	QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
	result = 0;
out:
	free_page ((unsigned long) scssc_area);
	return result;
}
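/*
 * The "set subchannel indicator" chsc above registers two absolute
 * addresses with the channel subsystem: the adapter-local summary
 * indicator (shared by all thinint subchannels) and this subchannel's
 * device state change indicator. Passing zeroes (reset_to_zero)
 * unregisters them again, which is what qdio_shutdown uses.
 */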
static int
tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
{
	unsigned int resp_code;
	int result;
	void *ptr;
	char dbf_text[15];

	struct {
		struct chsc_header request;
		u16 operation_code;
		u16 reserved1;
		u32 reserved2;
		u32 reserved3;
		u32 reserved4[2];
		u32 delay_target;
		u32 reserved5[1009];
		struct chsc_header response;
		u32 reserved6;
	} *scsscf_area;

	if (!irq_ptr->is_thinint_irq)
		return -ENODEV;

	scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsscf_area) {
		QDIO_PRINT_WARN("No memory for setting delay target on " \
				"subchannel x%x.\n", irq_ptr->irq);
		return -ENOMEM;
	}
	scsscf_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code   = 0x1027,
	};

	scsscf_area->delay_target = delay_target<<16;

	result=chsc(scsscf_area);
	if (result) {
		QDIO_PRINT_WARN("could not set delay target on irq x%x, " \
				"cc=%i. Continuing.\n",irq_ptr->irq,result);
		result = -EIO;
		goto out;
	}

	resp_code = scsscf_area->response.code;
	if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
		QDIO_PRINT_WARN("response upon setting delay target " \
				"is 0x%x. Continuing.\n",resp_code);
		sprintf(dbf_text,"sdtR%4x",resp_code);
		QDIO_DBF_TEXT1(0,trace,dbf_text);
		QDIO_DBF_TEXT1(0,setup,dbf_text);
		ptr=&scsscf_area->response;
		QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
	}
	QDIO_DBF_TEXT2(0,trace,"delytrgt");
	QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
	result = 0; /* not critical */
out:
	free_page ((unsigned long) scsscf_area);
	return result;
}
int
qdio_cleanup(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];
	int rc;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	sprintf(dbf_text,"qcln%4x",irq_ptr->irq);
	QDIO_DBF_TEXT1(0,trace,dbf_text);
	QDIO_DBF_TEXT0(0,setup,dbf_text);

	rc = qdio_shutdown(cdev, how);
	if ((rc == 0) || (rc == -EINPROGRESS))
		rc = qdio_free(cdev);
	return rc;
}
int
qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr;
	int i;
	int result = 0;
	int rc;
	unsigned long flags;
	int timeout;
	char dbf_text[15];

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	down(&irq_ptr->setting_up_sema);

	sprintf(dbf_text,"qsqs%4x",irq_ptr->irq);
	QDIO_DBF_TEXT1(0,trace,dbf_text);
	QDIO_DBF_TEXT0(0,setup,dbf_text);

	/* mark all qs as uninteresting */
	for (i=0;i<irq_ptr->no_input_qs;i++)
		atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1);

	for (i=0;i<irq_ptr->no_output_qs;i++)
		atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1);

	tasklet_kill(&tiqdio_tasklet);

	for (i=0;i<irq_ptr->no_input_qs;i++) {
		qdio_unmark_q(irq_ptr->input_qs[i]);
		tasklet_kill(&irq_ptr->input_qs[i]->tasklet);
		wait_event_interruptible_timeout(cdev->private->wait_q,
						 !atomic_read(&irq_ptr->
							      input_qs[i]->
							      use_count),
						 QDIO_NO_USE_COUNT_TIMEOUT);
		if (atomic_read(&irq_ptr->input_qs[i]->use_count))
			result=-EINPROGRESS;
	}

	for (i=0;i<irq_ptr->no_output_qs;i++) {
		tasklet_kill(&irq_ptr->output_qs[i]->tasklet);
		wait_event_interruptible_timeout(cdev->private->wait_q,
						 !atomic_read(&irq_ptr->
							      output_qs[i]->
							      use_count),
						 QDIO_NO_USE_COUNT_TIMEOUT);
		if (atomic_read(&irq_ptr->output_qs[i]->use_count))
			result=-EINPROGRESS;
	}

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
	if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) {
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
		timeout=QDIO_CLEANUP_CLEAR_TIMEOUT;
	} else if (how&QDIO_FLAG_CLEANUP_USING_HALT) {
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
		timeout=QDIO_CLEANUP_HALT_TIMEOUT;
	} else { /* default behaviour */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
		timeout=QDIO_CLEANUP_HALT_TIMEOUT;
	}
	if (rc == -ENODEV) {
		/* No need to wait for device no longer present. */
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	} else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) {
		/*
		 * Whoever put another handler there, has to cope with the
		 * interrupt themselves. Might happen if qdio_shutdown was
		 * called on already shutdown queues, but this shouldn't have
		 * bad side effects.
		 */
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	} else if (rc == 0) {
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
		ccw_device_set_timeout(cdev, timeout);
		spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);

		wait_event(cdev->private->wait_q,
			   irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
			   irq_ptr->state == QDIO_IRQ_STATE_ERR);
	} else {
		QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
				"device %s\n", result, cdev->dev.bus_id);
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		result = rc;
		goto out;
	}

	if (irq_ptr->is_thinint_irq) {
		qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
		tiqdio_set_subchannel_ind(irq_ptr,1);
		/* reset adapter interrupt indicators */
	}

	/* exchange int handlers, if necessary */
	if ((void*)cdev->handler == (void*)qdio_handler)
		cdev->handler=irq_ptr->original_int_handler;

	/* Ignore errors. */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	ccw_device_set_timeout(cdev, 0);
out:
	up(&irq_ptr->setting_up_sema);
	return result;
}
int
qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	down(&irq_ptr->setting_up_sema);

	sprintf(dbf_text,"qfqs%4x",irq_ptr->irq);
	QDIO_DBF_TEXT1(0,trace,dbf_text);
	QDIO_DBF_TEXT0(0,setup,dbf_text);

	cdev->private->qdio_data = NULL;

	up(&irq_ptr->setting_up_sema);

	qdio_release_irq_memory(irq_ptr);
	module_put(THIS_MODULE);
	return 0;
}
static inline void
qdio_allocate_do_dbf(struct qdio_initialize *init_data)
{
	char dbf_text[20]; /* if a printf printed out more than 8 chars */

	sprintf(dbf_text,"qfmt:%x",init_data->q_format);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
	sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*));
	QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*));
	QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*));
	sprintf(dbf_text,"miit%4x",init_data->min_input_threshold);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"mait%4x",init_data->max_input_threshold);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"miot%4x",init_data->min_output_threshold);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"maot%4x",init_data->max_output_threshold);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"niq:%4x",init_data->no_input_qs);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"noq:%4x",init_data->no_output_qs);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*));
	QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*));
	QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long));
	QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long));
	QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*));
	QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
}
static inline void
qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
{
	irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
	irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;

	irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib);

	irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl);

	irq_ptr->qdr->qdf0[i].slsba=
		(unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]);

	irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY;
	irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY;
	irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY;
	irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
}

static inline void
qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
			       int j, int iqfmt)
{
	irq_ptr->output_qs[i]->is_iqdio_q = iqfmt;
	irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;

	irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib);

	irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl);

	irq_ptr->qdr->qdf0[i+j].slsba=
		(unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]);

	irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY;
	irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY;
	irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY;
	irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY;
}
2470 qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2474 for (i=0;i<irq_ptr->no_input_qs;i++) {
2475 irq_ptr->input_qs[i]->siga_sync=
2476 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2477 irq_ptr->input_qs[i]->siga_in=
2478 irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2479 irq_ptr->input_qs[i]->siga_out=
2480 irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2481 irq_ptr->input_qs[i]->siga_sync_done_on_thinints=
2482 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2483 irq_ptr->input_qs[i]->hydra_gives_outbound_pcis=
2484 irq_ptr->hydra_gives_outbound_pcis;
2485 irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis=
2487 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2488 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2489 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2490 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));

static inline void
qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
{
	int i;

	for (i=0;i<irq_ptr->no_output_qs;i++) {
		irq_ptr->output_qs[i]->siga_sync=
			irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
		irq_ptr->output_qs[i]->siga_in=
			irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
		irq_ptr->output_qs[i]->siga_out=
			irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
		irq_ptr->output_qs[i]->siga_sync_done_on_thinints=
			irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
		irq_ptr->output_qs[i]->hydra_gives_outbound_pcis=
			irq_ptr->hydra_gives_outbound_pcis;
		irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis=
			((irq_ptr->qdioac&
			  (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
			   CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
			 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
			  CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
	}
}
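
/*
 * Sanity-check the interrupt that answers the establish-queues CCW:
 * anything but channel end + device end in the device status (or any
 * channel status at all) means the establish failed.
 */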

static int
qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
				    int dstat)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];

	irq_ptr = cdev->private->qdio_data;

	if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
		sprintf(dbf_text,"ick1%4x",irq_ptr->irq);
		QDIO_DBF_TEXT2(1,trace,dbf_text);
		QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
		QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
		QDIO_PRINT_ERR("received check condition on establish " \
			       "queues on irq 0x%x (cs=x%x, ds=x%x).\n",
			       irq_ptr->irq,cstat,dstat);
		qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
		return 1;
	}

	if (!(dstat & DEV_STAT_DEV_END)) {
		QDIO_DBF_TEXT2(1,setup,"eq:no de");
		QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
		QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
		QDIO_PRINT_ERR("establish queues on irq %04x: didn't get "
			       "device end: dstat=%02x, cstat=%02x\n",
			       irq_ptr->irq, dstat, cstat);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		return 1;
	}

	if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
		QDIO_DBF_TEXT2(1,setup,"eq:badio");
		QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
		QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
		QDIO_PRINT_ERR("establish queues on irq %04x: got "
			       "the following devstat: dstat=%02x, "
			       "cstat=%02x\n",
			       irq_ptr->irq, dstat, cstat);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		return 1;
	}
	return 0;
}

static void
qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];

	irq_ptr = cdev->private->qdio_data;

	sprintf(dbf_text,"qehi%4x",cdev->private->irq);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	QDIO_DBF_TEXT0(0,trace,dbf_text);

	if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) {
		ccw_device_set_timeout(cdev, 0);
		return;
	}

	qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
	ccw_device_set_timeout(cdev, 0);
}
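
/*
 * A rough sketch of the sequence an upper-layer driver goes through
 * (assumptions about the caller, not enforced here): qdio_initialize()
 * (or qdio_allocate() followed by qdio_establish()), then qdio_activate();
 * data transfer is then driven via do_QDIO() with QDIO_FLAG_SYNC_INPUT or
 * QDIO_FLAG_SYNC_OUTPUT; teardown goes through qdio_shutdown()/qdio_free()
 * or qdio_cleanup().
 */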

int
qdio_initialize(struct qdio_initialize *init_data)
{
	int rc;
	char dbf_text[15];

	sprintf(dbf_text,"qini%4x",init_data->cdev->private->irq);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	QDIO_DBF_TEXT0(0,trace,dbf_text);

	rc = qdio_allocate(init_data);
	if (!rc) {
		rc = qdio_establish(init_data);
		if (rc)
			qdio_free(init_data->cdev);
	}

	return rc;
}

int
qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];

	sprintf(dbf_text,"qalc%4x",init_data->cdev->private->irq);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	QDIO_DBF_TEXT0(0,trace,dbf_text);
	if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
	     (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
	     ((init_data->no_input_qs) && (!init_data->input_handler)) ||
	     ((init_data->no_output_qs) && (!init_data->output_handler)) )
		return -EINVAL;

	if (!init_data->input_sbal_addr_array)
		return -EINVAL;

	if (!init_data->output_sbal_addr_array)
		return -EINVAL;

	qdio_allocate_do_dbf(init_data);

	/* allocate the main per-subchannel structure */
	irq_ptr=kmalloc(sizeof(struct qdio_irq), GFP_KERNEL | GFP_DMA);

	QDIO_DBF_TEXT0(0,setup,"irq_ptr:");
	QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));

	if (!irq_ptr) {
		QDIO_PRINT_ERR("kmalloc of irq_ptr failed!\n");
		return -ENOMEM;
	}

	memset(irq_ptr,0,sizeof(struct qdio_irq));

	init_MUTEX(&irq_ptr->setting_up_sema);

	/* QDR must be in DMA area since CCW data address is only 32 bit */
	irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
	if (!(irq_ptr->qdr)) {
		kfree(irq_ptr);
		QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n");
		return -ENOMEM;
	}
	QDIO_DBF_TEXT0(0,setup,"qdr:");
	QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*));

	if (qdio_alloc_qs(irq_ptr,
			  init_data->no_input_qs,
			  init_data->no_output_qs)) {
		qdio_release_irq_memory(irq_ptr);
		return -ENOMEM;
	}

	init_data->cdev->private->qdio_data = irq_ptr;

	qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);

	return 0;
}
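
/*
 * Populate the previously allocated structures from the driver's
 * qdio_initialize data: QDR, QIB, queue descriptors, thresholds, the
 * establish/activate CIWs and the interrupt handler take-over.
 */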

int qdio_fill_irq(struct qdio_initialize *init_data)
{
	int i;
	char dbf_text[15];
	struct ciw *ciw;
	int is_iqdio;
	struct qdio_irq *irq_ptr;

	irq_ptr = init_data->cdev->private->qdio_data;

	memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr));

	/* wipes qib.ac, required by ar7063 */
	memset(irq_ptr->qdr,0,sizeof(struct qdr));

	irq_ptr->int_parm=init_data->int_parm;

	irq_ptr->irq = init_data->cdev->private->irq;
	irq_ptr->no_input_qs=init_data->no_input_qs;
	irq_ptr->no_output_qs=init_data->no_output_qs;

	if (init_data->q_format==QDIO_IQDIO_QFMT) {
		irq_ptr->is_iqdio_irq=1;
		irq_ptr->is_thinint_irq=1;
	} else {
		irq_ptr->is_iqdio_irq=0;
		irq_ptr->is_thinint_irq=hydra_thinints;
	}
	sprintf(dbf_text,"is_i_t%1x%1x",
		irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq);
	QDIO_DBF_TEXT2(0,setup,dbf_text);

	if (irq_ptr->is_thinint_irq) {
		irq_ptr->dev_st_chg_ind=qdio_get_indicator();
		QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
		if (!irq_ptr->dev_st_chg_ind) {
			QDIO_PRINT_WARN("no indicator location available " \
					"for irq 0x%x\n",irq_ptr->irq);
			qdio_release_irq_memory(irq_ptr);
			return -ENOBUFS;
		}
	}

	/* defaults */
	irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD;
	irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT;
	irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD;
	irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT;

	qdio_fill_qs(irq_ptr, init_data->cdev,
		     init_data->no_input_qs,
		     init_data->no_output_qs,
		     init_data->input_handler,
		     init_data->output_handler,init_data->int_parm,
		     init_data->q_format,init_data->flags,
		     init_data->input_sbal_addr_array,
		     init_data->output_sbal_addr_array);

	if (!try_module_get(THIS_MODULE)) {
		QDIO_PRINT_CRIT("try_module_get() failed!\n");
		qdio_release_irq_memory(irq_ptr);
		return -EINVAL;
	}

	qdio_fill_thresholds(irq_ptr,init_data->no_input_qs,
			     init_data->no_output_qs,
			     init_data->min_input_threshold,
			     init_data->max_input_threshold,
			     init_data->min_output_threshold,
			     init_data->max_output_threshold);

	/* fill in qdr */
	irq_ptr->qdr->qfmt=init_data->q_format;
	irq_ptr->qdr->iqdcnt=init_data->no_input_qs;
	irq_ptr->qdr->oqdcnt=init_data->no_output_qs;
	irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */
	irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4;

	irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib;
	irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;

	/* fill in qib */
	irq_ptr->qib.qfmt=init_data->q_format;
	if (init_data->no_input_qs)
		irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
	if (init_data->no_output_qs)
		irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib);
	memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8);

	qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format,
			     init_data->qib_param_field,
			     init_data->no_input_qs,
			     init_data->no_output_qs,
			     init_data->input_slib_elements,
			     init_data->output_slib_elements);

	/* first input descriptors, then output descriptors */
	is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0;

	for (i=0;i<init_data->no_input_qs;i++)
		qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio);

	for (i=0;i<init_data->no_output_qs;i++)
		qdio_allocate_fill_output_desc(irq_ptr, i,
					       init_data->no_input_qs,
					       is_iqdio);

	/* qdr, qib, sls, slsbs, slibs, sbales filled. */

	/* get qdio commands */
	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
	if (!ciw) {
		QDIO_DBF_TEXT2(1,setup,"no eq");
		QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. "
				"Trying to use default.\n");
	} else
		irq_ptr->equeue = *ciw;
	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
	if (!ciw) {
		QDIO_DBF_TEXT2(1,setup,"no aq");
		QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. "
				"Trying to use default.\n");
	} else
		irq_ptr->aqueue = *ciw;

	/* Set new interrupt handler. */
	irq_ptr->original_int_handler = init_data->cdev->handler;
	init_data->cdev->handler = qdio_handler;

	return 0;
}
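
/*
 * Issue the establish-queues CCW (built from the equeue CIW) against the
 * subchannel and wait until qdio_establish_handle_irq() has moved the
 * state to ESTABLISHED, then query the SIGA characteristics via CHSC.
 */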

int
qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	unsigned long saveflags;
	int result, result2;
	struct ccw_device *cdev;
	char dbf_text[20];

	cdev=init_data->cdev;
	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -EINVAL;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	down(&irq_ptr->setting_up_sema);

	qdio_fill_irq(init_data);

	/* the thinint CHSC stuff */
	if (irq_ptr->is_thinint_irq) {
		result = tiqdio_set_subchannel_ind(irq_ptr,0);
		if (result) {
			up(&irq_ptr->setting_up_sema);
			qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
			return result;
		}
		tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
	}

	sprintf(dbf_text,"qest%4x",cdev->private->irq);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	QDIO_DBF_TEXT0(0,trace,dbf_text);

	/* establish q */
	irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags=CCW_FLAG_SLI;
	irq_ptr->ccw.count=irq_ptr->equeue.count;
	irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);

	ccw_device_set_options(cdev, 0);
	result=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
					QDIO_DOING_ESTABLISH,0, 0,
					QDIO_ESTABLISH_TIMEOUT);
	if (result) {
		result2=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
						 QDIO_DOING_ESTABLISH,0,0,
						 QDIO_ESTABLISH_TIMEOUT);
		sprintf(dbf_text,"eq:io%4x",result);
		QDIO_DBF_TEXT2(1,setup,dbf_text);
		if (result2) {
			sprintf(dbf_text,"eq:io%4x",result2);
			QDIO_DBF_TEXT2(1,setup,dbf_text);
		}
		QDIO_PRINT_WARN("establish queues on irq %04x: do_IO " \
				"returned %i, next try returned %i\n",
				irq_ptr->irq,result,result2);
		result=result2;
		if (result)
			ccw_device_set_timeout(cdev, 0);
	}

	spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);

	if (result) {
		up(&irq_ptr->setting_up_sema);
		qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
		return result;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		QDIO_ESTABLISH_TIMEOUT);

	if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)
		result = 0;
	else {
		up(&irq_ptr->setting_up_sema);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq);
	/* if this gets set once, we're running under VM and can omit SVSes */
	if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
		omit_svs=1;

	sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac);
	QDIO_DBF_TEXT2(0,setup,dbf_text);

	sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac);
	QDIO_DBF_TEXT2(0,setup,dbf_text);

	irq_ptr->hydra_gives_outbound_pcis=
		irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED;
	irq_ptr->sync_done_on_outb_pcis=
		irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS;

	qdio_initialize_set_siga_flags_input(irq_ptr);
	qdio_initialize_set_siga_flags_output(irq_ptr);

	up(&irq_ptr->setting_up_sema);

	return result;
}
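
/*
 * Issue the activate-queues CCW (from the aqueue CIW) and wait for the
 * adapter to start queue processing; for thin-interrupt devices the input
 * queues are put on the tiq list first so adapter interrupts find them.
 */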

int
qdio_activate(struct ccw_device *cdev, int flags)
{
	struct qdio_irq *irq_ptr;
	int i,result=0,result2;
	unsigned long saveflags;
	char dbf_text[20]; /* see qdio_initialize */

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	down(&irq_ptr->setting_up_sema);
	if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) {
		result=-EBUSY;
		goto out;
	}

	sprintf(dbf_text,"qact%4x", irq_ptr->irq);
	QDIO_DBF_TEXT2(0,setup,dbf_text);
	QDIO_DBF_TEXT2(0,trace,dbf_text);

	/* activate q */
	irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags=CCW_FLAG_SLI;
	irq_ptr->ccw.count=irq_ptr->aqueue.count;
	irq_ptr->ccw.cda=QDIO_GET_ADDR(0);

	spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);

	ccw_device_set_timeout(cdev, 0);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
	result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
				0, DOIO_DENY_PREFETCH);
	if (result) {
		result2=ccw_device_start(cdev,&irq_ptr->ccw,
					 QDIO_DOING_ACTIVATE,0,0);
		sprintf(dbf_text,"aq:io%4x",result);
		QDIO_DBF_TEXT2(1,setup,dbf_text);
		if (result2) {
			sprintf(dbf_text,"aq:io%4x",result2);
			QDIO_DBF_TEXT2(1,setup,dbf_text);
		}
		QDIO_PRINT_WARN("activate queues on irq %04x: do_IO " \
				"returned %i, next try returned %i\n",
				irq_ptr->irq,result,result2);
		result=result2;
	}

	spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
	if (result)
		goto out;

	for (i=0;i<irq_ptr->no_input_qs;i++) {
		if (irq_ptr->is_thinint_irq) {
			/*
			 * that way we know, that, if we will get interrupted
			 * by tiqdio_inbound_processing, qdio_unmark_q will
			 * not be called
			 */
			qdio_reserve_q(irq_ptr->input_qs[i]);
			qdio_mark_tiq(irq_ptr->input_qs[i]);
			qdio_release_q(irq_ptr->input_qs[i]);
		}
	}

	if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) {
		for (i=0;i<irq_ptr->no_input_qs;i++) {
			irq_ptr->input_qs[i]->is_input_q|=
				QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT;
		}
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
					 ((irq_ptr->state ==
					   QDIO_IRQ_STATE_STOPPED) ||
					  (irq_ptr->state ==
					   QDIO_IRQ_STATE_ERR)),
					 QDIO_ACTIVATE_TIMEOUT);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		up(&irq_ptr->setting_up_sema);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		down(&irq_ptr->setting_up_sema);
		result = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		result = 0;
	}
 out:
	up(&irq_ptr->setting_up_sema);

	return result;
}

/* buffers filled forwards again to make Rick happy */
static inline void
qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
			unsigned int count, struct qdio_buffer *buffers)
{
	for (;;) {
		set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_INPUT_EMPTY);
		count--;
		if (!count) break;
		qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
	}

	/* not necessary, as the queues are synced during the SIGA read */
}

static inline void
qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
			 unsigned int count, struct qdio_buffer *buffers)
{
	for (;;) {
		set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_OUTPUT_PRIMED);
		count--;
		if (!count) break;
		qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
	}

	/* SIGA write will sync the queues */
}
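
/*
 * Note on the index arithmetic above: QDIO_MAX_BUFFERS_PER_Q is a power
 * of two (128), so "(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1)" is a cheap
 * "(qidx+1)%128" ring wraparound, e.g. 127 -> 0.
 */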

static inline void
do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
		       unsigned int qidx, unsigned int count,
		       struct qdio_buffer *buffers)
{
	int used_elements;

	/* This is the inbound handling of queues */
	used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;

	qdio_do_qdio_fill_input(q,qidx,count,buffers);

	if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
	    (callflags&QDIO_FLAG_UNDER_INTERRUPT))
		atomic_swap(&q->polling,0);

	if (used_elements)
		return;
	if (callflags&QDIO_FLAG_DONT_SIGA)
		return;
	if (q->siga_in) {
		int result;

		result=qdio_siga_input(q);
		if (result) {
			if (q->siga_error)
				q->error_status_flags|=
					QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
			q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
			q->siga_error=result;
		}
	}
}

static inline void
do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
			unsigned int qidx, unsigned int count,
			struct qdio_buffer *buffers)
{
	int used_elements;

	/* This is the outbound handling of queues */
#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.start_time_outbound=NOW;
#endif /* QDIO_PERFORMANCE_STATS */

	qdio_do_qdio_fill_output(q,qidx,count,buffers);

	used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;

	if (callflags&QDIO_FLAG_DONT_SIGA) {
#ifdef QDIO_PERFORMANCE_STATS
		perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
		perf_stats.outbound_cnt++;
#endif /* QDIO_PERFORMANCE_STATS */
		return;
	}
	if (q->is_iqdio_q) {
		/* one siga for every sbal */
		while (count--)
			qdio_kick_outbound_q(q);

		__qdio_outbound_processing(q);
	} else {
		/* under VM, we do a SIGA sync unconditionally */
		SYNC_MEMORY;

		/*
		 * w/o shadow queues (else branch of
		 * SYNC_MEMORY :-/ ), we try to
		 * fast-requeue buffers
		 */
		if (q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
				    &(QDIO_MAX_BUFFERS_PER_Q-1)]!=
		    SLSB_CU_OUTPUT_PRIMED) {
			/* no fast-requeue possible: kick the queue */
			qdio_kick_outbound_q(q);
		} else {
			QDIO_DBF_TEXT3(0,trace, "fast-req");
#ifdef QDIO_PERFORMANCE_STATS
			perf_stats.fast_reqs++;
#endif /* QDIO_PERFORMANCE_STATS */
		}

		/*
		 * only marking the q could take too long,
		 * the upper layer module could do a lot of
		 * traffic in that time
		 */
		__qdio_outbound_processing(q);
	}

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
	perf_stats.outbound_cnt++;
#endif /* QDIO_PERFORMANCE_STATS */
}
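
/*
 * A minimal usage sketch of do_QDIO() below (hypothetical driver code,
 * values for illustration only): hand 16 filled outbound buffers starting
 * at index 0 of output queue 0 to the adapter:
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, 0, 0, 16, bufs);
 *
 * and give one emptied inbound buffer at index idx back to the adapter:
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, 1, bufs);
 */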

/* count must be 1 in iqdio */
int
do_QDIO(struct ccw_device *cdev,unsigned int callflags,
	unsigned int queue_number, unsigned int qidx,
	unsigned int count,struct qdio_buffer *buffers)
{
	struct qdio_irq *irq_ptr;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[20];

	sprintf(dbf_text,"doQD%04x",cdev->private->irq);
	QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) ||
	     (count>QDIO_MAX_BUFFERS_PER_Q) ||
	     (queue_number>QDIO_MAX_QUEUES_PER_IRQ) )
		return -EINVAL;

	if (count==0)
		return 0;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

#ifdef CONFIG_QDIO_DEBUG
	if (callflags&QDIO_FLAG_SYNC_INPUT)
		QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
			      sizeof(void*));
	else
		QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number],
			      sizeof(void*));
	sprintf(dbf_text,"flag%04x",callflags);
	QDIO_DBF_TEXT3(0,trace,dbf_text);
	sprintf(dbf_text,"qi%02xct%02x",qidx,count);
	QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	if (callflags&QDIO_FLAG_SYNC_INPUT)
		do_qdio_handle_inbound(irq_ptr->input_qs[queue_number],
				       callflags, qidx, count, buffers);
	else if (callflags&QDIO_FLAG_SYNC_OUTPUT)
		do_qdio_handle_outbound(irq_ptr->output_qs[queue_number],
					callflags, qidx, count, buffers);
	else {
		QDIO_DBF_TEXT3(1,trace,"doQD:inv");
		return -EINVAL;
	}
	return 0;
}

#ifdef QDIO_PERFORMANCE_STATS
static int
qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
			int buffer_length, int *eof, void *data)
{
	int c=0;

	/* we are always called with buffer_length=4k, so we deliver
	   everything on the first read */
	if (offset>0)
		return 0;

#define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
	_OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c);
	_OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c);
	_OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c);
	_OUTP_IT("Number of tasklet runs (total) : %u\n",
		 perf_stats.tl_runs);
	_OUTP_IT("Number of SIGA sync's issued : %u\n",
		 perf_stats.siga_syncs);
	_OUTP_IT("Number of SIGA in's issued : %u\n",
		 perf_stats.siga_ins);
	_OUTP_IT("Number of SIGA out's issued : %u\n",
		 perf_stats.siga_outs);
	_OUTP_IT("Number of PCIs caught : %u\n",
		 perf_stats.pcis);
	_OUTP_IT("Number of adapter interrupts caught : %u\n",
		 perf_stats.thinints);
	_OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %u\n",
		 perf_stats.fast_reqs);
	_OUTP_IT("Total time of all inbound actions (us) incl. UL : %u\n",
		 perf_stats.inbound_time);
	_OUTP_IT("Number of inbound transfers : %u\n",
		 perf_stats.inbound_cnt);
	_OUTP_IT("Total time of all outbound do_QDIOs (us) : %u\n",
		 perf_stats.outbound_time);
	_OUTP_IT("Number of do_QDIOs outbound : %u\n",
		 perf_stats.outbound_cnt);
	_OUTP_IT("\n");

	return c;
}

static struct proc_dir_entry *qdio_perf_proc_file;
#endif /* QDIO_PERFORMANCE_STATS */

static void
qdio_add_procfs_entry(void)
{
#ifdef QDIO_PERFORMANCE_STATS
	proc_perf_file_registration=0;
	qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
					      S_IFREG|0444,&proc_root);
	if (qdio_perf_proc_file) {
		qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read;
	} else proc_perf_file_registration=-1;

	if (proc_perf_file_registration)
		QDIO_PRINT_WARN("was not able to register perf. " \
				"proc-file (%i).\n",
				proc_perf_file_registration);
#endif /* QDIO_PERFORMANCE_STATS */
}

static void
qdio_remove_procfs_entry(void)
{
#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.tl_runs=0;

	if (!proc_perf_file_registration) /* means registration went ok earlier */
		remove_proc_entry(QDIO_PERF,&proc_root);
#endif /* QDIO_PERFORMANCE_STATS */
}

static void
tiqdio_register_thinints(void)
{
	char dbf_text[20];

	register_thinint_result=
		s390_register_adapter_interrupt(&tiqdio_thinint_handler);
	if (register_thinint_result) {
		sprintf(dbf_text,"regthn%x",(register_thinint_result&0xff));
		QDIO_DBF_TEXT0(0,setup,dbf_text);
		QDIO_PRINT_ERR("failed to register adapter handler " \
			       "(rc=%i).\nAdapter interrupts might " \
			       "not work. Continuing.\n",
			       register_thinint_result);
	}
}

static void
tiqdio_unregister_thinints(void)
{
	if (!register_thinint_result)
		s390_unregister_adapter_interrupt(&tiqdio_thinint_handler);
}
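
/*
 * Set up the cacheline of device-state-change indicators handed out to
 * thin-interrupt subchannels; slot 0 is marked used from the start.
 */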

static int
qdio_get_qdio_memory(void)
{
	int i;

	indicator_used[0]=1;

	for (i=1;i<INDICATORS_PER_CACHELINE;i++)
		indicator_used[i]=0;
	indicators=(__u32*)kmalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
				   GFP_KERNEL);
	if (!indicators) return -ENOMEM;
	memset(indicators,0,sizeof(__u32)*(INDICATORS_PER_CACHELINE));
	return 0;
}

static void
qdio_release_qdio_memory(void)
{
	if (indicators)
		kfree(indicators);
}

static void
qdio_unregister_dbf_views(void)
{
	if (qdio_dbf_setup)
		debug_unregister(qdio_dbf_setup);
	if (qdio_dbf_sbal)
		debug_unregister(qdio_dbf_sbal);
	if (qdio_dbf_sense)
		debug_unregister(qdio_dbf_sense);
	if (qdio_dbf_trace)
		debug_unregister(qdio_dbf_trace);
#ifdef CONFIG_QDIO_DEBUG
	if (qdio_dbf_slsb_out)
		debug_unregister(qdio_dbf_slsb_out);
	if (qdio_dbf_slsb_in)
		debug_unregister(qdio_dbf_slsb_in);
#endif /* CONFIG_QDIO_DEBUG */
}
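
/*
 * Register the s390 debug feature (dbf) areas this driver logs to; each
 * area gets a hex/ascii view and a default level, and a failure anywhere
 * unwinds all previously registered areas via the oom path.
 */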

static int
qdio_register_dbf_views(void)
{
	qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME,
				      QDIO_DBF_SETUP_PAGES,
				      QDIO_DBF_SETUP_NR_AREAS,
				      QDIO_DBF_SETUP_LEN);
	if (!qdio_dbf_setup)
		goto oom;
	debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL);

	qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME,
				     QDIO_DBF_SBAL_PAGES,
				     QDIO_DBF_SBAL_NR_AREAS,
				     QDIO_DBF_SBAL_LEN);
	if (!qdio_dbf_sbal)
		goto oom;
	debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL);

	qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME,
				      QDIO_DBF_SENSE_PAGES,
				      QDIO_DBF_SENSE_NR_AREAS,
				      QDIO_DBF_SENSE_LEN);
	if (!qdio_dbf_sense)
		goto oom;
	debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL);

	qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME,
				      QDIO_DBF_TRACE_PAGES,
				      QDIO_DBF_TRACE_NR_AREAS,
				      QDIO_DBF_TRACE_LEN);
	if (!qdio_dbf_trace)
		goto oom;
	debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);

#ifdef CONFIG_QDIO_DEBUG
	qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
					 QDIO_DBF_SLSB_OUT_PAGES,
					 QDIO_DBF_SLSB_OUT_NR_AREAS,
					 QDIO_DBF_SLSB_OUT_LEN);
	if (!qdio_dbf_slsb_out)
		goto oom;
	debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL);

	qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME,
					QDIO_DBF_SLSB_IN_PAGES,
					QDIO_DBF_SLSB_IN_NR_AREAS,
					QDIO_DBF_SLSB_IN_LEN);
	if (!qdio_dbf_slsb_in)
		goto oom;
	debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
#endif /* CONFIG_QDIO_DEBUG */
	return 0;
oom:
	QDIO_PRINT_ERR("not enough memory for dbf.\n");
	qdio_unregister_dbf_views();
	return -ENOMEM;
}
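
/*
 * Module bring-up order: indicator memory first, then the dbf areas,
 * then the perf proc file, the CHSC availability check and finally the
 * adapter (thin) interrupt handler.
 */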

static int __init
init_QDIO(void)
{
	int res;
#ifdef QDIO_PERFORMANCE_STATS
	void *ptr;
#endif /* QDIO_PERFORMANCE_STATS */

	printk("qdio: loading %s\n",version);

	res=qdio_get_qdio_memory();
	if (res)
		return res;

	res = qdio_register_dbf_views();
	if (res)
		return res;

	QDIO_DBF_TEXT0(0,setup,"initQDIO");

#ifdef QDIO_PERFORMANCE_STATS
	memset((void*)&perf_stats,0,sizeof(perf_stats));
	QDIO_DBF_TEXT0(0,setup,"perfstat");
	ptr=&perf_stats;
	QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
#endif /* QDIO_PERFORMANCE_STATS */

	qdio_add_procfs_entry();

	if (tiqdio_check_chsc_availability())
		QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");

	tiqdio_register_thinints();

	return 0;
}

static void __exit
cleanup_QDIO(void)
{
	tiqdio_unregister_thinints();
	qdio_remove_procfs_entry();
	qdio_release_qdio_memory();
	qdio_unregister_dbf_views();

	printk("qdio: %s: module removed\n",version);
}

module_init(init_QDIO);
module_exit(cleanup_QDIO);

EXPORT_SYMBOL(qdio_allocate);
EXPORT_SYMBOL(qdio_establish);
EXPORT_SYMBOL(qdio_initialize);
EXPORT_SYMBOL(qdio_activate);
EXPORT_SYMBOL(do_QDIO);
EXPORT_SYMBOL(qdio_shutdown);
EXPORT_SYMBOL(qdio_free);
EXPORT_SYMBOL(qdio_cleanup);
EXPORT_SYMBOL(qdio_synchronize);