2 * linux/drivers/s390/cio/qdio_main.c
4 * Linux for s390 qdio support, buffer handling, qdio API and module support.
6 * Copyright 2000,2008 IBM Corp.
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Jan Glauber <jang@linux.vnet.ibm.com>
9 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/timer.h>
15 #include <linux/delay.h>
16 #include <asm/atomic.h>
17 #include <asm/debug.h>
24 #include "qdio_debug.h"
25 #include "qdio_perf.h"
27 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
28 "Jan Glauber <jang@linux.vnet.ibm.com>");
29 MODULE_DESCRIPTION("QDIO base support");
30 MODULE_LICENSE("GPL");
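/*
 * The do_siga_* helpers below wrap the SIGA instruction: function code 2
 * selects SIGA-s (sync), function code 1 selects SIGA-r (initiate input),
 * and do_siga_output is handed the SIGA-w/wt function code by its caller.
 */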
32 static inline int do_siga_sync(struct subchannel_id schid,
33 unsigned int out_mask, unsigned int in_mask)
35 register unsigned long __fc asm ("0") = 2;
36 register struct subchannel_id __schid asm ("1") = schid;
37 register unsigned long out asm ("2") = out_mask;
38 register unsigned long in asm ("3") = in_mask;
46 : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
50 static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
52 register unsigned long __fc asm ("0") = 1;
53 register struct subchannel_id __schid asm ("1") = schid;
54 register unsigned long __mask asm ("2") = mask;
62 : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
67 * do_siga_output - perform SIGA-w/wt function
68 * @schid: subchannel id or in case of QEBSM the subchannel token
69 * @mask: which output queues to process
70 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
71 * @fc: function code to perform
73 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
74 * Note: For IQDC unicast queues only the highest priority queue is processed.
76 static inline int do_siga_output(unsigned long schid, unsigned long mask,
77 u32 *bb, unsigned int fc)
79 register unsigned long __fc asm("0") = fc;
80 register unsigned long __schid asm("1") = schid;
81 register unsigned long __mask asm("2") = mask;
82 int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
90 : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
92 *bb = ((unsigned int) __fc) >> 31;
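/*
 * Map the condition code qualifier (ccq) returned by EQBS/SQBS to a
 * driver-internal return value; unexpected qualifiers are traced as errors.
 */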
96 static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
100 /* all done or next buffer state different */
101 if (ccq == 0 || ccq == 32)
103 /* not all buffers processed */
104 if (ccq == 96 || ccq == 97)
106 /* notify devices immediately */
107 sprintf(dbf_text, "%d", ccq);
108 QDIO_DBF_TEXT2(1, trace, dbf_text);
113 * qdio_do_eqbs - extract buffer states for QEBSM
114 * @q: queue to manipulate
115 * @state: state of the extracted buffers
116 * @start: buffer number to start at
117 * @count: count of buffers to examine
119 * Returns the number of successfully extracted equal buffer states.
120 * Stops processing if a state is different from the last buffer's state.
122 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
123 int start, int count)
125 unsigned int ccq = 0;
126 int tmp_count = count, tmp_start = start;
131 BUG_ON(!q->irq_ptr->sch_token);
134 nr += q->irq_ptr->nr_input_qs;
136 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
137 rc = qdio_check_ccq(q, ccq);
139 /* At least one buffer was processed, return and extract the remaining
142 if ((ccq == 96) && (count != tmp_count))
143 return (count - tmp_count);
145 QDIO_DBF_TEXT5(1, trace, "eqAGAIN");
150 QDIO_DBF_TEXT2(1, trace, "eqberr");
151 sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr);
152 QDIO_DBF_TEXT2(1, trace, dbf_text);
153 q->handler(q->irq_ptr->cdev,
154 QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
155 0, -1, -1, q->irq_ptr->int_parm);
158 return count - tmp_count;
162 * qdio_do_sqbs - set buffer states for QEBSM
163 * @q: queue to manipulate
164 * @state: new state of the buffers
165 * @start: first buffer number to change
166 * @count: how many buffers to change
168 * Returns the number of successfully changed buffers.
169 * Retries until the specified count of buffer states is set or an
172 static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
175 unsigned int ccq = 0;
176 int tmp_count = count, tmp_start = start;
181 BUG_ON(!q->irq_ptr->sch_token);
184 nr += q->irq_ptr->nr_input_qs;
186 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
187 rc = qdio_check_ccq(q, ccq);
189 QDIO_DBF_TEXT5(1, trace, "sqAGAIN");
193 QDIO_DBF_TEXT3(1, trace, "sqberr");
194 sprintf(dbf_text, "%2x,%2x", count, tmp_count);
195 QDIO_DBF_TEXT3(1, trace, dbf_text);
196 sprintf(dbf_text, "%d,%d", ccq, nr);
197 QDIO_DBF_TEXT3(1, trace, dbf_text);
199 q->handler(q->irq_ptr->cdev,
200 QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
201 0, -1, -1, q->irq_ptr->int_parm);
205 return count - tmp_count;
208 /* returns number of examined buffers and their common state in *state */
209 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
210 unsigned char *state, unsigned int count)
212 unsigned char __state = 0;
215 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
216 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
219 return qdio_do_eqbs(q, state, bufnr, count);
221 for (i = 0; i < count; i++) {
223 __state = q->slsb.val[bufnr];
224 else if (q->slsb.val[bufnr] != __state)
226 bufnr = next_buf(bufnr);
232 inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
233 unsigned char *state)
235 return get_buf_states(q, bufnr, state, 1);
238 /* wrap-around safe setting of slsb states, returns number of changed buffers */
239 static inline int set_buf_states(struct qdio_q *q, int bufnr,
240 unsigned char state, int count)
244 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
245 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
248 return qdio_do_sqbs(q, state, bufnr, count);
250 for (i = 0; i < count; i++) {
251 xchg(&q->slsb.val[bufnr], state);
252 bufnr = next_buf(bufnr);
257 static inline int set_buf_state(struct qdio_q *q, int bufnr,
260 return set_buf_states(q, bufnr, state, 1);
263 /* set slsb states to initial state */
264 void qdio_init_buf_states(struct qdio_irq *irq_ptr)
269 for_each_input_queue(irq_ptr, q, i)
270 set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
271 QDIO_MAX_BUFFERS_PER_Q);
272 for_each_output_queue(irq_ptr, q, i)
273 set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
274 QDIO_MAX_BUFFERS_PER_Q);
277 static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
282 if (!need_siga_sync(q))
285 qdio_perf_stat_inc(&perf_stats.siga_sync);
287 cc = do_siga_sync(q->irq_ptr->schid, output, input);
289 QDIO_DBF_TEXT4(0, trace, "sigasync");
290 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
291 QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
296 inline int qdio_siga_sync_q(struct qdio_q *q)
299 return qdio_siga_sync(q, 0, q->mask);
301 return qdio_siga_sync(q, q->mask, 0);
304 static inline int qdio_siga_sync_out(struct qdio_q *q)
306 return qdio_siga_sync(q, ~0U, 0);
309 static inline int qdio_siga_sync_all(struct qdio_q *q)
311 return qdio_siga_sync(q, ~0U, ~0U);
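/* issue SIGA-w/wt; under QEBSM the subchannel token is passed instead of the schid */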
314 static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
320 schid = *((u32 *)&q->irq_ptr->schid);
322 schid = q->irq_ptr->sch_token;
325 return do_siga_output(schid, q->mask, busy_bit, fc);
328 static int qdio_siga_output(struct qdio_q *q)
335 QDIO_DBF_TEXT5(0, trace, "sigaout");
336 QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
338 qdio_perf_stat_inc(&perf_stats.siga_out);
340 cc = qdio_do_siga_output(q, &busy_bit);
341 if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) {
342 sprintf(dbf_text, "bb%4x%2x", q->irq_ptr->schid.sch_no, q->nr);
343 QDIO_DBF_TEXT3(0, trace, dbf_text);
346 start_time = get_usecs();
347 else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
351 if (cc == 2 && busy_bit)
352 cc |= QDIO_ERROR_SIGA_BUSY;
354 QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
358 static inline int qdio_siga_input(struct qdio_q *q)
362 QDIO_DBF_TEXT4(0, trace, "sigain");
363 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
365 qdio_perf_stat_inc(&perf_stats.siga_in);
367 cc = do_siga_input(q->irq_ptr->schid, q->mask);
369 QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
373 /* called from thinint inbound handler */
374 void qdio_sync_after_thinint(struct qdio_q *q)
376 if (pci_out_supported(q)) {
377 if (need_siga_sync_thinint(q))
378 qdio_siga_sync_all(q);
379 else if (need_siga_sync_out_thinint(q))
380 qdio_siga_sync_out(q);
385 inline void qdio_stop_polling(struct qdio_q *q)
387 spin_lock_bh(&q->u.in.lock);
388 if (!q->u.in.polling) {
389 spin_unlock_bh(&q->u.in.lock);
393 qdio_perf_stat_inc(&perf_stats.debug_stop_polling);
395 /* show the card that we are not polling anymore */
396 set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
397 spin_unlock_bh(&q->u.in.lock);
400 static void announce_buffer_error(struct qdio_q *q)
405 QDIO_DBF_TEXT3(1, trace, "inperr");
407 QDIO_DBF_TEXT3(0, trace, "outperr");
409 sprintf(dbf_text, "%x-%x-%x", q->first_to_check,
410 q->sbal[q->first_to_check]->element[14].flags,
411 q->sbal[q->first_to_check]->element[15].flags);
412 QDIO_DBF_TEXT3(1, trace, dbf_text);
413 QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256);
415 q->qdio_error = QDIO_ERROR_SLSB_STATE;
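/*
 * Scan the inbound queue for buffers whose state has changed and advance
 * first_to_check over them; returns the new first_to_check value.
 */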
418 static int get_inbound_buffer_frontier(struct qdio_q *q)
424 * If we're still polling, don't update last_move_ftc; keep the
425 * previously ACKed buffer there.
427 if (!q->u.in.polling)
428 q->last_move_ftc = q->first_to_check;
431 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
434 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
435 stop = add_buf(q->first_to_check, count);
438 * No siga-sync here: either a PCI interrupt or our handling after a
439 * thin interrupt will sync the queues.
442 /* need to set count to 1 for non-qebsm */
447 if (q->first_to_check == stop)
450 count = get_buf_states(q, q->first_to_check, &state, count);
455 case SLSB_P_INPUT_PRIMED:
456 QDIO_DBF_TEXT5(0, trace, "inptprim");
459 * Only ACK the first buffer. The ACK will be removed in
463 state = SLSB_P_INPUT_NOT_INIT;
466 state = SLSB_P_INPUT_ACK;
468 set_buf_state(q, q->first_to_check, state);
471 * Need to change all PRIMED buffers to NOT_INIT, otherwise
472 * we're losing initiative in the thinint code.
475 set_buf_states(q, next_buf(q->first_to_check),
476 SLSB_P_INPUT_NOT_INIT, count - 1);
479 * No siga-sync needed for non-qebsm here, as the inbound queue
480 * will be synced on the next siga-r; alternatively,
481 * tiqdio_is_inbound_q_done will do the siga-sync.
483 q->first_to_check = add_buf(q->first_to_check, count);
484 atomic_sub(count, &q->nr_buf_used);
486 case SLSB_P_INPUT_ERROR:
487 announce_buffer_error(q);
488 /* process the buffer, the upper layer will take care of it */
489 q->first_to_check = add_buf(q->first_to_check, count);
490 atomic_sub(count, &q->nr_buf_used);
492 case SLSB_CU_INPUT_EMPTY:
493 case SLSB_P_INPUT_NOT_INIT:
494 case SLSB_P_INPUT_ACK:
495 QDIO_DBF_TEXT5(0, trace, "inpnipro");
501 QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int));
502 return q->first_to_check;
505 int qdio_inbound_q_moved(struct qdio_q *q)
509 bufnr = get_inbound_buffer_frontier(q);
511 if ((bufnr != q->last_move_ftc) || q->qdio_error) {
512 if (!need_siga_sync(q) && !pci_out_supported(q))
513 q->u.in.timestamp = get_usecs();
515 QDIO_DBF_TEXT4(0, trace, "inhasmvd");
516 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
522 static int qdio_inbound_q_done(struct qdio_q *q)
525 #ifdef CONFIG_QDIO_DEBUG
529 if (!atomic_read(&q->nr_buf_used))
533 * We need the siga-sync here for synchronization with the adapter, as it
534 * does a kind of PCI avoidance.
538 get_buf_state(q, q->first_to_check, &state);
539 if (state == SLSB_P_INPUT_PRIMED)
540 /* we got something to do */
543 /* on VM, we don't poll, so the q is always done here */
544 if (need_siga_sync(q) || pci_out_supported(q))
548 * At this point we know that inbound first_to_check
549 * has (probably) not moved (see qdio_inbound_processing).
551 if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
552 #ifdef CONFIG_QDIO_DEBUG
553 QDIO_DBF_TEXT4(0, trace, "inqisdon");
554 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
555 sprintf(dbf_text, "pf%02x", q->first_to_check);
556 QDIO_DBF_TEXT4(0, trace, dbf_text);
557 #endif /* CONFIG_QDIO_DEBUG */
560 #ifdef CONFIG_QDIO_DEBUG
561 QDIO_DBF_TEXT4(0, trace, "inqisntd");
562 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
563 sprintf(dbf_text, "pf%02x", q->first_to_check);
564 QDIO_DBF_TEXT4(0, trace, dbf_text);
565 #endif /* CONFIG_QDIO_DEBUG */
570 void qdio_kick_inbound_handler(struct qdio_q *q)
572 int count, start, end;
573 #ifdef CONFIG_QDIO_DEBUG
577 qdio_perf_stat_inc(&perf_stats.inbound_handler);
579 start = q->first_to_kick;
580 end = q->first_to_check;
584 count = end + QDIO_MAX_BUFFERS_PER_Q - start;
586 #ifdef CONFIG_QDIO_DEBUG
587 sprintf(dbf_text, "s=%2xc=%2x", start, count);
588 QDIO_DBF_TEXT4(0, trace, dbf_text);
589 #endif /* CONFIG_QDIO_DEBUG */
591 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
594 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
595 start, count, q->irq_ptr->int_parm);
597 /* for the next time */
598 q->first_to_kick = q->first_to_check;
602 static void __qdio_inbound_processing(struct qdio_q *q)
604 qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
606 if (!qdio_inbound_q_moved(q))
609 qdio_kick_inbound_handler(q);
611 if (!qdio_inbound_q_done(q))
612 /* means poll time is not yet over */
615 qdio_stop_polling(q);
617 * We need to check again to not lose initiative after
618 * resetting the ACK state.
620 if (!qdio_inbound_q_done(q))
624 /* inbound tasklet */
625 void qdio_inbound_processing(unsigned long data)
627 struct qdio_q *q = (struct qdio_q *)data;
628 __qdio_inbound_processing(q);
631 static int get_outbound_buffer_frontier(struct qdio_q *q)
636 if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
637 (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
641 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
644 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
645 stop = add_buf(q->first_to_check, count);
647 /* need to set count to 1 for non-qebsm */
652 if (q->first_to_check == stop)
653 return q->first_to_check;
655 count = get_buf_states(q, q->first_to_check, &state, count);
657 return q->first_to_check;
660 case SLSB_P_OUTPUT_EMPTY:
661 /* the adapter got it */
662 QDIO_DBF_TEXT5(0, trace, "outpempt");
664 atomic_sub(count, &q->nr_buf_used);
665 q->first_to_check = add_buf(q->first_to_check, count);
667 * We fetch all buffer states at once. get_buf_states may
668 * return count < stop. For QEBSM we do not loop.
673 case SLSB_P_OUTPUT_ERROR:
674 announce_buffer_error(q);
675 /* process the buffer, the upper layer will take care of it */
676 q->first_to_check = add_buf(q->first_to_check, count);
677 atomic_sub(count, &q->nr_buf_used);
679 case SLSB_CU_OUTPUT_PRIMED:
680 /* the adapter has not fetched the output yet */
681 QDIO_DBF_TEXT5(0, trace, "outpprim");
683 case SLSB_P_OUTPUT_NOT_INIT:
684 case SLSB_P_OUTPUT_HALTED:
689 return q->first_to_check;
692 /* all buffers processed? */
693 static inline int qdio_outbound_q_done(struct qdio_q *q)
695 return atomic_read(&q->nr_buf_used) == 0;
698 static inline int qdio_outbound_q_moved(struct qdio_q *q)
702 bufnr = get_outbound_buffer_frontier(q);
704 if ((bufnr != q->last_move_ftc) || q->qdio_error) {
705 q->last_move_ftc = bufnr;
706 QDIO_DBF_TEXT4(0, trace, "oqhasmvd");
707 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
714 * VM could present us with cc=2 and the busy bit set on SIGA-write
715 * during reconfiguration of its Guest LAN (only in iqdio mode;
716 * otherwise qdio is asynchronous, and cc=2 with the busy bit there will
717 * take the queues down immediately).
719 * Therefore, if such a condition occurs, qdio_siga_output will retry
720 * constantly for a short time. If the condition doesn't change, it will
721 * increase the busy_siga_counter, save the timestamp, and
722 * schedule the queue for later processing. qdio_outbound_processing
723 * will check the counter. If non-zero, it will call qdio_kick_outbound_q
724 * as often as the value of the counter. This will attempt further SIGA
725 * instructions. For each successful SIGA, the counter is
726 * decreased; for failing SIGAs the counter remains the same.
727 * After some time of no movement, qdio_kick_outbound_q will
728 * finally fail and report the corresponding error codes to
729 * the upper-layer module, which then takes the queues down.
731 * Note that this is a change from the original HiperSockets design
732 * (saying cc=2 and busy bit means take the queues down), but in
733 * those days Guest LAN didn't exist... Excessive cc=2 with busy bit
734 * conditions will still take the queues down, but the threshold is
735 * higher due to the Guest LAN environment.
737 * Called from outbound tasklet and do_QDIO handler.
739 static void qdio_kick_outbound_q(struct qdio_q *q)
742 #ifdef CONFIG_QDIO_DEBUG
745 QDIO_DBF_TEXT5(0, trace, "kickoutq");
746 QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
747 #endif /* CONFIG_QDIO_DEBUG */
749 if (!need_siga_out(q))
752 rc = qdio_siga_output(q);
755 /* TODO: improve error handling for CC=0 case */
756 #ifdef CONFIG_QDIO_DEBUG
757 if (q->u.out.timestamp) {
758 QDIO_DBF_TEXT3(0, trace, "cc2reslv");
759 sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no,
761 atomic_read(&q->u.out.busy_siga_counter));
762 QDIO_DBF_TEXT3(0, trace, dbf_text);
764 #endif /* CONFIG_QDIO_DEBUG */
765 /* went smoothly this time, reset timestamp */
766 q->u.out.timestamp = 0;
768 /* cc=2 and busy bit */
769 case (2 | QDIO_ERROR_SIGA_BUSY):
770 atomic_inc(&q->u.out.busy_siga_counter);
772 /* if the last siga was successful, save timestamp here */
773 if (!q->u.out.timestamp)
774 q->u.out.timestamp = get_usecs();
776 /* if we're in time, don't touch qdio_error */
777 if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) {
778 tasklet_schedule(&q->tasklet);
781 QDIO_DBF_TEXT2(0, trace, "cc2REPRT");
782 #ifdef CONFIG_QDIO_DEBUG
783 sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
784 atomic_read(&q->u.out.busy_siga_counter));
785 QDIO_DBF_TEXT3(0, trace, dbf_text);
786 #endif /* CONFIG_QDIO_DEBUG */
788 /* for plain cc=1, 2 or 3 */
793 static void qdio_kick_outbound_handler(struct qdio_q *q)
795 int start, end, count;
796 #ifdef CONFIG_QDIO_DEBUG
800 start = q->first_to_kick;
801 end = q->last_move_ftc;
805 count = end + QDIO_MAX_BUFFERS_PER_Q - start;
807 #ifdef CONFIG_QDIO_DEBUG
808 QDIO_DBF_TEXT4(0, trace, "kickouth");
809 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
811 sprintf(dbf_text, "s=%2xc=%2x", start, count);
812 QDIO_DBF_TEXT4(0, trace, dbf_text);
813 #endif /* CONFIG_QDIO_DEBUG */
815 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
818 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
819 q->irq_ptr->int_parm);
821 /* for the next time: */
822 q->first_to_kick = q->last_move_ftc;
826 static void __qdio_outbound_processing(struct qdio_q *q)
830 qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
832 /* see comment in qdio_kick_outbound_q */
833 siga_attempts = atomic_read(&q->u.out.busy_siga_counter);
834 while (siga_attempts--) {
835 atomic_dec(&q->u.out.busy_siga_counter);
836 qdio_kick_outbound_q(q);
839 BUG_ON(atomic_read(&q->nr_buf_used) < 0);
841 if (qdio_outbound_q_moved(q))
842 qdio_kick_outbound_handler(q);
844 if (queue_type(q) == QDIO_ZFCP_QFMT) {
845 if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
846 tasklet_schedule(&q->tasklet);
850 /* bail out for HiperSockets unicast queues */
851 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
854 if (q->u.out.pci_out_enabled)
858 * Now we know that the queue type is either qeth without pci enabled
859 * or HiperSockets multicast. Make sure the buffer switch from PRIMED to
860 * EMPTY is noticed and the outbound handler is called after some time.
862 if (qdio_outbound_q_done(q))
863 del_timer(&q->u.out.timer);
865 if (!timer_pending(&q->u.out.timer)) {
866 mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
867 qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
872 /* outbound tasklet */
873 void qdio_outbound_processing(unsigned long data)
875 struct qdio_q *q = (struct qdio_q *)data;
876 __qdio_outbound_processing(q);
879 void qdio_outbound_timer(unsigned long data)
881 struct qdio_q *q = (struct qdio_q *)data;
882 tasklet_schedule(&q->tasklet);
885 /* called from thinint inbound tasklet */
886 void qdio_check_outbound_after_thinint(struct qdio_q *q)
891 if (!pci_out_supported(q))
894 for_each_output_queue(q->irq_ptr, out, i)
895 if (!qdio_outbound_q_done(out))
896 tasklet_schedule(&out->tasklet);
899 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
900 enum qdio_irq_states state)
902 #ifdef CONFIG_QDIO_DEBUG
905 QDIO_DBF_TEXT5(0, trace, "newstate");
906 sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state);
907 QDIO_DBF_TEXT5(0, trace, dbf_text);
908 #endif /* CONFIG_QDIO_DEBUG */
910 irq_ptr->state = state;
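/* trace the sense data if the device presented concurrent sense */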
914 static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
918 if (irb->esw.esw0.erw.cons) {
919 sprintf(dbf_text, "sens%4x", schid.sch_no);
920 QDIO_DBF_TEXT2(1, trace, dbf_text);
921 QDIO_DBF_HEX0(0, trace, irb, 64);
922 QDIO_DBF_HEX0(0, trace, irb->ecw, 64);
926 /* PCI interrupt handler */
927 static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
932 qdio_perf_stat_inc(&perf_stats.pci_int);
934 for_each_input_queue(irq_ptr, q, i)
935 tasklet_schedule(&q->tasklet);
937 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
940 for_each_output_queue(irq_ptr, q, i) {
941 if (qdio_outbound_q_done(q))
944 if (!siga_syncs_out_pci(q))
947 tasklet_schedule(&q->tasklet);
951 static void qdio_handle_activate_check(struct ccw_device *cdev,
952 unsigned long intparm, int cstat, int dstat)
954 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
958 QDIO_DBF_TEXT2(1, trace, "ick2");
959 sprintf(dbf_text, "%s", cdev->dev.bus_id);
960 QDIO_DBF_TEXT2(1, trace, dbf_text);
961 QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int));
962 QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
963 QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
965 if (irq_ptr->nr_input_qs) {
966 q = irq_ptr->input_qs[0];
967 } else if (irq_ptr->nr_output_qs) {
968 q = irq_ptr->output_qs[0];
973 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
974 0, -1, -1, irq_ptr->int_parm);
976 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
979 static void qdio_call_shutdown(struct work_struct *work)
981 struct ccw_device_private *priv;
982 struct ccw_device *cdev;
984 priv = container_of(work, struct ccw_device_private, kick_work);
986 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
987 put_device(&cdev->dev);
990 static void qdio_int_error(struct ccw_device *cdev)
992 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
994 switch (irq_ptr->state) {
995 case QDIO_IRQ_STATE_INACTIVE:
996 case QDIO_IRQ_STATE_CLEANUP:
997 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
999 case QDIO_IRQ_STATE_ESTABLISHED:
1000 case QDIO_IRQ_STATE_ACTIVE:
1001 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1002 if (get_device(&cdev->dev)) {
1003 /* Can't call shutdown from interrupt context. */
1004 PREPARE_WORK(&cdev->private->kick_work,
1005 qdio_call_shutdown);
1006 queue_work(ccw_device_work, &cdev->private->kick_work);
1012 wake_up(&cdev->private->wait_q);
1015 static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
1018 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1020 if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
1021 QDIO_DBF_TEXT2(1, setup, "eq:ckcon");
1025 if (!(dstat & DEV_STAT_DEV_END)) {
1026 QDIO_DBF_TEXT2(1, setup, "eq:no de");
1030 if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
1031 QDIO_DBF_TEXT2(1, setup, "eq:badio");
1036 QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
1037 QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
1038 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1042 static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
1045 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1048 sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no);
1049 QDIO_DBF_TEXT0(0, setup, dbf_text);
1050 QDIO_DBF_TEXT0(0, trace, dbf_text);
1052 if (!qdio_establish_check_errors(cdev, cstat, dstat))
1053 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
1056 /* qdio interrupt handler */
1057 void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
1060 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1064 qdio_perf_stat_inc(&perf_stats.qdio_int);
1066 if (!intparm || !irq_ptr) {
1067 sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no);
1068 QDIO_DBF_TEXT2(1, setup, dbf_text);
1073 switch (PTR_ERR(irb)) {
1075 sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no);
1076 QDIO_DBF_TEXT2(1, setup, dbf_text);
1077 qdio_int_error(cdev);
1080 sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no);
1081 QDIO_DBF_TEXT2(1, setup, dbf_text);
1082 qdio_int_error(cdev);
1089 qdio_irq_check_sense(irq_ptr->schid, irb);
1091 cstat = irb->scsw.cmd.cstat;
1092 dstat = irb->scsw.cmd.dstat;
1094 switch (irq_ptr->state) {
1095 case QDIO_IRQ_STATE_INACTIVE:
1096 qdio_establish_handle_irq(cdev, cstat, dstat);
1099 case QDIO_IRQ_STATE_CLEANUP:
1100 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1103 case QDIO_IRQ_STATE_ESTABLISHED:
1104 case QDIO_IRQ_STATE_ACTIVE:
1105 if (cstat & SCHN_STAT_PCI) {
1106 qdio_int_handler_pci(irq_ptr);
1107 /* no state change so no need to wake up wait_q */
1110 if ((cstat & ~SCHN_STAT_PCI) || dstat) {
1111 qdio_handle_activate_check(cdev, intparm, cstat,
1118 wake_up(&cdev->private->wait_q);
1122 * qdio_get_ssqd_desc - get qdio subchannel description
1123 * @cdev: ccw device to get description for
1125 * Returns a pointer to the saved qdio subchannel description,
1126 * or NULL for devices without a qdio setup.
1128 struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev)
1130 struct qdio_irq *irq_ptr;
1133 sprintf(dbf_text, "qssq%4x", cdev->private->schid.sch_no);
1134 QDIO_DBF_TEXT0(0, setup, dbf_text);
1136 irq_ptr = cdev->private->qdio_data;
1140 return &irq_ptr->ssqd_desc;
1142 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
1145 * qdio_cleanup - shutdown queues and free data structures
1146 * @cdev: associated ccw device
1147 * @how: use halt or clear to shutdown
1149 * This function calls qdio_shutdown() for @cdev with method @how
1150 * and on success qdio_free() for @cdev.
1152 int qdio_cleanup(struct ccw_device *cdev, int how)
1154 struct qdio_irq *irq_ptr;
1158 sprintf(dbf_text, "qcln%4x", cdev->private->schid.sch_no);
1159 QDIO_DBF_TEXT0(0, setup, dbf_text);
1161 irq_ptr = cdev->private->qdio_data;
1165 rc = qdio_shutdown(cdev, how);
1167 rc = qdio_free(cdev);
1170 EXPORT_SYMBOL_GPL(qdio_cleanup);
1172 static void qdio_shutdown_queues(struct ccw_device *cdev)
1174 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1178 for_each_input_queue(irq_ptr, q, i)
1179 tasklet_disable(&q->tasklet);
1181 for_each_output_queue(irq_ptr, q, i) {
1182 tasklet_disable(&q->tasklet);
1183 del_timer(&q->u.out.timer);
1188 * qdio_shutdown - shut down a qdio subchannel
1189 * @cdev: associated ccw device
1190 * @how: use halt or clear to shutdown
1192 int qdio_shutdown(struct ccw_device *cdev, int how)
1194 struct qdio_irq *irq_ptr;
1196 unsigned long flags;
1199 sprintf(dbf_text, "qshu%4x", cdev->private->schid.sch_no);
1200 QDIO_DBF_TEXT0(0, setup, dbf_text);
1202 irq_ptr = cdev->private->qdio_data;
1206 mutex_lock(&irq_ptr->setup_mutex);
1208 * Subchannel was already shot down. We cannot prevent being called
1209 * twice since cio may trigger a shutdown asynchronously.
1211 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1212 mutex_unlock(&irq_ptr->setup_mutex);
1216 tiqdio_remove_input_queues(irq_ptr);
1217 qdio_shutdown_queues(cdev);
1218 qdio_shutdown_debug_entries(irq_ptr, cdev);
1220 /* cleanup subchannel */
1221 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1223 if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
1224 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
1226 /* default behaviour is halt */
1227 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
1229 sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no);
1230 QDIO_DBF_TEXT0(0, setup, dbf_text);
1231 sprintf(dbf_text, "rc=%d", rc);
1232 QDIO_DBF_TEXT0(0, setup, dbf_text);
1236 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
1237 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1238 wait_event_interruptible_timeout(cdev->private->wait_q,
1239 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
1240 irq_ptr->state == QDIO_IRQ_STATE_ERR,
1242 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1245 qdio_shutdown_thinint(irq_ptr);
1247 /* restore interrupt handler */
1248 if ((void *)cdev->handler == (void *)qdio_int_handler)
1249 cdev->handler = irq_ptr->orig_handler;
1250 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1252 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1253 mutex_unlock(&irq_ptr->setup_mutex);
1258 EXPORT_SYMBOL_GPL(qdio_shutdown);
1261 * qdio_free - free data structures for a qdio subchannel
1262 * @cdev: associated ccw device
1264 int qdio_free(struct ccw_device *cdev)
1266 struct qdio_irq *irq_ptr;
1269 sprintf(dbf_text, "qfre%4x", cdev->private->schid.sch_no);
1270 QDIO_DBF_TEXT0(0, setup, dbf_text);
1272 irq_ptr = cdev->private->qdio_data;
1276 mutex_lock(&irq_ptr->setup_mutex);
1277 cdev->private->qdio_data = NULL;
1278 mutex_unlock(&irq_ptr->setup_mutex);
1280 qdio_release_memory(irq_ptr);
1283 EXPORT_SYMBOL_GPL(qdio_free);
1286 * qdio_initialize - allocate and establish queues for a qdio subchannel
1287 * @init_data: initialization data
1289 * This function first allocates queues via qdio_allocate() and on success
1290 * establishes them via qdio_establish().
1292 int qdio_initialize(struct qdio_initialize *init_data)
1297 sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no);
1298 QDIO_DBF_TEXT0(0, setup, dbf_text);
1300 rc = qdio_allocate(init_data);
1304 rc = qdio_establish(init_data);
1306 qdio_free(init_data->cdev);
1309 EXPORT_SYMBOL_GPL(qdio_initialize);
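/*
 * A rough lifecycle sketch, inferred from the exported API rather than
 * spelled out here: a driver calls qdio_initialize() (or qdio_allocate()
 * followed by qdio_establish()), then qdio_activate(); during operation it
 * submits buffers with do_QDIO(), and finally tears down the subchannel
 * with qdio_shutdown()/qdio_free() or qdio_cleanup().
 */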
1312 * qdio_allocate - allocate qdio queues and associated data
1313 * @init_data: initialization data
1315 int qdio_allocate(struct qdio_initialize *init_data)
1317 struct qdio_irq *irq_ptr;
1320 sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no);
1321 QDIO_DBF_TEXT0(0, setup, dbf_text);
1323 if ((init_data->no_input_qs && !init_data->input_handler) ||
1324 (init_data->no_output_qs && !init_data->output_handler))
1327 if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
1328 (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
1331 if ((!init_data->input_sbal_addr_array) ||
1332 (!init_data->output_sbal_addr_array))
1335 qdio_allocate_do_dbf(init_data);
1337 /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
1338 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1341 QDIO_DBF_TEXT0(0, setup, "irq_ptr:");
1342 QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *));
1344 mutex_init(&irq_ptr->setup_mutex);
1347 * Allocate a page for the chsc calls in qdio_establish.
1348 * Must be pre-allocated since a zfcp recovery will call
1349 * qdio_establish. In case of low memory and swap on a zfcp disk
1350 * we may not be able to allocate memory otherwise.
1352 irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
1353 if (!irq_ptr->chsc_page)
1356 /* qdr is used in ccw1.cda which is u32 */
1357 irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1360 WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
1362 QDIO_DBF_TEXT0(0, setup, "qdr:");
1363 QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *));
1365 if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
1366 init_data->no_output_qs))
1369 init_data->cdev->private->qdio_data = irq_ptr;
1370 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1373 qdio_release_memory(irq_ptr);
1377 EXPORT_SYMBOL_GPL(qdio_allocate);
1380 * qdio_establish - establish queues on a qdio subchannel
1381 * @init_data: initialization data
1383 int qdio_establish(struct qdio_initialize *init_data)
1386 struct qdio_irq *irq_ptr;
1387 struct ccw_device *cdev = init_data->cdev;
1388 unsigned long saveflags;
1391 sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no);
1392 QDIO_DBF_TEXT0(0, setup, dbf_text);
1394 irq_ptr = cdev->private->qdio_data;
1398 if (cdev->private->state != DEV_STATE_ONLINE)
1401 mutex_lock(&irq_ptr->setup_mutex);
1402 qdio_setup_irq(init_data);
1404 rc = qdio_establish_thinint(irq_ptr);
1406 mutex_unlock(&irq_ptr->setup_mutex);
1407 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1412 irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
1413 irq_ptr->ccw.flags = CCW_FLAG_SLI;
1414 irq_ptr->ccw.count = irq_ptr->equeue.count;
1415 irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
1417 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1418 ccw_device_set_options_mask(cdev, 0);
1420 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
1422 sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no);
1423 QDIO_DBF_TEXT2(1, setup, dbf_text);
1424 sprintf(dbf_text, "eq:rc%4x", rc);
1425 QDIO_DBF_TEXT2(1, setup, dbf_text);
1427 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1430 mutex_unlock(&irq_ptr->setup_mutex);
1431 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1435 wait_event_interruptible_timeout(cdev->private->wait_q,
1436 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
1437 irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
1439 if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
1440 mutex_unlock(&irq_ptr->setup_mutex);
1441 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1445 qdio_setup_ssqd_info(irq_ptr);
1446 sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac);
1447 QDIO_DBF_TEXT2(0, setup, dbf_text);
1449 /* qebsm is now set up if available; initialize buffer states */
1450 qdio_init_buf_states(irq_ptr);
1452 mutex_unlock(&irq_ptr->setup_mutex);
1453 qdio_print_subchannel_info(irq_ptr, cdev);
1454 qdio_setup_debug_entries(irq_ptr, cdev);
1457 EXPORT_SYMBOL_GPL(qdio_establish);
1460 * qdio_activate - activate queues on a qdio subchannel
1461 * @cdev: associated cdev
1463 int qdio_activate(struct ccw_device *cdev)
1465 struct qdio_irq *irq_ptr;
1467 unsigned long saveflags;
1470 sprintf(dbf_text, "qact%4x", cdev->private->schid.sch_no);
1471 QDIO_DBF_TEXT0(0, setup, dbf_text);
1473 irq_ptr = cdev->private->qdio_data;
1477 if (cdev->private->state != DEV_STATE_ONLINE)
1480 mutex_lock(&irq_ptr->setup_mutex);
1481 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1486 irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
1487 irq_ptr->ccw.flags = CCW_FLAG_SLI;
1488 irq_ptr->ccw.count = irq_ptr->aqueue.count;
1489 irq_ptr->ccw.cda = 0;
1491 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1492 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
1494 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
1495 0, DOIO_DENY_PREFETCH);
1497 sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no);
1498 QDIO_DBF_TEXT2(1, setup, dbf_text);
1499 sprintf(dbf_text, "aq:rc%4x", rc);
1500 QDIO_DBF_TEXT2(1, setup, dbf_text);
1502 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1507 if (is_thinint_irq(irq_ptr))
1508 tiqdio_add_input_queues(irq_ptr);
1510 /* wait for subchannel to become active */
1513 switch (irq_ptr->state) {
1514 case QDIO_IRQ_STATE_STOPPED:
1515 case QDIO_IRQ_STATE_ERR:
1516 mutex_unlock(&irq_ptr->setup_mutex);
1517 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1520 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
1524 mutex_unlock(&irq_ptr->setup_mutex);
1527 EXPORT_SYMBOL_GPL(qdio_activate);
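/* check whether bufnr lies within the wrap-around-safe range [start, start + count) */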
1529 static inline int buf_in_between(int bufnr, int start, int count)
1531 int end = add_buf(start, count);
1534 if (bufnr >= start && bufnr < end)
1540 /* wrap-around case */
1541 if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1549 * handle_inbound - reset processed input buffers
1550 * @q: queue containing the buffers
1552 * @bufnr: first buffer to process
1553 * @count: how many buffers are emptied
1555 static void handle_inbound(struct qdio_q *q, unsigned int callflags,
1556 int bufnr, int count)
1558 unsigned long flags;
1562 * do_QDIO could run in parallel with the queue tasklet so the
1563 * upper-layer program could empty the ACKed buffer here.
1564 * If that happens we must clear the polling flag, otherwise
1565 * qdio_stop_polling() could set the buffer to NOT_INIT after
1566 * it was set to EMPTY which would kill us.
1568 spin_lock_irqsave(&q->u.in.lock, flags);
1569 if (q->u.in.polling)
1570 if (buf_in_between(q->last_move_ftc, bufnr, count))
1571 q->u.in.polling = 0;
1573 count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
1574 spin_unlock_irqrestore(&q->u.in.lock, flags);
1576 used = atomic_add_return(count, &q->nr_buf_used) - count;
1577 BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
1579 /* no need to signal as long as the adapter had free buffers */
1583 if (need_siga_in(q)) {
1584 rc = qdio_siga_input(q);
1591 * handle_outbound - process filled outbound buffers
1592 * @q: queue containing the buffers
1594 * @bufnr: first buffer to process
1595 * @count: how many buffers are filled
1597 static void handle_outbound(struct qdio_q *q, unsigned int callflags,
1598 int bufnr, int count)
1600 unsigned char state;
1603 qdio_perf_stat_inc(&perf_stats.outbound_handler);
1605 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1606 used = atomic_add_return(count, &q->nr_buf_used);
1607 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1609 if (callflags & QDIO_FLAG_PCI_OUT)
1610 q->u.out.pci_out_enabled = 1;
1612 q->u.out.pci_out_enabled = 0;
1614 if (queue_type(q) == QDIO_IQDIO_QFMT) {
1615 if (multicast_outbound(q))
1616 qdio_kick_outbound_q(q);
1619 * One siga-w per buffer required for unicast
1623 qdio_kick_outbound_q(q);
1627 if (need_siga_sync(q)) {
1628 qdio_siga_sync_q(q);
1632 /* try to fast requeue buffers */
1633 get_buf_state(q, prev_buf(bufnr), &state);
1634 if (state != SLSB_CU_OUTPUT_PRIMED)
1635 qdio_kick_outbound_q(q);
1637 QDIO_DBF_TEXT5(0, trace, "fast-req");
1638 qdio_perf_stat_inc(&perf_stats.fast_requeue);
1641 /* Fixme: could wait forever if called from process context */
1642 tasklet_schedule(&q->tasklet);
1646 * do_QDIO - process input or output buffers
1647 * @cdev: associated ccw_device for the qdio subchannel
1648 * @callflags: input or output and special flags from the program
1649 * @q_nr: queue number
1650 * @bufnr: buffer number
1651 * @count: how many buffers to process
1653 int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1654 int q_nr, int bufnr, int count)
1656 struct qdio_irq *irq_ptr;
1657 #ifdef CONFIG_QDIO_DEBUG
1660 sprintf(dbf_text, "doQD%4x", cdev->private->schid.sch_no);
1661 QDIO_DBF_TEXT3(0, trace, dbf_text);
1662 #endif /* CONFIG_QDIO_DEBUG */
1664 if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
1665 (count > QDIO_MAX_BUFFERS_PER_Q) ||
1666 (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
1672 irq_ptr = cdev->private->qdio_data;
1676 #ifdef CONFIG_QDIO_DEBUG
1677 if (callflags & QDIO_FLAG_SYNC_INPUT)
1678 QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr],
1681 QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr],
1684 sprintf(dbf_text, "flag%04x", callflags);
1685 QDIO_DBF_TEXT3(0, trace, dbf_text);
1686 sprintf(dbf_text, "qi%02xct%02x", bufnr, count);
1687 QDIO_DBF_TEXT3(0, trace, dbf_text);
1688 #endif /* CONFIG_QDIO_DEBUG */
1690 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1693 if (callflags & QDIO_FLAG_SYNC_INPUT)
1694 handle_inbound(irq_ptr->input_qs[q_nr],
1695 callflags, bufnr, count);
1696 else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
1697 handle_outbound(irq_ptr->output_qs[q_nr],
1698 callflags, bufnr, count);
1700 QDIO_DBF_TEXT3(1, trace, "doQD:inv");
1705 EXPORT_SYMBOL_GPL(do_QDIO);
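/*
 * A minimal usage sketch, assuming a hypothetical driver that has already
 * activated its queues: after emptying inbound buffers in its qdio handler
 * it returns them to the adapter with
 *
 *	do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
 *
 * and hands filled outbound buffers over with
 *
 *	do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, 0, bufnr, count);
 */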
1707 static int __init init_QDIO(void)
1711 rc = qdio_setup_init();
1714 rc = tiqdio_allocate_memory();
1717 rc = qdio_debug_init();
1720 rc = qdio_setup_perf_stats();
1723 rc = tiqdio_register_thinints();
1729 qdio_remove_perf_stats();
1733 tiqdio_free_memory();
1739 static void __exit exit_QDIO(void)
1741 tiqdio_unregister_thinints();
1742 tiqdio_free_memory();
1743 qdio_remove_perf_stats();
1748 module_init(init_QDIO);
1749 module_exit(exit_QDIO);