2 * CTC / ESCON network driver
4 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
7 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
8 * Peter Tiedemann (ptiedem@de.ibm.com)
9 * Driver Model stuff by : Cornelia Huck <cornelia.huck@de.ibm.com>
12 * - Principles of Operation (IBM doc#: SA22-7201-06)
13 * - Common I/O-Device Commands and Self Description (IBM doc#: SA22-7204-02)
14 * - Common I/O-Device Commands and Self Description (IBM doc#: SN22-5535)
15 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
16 * - ESCON I/O Interface (IBM doc#: SA22-7202-02)
18 * and the source of the original CTC driver by:
19 * Dieter Wellerdiek (wel@de.ibm.com)
20 * Martin Schwidefsky (schwidefsky@de.ibm.com)
21 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
22 * Jochen Röhrig (roehrig@de.ibm.com)
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License as published by
26 * the Free Software Foundation; either version 2, or (at your option)
29 * This program is distributed in the hope that it will be useful,
30 * but WITHOUT ANY WARRANTY; without even the implied warranty of
31 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
32 * GNU General Public License for more details.
34 * You should have received a copy of the GNU General Public License
35 * along with this program; if not, write to the Free Software
36 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/kernel.h>
43 #include <linux/slab.h>
44 #include <linux/errno.h>
45 #include <linux/types.h>
46 #include <linux/interrupt.h>
47 #include <linux/timer.h>
48 #include <linux/bitops.h>
50 #include <linux/signal.h>
51 #include <linux/string.h>
54 #include <linux/if_arp.h>
55 #include <linux/tcp.h>
56 #include <linux/skbuff.h>
57 #include <linux/ctype.h>
61 #include <asm/ccwdev.h>
62 #include <asm/ccwgroup.h>
63 #include <asm/uaccess.h>
65 #include <asm/idals.h>
73 MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
74 MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
75 MODULE_LICENSE("GPL");
77 * States of the interface statemachine.
81 DEV_STATE_STARTWAIT_RXTX,
82 DEV_STATE_STARTWAIT_RX,
83 DEV_STATE_STARTWAIT_TX,
84 DEV_STATE_STOPWAIT_RXTX,
85 DEV_STATE_STOPWAIT_RX,
86 DEV_STATE_STOPWAIT_TX,
89 * MUST always be the last element!!
94 static const char *dev_state_names[] = {
106 * Events of the interface statemachine.
117 * MUST always be the last element!!
122 static const char *dev_event_names[] = {
133 * Events of the channel statemachine
137 * Events, representing return code of
138 * I/O operations (ccw_device_start, ccw_device_halt et al.)
151 * Events, representing unit-check
155 CH_EVENT_UC_TXTIMEOUT,
156 CH_EVENT_UC_TXPARITY,
158 CH_EVENT_UC_RXPARITY,
163 * Events, representing subchannel-check
168 * Events, representing machine checks
174 * Event, representing normal IRQ
180 * Event, representing timer expiry.
185 * Events, representing commands from upper levels.
191 * MUST always be the last element!!
197 * States of the channel statemachine.
201 * Channel not assigned to any device,
202 * initial state, direction invalid
207 * Channel assigned but not operating
226 * MUST always be the last element!!
231 static int loglevel = CTC_LOGLEVEL_DEFAULT;
234 * Linked list of all detected channels.
236 static struct channel *channels = NULL;
244 static int printed = 0;
249 printk(KERN_INFO "CTC driver initialized\n");
254 * Return the channel type of a detected device.
256 static enum channel_types
257 get_channel_type(struct ccw_device_id *id)
259 enum channel_types type = (enum channel_types) id->driver_info;
261 if (type == channel_type_ficon)
262 type = channel_type_escon;
267 static const char *ch_event_names[] = {
268 "ccw_device success",
272 "ccw_device unknown",
274 "Status ATTN & BUSY",
278 "Unit check remote reset",
279 "Unit check remote system reset",
280 "Unit check TX timeout",
281 "Unit check TX parity",
282 "Unit check Hardware failure",
283 "Unit check RX parity",
285 "Unit check Unknown",
287 "SubChannel check Unknown",
289 "Machine check failure",
290 "Machine check operational",
301 static const char *ch_state_names[] = {
322 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
324 * @param skb The sk_buff to dump.
325 * @param offset Offset relative to skb-data, where to start the dump.
328 ctc_dump_skb(struct sk_buff *skb, int offset)
330 unsigned char *p = skb->data;
332 struct ll_header *header;
335 if (!(loglevel & CTC_LOGLEVEL_DEBUG))
340 header = (struct ll_header *) p;
343 printk(KERN_DEBUG "dump:\n");
344 printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
346 printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
348 printk(KERN_DEBUG "h->type=%04x\n", header->type);
349 printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
352 printk(KERN_DEBUG "data: ");
353 for (i = 0; i < bl; i++)
354 printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
359 ctc_dump_skb(struct sk_buff *skb, int offset)
365 * Unpack a just received skb and hand it over to
368 * @param ch The channel where this skb has been received.
369 * @param pskb The received skb.
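 *
 * A sketch of the block layout this routine unpacks (the same layout is
 * built on the send side in transmit_skb / ch_action_txdone):
 *
 *   +--------------------+-----------+---------+-----------+---------+--
 *   | block length __u16 | ll_header | payload | ll_header | payload | ...
 *   +--------------------+-----------+---------+-----------+---------+--
 *
 *   block length = 2 + sum of all ll_header.length values,
 *   ll_header.length = LL_HEADER_LENGTH + length of its payload.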
372 ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
374 struct net_device *dev = ch->netdev;
375 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
376 __u16 len = *((__u16 *) pskb->data);
378 DBF_TEXT(trace, 4, __FUNCTION__);
379 skb_put(pskb, 2 + LL_HEADER_LENGTH);
382 pskb->ip_summed = CHECKSUM_UNNECESSARY;
385 struct ll_header *header = (struct ll_header *) pskb->data;
387 skb_pull(pskb, LL_HEADER_LENGTH);
388 if ((ch->protocol == CTC_PROTO_S390) &&
389 (header->type != ETH_P_IP)) {
392 if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
395 * Check the packet type only if we stick strictly
396 * to the S/390 protocol (CTC_PROTO_S390), which
397 * supports IP only. Otherwise allow any packet type.
401 "%s Illegal packet type 0x%04x received, dropping\n",
402 dev->name, header->type);
403 ch->logflags |= LOG_FLAG_ILLEGALPKT;
408 ctc_dump_skb(pskb, -6);
410 privptr->stats.rx_dropped++;
411 privptr->stats.rx_frame_errors++;
414 pskb->protocol = ntohs(header->type);
415 if (header->length <= LL_HEADER_LENGTH) {
417 if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
420 "%s Illegal packet size %d "
421 "received (MTU=%d blocklen=%d), "
422 "dropping\n", dev->name, header->length,
424 ch->logflags |= LOG_FLAG_ILLEGALSIZE;
429 ctc_dump_skb(pskb, -6);
431 privptr->stats.rx_dropped++;
432 privptr->stats.rx_length_errors++;
435 header->length -= LL_HEADER_LENGTH;
436 len -= LL_HEADER_LENGTH;
437 if ((header->length > skb_tailroom(pskb)) ||
438 (header->length > len)) {
440 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
443 "%s Illegal packet size %d "
444 "(beyond the end of received data), "
445 "dropping\n", dev->name, header->length);
446 ch->logflags |= LOG_FLAG_OVERRUN;
451 ctc_dump_skb(pskb, -6);
453 privptr->stats.rx_dropped++;
454 privptr->stats.rx_length_errors++;
457 skb_put(pskb, header->length);
458 pskb->mac.raw = pskb->data;
459 len -= header->length;
460 skb = dev_alloc_skb(pskb->len);
463 if (!(ch->logflags & LOG_FLAG_NOMEM)) {
466 "%s Out of memory in ctc_unpack_skb\n",
468 ch->logflags |= LOG_FLAG_NOMEM;
472 privptr->stats.rx_dropped++;
475 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
476 skb->mac.raw = skb->data;
477 skb->dev = pskb->dev;
478 skb->protocol = pskb->protocol;
479 pskb->ip_summed = CHECKSUM_UNNECESSARY;
482 * Successful rx; reset logflags
485 dev->last_rx = jiffies;
486 privptr->stats.rx_packets++;
487 privptr->stats.rx_bytes += skb->len;
489 skb_pull(pskb, header->length);
490 if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
492 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
495 "%s Overrun in ctc_unpack_skb\n",
497 ch->logflags |= LOG_FLAG_OVERRUN;
503 skb_put(pskb, LL_HEADER_LENGTH);
509 * Check return code of a preceding ccw_device call, halt_IO etc...
511 * @param ch The channel, the error belongs to.
512 * @param return_code The error code to inspect.
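 *
 * A sketch of the mapping performed below (return codes as delivered by
 * ccw_device_start/ccw_device_halt): success maps to CH_EVENT_IO_SUCCESS,
 * a busy device to CH_EVENT_IO_EBUSY, a vanished device to
 * CH_EVENT_IO_ENODEV, a pending-status condition to CH_EVENT_IO_EIO, and
 * anything else to CH_EVENT_IO_UNKNOWN; the resulting event is fed into
 * the channel statemachine.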
515 ccw_check_return_code(struct channel *ch, int return_code, char *msg)
517 DBF_TEXT(trace, 5, __FUNCTION__);
518 switch (return_code) {
520 fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
523 ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
524 fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
527 ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
529 fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
532 ctc_pr_emerg("%s (%s): Status pending... \n",
534 fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
537 ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
538 ch->id, msg, return_code);
539 fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
544 * Check sense of a unit check.
546 * @param ch The channel, the sense code belongs to.
547 * @param sense The sense code to inspect.
550 ccw_unit_check(struct channel *ch, unsigned char sense)
552 DBF_TEXT(trace, 5, __FUNCTION__);
553 if (sense & SNS0_INTERVENTION_REQ) {
555 ctc_pr_debug("%s: Interface disc. or Sel. reset "
556 "(remote)\n", ch->id);
557 fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
559 ctc_pr_debug("%s: System reset (remote)\n", ch->id);
560 fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
562 } else if (sense & SNS0_EQUIPMENT_CHECK) {
563 if (sense & SNS0_BUS_OUT_CHECK) {
564 ctc_pr_warn("%s: Hardware malfunction (remote)\n",
566 fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
568 ctc_pr_warn("%s: Read-data parity error (remote)\n",
570 fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
572 } else if (sense & SNS0_BUS_OUT_CHECK) {
574 ctc_pr_warn("%s: Data-streaming timeout\n", ch->id);
575 fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
577 ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
578 fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
580 } else if (sense & SNS0_CMD_REJECT) {
581 ctc_pr_warn("%s: Command reject\n", ch->id);
582 } else if (sense == 0) {
583 ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
584 fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
586 ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
588 fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
593 ctc_purge_skb_queue(struct sk_buff_head *q)
597 DBF_TEXT(trace, 5, __FUNCTION__);
599 while ((skb = skb_dequeue(q))) {
600 atomic_dec(&skb->users);
601 dev_kfree_skb_irq(skb);
606 ctc_checkalloc_buffer(struct channel *ch, int warn)
608 DBF_TEXT(trace, 5, __FUNCTION__);
609 if ((ch->trans_skb == NULL) ||
610 (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
611 if (ch->trans_skb != NULL)
612 dev_kfree_skb(ch->trans_skb);
613 clear_normalized_cda(&ch->ccw[1]);
614 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
615 GFP_ATOMIC | GFP_DMA);
616 if (ch->trans_skb == NULL) {
619 "%s: Couldn't alloc %s trans_skb\n",
621 (CHANNEL_DIRECTION(ch->flags) == READ) ?
625 ch->ccw[1].count = ch->max_bufsize;
626 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
627 dev_kfree_skb(ch->trans_skb);
628 ch->trans_skb = NULL;
631 "%s: set_normalized_cda for %s "
632 "trans_skb failed, dropping packets\n",
634 (CHANNEL_DIRECTION(ch->flags) == READ) ?
638 ch->ccw[1].count = 0;
639 ch->trans_skb_data = ch->trans_skb->data;
640 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
646 * Dummy NOP action for statemachines
649 fsm_action_nop(fsm_instance * fi, int event, void *arg)
654 * Actions for channel - statemachines.
655 *****************************************************************************/
658 * Normal data has been sent. Free the corresponding
659 * skb (it's in io_queue), reset dev->tbusy and
660 * revert to idle state.
662 * @param fi An instance of a channel statemachine.
663 * @param event The event, just happened.
664 * @param arg Generic pointer, casted from channel * upon call.
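 *
 * Note: packets that were queued on ch->collect_queue while the just
 * finished write was in flight are merged below into a single block in
 * ch->trans_skb (a 2-byte block length followed by the collected frames)
 * and sent with one chained write ("chained TX").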
667 ch_action_txdone(fsm_instance * fi, int event, void *arg)
669 struct channel *ch = (struct channel *) arg;
670 struct net_device *dev = ch->netdev;
671 struct ctc_priv *privptr = dev->priv;
675 unsigned long duration;
676 struct timespec done_stamp = xtime;
678 DBF_TEXT(trace, 4, __FUNCTION__);
681 (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
682 (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
683 if (duration > ch->prof.tx_time)
684 ch->prof.tx_time = duration;
686 if (ch->irb->scsw.count != 0)
687 ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
688 dev->name, ch->irb->scsw.count);
689 fsm_deltimer(&ch->timer);
690 while ((skb = skb_dequeue(&ch->io_queue))) {
691 privptr->stats.tx_packets++;
692 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
694 privptr->stats.tx_bytes += 2;
697 atomic_dec(&skb->users);
698 dev_kfree_skb_irq(skb);
700 spin_lock(&ch->collect_lock);
701 clear_normalized_cda(&ch->ccw[4]);
702 if (ch->collect_len > 0) {
705 if (ctc_checkalloc_buffer(ch, 1)) {
706 spin_unlock(&ch->collect_lock);
709 ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
710 ch->trans_skb->len = 0;
711 if (ch->prof.maxmulti < (ch->collect_len + 2))
712 ch->prof.maxmulti = ch->collect_len + 2;
713 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
714 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
715 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
717 while ((skb = skb_dequeue(&ch->collect_queue))) {
718 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
720 privptr->stats.tx_packets++;
721 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
722 atomic_dec(&skb->users);
723 dev_kfree_skb_irq(skb);
727 spin_unlock(&ch->collect_lock);
728 ch->ccw[1].count = ch->trans_skb->len;
729 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
730 ch->prof.send_stamp = xtime;
731 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
732 (unsigned long) ch, 0xff, 0);
733 ch->prof.doios_multi++;
735 privptr->stats.tx_dropped += i;
736 privptr->stats.tx_errors += i;
737 fsm_deltimer(&ch->timer);
738 ccw_check_return_code(ch, rc, "chained TX");
741 spin_unlock(&ch->collect_lock);
742 fsm_newstate(fi, CH_STATE_TXIDLE);
748 * Initial data is sent.
749 * Notify device statemachine that we are up and
752 * @param fi An instance of a channel statemachine.
753 * @param event The event, just happened.
754 * @param arg Generic pointer, casted from channel * upon call.
757 ch_action_txidle(fsm_instance * fi, int event, void *arg)
759 struct channel *ch = (struct channel *) arg;
761 DBF_TEXT(trace, 4, __FUNCTION__);
762 fsm_deltimer(&ch->timer);
763 fsm_newstate(fi, CH_STATE_TXIDLE);
764 fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
769 * Got normal data, check it for sanity, queue it up, allocate a new buffer,
770 * trigger the bottom half, and initiate the next read.
772 * @param fi An instance of a channel statemachine.
773 * @param event The event, just happened.
774 * @param arg Generic pointer, casted from channel * upon call.
777 ch_action_rx(fsm_instance * fi, int event, void *arg)
779 struct channel *ch = (struct channel *) arg;
780 struct net_device *dev = ch->netdev;
781 struct ctc_priv *privptr = dev->priv;
782 int len = ch->max_bufsize - ch->irb->scsw.count;
783 struct sk_buff *skb = ch->trans_skb;
784 __u16 block_len = *((__u16 *) skb->data);
788 DBF_TEXT(trace, 4, __FUNCTION__);
789 fsm_deltimer(&ch->timer);
791 ctc_pr_debug("%s: got packet with length %d < 8\n",
793 privptr->stats.rx_dropped++;
794 privptr->stats.rx_length_errors++;
797 if (len > ch->max_bufsize) {
798 ctc_pr_debug("%s: got packet with length %d > %d\n",
799 dev->name, len, ch->max_bufsize);
800 privptr->stats.rx_dropped++;
801 privptr->stats.rx_length_errors++;
806 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
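 * The length check below therefore tolerates up to two extra bytes
 * (check_len) for the affected protocol variants.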
808 switch (ch->protocol) {
810 case CTC_PROTO_OS390:
811 check_len = block_len + 2;
814 check_len = block_len;
817 if ((len < block_len) || (len > check_len)) {
818 ctc_pr_debug("%s: got block length %d != rx length %d\n",
819 dev->name, block_len, len);
821 ctc_dump_skb(skb, 0);
823 *((__u16 *) skb->data) = len;
824 privptr->stats.rx_dropped++;
825 privptr->stats.rx_length_errors++;
830 *((__u16 *) skb->data) = block_len;
831 ctc_unpack_skb(ch, skb);
834 skb->data = skb->tail = ch->trans_skb_data;
836 if (ctc_checkalloc_buffer(ch, 1))
838 ch->ccw[1].count = ch->max_bufsize;
839 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
841 ccw_check_return_code(ch, rc, "normal RX");
844 static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
847 * Initialize connection by sending a __u16 of value 0.
849 * @param fi An instance of a channel statemachine.
850 * @param event The event, just happened.
851 * @param arg Generic pointer, casted from channel * upon call.
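 *
 * Handshake sketch: each side writes a single __u16 containing
 * CTC_INITIAL_BLOCKLEN on its TX channel and expects to read an initial
 * block of at least CTC_INITIAL_BLOCKLEN on its RX channel before the
 * channels are declared up; the protocol-specific shortcuts for OS/390
 * and compatibility mode are handled below.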
854 ch_action_firstio(fsm_instance * fi, int event, void *arg)
856 struct channel *ch = (struct channel *) arg;
859 DBF_TEXT(trace, 4, __FUNCTION__);
861 if (fsm_getstate(fi) == CH_STATE_TXIDLE)
862 ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
863 fsm_deltimer(&ch->timer);
864 if (ctc_checkalloc_buffer(ch, 1))
866 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
867 (ch->protocol == CTC_PROTO_OS390)) {
868 /* OS/390 or z/OS */
869 if (CHANNEL_DIRECTION(ch->flags) == READ) {
870 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
871 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
873 ch_action_rxidle(fi, event, arg);
875 struct net_device *dev = ch->netdev;
876 fsm_newstate(fi, CH_STATE_TXIDLE);
877 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
878 DEV_EVENT_TXUP, dev);
884 * Don't set up a timer for receiving the initial RX frame
885 * if in compatibility mode, since VM TCP delays the initial
886 * frame until it has some data to send.
888 if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
889 (ch->protocol != CTC_PROTO_S390))
890 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
892 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
893 ch->ccw[1].count = 2; /* Transfer only length */
895 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
896 ? CH_STATE_RXINIT : CH_STATE_TXINIT);
897 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
899 fsm_deltimer(&ch->timer);
900 fsm_newstate(fi, CH_STATE_SETUPWAIT);
901 ccw_check_return_code(ch, rc, "init IO");
904 * If in compatibility mode, since we don't set up a timer, we
905 * also signal RX channel up immediately. This enables us
906 * to send packets early, which in turn usually triggers some
907 * reply from VM TCP which brings up the RX channel to its
910 if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
911 (ch->protocol == CTC_PROTO_S390)) {
912 struct net_device *dev = ch->netdev;
913 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
919 * Got initial data, check it. If OK,
920 * notify device statemachine that we are up and
923 * @param fi An instance of a channel statemachine.
924 * @param event The event, just happened.
925 * @param arg Generic pointer, casted from channel * upon call.
928 ch_action_rxidle(fsm_instance * fi, int event, void *arg)
930 struct channel *ch = (struct channel *) arg;
931 struct net_device *dev = ch->netdev;
935 DBF_TEXT(trace, 4, __FUNCTION__);
936 fsm_deltimer(&ch->timer);
937 buflen = *((__u16 *) ch->trans_skb->data);
939 ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
941 if (buflen >= CTC_INITIAL_BLOCKLEN) {
942 if (ctc_checkalloc_buffer(ch, 1))
944 ch->ccw[1].count = ch->max_bufsize;
945 fsm_newstate(fi, CH_STATE_RXIDLE);
946 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
947 (unsigned long) ch, 0xff, 0);
949 fsm_newstate(fi, CH_STATE_RXINIT);
950 ccw_check_return_code(ch, rc, "initial RX");
952 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
953 DEV_EVENT_RXUP, dev);
955 ctc_pr_debug("%s: Initial RX count %d not %d\n",
956 dev->name, buflen, CTC_INITIAL_BLOCKLEN);
957 ch_action_firstio(fi, event, arg);
962 * Set channel into extended mode.
964 * @param fi An instance of a channel statemachine.
965 * @param event The event, just happened.
966 * @param arg Generic pointer, casted from channel * upon call.
969 ch_action_setmode(fsm_instance * fi, int event, void *arg)
971 struct channel *ch = (struct channel *) arg;
973 unsigned long saveflags;
975 DBF_TEXT(trace, 4, __FUNCTION__);
976 fsm_deltimer(&ch->timer);
977 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
978 fsm_newstate(fi, CH_STATE_SETUPWAIT);
979 saveflags = 0; /* avoids compiler warning with
980 spin_unlock_irqrestore */
981 if (event == CH_EVENT_TIMER) // only for timer not yet locked
982 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
983 rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
984 if (event == CH_EVENT_TIMER)
985 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
987 fsm_deltimer(&ch->timer);
988 fsm_newstate(fi, CH_STATE_STARTWAIT);
989 ccw_check_return_code(ch, rc, "set Mode");
997 * @param fi An instance of a channel statemachine.
998 * @param event The event, just happened.
999 * @param arg Generic pointer, casted from channel * upon call.
1002 ch_action_start(fsm_instance * fi, int event, void *arg)
1004 struct channel *ch = (struct channel *) arg;
1005 unsigned long saveflags;
1007 struct net_device *dev;
1009 DBF_TEXT(trace, 4, __FUNCTION__);
1011 ctc_pr_warn("ch_action_start ch=NULL\n");
1014 if (ch->netdev == NULL) {
1015 ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
1021 ctc_pr_debug("%s: %s channel start\n", dev->name,
1022 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1025 if (ch->trans_skb != NULL) {
1026 clear_normalized_cda(&ch->ccw[1]);
1027 dev_kfree_skb(ch->trans_skb);
1028 ch->trans_skb = NULL;
1030 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1031 ch->ccw[1].cmd_code = CCW_CMD_READ;
1032 ch->ccw[1].flags = CCW_FLAG_SLI;
1033 ch->ccw[1].count = 0;
1035 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
1036 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1037 ch->ccw[1].count = 0;
1039 if (ctc_checkalloc_buffer(ch, 0)) {
1041 "%s: Could not allocate %s trans_skb, delaying "
1042 "allocation until first transfer\n",
1044 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1047 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
1048 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1049 ch->ccw[0].count = 0;
1051 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* joint CE + DE */
1052 ch->ccw[2].flags = CCW_FLAG_SLI;
1053 ch->ccw[2].count = 0;
1055 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
1057 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
1059 fsm_newstate(fi, CH_STATE_STARTWAIT);
1060 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1061 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1062 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1063 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1066 fsm_deltimer(&ch->timer);
1067 ccw_check_return_code(ch, rc, "initial HaltIO");
1070 ctc_pr_debug("ctc: %s(): leaving\n", __func__);
1075 * Shutdown a channel.
1077 * @param fi An instance of a channel statemachine.
1078 * @param event The event, just happened.
1079 * @param arg Generic pointer, casted from channel * upon call.
1082 ch_action_haltio(fsm_instance * fi, int event, void *arg)
1084 struct channel *ch = (struct channel *) arg;
1085 unsigned long saveflags;
1089 DBF_TEXT(trace, 3, __FUNCTION__);
1090 fsm_deltimer(&ch->timer);
1091 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1092 saveflags = 0; /* avoids compiler warning with
1093 spin_unlock_irqrestore */
1094 if (event == CH_EVENT_STOP) // only for STOP not yet locked
1095 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1096 oldstate = fsm_getstate(fi);
1097 fsm_newstate(fi, CH_STATE_TERM);
1098 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1099 if (event == CH_EVENT_STOP)
1100 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1103 fsm_deltimer(&ch->timer);
1104 fsm_newstate(fi, oldstate);
1106 ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
1111 * A channel has successfully been halted.
1112 * Clean up its queue and notify the interface statemachine.
1114 * @param fi An instance of a channel statemachine.
1115 * @param event The event, just happened.
1116 * @param arg Generic pointer, casted from channel * upon call.
1119 ch_action_stopped(fsm_instance * fi, int event, void *arg)
1121 struct channel *ch = (struct channel *) arg;
1122 struct net_device *dev = ch->netdev;
1124 DBF_TEXT(trace, 3, __FUNCTION__);
1125 fsm_deltimer(&ch->timer);
1126 fsm_newstate(fi, CH_STATE_STOPPED);
1127 if (ch->trans_skb != NULL) {
1128 clear_normalized_cda(&ch->ccw[1]);
1129 dev_kfree_skb(ch->trans_skb);
1130 ch->trans_skb = NULL;
1132 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1133 skb_queue_purge(&ch->io_queue);
1134 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1135 DEV_EVENT_RXDOWN, dev);
1137 ctc_purge_skb_queue(&ch->io_queue);
1138 spin_lock(&ch->collect_lock);
1139 ctc_purge_skb_queue(&ch->collect_queue);
1140 ch->collect_len = 0;
1141 spin_unlock(&ch->collect_lock);
1142 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1143 DEV_EVENT_TXDOWN, dev);
1148 * A stop command from the device statemachine arrived and we are in
1149 * the not-operational state. Set the state to stopped.
1151 * @param fi An instance of a channel statemachine.
1152 * @param event The event, just happened.
1153 * @param arg Generic pointer, casted from channel * upon call.
1156 ch_action_stop(fsm_instance * fi, int event, void *arg)
1158 fsm_newstate(fi, CH_STATE_STOPPED);
1162 * A machine check for no path, not-operational status or a gone device has
1164 * been received. Clean up the queue and notify the interface statemachine.
1166 * @param fi An instance of a channel statemachine.
1167 * @param event The event, just happened.
1168 * @param arg Generic pointer, casted from channel * upon call.
1171 ch_action_fail(fsm_instance * fi, int event, void *arg)
1173 struct channel *ch = (struct channel *) arg;
1174 struct net_device *dev = ch->netdev;
1176 DBF_TEXT(trace, 3, __FUNCTION__);
1177 fsm_deltimer(&ch->timer);
1178 fsm_newstate(fi, CH_STATE_NOTOP);
1179 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1180 skb_queue_purge(&ch->io_queue);
1181 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1182 DEV_EVENT_RXDOWN, dev);
1184 ctc_purge_skb_queue(&ch->io_queue);
1185 spin_lock(&ch->collect_lock);
1186 ctc_purge_skb_queue(&ch->collect_queue);
1187 ch->collect_len = 0;
1188 spin_unlock(&ch->collect_lock);
1189 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1190 DEV_EVENT_TXDOWN, dev);
1195 * Handle error during setup of channel.
1197 * @param fi An instance of a channel statemachine.
1198 * @param event The event, just happened.
1199 * @param arg Generic pointer, casted from channel * upon call.
1202 ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1204 struct channel *ch = (struct channel *) arg;
1205 struct net_device *dev = ch->netdev;
1207 DBF_TEXT(setup, 3, __FUNCTION__);
1209 * Special case: Got UC_RCRESET on setmode.
1210 * This means that the remote side isn't set up yet. In this case
1211 * simply retry after a few seconds...
1213 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1214 ((event == CH_EVENT_UC_RCRESET) ||
1215 (event == CH_EVENT_UC_RSRESET))) {
1216 fsm_newstate(fi, CH_STATE_STARTRETRY);
1217 fsm_deltimer(&ch->timer);
1218 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1219 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1220 int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1222 ccw_check_return_code(
1223 ch, rc, "HaltIO in ch_action_setuperr");
1228 ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
1229 dev->name, ch_event_names[event],
1230 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1231 fsm_getstate_str(fi));
1232 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1233 fsm_newstate(fi, CH_STATE_RXERR);
1234 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1235 DEV_EVENT_RXDOWN, dev);
1237 fsm_newstate(fi, CH_STATE_TXERR);
1238 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1239 DEV_EVENT_TXDOWN, dev);
1244 * Restart a channel after an error.
1246 * @param fi An instance of a channel statemachine.
1247 * @param event The event, just happened.
1248 * @param arg Generic pointer, casted from channel * upon call.
1251 ch_action_restart(fsm_instance * fi, int event, void *arg)
1253 unsigned long saveflags;
1257 struct channel *ch = (struct channel *) arg;
1258 struct net_device *dev = ch->netdev;
1260 DBF_TEXT(trace, 3, __FUNCTION__);
1261 fsm_deltimer(&ch->timer);
1262 ctc_pr_debug("%s: %s channel restart\n", dev->name,
1263 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1264 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1265 oldstate = fsm_getstate(fi);
1266 fsm_newstate(fi, CH_STATE_STARTWAIT);
1267 saveflags = 0; /* avoids compiler warning with
1268 spin_unlock_irqrestore */
1269 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1270 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1271 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1272 if (event == CH_EVENT_TIMER)
1273 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1276 fsm_deltimer(&ch->timer);
1277 fsm_newstate(fi, oldstate);
1279 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1284 * Handle error during RX initial handshake (exchange of
1285 * 0-length block header)
1287 * @param fi An instance of a channel statemachine.
1288 * @param event The event, just happened.
1289 * @param arg Generic pointer, casted from channel * upon call.
1292 ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1294 struct channel *ch = (struct channel *) arg;
1295 struct net_device *dev = ch->netdev;
1297 DBF_TEXT(setup, 3, __FUNCTION__);
1298 if (event == CH_EVENT_TIMER) {
1299 fsm_deltimer(&ch->timer);
1300 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
1301 if (ch->retry++ < 3)
1302 ch_action_restart(fi, event, arg);
1304 fsm_newstate(fi, CH_STATE_RXERR);
1305 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1306 DEV_EVENT_RXDOWN, dev);
1309 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1313 * Notify the device statemachine if we gave up initializing the RX channel.
1316 * @param fi An instance of a channel statemachine.
1317 * @param event The event, just happened.
1318 * @param arg Generic pointer, casted from channel * upon call.
1321 ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1323 struct channel *ch = (struct channel *) arg;
1324 struct net_device *dev = ch->netdev;
1326 DBF_TEXT(setup, 3, __FUNCTION__);
1327 fsm_newstate(fi, CH_STATE_RXERR);
1328 ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1329 ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1330 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1334 * Handle RX Unit check remote reset (remote disconnected)
1336 * @param fi An instance of a channel statemachine.
1337 * @param event The event, just happened.
1338 * @param arg Generic pointer, casted from channel * upon call.
1341 ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1343 struct channel *ch = (struct channel *) arg;
1344 struct channel *ch2;
1345 struct net_device *dev = ch->netdev;
1347 DBF_TEXT(trace, 3, __FUNCTION__);
1348 fsm_deltimer(&ch->timer);
1349 ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1353 * Notify device statemachine
1355 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1356 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
1358 fsm_newstate(fi, CH_STATE_DTERM);
1359 ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1360 fsm_newstate(ch2->fsm, CH_STATE_DTERM);
1362 ccw_device_halt(ch->cdev, (unsigned long) ch);
1363 ccw_device_halt(ch2->cdev, (unsigned long) ch2);
1367 * Handle error during TX channel initialization.
1369 * @param fi An instance of a channel statemachine.
1370 * @param event The event, just happened.
1371 * @param arg Generic pointer, casted from channel * upon call.
1374 ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1376 struct channel *ch = (struct channel *) arg;
1377 struct net_device *dev = ch->netdev;
1379 DBF_TEXT(setup, 2, __FUNCTION__);
1380 if (event == CH_EVENT_TIMER) {
1381 fsm_deltimer(&ch->timer);
1382 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
1383 if (ch->retry++ < 3)
1384 ch_action_restart(fi, event, arg);
1386 fsm_newstate(fi, CH_STATE_TXERR);
1387 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1388 DEV_EVENT_TXDOWN, dev);
1391 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
1395 * Handle TX timeout by retrying operation.
1397 * @param fi An instance of a channel statemachine.
1398 * @param event The event, just happened.
1399 * @param arg Generic pointer, casted from channel * upon call.
1402 ch_action_txretry(fsm_instance * fi, int event, void *arg)
1404 struct channel *ch = (struct channel *) arg;
1405 struct net_device *dev = ch->netdev;
1406 unsigned long saveflags;
1408 DBF_TEXT(trace, 4, __FUNCTION__);
1409 fsm_deltimer(&ch->timer);
1410 if (ch->retry++ > 3) {
1411 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1413 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1414 DEV_EVENT_TXDOWN, dev);
1415 ch_action_restart(fi, event, arg);
1417 struct sk_buff *skb;
1419 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
1420 if ((skb = skb_peek(&ch->io_queue))) {
1423 clear_normalized_cda(&ch->ccw[4]);
1424 ch->ccw[4].count = skb->len;
1425 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1427 "%s: IDAL alloc failed, chan restart\n",
1429 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1430 DEV_EVENT_TXDOWN, dev);
1431 ch_action_restart(fi, event, arg);
1434 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1435 saveflags = 0; /* avoids compiler warning with
1436 spin_unlock_irqrestore */
1437 if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1438 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1440 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1441 (unsigned long) ch, 0xff, 0);
1442 if (event == CH_EVENT_TIMER)
1443 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1446 fsm_deltimer(&ch->timer);
1447 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1448 ctc_purge_skb_queue(&ch->io_queue);
1456 * Handle fatal errors during an I/O command.
1458 * @param fi An instance of a channel statemachine.
1459 * @param event The event, just happened.
1460 * @param arg Generic pointer, casted from channel * upon call.
1463 ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1465 struct channel *ch = (struct channel *) arg;
1466 struct net_device *dev = ch->netdev;
1468 DBF_TEXT(trace, 3, __FUNCTION__);
1469 fsm_deltimer(&ch->timer);
1470 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1471 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1472 fsm_newstate(fi, CH_STATE_RXERR);
1473 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1474 DEV_EVENT_RXDOWN, dev);
1476 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1477 fsm_newstate(fi, CH_STATE_TXERR);
1478 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1479 DEV_EVENT_TXDOWN, dev);
1484 ch_action_reinit(fsm_instance *fi, int event, void *arg)
1486 struct channel *ch = (struct channel *)arg;
1487 struct net_device *dev = ch->netdev;
1488 struct ctc_priv *privptr = dev->priv;
1490 DBF_TEXT(trace, 4, __FUNCTION__);
1491 ch_action_iofatal(fi, event, arg);
1492 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1496 * The statemachine for a channel.
1498 static const fsm_node ch_fsm[] = {
1499 {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
1500 {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
1501 {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
1502 {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
1504 {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
1505 {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
1506 {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
1507 {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
1508 {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
1510 {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
1511 {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
1512 {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
1513 {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
1514 {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1515 {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1516 {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1518 {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
1519 {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
1520 {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
1521 {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
1523 {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
1524 {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
1525 {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
1526 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
1527 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
1528 {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
1529 {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1530 {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1531 {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1533 {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
1534 {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
1535 {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
1536 {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
1537 {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
1538 {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
1539 {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
1540 {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1541 {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1542 {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
1543 {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1545 {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
1546 {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
1547 {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
1548 {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
1549 // {CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
1550 {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1551 {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1552 {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1553 {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
1555 {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
1556 {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
1557 {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
1558 {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
1559 {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
1560 {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
1561 {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1562 {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1563 {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1565 {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
1566 {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
1567 {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
1568 {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
1569 {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
1570 {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1571 {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1572 {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1574 {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
1575 {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
1576 {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
1577 {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1578 {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1579 {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
1581 {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
1582 {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
1583 {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
1584 {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1585 {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1586 {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
1588 {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
1589 {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
1590 {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
1591 {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
1592 {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
1593 {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
1594 {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1595 {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
1596 {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
1598 {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
1599 {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
1600 {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1601 {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1604 static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
1607 * Functions related to setup and device detection.
1608 *****************************************************************************/
1611 less_than(char *id1, char *id2)
1615 for (i = 0; i < 5; i++) {
1619 dev1 = simple_strtoul(id1, &id1, 16);
1620 dev2 = simple_strtoul(id2, &id2, 16);
1622 return (dev1 < dev2);
1626 * Add a new channel to the list of channels.
1627 * Keeps the channel list sorted.
1629 * @param cdev The ccw_device to be added.
1630 * @param type The type class of the new channel.
1632 * @return 0 on success, !0 on error.
1635 add_channel(struct ccw_device *cdev, enum channel_types type)
1637 struct channel **c = &channels;
1640 DBF_TEXT(trace, 2, __FUNCTION__);
1642 (struct channel *) kmalloc(sizeof (struct channel),
1643 GFP_KERNEL)) == NULL) {
1644 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1647 memset(ch, 0, sizeof (struct channel));
1648 if ((ch->ccw = kmalloc(8*sizeof(struct ccw1),
1649 GFP_KERNEL | GFP_DMA)) == NULL) {
1651 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1655 memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // ensure all flags and counters are reset
1658 * "static" ccws are used in the following way:
1660 * ccw[0..2] (Channel program for generic I/O):
1662 * 1: read or write (depending on direction) with fixed
1663 * buffer (idal allocated once when buffer is allocated)
1665 * ccw[3..5] (Channel program for direct write of packets)
1667 * 4: write (idal allocated on every write).
1669 * ccw[6..7] (Channel program for initial channel setup):
1670 * 6: set extended mode
1673 * ch->ccw[0..5] are initialized in ch_action_start because
1674 * the channel's direction is not yet known here.
1676 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1677 ch->ccw[6].flags = CCW_FLAG_SLI;
1679 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1680 ch->ccw[7].flags = CCW_FLAG_SLI;
1683 snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1685 ch->fsm = init_fsm(ch->id, ch_state_names,
1686 ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1687 ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1688 if (ch->fsm == NULL) {
1689 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1694 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1695 if ((ch->irb = kmalloc(sizeof (struct irb),
1696 GFP_KERNEL)) == NULL) {
1697 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1703 memset(ch->irb, 0, sizeof (struct irb));
1704 while (*c && less_than((*c)->id, ch->id))
1706 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
1708 "ctc: add_channel: device %s already in list, "
1709 "using old entry\n", (*c)->id);
1717 spin_lock_init(&ch->collect_lock);
1719 fsm_settimer(ch->fsm, &ch->timer);
1720 skb_queue_head_init(&ch->io_queue);
1721 skb_queue_head_init(&ch->collect_queue);
1728 * Release a specific channel in the channel list.
1730 * @param ch Pointer to channel struct to be released.
1733 channel_free(struct channel *ch)
1735 ch->flags &= ~CHANNEL_FLAGS_INUSE;
1736 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1740 * Remove a specific channel in the channel list.
1742 * @param ch Pointer to channel struct to be released.
1745 channel_remove(struct channel *ch)
1747 struct channel **c = &channels;
1749 DBF_TEXT(trace, 2, __FUNCTION__);
1757 fsm_deltimer(&ch->timer);
1759 clear_normalized_cda(&ch->ccw[4]);
1760 if (ch->trans_skb != NULL) {
1761 clear_normalized_cda(&ch->ccw[1]);
1762 dev_kfree_skb(ch->trans_skb);
1774 * Get a specific channel from the channel list.
1776 * @param type Type of channel we are interested in.
1777 * @param id Id of channel we are interested in.
1778 * @param direction Direction we want to use this channel for.
1780 * @return Pointer to a channel or NULL if no matching channel available.
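 *
 * A free matching channel is marked CHANNEL_FLAGS_INUSE, given the flag
 * for the requested direction, and moved to CH_STATE_STOPPED before it
 * is returned.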
1782 static struct channel
1784 channel_get(enum channel_types type, char *id, int direction)
1786 struct channel *ch = channels;
1788 DBF_TEXT(trace, 3, __FUNCTION__);
1790 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
1791 __func__, id, type);
1794 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
1796 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
1797 __func__, ch, ch->id, ch->type);
1802 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
1803 __func__, ch, ch->id, ch->type);
1806 ctc_pr_warn("ctc: %s(): channel with id %s "
1807 "and type %d not found in channel list\n",
1808 __func__, id, type);
1810 if (ch->flags & CHANNEL_FLAGS_INUSE)
1813 ch->flags |= CHANNEL_FLAGS_INUSE;
1814 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
1815 ch->flags |= (direction == WRITE)
1816 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
1817 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
1824 * Return the channel type by name.
1826 * @param name Name of network interface.
1828 * @return Type class of channel to be used for that interface.
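 *
 * Example (per the prefix checks below): "ctc0" yields
 * channel_type_parallel, "escon1" yields channel_type_escon, and any
 * other name yields channel_type_unknown.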
1830 static inline enum channel_types
1831 extract_channel_media(char *name)
1833 enum channel_types ret = channel_type_unknown;
1836 if (strncmp(name, "ctc", 3) == 0)
1837 ret = channel_type_parallel;
1838 if (strncmp(name, "escon", 5) == 0)
1839 ret = channel_type_escon;
1845 __ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1850 switch (PTR_ERR(irb)) {
1852 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
1853 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1854 // CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1857 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
1858 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1859 // CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
1862 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
1864 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1865 // CTC_DBF_TEXT(trace, 2, " rc???");
1867 return PTR_ERR(irb);
1873 * @param cdev The ccw_device the interrupt is for.
1874 * @param intparm interruption parameter.
1875 * @param irb interruption response block.
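 *
 * The handler only translates the IRB into a channel statemachine event:
 * a subchannel check becomes CH_EVENT_SC_UNKNOWN, a unit check is decoded
 * via ccw_unit_check(), busy/attention conditions map to their
 * CH_EVENT_* counterparts, and final status becomes CH_EVENT_FINSTAT;
 * everything else is reported as CH_EVENT_IRQ.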
1878 ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1881 struct net_device *dev;
1882 struct ctc_priv *priv;
1884 DBF_TEXT(trace, 5, __FUNCTION__);
1885 if (__ctc_check_irb_error(cdev, irb))
1888 /* Check for unsolicited interrupts. */
1889 if (!cdev->dev.driver_data) {
1890 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
1891 cdev->dev.bus_id, irb->scsw.cstat,
1896 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
1899 /* Try to extract channel from driver data. */
1900 if (priv->channel[READ]->cdev == cdev)
1901 ch = priv->channel[READ];
1902 else if (priv->channel[WRITE]->cdev == cdev)
1903 ch = priv->channel[WRITE];
1905 ctc_pr_err("ctc: Can't determine channel for interrupt, "
1906 "device %s\n", cdev->dev.bus_id);
1910 dev = (struct net_device *) (ch->netdev);
1912 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
1913 cdev->dev.bus_id, ch);
1918 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
1919 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
1922 /* Copy interruption response block. */
1923 memcpy(ch->irb, irb, sizeof(struct irb));
1925 /* Check for good subchannel return code, otherwise error message */
1926 if (ch->irb->scsw.cstat) {
1927 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
1928 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
1929 dev->name, ch->id, ch->irb->scsw.cstat,
1930 ch->irb->scsw.dstat);
1934 /* Check the reason-code of a unit check */
1935 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
1936 ccw_unit_check(ch, ch->irb->ecw[0]);
1939 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
1940 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
1941 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
1943 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
1946 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
1947 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
1950 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
1951 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
1952 (ch->irb->scsw.stctl ==
1953 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1954 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
1956 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
1961 * Actions for interface - statemachine.
1962 *****************************************************************************/
1965 * Startup channels by sending CH_EVENT_START to each channel.
1967 * @param fi An instance of an interface statemachine.
1968 * @param event The event, just happened.
1969 * @param arg Generic pointer, casted from struct net_device * upon call.
1972 dev_action_start(fsm_instance * fi, int event, void *arg)
1974 struct net_device *dev = (struct net_device *) arg;
1975 struct ctc_priv *privptr = dev->priv;
1978 DBF_TEXT(setup, 3, __FUNCTION__);
1979 fsm_deltimer(&privptr->restart_timer);
1980 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
1981 for (direction = READ; direction <= WRITE; direction++) {
1982 struct channel *ch = privptr->channel[direction];
1983 fsm_event(ch->fsm, CH_EVENT_START, ch);
1988 * Shutdown channels by sending CH_EVENT_STOP to each channel.
1990 * @param fi An instance of an interface statemachine.
1991 * @param event The event, just happened.
1992 * @param arg Generic pointer, casted from struct net_device * upon call.
1995 dev_action_stop(fsm_instance * fi, int event, void *arg)
1997 struct net_device *dev = (struct net_device *) arg;
1998 struct ctc_priv *privptr = dev->priv;
2001 DBF_TEXT(trace, 3, __FUNCTION__);
2002 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2003 for (direction = READ; direction <= WRITE; direction++) {
2004 struct channel *ch = privptr->channel[direction];
2005 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
2009 dev_action_restart(fsm_instance *fi, int event, void *arg)
2011 struct net_device *dev = (struct net_device *)arg;
2012 struct ctc_priv *privptr = dev->priv;
2014 DBF_TEXT(trace, 3, __FUNCTION__);
2015 ctc_pr_debug("%s: Restarting\n", dev->name);
2016 dev_action_stop(fi, event, arg);
2017 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2018 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2019 DEV_EVENT_START, dev);
2023 * Called from channel statemachine
2024 * when a channel is up and running.
2026 * @param fi An instance of an interface statemachine.
2027 * @param event The event, just happened.
2028 * @param arg Generic pointer, casted from struct net_device * upon call.
2031 dev_action_chup(fsm_instance * fi, int event, void *arg)
2033 struct net_device *dev = (struct net_device *) arg;
2035 DBF_TEXT(trace, 3, __FUNCTION__);
2036 switch (fsm_getstate(fi)) {
2037 case DEV_STATE_STARTWAIT_RXTX:
2038 if (event == DEV_EVENT_RXUP)
2039 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2041 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2043 case DEV_STATE_STARTWAIT_RX:
2044 if (event == DEV_EVENT_RXUP) {
2045 fsm_newstate(fi, DEV_STATE_RUNNING);
2046 ctc_pr_info("%s: connected with remote side\n",
2048 ctc_clear_busy(dev);
2051 case DEV_STATE_STARTWAIT_TX:
2052 if (event == DEV_EVENT_TXUP) {
2053 fsm_newstate(fi, DEV_STATE_RUNNING);
2054 ctc_pr_info("%s: connected with remote side\n",
2056 ctc_clear_busy(dev);
2059 case DEV_STATE_STOPWAIT_TX:
2060 if (event == DEV_EVENT_RXUP)
2061 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2063 case DEV_STATE_STOPWAIT_RX:
2064 if (event == DEV_EVENT_TXUP)
2065 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2071 * Called from channel statemachine
2072 * when a channel has been shutdown.
2074 * @param fi An instance of an interface statemachine.
2075 * @param event The event, just happened.
2076 * @param arg Generic pointer, casted from struct net_device * upon call.
2079 dev_action_chdown(fsm_instance * fi, int event, void *arg)
2082 DBF_TEXT(trace, 3, __FUNCTION__);
2083 switch (fsm_getstate(fi)) {
2084 case DEV_STATE_RUNNING:
2085 if (event == DEV_EVENT_TXDOWN)
2086 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2088 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2090 case DEV_STATE_STARTWAIT_RX:
2091 if (event == DEV_EVENT_TXDOWN)
2092 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2094 case DEV_STATE_STARTWAIT_TX:
2095 if (event == DEV_EVENT_RXDOWN)
2096 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2098 case DEV_STATE_STOPWAIT_RXTX:
2099 if (event == DEV_EVENT_TXDOWN)
2100 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2102 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2104 case DEV_STATE_STOPWAIT_RX:
2105 if (event == DEV_EVENT_RXDOWN)
2106 fsm_newstate(fi, DEV_STATE_STOPPED);
2108 case DEV_STATE_STOPWAIT_TX:
2109 if (event == DEV_EVENT_TXDOWN)
2110 fsm_newstate(fi, DEV_STATE_STOPPED);
2115 static const fsm_node dev_fsm[] = {
2116 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2118 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2119 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2120 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2121 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2123 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2124 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2125 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2126 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2127 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2129 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2130 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2131 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2132 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2133 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2135 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2136 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2137 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2138 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2139 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2140 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2142 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2143 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2144 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2145 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2146 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2148 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2149 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2150 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2151 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2152 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2154 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2155 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2156 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2157 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2158 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2159 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2162 static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
2165 * Transmit a packet.
2166 * This is a helper function for ctc_tx().
2168 * @param ch Channel to be used for sending.
2169 * @param skb Pointer to struct sk_buff of packet to send.
2170 * The linklevel header has already been set up
2173 * @return 0 on success, -ERRNO on failure. (Never fails.)
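 *
 * Rough flow (a sketch, details below): if the channel is not in
 * CH_STATE_TXIDLE the packet is appended to ch->collect_queue (bounded by
 * max_bufsize) for a later chained write. Otherwise an ll_header and a
 * 2-byte block length are prepended and the write is started immediately,
 * either directly from the skb via an IDAL or, if IDAL allocation fails,
 * by copying into ch->trans_skb.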
2176 transmit_skb(struct channel *ch, struct sk_buff *skb)
2178 unsigned long saveflags;
2179 struct ll_header header;
2182 DBF_TEXT(trace, 5, __FUNCTION__);
2183 /* we need to acquire the lock for testing the state
2184 * otherwise we can have an IRQ changing the state to
2185 * TXIDLE after the test but before acquiring the lock.
2187 spin_lock_irqsave(&ch->collect_lock, saveflags);
2188 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2189 int l = skb->len + LL_HEADER_LENGTH;
2191 if (ch->collect_len + l > ch->max_bufsize - 2) {
2192 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2195 atomic_inc(&skb->users);
2197 header.type = skb->protocol;
2199 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2201 skb_queue_tail(&ch->collect_queue, skb);
2202 ch->collect_len += l;
2204 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2208 struct sk_buff *nskb;
2210 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2212 * Protect skb against being freed by upper layers.
2215 atomic_inc(&skb->users);
2216 ch->prof.txlen += skb->len;
2217 header.length = skb->len + LL_HEADER_LENGTH;
2218 header.type = skb->protocol;
2220 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2222 block_len = skb->len + 2;
2223 *((__u16 *) skb_push(skb, 2)) = block_len;
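/*
 * The transmit block now has the layout
 *   [2-byte block length][LL header][payload],
 * where block_len counts the LL header, the payload and the length
 * field itself.
 */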
2226 * IDAL support in CTC is broken, so we have to
2227 * take care of skbs above 2 GB ourselves.
2229 hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
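/*
 * hi is non-zero if the skb data lies above the 2 GB line; in that
 * case the packet is copied into a freshly allocated GFP_DMA skb
 * (guaranteed to be below 2 GB) before the I/O is started.
 */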
2231 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2233 atomic_dec(&skb->users);
2234 skb_pull(skb, LL_HEADER_LENGTH + 2);
2235 ctc_clear_busy(ch->netdev);
2238 memcpy(skb_put(nskb, skb->len),
2239 skb->data, skb->len);
2240 atomic_inc(&nskb->users);
2241 atomic_dec(&skb->users);
2242 dev_kfree_skb_irq(skb);
2247 ch->ccw[4].count = block_len;
2248 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2250 * IDAL allocation failed, fall back to copying into
2251 * trans_skb, which usually has a pre-allocated IDAL.
2254 if (ctc_checkalloc_buffer(ch, 1)) {
2256 * Remove our header. It gets added
2257 * again on retransmit.
2259 atomic_dec(&skb->users);
2260 skb_pull(skb, LL_HEADER_LENGTH + 2);
2261 ctc_clear_busy(ch->netdev);
2265 ch->trans_skb->tail = ch->trans_skb->data;
2266 ch->trans_skb->len = 0;
2267 ch->ccw[1].count = skb->len;
2268 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
2270 atomic_dec(&skb->users);
2271 dev_kfree_skb_irq(skb);
2274 skb_queue_tail(&ch->io_queue, skb);
2278 fsm_newstate(ch->fsm, CH_STATE_TX);
2279 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
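/*
 * The timer armed above posts CH_EVENT_TIMER to the channel
 * statemachine if the write does not complete within 5 seconds.
 */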
2280 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2281 ch->prof.send_stamp = xtime;
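/*
 * Start the channel program under the ccw device lock.  The channel
 * pointer is passed as the interruption parameter; an lpm of 0xff
 * allows the I/O on any available path.
 */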
2282 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2283 (unsigned long) ch, 0xff, 0);
2284 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2286 ch->prof.doios_single++;
2288 fsm_deltimer(&ch->timer);
2289 ccw_check_return_code(ch, rc, "single skb TX");
2291 skb_dequeue_tail(&ch->io_queue);
2293 * Remove our header. It gets added
2294 * again on retransmit.
2296 skb_pull(skb, LL_HEADER_LENGTH + 2);
2299 struct net_device *dev = ch->netdev;
2300 struct ctc_priv *privptr = dev->priv;
2301 privptr->stats.tx_packets++;
2302 privptr->stats.tx_bytes +=
2303 skb->len - LL_HEADER_LENGTH;
2308 ctc_clear_busy(ch->netdev);
2313 * Interface API for upper network layers
2314 *****************************************************************************/
2317 * Open an interface.
2318 * Called from generic network layer when ifconfig up is run.
2320 * @param dev Pointer to interface struct.
2322 * @return 0 on success, -ERRNO on failure. (Never fails.)
2325 ctc_open(struct net_device * dev)
2327 DBF_TEXT(trace, 5, __FUNCTION__);
2328 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2333 * Close an interface.
2334 * Called from generic network layer when ifconfig down is run.
2336 * @param dev Pointer to interface struct.
2338 * @return 0 on success, -ERRNO on failure. (Never fails.)
2341 ctc_close(struct net_device * dev)
2343 DBF_TEXT(trace, 5, __FUNCTION__);
2344 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
2349 * Start transmission of a packet.
2350 * Called from generic network device layer.
2352 * @param skb Pointer to buffer containing the packet.
2353 * @param dev Pointer to interface struct.
2355 * @return 0 if packet consumed, !0 if packet rejected.
2356 * Note: If we return !0, then the packet is freed by
2357 * the generic network layer.
2360 ctc_tx(struct sk_buff *skb, struct net_device * dev)
2363 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2365 DBF_TEXT(trace, 5, __FUNCTION__);
2367 * Some sanity checks ...
2370 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2371 privptr->stats.tx_dropped++;
2374 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2375 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2376 dev->name, LL_HEADER_LENGTH + 2);
2378 privptr->stats.tx_dropped++;
2383 * If channels are not running, try to restart them
2384 * and throw away the packet.
2386 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2387 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
2389 privptr->stats.tx_dropped++;
2390 privptr->stats.tx_errors++;
2391 privptr->stats.tx_carrier_errors++;
2395 if (ctc_test_and_set_busy(dev))
2398 dev->trans_start = jiffies;
2399 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2405 * Sets MTU of an interface.
2407 * @param dev Pointer to interface struct.
2408 * @param new_mtu The new MTU to use for this interface.
2410 * @return 0 on success, -EINVAL if MTU is out of valid range.
2411 * (valid range is 576 .. 65527). If VM is on the
2412 * remote side, maximum MTU is 32760, however this is
2413 * <em>not</em> checked here.
2416 ctc_change_mtu(struct net_device * dev, int new_mtu)
2418 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2420 DBF_TEXT(trace, 3, __FUNCTION__);
2421 if ((new_mtu < 576) || (new_mtu > 65527) ||
2422 (new_mtu > (privptr->channel[READ]->max_bufsize -
2423 LL_HEADER_LENGTH - 2)))
2426 dev->hard_header_len = LL_HEADER_LENGTH + 2;
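/*
 * hard_header_len reserves headroom for the 2-byte block length plus
 * the LL header, so the headroom check in ctc_tx() succeeds for
 * packets built by the stack.
 */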
2431 * Returns interface statistics of a device.
2433 * @param dev Pointer to interface struct.
2435 * @return Pointer to stats struct of this interface.
2437 static struct net_device_stats *
2438 ctc_stats(struct net_device * dev)
2440 return &((struct ctc_priv *) dev->priv)->stats;
2448 buffer_show(struct device *dev, struct device_attribute *attr, char *buf)
2450 struct ctc_priv *priv;
2452 priv = dev->driver_data;
2455 return sprintf(buf, "%d\n",
2460 buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2462 struct ctc_priv *priv;
2463 struct net_device *ndev;
2467 DBF_TEXT(trace, 3, __FUNCTION__);
2468 DBF_TEXT(trace, 3, buf);
2469 priv = dev->driver_data;
2471 DBF_TEXT(trace, 3, "bfnopriv");
2475 sscanf(buf, "%u", &bs1);
2476 if (bs1 > CTC_BUFSIZE_LIMIT)
2478 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2480 priv->buffer_size = bs1; // just to overwrite the default
2482 ndev = priv->channel[READ]->netdev;
2484 DBF_TEXT(trace, 3, "bfnondev");
2488 if ((ndev->flags & IFF_RUNNING) &&
2489 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2492 priv->channel[READ]->max_bufsize = bs1;
2493 priv->channel[WRITE]->max_bufsize = bs1;
2494 if (!(ndev->flags & IFF_RUNNING))
2495 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2496 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2497 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
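/*
 * The new size does not affect I/O that is already set up; the
 * BUFSIZE_CHANGED flag marks both channels so the new size can be
 * picked up when their buffers are next (re)allocated.
 */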
2499 sprintf(buffer, "%d",priv->buffer_size);
2500 DBF_TEXT(trace, 3, buffer);
2504 DBF_TEXT(trace, 3, "buff_err");
2509 loglevel_show(struct device *dev, struct device_attribute *attr, char *buf)
2511 return sprintf(buf, "%d\n", loglevel);
2515 loglevel_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2519 DBF_TEXT(trace, 5, __FUNCTION__);
2520 sscanf(buf, "%i", &ll1);
2522 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
2529 ctc_print_statistics(struct ctc_priv *priv)
2534 DBF_TEXT(trace, 4, __FUNCTION__);
2537 sbuf = kmalloc(2048, GFP_KERNEL);
2542 p += sprintf(p, " Device FSM state: %s\n",
2543 fsm_getstate_str(priv->fsm));
2544 p += sprintf(p, " RX channel FSM state: %s\n",
2545 fsm_getstate_str(priv->channel[READ]->fsm));
2546 p += sprintf(p, " TX channel FSM state: %s\n",
2547 fsm_getstate_str(priv->channel[WRITE]->fsm));
2548 p += sprintf(p, " Max. TX buffer used: %ld\n",
2549 priv->channel[WRITE]->prof.maxmulti);
2550 p += sprintf(p, " Max. chained SKBs: %ld\n",
2551 priv->channel[WRITE]->prof.maxcqueue);
2552 p += sprintf(p, " TX single write ops: %ld\n",
2553 priv->channel[WRITE]->prof.doios_single);
2554 p += sprintf(p, " TX multi write ops: %ld\n",
2555 priv->channel[WRITE]->prof.doios_multi);
2556 p += sprintf(p, " Netto bytes written: %ld\n",
2557 priv->channel[WRITE]->prof.txlen);
2558 p += sprintf(p, " Max. TX IO-time: %ld\n",
2559 priv->channel[WRITE]->prof.tx_time);
2561 ctc_pr_debug("Statistics for %s:\n%s",
2562 priv->channel[WRITE]->netdev->name, sbuf);
2568 stats_show(struct device *dev, struct device_attribute *attr, char *buf)
2570 struct ctc_priv *priv = dev->driver_data;
2573 ctc_print_statistics(priv);
2574 return sprintf(buf, "0\n");
2578 stats_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2580 struct ctc_priv *priv = dev->driver_data;
2583 /* Reset statistics */
2584 memset(&priv->channel[WRITE]->prof, 0,
2585 sizeof(priv->channel[WRITE]->prof));
2590 ctc_netdev_unregister(struct net_device * dev)
2592 struct ctc_priv *privptr;
2596 privptr = (struct ctc_priv *) dev->priv;
2597 unregister_netdev(dev);
2601 ctc_netdev_register(struct net_device * dev)
2603 return register_netdev(dev);
2607 ctc_free_netdevice(struct net_device * dev, int free_dev)
2609 struct ctc_priv *privptr;
2612 privptr = dev->priv;
2615 kfree_fsm(privptr->fsm);
2625 ctc_proto_show(struct device *dev, struct device_attribute *attr, char *buf)
2627 struct ctc_priv *priv;
2629 priv = dev->driver_data;
2633 return sprintf(buf, "%d\n", priv->protocol);
2637 ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2639 struct ctc_priv *priv;
2642 DBF_TEXT(trace, 3, __FUNCTION__);
2643 pr_debug("%s() called\n", __FUNCTION__);
2645 priv = dev->driver_data;
2648 sscanf(buf, "%u", &value);
2649 if (!((value == CTC_PROTO_S390) ||
2650 (value == CTC_PROTO_LINUX) ||
2651 (value == CTC_PROTO_OS390)))
2653 priv->protocol = value;
2659 ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2661 struct ccwgroup_device *cgdev;
2663 cgdev = to_ccwgroupdev(dev);
2667 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2670 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2671 static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
2672 static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
2674 static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2675 static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
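/*
 * All of these attributes live in sysfs below the ccwgroup device and
 * can be used from user space, e.g. (the device path is illustrative
 * only):
 *   echo 32768 > /sys/bus/ccwgroup/devices/<device-id>/buffer
 *   cat /sys/bus/ccwgroup/devices/<device-id>/stats
 */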
2677 static struct attribute *ctc_attr[] = {
2678 &dev_attr_protocol.attr,
2679 &dev_attr_type.attr,
2680 &dev_attr_buffer.attr,
2684 static struct attribute_group ctc_attr_group = {
2689 ctc_add_attributes(struct device *dev)
2693 rc = device_create_file(dev, &dev_attr_loglevel);
2696 rc = device_create_file(dev, &dev_attr_stats);
2699 device_remove_file(dev, &dev_attr_loglevel);
2705 ctc_remove_attributes(struct device *dev)
2707 device_remove_file(dev, &dev_attr_stats);
2708 device_remove_file(dev, &dev_attr_loglevel);
2712 ctc_add_files(struct device *dev)
2714 pr_debug("%s() called\n", __FUNCTION__);
2716 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
2720 ctc_remove_files(struct device *dev)
2722 pr_debug("%s() called\n", __FUNCTION__);
2724 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
2728 * Add ctc specific attributes.
2729 * Add ctc private data.
2731 * @param cgdev pointer to ccwgroup_device just added
2733 * @returns 0 on success, !0 on failure.
2736 ctc_probe_device(struct ccwgroup_device *cgdev)
2738 struct ctc_priv *priv;
2742 pr_debug("%s() called\n", __FUNCTION__);
2743 DBF_TEXT(setup, 3, __FUNCTION__);
2745 if (!get_device(&cgdev->dev))
2748 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
2750 ctc_pr_err("%s: Out of memory\n", __func__);
2751 put_device(&cgdev->dev);
2755 memset(priv, 0, sizeof (struct ctc_priv));
2756 rc = ctc_add_files(&cgdev->dev);
2759 put_device(&cgdev->dev);
2762 priv->buffer_size = CTC_BUFSIZE_DEFAULT;
2763 cgdev->cdev[0]->handler = ctc_irq_handler;
2764 cgdev->cdev[1]->handler = ctc_irq_handler;
2765 cgdev->dev.driver_data = priv;
2767 sprintf(buffer, "%p", priv);
2768 DBF_TEXT(data, 3, buffer);
2770 sprintf(buffer, "%u", (unsigned int)sizeof(struct ctc_priv));
2771 DBF_TEXT(data, 3, buffer);
2773 sprintf(buffer, "%p", &channels);
2774 DBF_TEXT(data, 3, buffer);
2776 sprintf(buffer, "%u", (unsigned int)sizeof(struct channel));
2777 DBF_TEXT(data, 3, buffer);
2783 * Initialize everything in the net device except the name and the channel structs.
2786 static struct net_device *
2787 ctc_init_netdevice(struct net_device * dev, int alloc_device,
2788 struct ctc_priv *privptr)
2793 DBF_TEXT(setup, 3, __FUNCTION__);
2796 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
2799 memset(dev, 0, sizeof (struct net_device));
2802 dev->priv = privptr;
2803 privptr->fsm = init_fsm("ctcdev", dev_state_names,
2804 dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS,
2805 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2806 if (privptr->fsm == NULL) {
2811 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2812 fsm_settimer(privptr->fsm, &privptr->restart_timer);
2814 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
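/*
 * Default MTU: the default channel buffer minus the 2-byte block
 * length and the LL header, matching the limit enforced in
 * ctc_change_mtu().
 */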
2815 dev->hard_start_xmit = ctc_tx;
2816 dev->open = ctc_open;
2817 dev->stop = ctc_close;
2818 dev->get_stats = ctc_stats;
2819 dev->change_mtu = ctc_change_mtu;
2820 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2822 dev->type = ARPHRD_SLIP;
2823 dev->tx_queue_len = 100;
2824 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2825 SET_MODULE_OWNER(dev);
2832 * Setup an interface.
2834 * @param cgdev Device to be setup.
2836 * @returns 0 on success, !0 on failure.
2839 ctc_new_device(struct ccwgroup_device *cgdev)
2841 char read_id[CTC_ID_SIZE];
2842 char write_id[CTC_ID_SIZE];
2844 enum channel_types type;
2845 struct ctc_priv *privptr;
2846 struct net_device *dev;
2850 pr_debug("%s() called\n", __FUNCTION__);
2851 DBF_TEXT(setup, 3, __FUNCTION__);
2853 privptr = cgdev->dev.driver_data;
2857 sprintf(buffer, "%d", privptr->buffer_size);
2858 DBF_TEXT(setup, 3, buffer);
2860 type = get_channel_type(&cgdev->cdev[0]->id);
2862 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
2863 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
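/*
 * Channel ids are derived from the bus ids of the two ccw devices in
 * the group ("ch-<busid>"); cdev[0] is used for reading and cdev[1]
 * for writing, as reflected by the READ/WRITE lookup below.
 */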
2865 if (add_channel(cgdev->cdev[0], type))
2867 if (add_channel(cgdev->cdev[1], type))
2870 ret = ccw_device_set_online(cgdev->cdev[0]);
2873 "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
2876 ret = ccw_device_set_online(cgdev->cdev[1]);
2879 "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
2882 dev = ctc_init_netdevice(NULL, 1, privptr);
2885 ctc_pr_warn("ctc_init_netdevice failed\n");
2889 strlcpy(dev->name, "ctc%d", IFNAMSIZ);
2891 for (direction = READ; direction <= WRITE; direction++) {
2892 privptr->channel[direction] =
2893 channel_get(type, direction == READ ? read_id : write_id,
2895 if (privptr->channel[direction] == NULL) {
2896 if (direction == WRITE)
2897 channel_free(privptr->channel[READ]);
2899 ctc_free_netdevice(dev, 1);
2902 privptr->channel[direction]->netdev = dev;
2903 privptr->channel[direction]->protocol = privptr->protocol;
2904 privptr->channel[direction]->max_bufsize = privptr->buffer_size;
2907 SET_NETDEV_DEV(dev, &cgdev->dev);
2909 if (ctc_netdev_register(dev) != 0) {
2910 ctc_free_netdevice(dev, 1);
2914 if (ctc_add_attributes(&cgdev->dev)) {
2915 ctc_netdev_unregister(dev);
2917 ctc_free_netdevice(dev, 1);
2921 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
2925 ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
2926 dev->name, privptr->channel[READ]->id,
2927 privptr->channel[WRITE]->id, privptr->protocol);
2931 ccw_device_set_offline(cgdev->cdev[1]);
2932 ccw_device_set_offline(cgdev->cdev[0]);
2938 * Shutdown an interface.
2940 * @param cgdev Device to be shut down.
2942 * @returns 0 on success, !0 on failure.
2945 ctc_shutdown_device(struct ccwgroup_device *cgdev)
2947 struct ctc_priv *priv;
2948 struct net_device *ndev;
2950 DBF_TEXT(setup, 3, __FUNCTION__);
2951 pr_debug("%s() called\n", __FUNCTION__);
2954 priv = cgdev->dev.driver_data;
2959 if (priv->channel[READ]) {
2960 ndev = priv->channel[READ]->netdev;
2962 /* Close the device */
2964 ndev->flags &= ~IFF_RUNNING;
2966 ctc_remove_attributes(&cgdev->dev);
2968 channel_free(priv->channel[READ]);
2970 if (priv->channel[WRITE])
2971 channel_free(priv->channel[WRITE]);
2974 ctc_netdev_unregister(ndev);
2976 ctc_free_netdevice(ndev, 1);
2980 kfree_fsm(priv->fsm);
2982 ccw_device_set_offline(cgdev->cdev[1]);
2983 ccw_device_set_offline(cgdev->cdev[0]);
2985 if (priv->channel[READ])
2986 channel_remove(priv->channel[READ]);
2987 if (priv->channel[WRITE])
2988 channel_remove(priv->channel[WRITE]);
2989 priv->channel[READ] = priv->channel[WRITE] = NULL;
2996 ctc_remove_device(struct ccwgroup_device *cgdev)
2998 struct ctc_priv *priv;
3000 pr_debug("%s() called\n", __FUNCTION__);
3001 DBF_TEXT(setup, 3, __FUNCTION__);
3003 priv = cgdev->dev.driver_data;
3006 if (cgdev->state == CCWGROUP_ONLINE)
3007 ctc_shutdown_device(cgdev);
3008 ctc_remove_files(&cgdev->dev);
3009 cgdev->dev.driver_data = NULL;
3011 put_device(&cgdev->dev);
3014 static struct ccwgroup_driver ctc_group_driver = {
3015 .owner = THIS_MODULE,
3018 .driver_id = 0xC3E3C3,
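/* 0xC3E3C3 is "CTC" in EBCDIC. */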
3019 .probe = ctc_probe_device,
3020 .remove = ctc_remove_device,
3021 .set_online = ctc_new_device,
3022 .set_offline = ctc_shutdown_device,
3026 * Module related routines
3027 *****************************************************************************/
3030 * Prepare to be unloaded. Free IRQs and release all resources.
3031 * This is called just before this module is unloaded. It is
3032 * <em>not</em> called if the usage count is !0, so we don't need to check
3038 DBF_TEXT(setup, 3, __FUNCTION__);
3039 unregister_cu3088_discipline(&ctc_group_driver);
3040 ctc_unregister_dbf_views();
3041 ctc_pr_info("CTC driver unloaded\n");
3045 * Initialize module.
3046 * This is called just after the module is loaded.
3048 * @return 0 on success, !0 on error.
3055 loglevel = CTC_LOGLEVEL_DEFAULT;
3057 DBF_TEXT(setup, 3, __FUNCTION__);
3061 ret = ctc_register_dbf_views();
3063 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3066 ret = register_cu3088_discipline(&ctc_group_driver);
3068 ctc_unregister_dbf_views();
3073 module_init(ctc_init);
3074 module_exit(ctc_exit);
3076 /* --- This is the END my friend --- */