2 * $Id: ctcmain.c,v 1.74 2005/03/24 09:04:17 mschwide Exp $
4 * CTC / ESCON network driver
6 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
8 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
9 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
10 *              Peter Tiedemann (ptiedem@de.ibm.com)
11 * Driver Model stuff by : Cornelia Huck <cohuck@de.ibm.com>
14 * - Principles of Operation (IBM doc#: SA22-7201-06)
15 * - Common I/O-Device Commands and Self Description (IBM doc#: SA22-7204-02)
16 * - Common I/O-Device Commands and Self Description (IBM doc#: SN22-5535)
17 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
17 * - ESCON I/O Interface (IBM doc#: SA22-7202-02)
20 * and the source of the original CTC driver by:
21 * Dieter Wellerdiek (wel@de.ibm.com)
22 * Martin Schwidefsky (schwidefsky@de.ibm.com)
23 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
24 * Jochen Röhrig (roehrig@de.ibm.com)
26 * This program is free software; you can redistribute it and/or modify
27 * it under the terms of the GNU General Public License as published by
28 * the Free Software Foundation; either version 2, or (at your option)
31 * This program is distributed in the hope that it will be useful,
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
34 * GNU General Public License for more details.
36 * You should have received a copy of the GNU General Public License
37 * along with this program; if not, write to the Free Software
38 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
40 * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.74 $
45 #include <linux/module.h>
46 #include <linux/init.h>
47 #include <linux/kernel.h>
48 #include <linux/slab.h>
49 #include <linux/errno.h>
50 #include <linux/types.h>
51 #include <linux/interrupt.h>
52 #include <linux/timer.h>
53 #include <linux/sched.h>
54 #include <linux/bitops.h>
56 #include <linux/signal.h>
57 #include <linux/string.h>
60 #include <linux/if_arp.h>
61 #include <linux/tcp.h>
62 #include <linux/skbuff.h>
63 #include <linux/ctype.h>
67 #include <asm/ccwdev.h>
68 #include <asm/ccwgroup.h>
69 #include <asm/uaccess.h>
71 #include <asm/idals.h>
80 MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
81 MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
82 MODULE_LICENSE("GPL");
84 * States of the interface statemachine.
88 DEV_STATE_STARTWAIT_RXTX,
89 DEV_STATE_STARTWAIT_RX,
90 DEV_STATE_STARTWAIT_TX,
91 DEV_STATE_STOPWAIT_RXTX,
92 DEV_STATE_STOPWAIT_RX,
93 DEV_STATE_STOPWAIT_TX,
96 * MUST always be the last element!
101 static const char *dev_state_names[] = {
113 * Events of the interface statemachine.
124 * MUST always be the last element!
129 static const char *dev_event_names[] = {
140 * Events of the channel statemachine
144 * Events, representing return code of
145 * I/O operations (ccw_device_start, ccw_device_halt et al.)
158 * Events, representing unit-check
162 CH_EVENT_UC_TXTIMEOUT,
163 CH_EVENT_UC_TXPARITY,
165 CH_EVENT_UC_RXPARITY,
170 * Events, representing subchannel-check
175 * Events, representing machine checks
181 * Event, representing normal IRQ
187 * Event, representing timer expiry.
192 * Events, representing commands from upper levels.
198 * MUST always be the last element!
204 * States of the channel statemachine.
208 * Channel not assigned to any device,
209 * initial state, direction invalid
214 * Channel assigned but not operating
233 * MUST always be the last element!
238 static int loglevel = CTC_LOGLEVEL_DEFAULT;
241 * Linked list of all detected channels.
243 static struct channel *channels = NULL;
251 static int printed = 0;
252 char vbuf[] = "$Revision: 1.74 $";
253 char *version = vbuf;
257 if ((version = strchr(version, ':'))) {
258 char *p = strchr(version + 1, '$');
263 printk(KERN_INFO "CTC driver Version%s"
265 " (DEBUG-VERSION, " __DATE__ __TIME__ ")"
267 " initialized\n", version);
272 * Return type of a detected device.
274 static enum channel_types
275 get_channel_type(struct ccw_device_id *id)
277 enum channel_types type = (enum channel_types) id->driver_info;
279 if (type == channel_type_ficon)
280 type = channel_type_escon;
285 static const char *ch_event_names[] = {
286 "ccw_device success",
290 "ccw_device unknown",
292 "Status ATTN & BUSY",
296 "Unit check remote reset",
297 "Unit check remote system reset",
298 "Unit check TX timeout",
299 "Unit check TX parity",
300 "Unit check Hardware failure",
301 "Unit check RX parity",
303 "Unit check Unknown",
305 "SubChannel check Unknown",
307 "Machine check failure",
308 "Machine check operational",
319 static const char *ch_state_names[] = {
340 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
342 * @param skb The sk_buff to dump.
343 * @param offset Offset relative to skb-data, where to start the dump.
346 ctc_dump_skb(struct sk_buff *skb, int offset)
348 unsigned char *p = skb->data;
350 struct ll_header *header;
353 if (!(loglevel & CTC_LOGLEVEL_DEBUG))
358 header = (struct ll_header *) p;
361 printk(KERN_DEBUG "dump:\n");
362 printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
364 printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
366 printk(KERN_DEBUG "h->type=%04x\n", header->type);
367 printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
370 printk(KERN_DEBUG "data: ");
371 for (i = 0; i < bl; i++)
372 printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
377 ctc_dump_skb(struct sk_buff *skb, int offset)
383 * Unpack a just received skb and hand it over to
386 * @param ch The channel where this skb has been received.
387 * @param pskb The received skb.
389 static __inline__ void
390 ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
392 struct net_device *dev = ch->netdev;
393 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
394 __u16 len = *((__u16 *) pskb->data);
396 DBF_TEXT(trace, 4, __FUNCTION__);
397 skb_put(pskb, 2 + LL_HEADER_LENGTH);
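	/*
	 * Illustrative sketch of a received block as parsed below (field
	 * widths inferred from the __u16 accesses in this file):
	 *
	 *   [ block length (2 bytes, including itself) ]
	 *   [ ll_header: length (2) | type (2) | unused (2) ][ payload ]
	 *   ... possibly further ll_header+payload frames up to block length
	 */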
400 pskb->ip_summed = CHECKSUM_UNNECESSARY;
403 struct ll_header *header = (struct ll_header *) pskb->data;
405 skb_pull(pskb, LL_HEADER_LENGTH);
406 if ((ch->protocol == CTC_PROTO_S390) &&
407 (header->type != ETH_P_IP)) {
410 if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
413 * Check packet type only if we stick strictly
414 * to S/390's protocol of OS390. This only
415 * supports IP. Otherwise allow any packet
419 "%s Illegal packet type 0x%04x received, dropping\n",
420 dev->name, header->type);
421 ch->logflags |= LOG_FLAG_ILLEGALPKT;
426 ctc_dump_skb(pskb, -6);
428 privptr->stats.rx_dropped++;
429 privptr->stats.rx_frame_errors++;
432 pskb->protocol = ntohs(header->type);
433 if (header->length <= LL_HEADER_LENGTH) {
435 if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
438 "%s Illegal packet size %d "
439 "received (MTU=%d blocklen=%d), "
440 "dropping\n", dev->name, header->length,
442 ch->logflags |= LOG_FLAG_ILLEGALSIZE;
447 ctc_dump_skb(pskb, -6);
449 privptr->stats.rx_dropped++;
450 privptr->stats.rx_length_errors++;
453 header->length -= LL_HEADER_LENGTH;
454 len -= LL_HEADER_LENGTH;
455 if ((header->length > skb_tailroom(pskb)) ||
456 (header->length > len)) {
458 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
461 "%s Illegal packet size %d "
462 "(beyond the end of received data), "
463 "dropping\n", dev->name, header->length);
464 ch->logflags |= LOG_FLAG_OVERRUN;
469 ctc_dump_skb(pskb, -6);
471 privptr->stats.rx_dropped++;
472 privptr->stats.rx_length_errors++;
475 skb_put(pskb, header->length);
476 pskb->mac.raw = pskb->data;
477 len -= header->length;
478 skb = dev_alloc_skb(pskb->len);
481 if (!(ch->logflags & LOG_FLAG_NOMEM)) {
484 "%s Out of memory in ctc_unpack_skb\n",
486 ch->logflags |= LOG_FLAG_NOMEM;
490 privptr->stats.rx_dropped++;
493 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
494 skb->mac.raw = skb->data;
495 skb->dev = pskb->dev;
496 skb->protocol = pskb->protocol;
497 pskb->ip_summed = CHECKSUM_UNNECESSARY;
498 if (ch->protocol == CTC_PROTO_LINUX_TTY)
499 ctc_tty_netif_rx(skb);
503 * Successful rx; reset logflags
506 dev->last_rx = jiffies;
507 privptr->stats.rx_packets++;
508 privptr->stats.rx_bytes += skb->len;
510 skb_pull(pskb, header->length);
511 if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
513 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
516 "%s Overrun in ctc_unpack_skb\n",
518 ch->logflags |= LOG_FLAG_OVERRUN;
524 skb_put(pskb, LL_HEADER_LENGTH);
530 * Check return code of a preceding ccw_device call, halt_IO etc...
532 * @param ch The channel, the error belongs to.
533 * @param return_code The error code to inspect.
536 ccw_check_return_code(struct channel *ch, int return_code, char *msg)
538 DBF_TEXT(trace, 5, __FUNCTION__);
539 switch (return_code) {
541 fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
544 ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
545 fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
548 ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
550 fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
553 ctc_pr_emerg("%s (%s): Status pending... \n",
555 fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
558 ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
559 ch->id, msg, return_code);
560 fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
565 * Check sense of a unit check.
567 * @param ch The channel, the sense code belongs to.
568 * @param sense The sense code to inspect.
571 ccw_unit_check(struct channel *ch, unsigned char sense)
573 DBF_TEXT(trace, 5, __FUNCTION__);
574 if (sense & SNS0_INTERVENTION_REQ) {
576 if (ch->protocol != CTC_PROTO_LINUX_TTY)
577 ctc_pr_debug("%s: Interface disc. or Sel. reset "
578 "(remote)\n", ch->id);
579 fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
581 ctc_pr_debug("%s: System reset (remote)\n", ch->id);
582 fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
584 } else if (sense & SNS0_EQUIPMENT_CHECK) {
585 if (sense & SNS0_BUS_OUT_CHECK) {
586 ctc_pr_warn("%s: Hardware malfunction (remote)\n",
588 fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
590 ctc_pr_warn("%s: Read-data parity error (remote)\n",
592 fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
594 } else if (sense & SNS0_BUS_OUT_CHECK) {
596 ctc_pr_warn("%s: Data-streaming timeout)\n", ch->id);
597 fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
599 ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
600 fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
602 } else if (sense & SNS0_CMD_REJECT) {
603 ctc_pr_warn("%s: Command reject\n", ch->id);
604 } else if (sense == 0) {
605 ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
606 fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
608 ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
610 fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
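	/* Note: this function is reached from ctc_irq_handler() below with
	 * irb->ecw[0] (sense byte 0) whenever the device status contains
	 * DEV_STAT_UNIT_CHECK. */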
615 ctc_purge_skb_queue(struct sk_buff_head *q)
619 DBF_TEXT(trace, 5, __FUNCTION__);
621 while ((skb = skb_dequeue(q))) {
622 atomic_dec(&skb->users);
623 dev_kfree_skb_irq(skb);
627 static __inline__ int
628 ctc_checkalloc_buffer(struct channel *ch, int warn)
630 DBF_TEXT(trace, 5, __FUNCTION__);
631 if ((ch->trans_skb == NULL) ||
632 (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
633 if (ch->trans_skb != NULL)
634 dev_kfree_skb(ch->trans_skb);
635 clear_normalized_cda(&ch->ccw[1]);
636 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
637 GFP_ATOMIC | GFP_DMA);
638 if (ch->trans_skb == NULL) {
641 "%s: Couldn't alloc %s trans_skb\n",
643 (CHANNEL_DIRECTION(ch->flags) == READ) ?
647 ch->ccw[1].count = ch->max_bufsize;
648 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
649 dev_kfree_skb(ch->trans_skb);
650 ch->trans_skb = NULL;
653 "%s: set_normalized_cda for %s "
654 "trans_skb failed, dropping packets\n",
656 (CHANNEL_DIRECTION(ch->flags) == READ) ?
660 ch->ccw[1].count = 0;
661 ch->trans_skb_data = ch->trans_skb->data;
662 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
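	/*
	 * Note: the buffer is allocated GFP_ATOMIC (this helper may run in
	 * IRQ context) and GFP_DMA so trans_skb->data remains usable as a
	 * CCW data address (cf. the 2G remark in transmit_skb below).
	 * CHANNEL_FLAGS_BUFSIZE_CHANGED is assumed to be set by buffer-size /
	 * MTU handling not shown in this excerpt, forcing the reallocation
	 * above on the next transfer.
	 */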
668 * Dummy NOP action for statemachines
671 fsm_action_nop(fsm_instance * fi, int event, void *arg)
676 * Actions for channel - statemachines.
677 *****************************************************************************/
680 * Normal data has been sent. Free the corresponding
681 * skb (it's in io_queue), reset dev->tbusy and
682 * revert to idle state.
684 * @param fi An instance of a channel statemachine.
685 * @param event The event, just happened.
686 * @param arg Generic pointer, casted from channel * upon call.
689 ch_action_txdone(fsm_instance * fi, int event, void *arg)
691 struct channel *ch = (struct channel *) arg;
692 struct net_device *dev = ch->netdev;
693 struct ctc_priv *privptr = dev->priv;
697 unsigned long duration;
698 struct timespec done_stamp = xtime;
700 DBF_TEXT(trace, 4, __FUNCTION__);
703 (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
704 (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
705 if (duration > ch->prof.tx_time)
706 ch->prof.tx_time = duration;
708 if (ch->irb->scsw.count != 0)
709 ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
710 dev->name, ch->irb->scsw.count);
711 fsm_deltimer(&ch->timer);
712 while ((skb = skb_dequeue(&ch->io_queue))) {
713 privptr->stats.tx_packets++;
714 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
716 privptr->stats.tx_bytes += 2;
719 atomic_dec(&skb->users);
720 dev_kfree_skb_irq(skb);
722 spin_lock(&ch->collect_lock);
723 clear_normalized_cda(&ch->ccw[4]);
724 if (ch->collect_len > 0) {
727 if (ctc_checkalloc_buffer(ch, 1)) {
728 spin_unlock(&ch->collect_lock);
731 ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
732 ch->trans_skb->len = 0;
733 if (ch->prof.maxmulti < (ch->collect_len + 2))
734 ch->prof.maxmulti = ch->collect_len + 2;
735 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
736 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
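		/*
		 * Repack the collect queue into a single block: a 2-byte
		 * block length (including itself) followed by the queued
		 * LL frames, sent below as one chained write.
		 */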
737 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
739 while ((skb = skb_dequeue(&ch->collect_queue))) {
740 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
742 privptr->stats.tx_packets++;
743 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
744 atomic_dec(&skb->users);
745 dev_kfree_skb_irq(skb);
749 spin_unlock(&ch->collect_lock);
750 ch->ccw[1].count = ch->trans_skb->len;
751 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
752 ch->prof.send_stamp = xtime;
753 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
754 (unsigned long) ch, 0xff, 0);
755 ch->prof.doios_multi++;
757 privptr->stats.tx_dropped += i;
758 privptr->stats.tx_errors += i;
759 fsm_deltimer(&ch->timer);
760 ccw_check_return_code(ch, rc, "chained TX");
763 spin_unlock(&ch->collect_lock);
764 fsm_newstate(fi, CH_STATE_TXIDLE);
770 * Initial data is sent.
771 * Notify device statemachine that we are up and
774 * @param fi An instance of a channel statemachine.
775 * @param event The event, just happened.
776 * @param arg Generic pointer, casted from channel * upon call.
779 ch_action_txidle(fsm_instance * fi, int event, void *arg)
781 struct channel *ch = (struct channel *) arg;
783 DBF_TEXT(trace, 4, __FUNCTION__);
784 fsm_deltimer(&ch->timer);
785 fsm_newstate(fi, CH_STATE_TXIDLE);
786 fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
791 * Got normal data, check for sanity, queue it up, allocate new buffer,
792 * trigger bottom half, and initiate next read.
794 * @param fi An instance of a channel statemachine.
795 * @param event The event, just happened.
796 * @param arg Generic pointer, casted from channel * upon call.
799 ch_action_rx(fsm_instance * fi, int event, void *arg)
801 struct channel *ch = (struct channel *) arg;
802 struct net_device *dev = ch->netdev;
803 struct ctc_priv *privptr = dev->priv;
804 int len = ch->max_bufsize - ch->irb->scsw.count;
805 struct sk_buff *skb = ch->trans_skb;
806 __u16 block_len = *((__u16 *) skb->data);
810 DBF_TEXT(trace, 4, __FUNCTION__);
811 fsm_deltimer(&ch->timer);
813 ctc_pr_debug("%s: got packet with length %d < 8\n",
815 privptr->stats.rx_dropped++;
816 privptr->stats.rx_length_errors++;
819 if (len > ch->max_bufsize) {
820 ctc_pr_debug("%s: got packet with length %d > %d\n",
821 dev->name, len, ch->max_bufsize);
822 privptr->stats.rx_dropped++;
823 privptr->stats.rx_length_errors++;
828 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
830 switch (ch->protocol) {
832 case CTC_PROTO_OS390:
833 check_len = block_len + 2;
836 check_len = block_len;
839 if ((len < block_len) || (len > check_len)) {
840 ctc_pr_debug("%s: got block length %d != rx length %d\n",
841 dev->name, block_len, len);
843 ctc_dump_skb(skb, 0);
845 *((__u16 *) skb->data) = len;
846 privptr->stats.rx_dropped++;
847 privptr->stats.rx_length_errors++;
852 *((__u16 *) skb->data) = block_len;
853 ctc_unpack_skb(ch, skb);
856 skb->data = skb->tail = ch->trans_skb_data;
858 if (ctc_checkalloc_buffer(ch, 1))
860 ch->ccw[1].count = ch->max_bufsize;
861 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
863 ccw_check_return_code(ch, rc, "normal RX");
866 static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
869 * Initialize connection by sending an initial block header of length CTC_INITIAL_BLOCKLEN.
871 * @param fi An instance of a channel statemachine.
872 * @param event The event, just happened.
873 * @param arg Generic pointer, casted from channel * upon call.
876 ch_action_firstio(fsm_instance * fi, int event, void *arg)
878 struct channel *ch = (struct channel *) arg;
881 DBF_TEXT(trace, 4, __FUNCTION__);
883 if (fsm_getstate(fi) == CH_STATE_TXIDLE)
884 ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
885 fsm_deltimer(&ch->timer);
886 if (ctc_checkalloc_buffer(ch, 1))
888 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
889 (ch->protocol == CTC_PROTO_OS390)) {
890 /* OS/390 or z/OS */
891 if (CHANNEL_DIRECTION(ch->flags) == READ) {
892 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
893 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
895 ch_action_rxidle(fi, event, arg);
897 struct net_device *dev = ch->netdev;
898 fsm_newstate(fi, CH_STATE_TXIDLE);
899 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
900 DEV_EVENT_TXUP, dev);
906 * Don't set up a timer for receiving the initial RX frame
907 * if in compatibility mode, since VM TCP delays the initial
908 * frame until it has some data to send.
910 if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
911 (ch->protocol != CTC_PROTO_S390))
912 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
914 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
915 ch->ccw[1].count = 2; /* Transfer only length */
917 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
918 ? CH_STATE_RXINIT : CH_STATE_TXINIT);
919 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
921 fsm_deltimer(&ch->timer);
922 fsm_newstate(fi, CH_STATE_SETUPWAIT);
923 ccw_check_return_code(ch, rc, "init IO");
926 * In compatibility mode, since we don't set up a timer, we
927 * also signal RX channel up immediately. This enables us
928 * to send packets early which in turn usually triggers some
929 * reply from VM TCP which brings up the RX channel to its
932 if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
933 (ch->protocol == CTC_PROTO_S390)) {
934 struct net_device *dev = ch->netdev;
935 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
941 * Got initial data, check it. If OK,
942 * notify device statemachine that we are up and
945 * @param fi An instance of a channel statemachine.
946 * @param event The event, just happened.
947 * @param arg Generic pointer, casted from channel * upon call.
950 ch_action_rxidle(fsm_instance * fi, int event, void *arg)
952 struct channel *ch = (struct channel *) arg;
953 struct net_device *dev = ch->netdev;
957 DBF_TEXT(trace, 4, __FUNCTION__);
958 fsm_deltimer(&ch->timer);
959 buflen = *((__u16 *) ch->trans_skb->data);
961 ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
963 if (buflen >= CTC_INITIAL_BLOCKLEN) {
964 if (ctc_checkalloc_buffer(ch, 1))
966 ch->ccw[1].count = ch->max_bufsize;
967 fsm_newstate(fi, CH_STATE_RXIDLE);
968 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
969 (unsigned long) ch, 0xff, 0);
971 fsm_newstate(fi, CH_STATE_RXINIT);
972 ccw_check_return_code(ch, rc, "initial RX");
974 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
975 DEV_EVENT_RXUP, dev);
977 ctc_pr_debug("%s: Initial RX count %d not %d\n",
978 dev->name, buflen, CTC_INITIAL_BLOCKLEN);
979 ch_action_firstio(fi, event, arg);
984 * Set channel into extended mode.
986 * @param fi An instance of a channel statemachine.
987 * @param event The event, just happened.
988 * @param arg Generic pointer, casted from channel * upon call.
991 ch_action_setmode(fsm_instance * fi, int event, void *arg)
993 struct channel *ch = (struct channel *) arg;
995 unsigned long saveflags;
997 DBF_TEXT(trace, 4, __FUNCTION__);
998 fsm_deltimer(&ch->timer);
999 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1000 fsm_newstate(fi, CH_STATE_SETUPWAIT);
1001 saveflags = 0; /* avoids compiler warning with
1002 spin_unlock_irqrestore */
1003 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1004 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1005 rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
1006 if (event == CH_EVENT_TIMER)
1007 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1009 fsm_deltimer(&ch->timer);
1010 fsm_newstate(fi, CH_STATE_STARTWAIT);
1011 ccw_check_return_code(ch, rc, "set Mode");
1019 * @param fi An instance of a channel statemachine.
1020 * @param event The event, just happened.
1021 * @param arg Generic pointer, casted from channel * upon call.
1024 ch_action_start(fsm_instance * fi, int event, void *arg)
1026 struct channel *ch = (struct channel *) arg;
1027 unsigned long saveflags;
1029 struct net_device *dev;
1031 DBF_TEXT(trace, 4, __FUNCTION__);
1033 ctc_pr_warn("ch_action_start ch=NULL\n");
1036 if (ch->netdev == NULL) {
1037 ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
1043 ctc_pr_debug("%s: %s channel start\n", dev->name,
1044 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1047 if (ch->trans_skb != NULL) {
1048 clear_normalized_cda(&ch->ccw[1]);
1049 dev_kfree_skb(ch->trans_skb);
1050 ch->trans_skb = NULL;
1052 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1053 ch->ccw[1].cmd_code = CCW_CMD_READ;
1054 ch->ccw[1].flags = CCW_FLAG_SLI;
1055 ch->ccw[1].count = 0;
1057 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
1058 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1059 ch->ccw[1].count = 0;
1061 if (ctc_checkalloc_buffer(ch, 0)) {
1063 "%s: Could not allocate %s trans_skb, delaying "
1064 "allocation until first transfer\n",
1066 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1069 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
1070 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1071 ch->ccw[0].count = 0;
1073 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
1074 ch->ccw[2].flags = CCW_FLAG_SLI;
1075 ch->ccw[2].count = 0;
1077 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
1079 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
1081 fsm_newstate(fi, CH_STATE_STARTWAIT);
1082 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1083 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1084 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1085 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1088 fsm_deltimer(&ch->timer);
1089 ccw_check_return_code(ch, rc, "initial HaltIO");
1092 ctc_pr_debug("ctc: %s(): leaving\n", __func__);
1097 * Shut down a channel.
1099 * @param fi An instance of a channel statemachine.
1100 * @param event The event, just happened.
1101 * @param arg Generic pointer, casted from channel * upon call.
1104 ch_action_haltio(fsm_instance * fi, int event, void *arg)
1106 struct channel *ch = (struct channel *) arg;
1107 unsigned long saveflags;
1111 DBF_TEXT(trace, 3, __FUNCTION__);
1112 fsm_deltimer(&ch->timer);
1113 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1114 saveflags = 0; /* avoids compiler warning with
1115 spin_unlock_irqrestore */
1116 if (event == CH_EVENT_STOP) // only for STOP not yet locked
1117 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1118 oldstate = fsm_getstate(fi);
1119 fsm_newstate(fi, CH_STATE_TERM);
1120 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1121 if (event == CH_EVENT_STOP)
1122 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1125 fsm_deltimer(&ch->timer);
1126 fsm_newstate(fi, oldstate);
1128 ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
1133 * A channel has successfully been halted.
1134 * Clean up its queue and notify interface statemachine.
1136 * @param fi An instance of a channel statemachine.
1137 * @param event The event, just happened.
1138 * @param arg Generic pointer, casted from channel * upon call.
1141 ch_action_stopped(fsm_instance * fi, int event, void *arg)
1143 struct channel *ch = (struct channel *) arg;
1144 struct net_device *dev = ch->netdev;
1146 DBF_TEXT(trace, 3, __FUNCTION__);
1147 fsm_deltimer(&ch->timer);
1148 fsm_newstate(fi, CH_STATE_STOPPED);
1149 if (ch->trans_skb != NULL) {
1150 clear_normalized_cda(&ch->ccw[1]);
1151 dev_kfree_skb(ch->trans_skb);
1152 ch->trans_skb = NULL;
1154 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1155 skb_queue_purge(&ch->io_queue);
1156 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1157 DEV_EVENT_RXDOWN, dev);
1159 ctc_purge_skb_queue(&ch->io_queue);
1160 spin_lock(&ch->collect_lock);
1161 ctc_purge_skb_queue(&ch->collect_queue);
1162 ch->collect_len = 0;
1163 spin_unlock(&ch->collect_lock);
1164 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1165 DEV_EVENT_TXDOWN, dev);
1170 * A stop command from the device statemachine arrived and we are in
1171 * not-operational mode. Set state to stopped.
1173 * @param fi An instance of a channel statemachine.
1174 * @param event The event, just happened.
1175 * @param arg Generic pointer, casted from channel * upon call.
1178 ch_action_stop(fsm_instance * fi, int event, void *arg)
1180 fsm_newstate(fi, CH_STATE_STOPPED);
1184 * A machine check for no path, not operational status or gone device has
1186 * Cleanup queue and notify interface statemachine.
1188 * @param fi An instance of a channel statemachine.
1189 * @param event The event, just happened.
1190 * @param arg Generic pointer, casted from channel * upon call.
1193 ch_action_fail(fsm_instance * fi, int event, void *arg)
1195 struct channel *ch = (struct channel *) arg;
1196 struct net_device *dev = ch->netdev;
1198 DBF_TEXT(trace, 3, __FUNCTION__);
1199 fsm_deltimer(&ch->timer);
1200 fsm_newstate(fi, CH_STATE_NOTOP);
1201 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1202 skb_queue_purge(&ch->io_queue);
1203 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1204 DEV_EVENT_RXDOWN, dev);
1206 ctc_purge_skb_queue(&ch->io_queue);
1207 spin_lock(&ch->collect_lock);
1208 ctc_purge_skb_queue(&ch->collect_queue);
1209 ch->collect_len = 0;
1210 spin_unlock(&ch->collect_lock);
1211 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1212 DEV_EVENT_TXDOWN, dev);
1217 * Handle error during setup of channel.
1219 * @param fi An instance of a channel statemachine.
1220 * @param event The event, just happened.
1221 * @param arg Generic pointer, casted from channel * upon call.
1224 ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1226 struct channel *ch = (struct channel *) arg;
1227 struct net_device *dev = ch->netdev;
1229 DBF_TEXT(setup, 3, __FUNCTION__);
1231 * Special case: Got UC_RCRESET on setmode.
1232 * This means that the remote side isn't set up yet. In this case,
1233 * simply retry after about 10 seconds...
1235 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1236 ((event == CH_EVENT_UC_RCRESET) ||
1237 (event == CH_EVENT_UC_RSRESET))) {
1238 fsm_newstate(fi, CH_STATE_STARTRETRY);
1239 fsm_deltimer(&ch->timer);
1240 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1241 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1242 int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1244 ccw_check_return_code(
1245 ch, rc, "HaltIO in ch_action_setuperr");
1250 ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
1251 dev->name, ch_event_names[event],
1252 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1253 fsm_getstate_str(fi));
1254 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1255 fsm_newstate(fi, CH_STATE_RXERR);
1256 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1257 DEV_EVENT_RXDOWN, dev);
1259 fsm_newstate(fi, CH_STATE_TXERR);
1260 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1261 DEV_EVENT_TXDOWN, dev);
1266 * Restart a channel after an error.
1268 * @param fi An instance of a channel statemachine.
1269 * @param event The event, just happened.
1270 * @param arg Generic pointer, casted from channel * upon call.
1273 ch_action_restart(fsm_instance * fi, int event, void *arg)
1275 unsigned long saveflags;
1279 struct channel *ch = (struct channel *) arg;
1280 struct net_device *dev = ch->netdev;
1282 DBF_TEXT(trace, 3, __FUNCTION__);
1283 fsm_deltimer(&ch->timer);
1284 ctc_pr_debug("%s: %s channel restart\n", dev->name,
1285 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1286 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1287 oldstate = fsm_getstate(fi);
1288 fsm_newstate(fi, CH_STATE_STARTWAIT);
1289 saveflags = 0; /* avoids compiler warning with
1290 spin_unlock_irqrestore */
1291 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1292 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1293 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1294 if (event == CH_EVENT_TIMER)
1295 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1298 fsm_deltimer(&ch->timer);
1299 fsm_newstate(fi, oldstate);
1301 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1306 * Handle error during RX initial handshake (exchange of
1307 * 0-length block header)
1309 * @param fi An instance of a channel statemachine.
1310 * @param event The event, just happened.
1311 * @param arg Generic pointer, casted from channel * upon call.
1314 ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1316 struct channel *ch = (struct channel *) arg;
1317 struct net_device *dev = ch->netdev;
1319 DBF_TEXT(setup, 3, __FUNCTION__);
1320 if (event == CH_EVENT_TIMER) {
1321 fsm_deltimer(&ch->timer);
1322 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
1323 if (ch->retry++ < 3)
1324 ch_action_restart(fi, event, arg);
1326 fsm_newstate(fi, CH_STATE_RXERR);
1327 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1328 DEV_EVENT_RXDOWN, dev);
1331 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1335 * Notify device statemachine if we gave up initialization
1338 * @param fi An instance of a channel statemachine.
1339 * @param event The event, just happened.
1340 * @param arg Generic pointer, casted from channel * upon call.
1343 ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1345 struct channel *ch = (struct channel *) arg;
1346 struct net_device *dev = ch->netdev;
1348 DBF_TEXT(setup, 3, __FUNCTION__);
1349 fsm_newstate(fi, CH_STATE_RXERR);
1350 ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1351 ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1352 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1356 * Handle RX Unit check remote reset (remote disconnected)
1358 * @param fi An instance of a channel statemachine.
1359 * @param event The event, just happened.
1360 * @param arg Generic pointer, casted from channel * upon call.
1363 ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1365 struct channel *ch = (struct channel *) arg;
1366 struct channel *ch2;
1367 struct net_device *dev = ch->netdev;
1369 DBF_TEXT(trace, 3, __FUNCTION__);
1370 fsm_deltimer(&ch->timer);
1371 ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1375 * Notify device statemachine
1377 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1378 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
1380 fsm_newstate(fi, CH_STATE_DTERM);
1381 ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1382 fsm_newstate(ch2->fsm, CH_STATE_DTERM);
1384 ccw_device_halt(ch->cdev, (unsigned long) ch);
1385 ccw_device_halt(ch2->cdev, (unsigned long) ch2);
1389 * Handle error during TX channel initialization.
1391 * @param fi An instance of a channel statemachine.
1392 * @param event The event, just happened.
1393 * @param arg Generic pointer, casted from channel * upon call.
1396 ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1398 struct channel *ch = (struct channel *) arg;
1399 struct net_device *dev = ch->netdev;
1401 DBF_TEXT(setup, 2, __FUNCTION__);
1402 if (event == CH_EVENT_TIMER) {
1403 fsm_deltimer(&ch->timer);
1404 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
1405 if (ch->retry++ < 3)
1406 ch_action_restart(fi, event, arg);
1408 fsm_newstate(fi, CH_STATE_TXERR);
1409 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1410 DEV_EVENT_TXDOWN, dev);
1413 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
1417 * Handle TX timeout by retrying operation.
1419 * @param fi An instance of a channel statemachine.
1420 * @param event The event, just happened.
1421 * @param arg Generic pointer, casted from channel * upon call.
1424 ch_action_txretry(fsm_instance * fi, int event, void *arg)
1426 struct channel *ch = (struct channel *) arg;
1427 struct net_device *dev = ch->netdev;
1428 unsigned long saveflags;
1430 DBF_TEXT(trace, 4, __FUNCTION__);
1431 fsm_deltimer(&ch->timer);
1432 if (ch->retry++ > 3) {
1433 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1435 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1436 DEV_EVENT_TXDOWN, dev);
1437 ch_action_restart(fi, event, arg);
1439 struct sk_buff *skb;
1441 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
1442 if ((skb = skb_peek(&ch->io_queue))) {
1445 clear_normalized_cda(&ch->ccw[4]);
1446 ch->ccw[4].count = skb->len;
1447 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1449 "%s: IDAL alloc failed, chan restart\n",
1451 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1452 DEV_EVENT_TXDOWN, dev);
1453 ch_action_restart(fi, event, arg);
1456 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1457 saveflags = 0; /* avoids compiler warning with
1458 spin_unlock_irqrestore */
1459 if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1460 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1462 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1463 (unsigned long) ch, 0xff, 0);
1464 if (event == CH_EVENT_TIMER)
1465 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1468 fsm_deltimer(&ch->timer);
1469 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1470 ctc_purge_skb_queue(&ch->io_queue);
1478 * Handle fatal errors during an I/O command.
1480 * @param fi An instance of a channel statemachine.
1481 * @param event The event, just happened.
1482 * @param arg Generic pointer, casted from channel * upon call.
1485 ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1487 struct channel *ch = (struct channel *) arg;
1488 struct net_device *dev = ch->netdev;
1490 DBF_TEXT(trace, 3, __FUNCTION__);
1491 fsm_deltimer(&ch->timer);
1492 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1493 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1494 fsm_newstate(fi, CH_STATE_RXERR);
1495 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1496 DEV_EVENT_RXDOWN, dev);
1498 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1499 fsm_newstate(fi, CH_STATE_TXERR);
1500 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1501 DEV_EVENT_TXDOWN, dev);
1506 ch_action_reinit(fsm_instance *fi, int event, void *arg)
1508 struct channel *ch = (struct channel *)arg;
1509 struct net_device *dev = ch->netdev;
1510 struct ctc_priv *privptr = dev->priv;
1512 DBF_TEXT(trace, 4, __FUNCTION__);
1513 ch_action_iofatal(fi, event, arg);
1514 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1519 * The statemachine for a channel.
1521 static const fsm_node ch_fsm[] = {
1522 {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
1523 {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
1524 {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
1525 {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
1527 {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
1528 {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
1529 {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
1530 {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
1531 {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
1533 {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
1534 {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
1535 {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
1536 {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
1537 {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1538 {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1539 {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1541 {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
1542 {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
1543 {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
1544 {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
1546 {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
1547 {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
1548 {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
1549 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
1550 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
1551 {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
1552 {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1553 {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1554 {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1556 {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
1557 {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
1558 {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
1559 {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
1560 {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
1561 {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
1562 {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
1563 {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1564 {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1565 {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
1566 {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1568 {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
1569 {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
1570 {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
1571 {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
1572 // {CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
1573 {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1574 {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1575 {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1576 {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
1578 {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
1579 {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
1580 {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
1581 {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
1582 {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
1583 {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
1584 {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1585 {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1586 {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1588 {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
1589 {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
1590 {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
1591 {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
1592 {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
1593 {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1594 {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1595 {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1597 {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
1598 {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
1599 {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
1600 {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1601 {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1602 {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
1604 {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
1605 {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
1606 {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
1607 {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1608 {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1609 {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
1611 {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
1612 {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
1613 {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
1614 {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
1615 {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
1616 {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
1617 {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1618 {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
1619 {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
1621 {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
1622 {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
1623 {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1624 {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1627 static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
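/*
 * ch_fsm/CH_FSM_LEN are handed to init_fsm() in add_channel() below,
 * together with ch_state_names and ch_event_names.
 */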
1630 * Functions related to setup and device detection.
1631 *****************************************************************************/
1634 less_than(char *id1, char *id2)
1638 for (i = 0; i < 5; i++) {
1642 dev1 = simple_strtoul(id1, &id1, 16);
1643 dev2 = simple_strtoul(id2, &id2, 16);
1645 return (dev1 < dev2);
1649 * Add a new channel to the list of channels.
1650 * Keeps the channel list sorted.
1652 * @param cdev The ccw_device to be added.
1653 * @param type The type class of the new channel.
1655 * @return 0 on success, !0 on error.
1658 add_channel(struct ccw_device *cdev, enum channel_types type)
1660 struct channel **c = &channels;
1663 DBF_TEXT(trace, 2, __FUNCTION__);
1665 (struct channel *) kmalloc(sizeof (struct channel),
1666 GFP_KERNEL)) == NULL) {
1667 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1670 memset(ch, 0, sizeof (struct channel));
1671 if ((ch->ccw = (struct ccw1 *) kmalloc(8*sizeof(struct ccw1),
1672 GFP_KERNEL | GFP_DMA)) == NULL) {
1674 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1678 memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // ensure all flags and counters are reset
1681 * "static" ccws are used in the following way:
1683 * ccw[0..2] (Channel program for generic I/O):
1685 * 1: read or write (depending on direction) with fixed
1686 * buffer (idal allocated once when buffer is allocated)
1688 * ccw[3..5] (Channel program for direct write of packets)
1690 * 4: write (idal allocated on every write).
1692 * ccw[6..7] (Channel program for initial channel setup):
1693 * 6: set extended mode
1696 * ch->ccw[0..5] are initialized in ch_action_start because
1697 * the channel's direction is not yet known here.
1699 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1700 ch->ccw[6].flags = CCW_FLAG_SLI;
1702 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1703 ch->ccw[7].flags = CCW_FLAG_SLI;
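	/*
	 * Resulting ch->ccw[] layout (summary of the comment above; the
	 * cmd_codes of ccw[0..5] are filled in by ch_action_start, which
	 * copies ccw[3..5] from ccw[0..2]):
	 *
	 *   ccw[0] PREPARE, ccw[1] READ/WRITE (fixed trans_skb buffer),
	 *   ccw[2] NOOP                      -> generic I/O program
	 *   ccw[3] PREPARE, ccw[4] WRITE (per-skb IDAL), ccw[5] NOOP
	 *                                    -> direct packet-write program
	 *   ccw[6] SET_EXTENDED, ccw[7] NOOP -> initial channel setup
	 */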
1706 snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1708 ch->fsm = init_fsm(ch->id, ch_state_names,
1709 ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1710 ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1711 if (ch->fsm == NULL) {
1712 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1717 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1718 if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb),
1719 GFP_KERNEL)) == NULL) {
1720 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1726 memset(ch->irb, 0, sizeof (struct irb));
1727 while (*c && less_than((*c)->id, ch->id))
1729 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
1731 "ctc: add_channel: device %s already in list, "
1732 "using old entry\n", (*c)->id);
1739 fsm_settimer(ch->fsm, &ch->timer);
1740 skb_queue_head_init(&ch->io_queue);
1741 skb_queue_head_init(&ch->collect_queue);
1748 * Release a specific channel in the channel list.
1750 * @param ch Pointer to channel struct to be released.
1753 channel_free(struct channel *ch)
1755 ch->flags &= ~CHANNEL_FLAGS_INUSE;
1756 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1760 * Remove a specific channel in the channel list.
1762 * @param ch Pointer to channel struct to be released.
1765 channel_remove(struct channel *ch)
1767 struct channel **c = &channels;
1769 DBF_TEXT(trace, 2, __FUNCTION__);
1777 fsm_deltimer(&ch->timer);
1779 clear_normalized_cda(&ch->ccw[4]);
1780 if (ch->trans_skb != NULL) {
1781 clear_normalized_cda(&ch->ccw[1]);
1782 dev_kfree_skb(ch->trans_skb);
1794 * Get a specific channel from the channel list.
1796 * @param type Type of channel we are interested in.
1797 * @param id Id of channel we are interested in.
1798 * @param direction Direction we want to use this channel for.
1800 * @return Pointer to a channel or NULL if no matching channel available.
1802 static struct channel
1804 channel_get(enum channel_types type, char *id, int direction)
1806 struct channel *ch = channels;
1808 DBF_TEXT(trace, 3, __FUNCTION__);
1810 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
1811 __func__, id, type);
1814 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
1816 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
1817 __func__, ch, ch->id, ch->type);
1822 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
1823 __func__, ch, ch->id, ch->type);
1826 ctc_pr_warn("ctc: %s(): channel with id %s "
1827 "and type %d not found in channel list\n",
1828 __func__, id, type);
1830 if (ch->flags & CHANNEL_FLAGS_INUSE)
1833 ch->flags |= CHANNEL_FLAGS_INUSE;
1834 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
1835 ch->flags |= (direction == WRITE)
1836 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
1837 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
1844 * Return the channel type by name.
1846 * @param name Name of network interface.
1848 * @return Type class of channel to be used for that interface.
1850 static enum channel_types inline
1851 extract_channel_media(char *name)
1853 enum channel_types ret = channel_type_unknown;
1856 if (strncmp(name, "ctc", 3) == 0)
1857 ret = channel_type_parallel;
1858 if (strncmp(name, "escon", 5) == 0)
1859 ret = channel_type_escon;
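	/*
	 * Examples (illustrative): "ctc0" -> channel_type_parallel,
	 * "escon1" -> channel_type_escon; any other name is left as
	 * channel_type_unknown.
	 */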
1865 __ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1870 switch (PTR_ERR(irb)) {
1872 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
1873 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1874 // CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1877 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
1878 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1879 // CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
1882 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
1884 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1885 // CTC_DBF_TEXT(trace, 2, " rc???");
1887 return PTR_ERR(irb);
1893 * @param cdev The ccw_device the interrupt is for.
1894 * @param intparm interruption parameter.
1895 * @param irb interruption response block.
1898 ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1901 struct net_device *dev;
1902 struct ctc_priv *priv;
1904 DBF_TEXT(trace, 5, __FUNCTION__);
1905 if (__ctc_check_irb_error(cdev, irb))
1908 /* Check for unsolicited interrupts. */
1909 if (!cdev->dev.driver_data) {
1910 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
1911 cdev->dev.bus_id, irb->scsw.cstat,
1916 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
1919 /* Try to extract channel from driver data. */
1920 if (priv->channel[READ]->cdev == cdev)
1921 ch = priv->channel[READ];
1922 else if (priv->channel[WRITE]->cdev == cdev)
1923 ch = priv->channel[WRITE];
1925 ctc_pr_err("ctc: Can't determine channel for interrupt, "
1926 "device %s\n", cdev->dev.bus_id);
1930 dev = (struct net_device *) (ch->netdev);
1932 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
1933 cdev->dev.bus_id, ch);
1938 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
1939 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
1942 /* Copy interruption response block. */
1943 memcpy(ch->irb, irb, sizeof(struct irb));
1945 /* Check for good subchannel return code, otherwise error message */
1946 if (ch->irb->scsw.cstat) {
1947 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
1948 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
1949 dev->name, ch->id, ch->irb->scsw.cstat,
1950 ch->irb->scsw.dstat);
1954 /* Check the reason-code of a unit check */
1955 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
1956 ccw_unit_check(ch, ch->irb->ecw[0]);
1959 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
1960 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
1961 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
1963 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
1966 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
1967 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
1970 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
1971 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
1972 (ch->irb->scsw.stctl ==
1973 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1974 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
1976 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
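	/*
	 * Note the dispatch order above: subchannel check first, then unit
	 * check (sense analysis via ccw_unit_check), then busy/attention
	 * combinations, and finally secondary/pending status, which is
	 * reported as CH_EVENT_FINSTAT; anything else becomes a generic
	 * CH_EVENT_IRQ.
	 */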
1981 * Actions for interface - statemachine.
1982 *****************************************************************************/
1985 * Start up channels by sending CH_EVENT_START to each channel.
1987 * @param fi An instance of an interface statemachine.
1988 * @param event The event, just happened.
1989 * @param arg Generic pointer, casted from struct net_device * upon call.
1992 dev_action_start(fsm_instance * fi, int event, void *arg)
1994 struct net_device *dev = (struct net_device *) arg;
1995 struct ctc_priv *privptr = dev->priv;
1998 DBF_TEXT(setup, 3, __FUNCTION__);
1999 fsm_deltimer(&privptr->restart_timer);
2000 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2001 for (direction = READ; direction <= WRITE; direction++) {
2002 struct channel *ch = privptr->channel[direction];
2003 fsm_event(ch->fsm, CH_EVENT_START, ch);
2008 * Shut down channels by sending CH_EVENT_STOP to each channel.
2010 * @param fi An instance of an interface statemachine.
2011 * @param event The event, just happened.
2012 * @param arg Generic pointer, casted from struct net_device * upon call.
2015 dev_action_stop(fsm_instance * fi, int event, void *arg)
2017 struct net_device *dev = (struct net_device *) arg;
2018 struct ctc_priv *privptr = dev->priv;
2021 DBF_TEXT(trace, 3, __FUNCTION__);
2022 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2023 for (direction = READ; direction <= WRITE; direction++) {
2024 struct channel *ch = privptr->channel[direction];
2025 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
2029 dev_action_restart(fsm_instance *fi, int event, void *arg)
2031 struct net_device *dev = (struct net_device *)arg;
2032 struct ctc_priv *privptr = dev->priv;
2034 DBF_TEXT(trace, 3, __FUNCTION__);
2035 ctc_pr_debug("%s: Restarting\n", dev->name);
2036 dev_action_stop(fi, event, arg);
2037 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2038 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2039 DEV_EVENT_START, dev);
2043 * Called from channel statemachine
2044 * when a channel is up and running.
2046 * @param fi An instance of an interface statemachine.
2047 * @param event The event, just happened.
2048 * @param arg Generic pointer, casted from struct net_device * upon call.
2051 dev_action_chup(fsm_instance * fi, int event, void *arg)
2053 struct net_device *dev = (struct net_device *) arg;
2054 struct ctc_priv *privptr = dev->priv;
2056 DBF_TEXT(trace, 3, __FUNCTION__);
2057 switch (fsm_getstate(fi)) {
2058 case DEV_STATE_STARTWAIT_RXTX:
2059 if (event == DEV_EVENT_RXUP)
2060 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2062 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2064 case DEV_STATE_STARTWAIT_RX:
2065 if (event == DEV_EVENT_RXUP) {
2066 fsm_newstate(fi, DEV_STATE_RUNNING);
2067 ctc_pr_info("%s: connected with remote side\n",
2069 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2070 ctc_tty_setcarrier(dev, 1);
2071 ctc_clear_busy(dev);
2074 case DEV_STATE_STARTWAIT_TX:
2075 if (event == DEV_EVENT_TXUP) {
2076 fsm_newstate(fi, DEV_STATE_RUNNING);
2077 ctc_pr_info("%s: connected with remote side\n",
2079 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2080 ctc_tty_setcarrier(dev, 1);
2081 ctc_clear_busy(dev);
2084 case DEV_STATE_STOPWAIT_TX:
2085 if (event == DEV_EVENT_RXUP)
2086 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2088 case DEV_STATE_STOPWAIT_RX:
2089 if (event == DEV_EVENT_TXUP)
2090 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2096 * Called from channel statemachine
2097 * when a channel has been shut down.
2099 * @param fi An instance of an interface statemachine.
2100 * @param event The event, just happened.
2101 * @param arg Generic pointer, casted from struct net_device * upon call.
2104 dev_action_chdown(fsm_instance * fi, int event, void *arg)
2106 struct net_device *dev = (struct net_device *) arg;
2107 struct ctc_priv *privptr = dev->priv;
2109 DBF_TEXT(trace, 3, __FUNCTION__);
2110 switch (fsm_getstate(fi)) {
2111 case DEV_STATE_RUNNING:
2112 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2113 ctc_tty_setcarrier(dev, 0);
2114 if (event == DEV_EVENT_TXDOWN)
2115 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2117 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2119 case DEV_STATE_STARTWAIT_RX:
2120 if (event == DEV_EVENT_TXDOWN)
2121 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2123 case DEV_STATE_STARTWAIT_TX:
2124 if (event == DEV_EVENT_RXDOWN)
2125 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2127 case DEV_STATE_STOPWAIT_RXTX:
2128 if (event == DEV_EVENT_TXDOWN)
2129 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2131 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2133 case DEV_STATE_STOPWAIT_RX:
2134 if (event == DEV_EVENT_RXDOWN)
2135 fsm_newstate(fi, DEV_STATE_STOPPED);
2137 case DEV_STATE_STOPWAIT_TX:
2138 if (event == DEV_EVENT_TXDOWN)
2139 fsm_newstate(fi, DEV_STATE_STOPPED);
2144 static const fsm_node dev_fsm[] = {
2145 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2147 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2148 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2149 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2150 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2152 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2153 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2154 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2155 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2156 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2158 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2159 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2160 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2161 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2162 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2164 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2165 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2166 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2167 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2168 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2169 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2171 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2172 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2173 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2174 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2175 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2177 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2178 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2179 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2180 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2181 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2183 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2184 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2185 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2186 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2187 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2188 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2191 static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
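/*
 * Sketch of how dev_fsm is expected to be instantiated (the device setup
 * code is not part of this excerpt; NR_DEV_STATES and NR_DEV_EVENTS are
 * assumed to be the terminating enum values referred to in the comments
 * above), mirroring the init_fsm() call in add_channel():
 *
 *	privptr->fsm = init_fsm(dev->name, dev_state_names, dev_event_names,
 *				NR_DEV_STATES, NR_DEV_EVENTS, dev_fsm,
 *				DEV_FSM_LEN, GFP_KERNEL);
 *	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
 */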
2194 * Transmit a packet.
2195 * This is a helper function for ctc_tx().
2197 * @param ch Channel to be used for sending.
2198 * @param skb Pointer to struct sk_buff of packet to send.
2199 * The linklevel header has already been set up
2202 * @return 0 on success, -ERRNO on failure.
2205 transmit_skb(struct channel *ch, struct sk_buff *skb)
2207 unsigned long saveflags;
2208 struct ll_header header;
2211 DBF_TEXT(trace, 5, __FUNCTION__);
2212 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2213 int l = skb->len + LL_HEADER_LENGTH;
2215 spin_lock_irqsave(&ch->collect_lock, saveflags);
2216 if (ch->collect_len + l > ch->max_bufsize - 2)
2219 atomic_inc(&skb->users);
2221 header.type = skb->protocol;
2223 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2225 skb_queue_tail(&ch->collect_queue, skb);
2226 ch->collect_len += l;
2228 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2232 struct sk_buff *nskb;
2236 * Protect skb against being freed by upper
2239 atomic_inc(&skb->users);
2240 ch->prof.txlen += skb->len;
2241 header.length = skb->len + LL_HEADER_LENGTH;
2242 header.type = skb->protocol;
2244 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2246 block_len = skb->len + 2;
2247 *((__u16 *) skb_push(skb, 2)) = block_len;
2250 * IDAL support in CTC is broken, so we have to
2251 * take care of skbs above 2G ourselves.
2253 hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
2255 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2257 atomic_dec(&skb->users);
2258 skb_pull(skb, LL_HEADER_LENGTH + 2);
2261 memcpy(skb_put(nskb, skb->len),
2262 skb->data, skb->len);
2263 atomic_inc(&nskb->users);
2264 atomic_dec(&skb->users);
2265 dev_kfree_skb_irq(skb);
2270 ch->ccw[4].count = block_len;
2271 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2273 * idal allocation failed, try via copying to
2274 * trans_skb. trans_skb usually has a pre-allocated
2277 if (ctc_checkalloc_buffer(ch, 1)) {
2279 * Remove our header. It gets added
2280 * again on retransmit.
2282 atomic_dec(&skb->users);
2283 skb_pull(skb, LL_HEADER_LENGTH + 2);
2287 ch->trans_skb->tail = ch->trans_skb->data;
2288 ch->trans_skb->len = 0;
2289 ch->ccw[1].count = skb->len;
2290 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
2292 atomic_dec(&skb->users);
2293 dev_kfree_skb_irq(skb);
2296 skb_queue_tail(&ch->io_queue, skb);
2300 fsm_newstate(ch->fsm, CH_STATE_TX);
2301 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2302 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2303 ch->prof.send_stamp = xtime;
2304 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2305 (unsigned long) ch, 0xff, 0);
2306 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2308 ch->prof.doios_single++;
2310 fsm_deltimer(&ch->timer);
2311 ccw_check_return_code(ch, rc, "single skb TX");
2313 skb_dequeue_tail(&ch->io_queue);
2315 * Remove our header. It gets added
2316 * again on retransmit.
2318 skb_pull(skb, LL_HEADER_LENGTH + 2);
2321 struct net_device *dev = ch->netdev;
2322 struct ctc_priv *privptr = dev->priv;
2323 privptr->stats.tx_packets++;
2324 privptr->stats.tx_bytes +=
2325 skb->len - LL_HEADER_LENGTH;
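/*
 * For reference, the block built above has the following layout (lengths
 * derived from the code, not from a separate spec):
 *
 *   +-----------+---------------------+------------------------+
 *   | block_len | ll_header           | packet data            |
 *   | (2 bytes) | (LL_HEADER_LENGTH)  | (original skb payload) |
 *   +-----------+---------------------+------------------------+
 *
 *   block_len     = 2 + LL_HEADER_LENGTH + payload length
 *   header.length =     LL_HEADER_LENGTH + payload length
 *
 * When several packets have been gathered on the collect queue, each one
 * carries its own ll_header, so the per-packet length allows the block to
 * be split apart again on the receiving side.
 */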
2334 * Interface API for upper network layers
2335 *****************************************************************************/
2338 * Open an interface.
2339 * Called from generic network layer when ifconfig up is run.
2341 * @param dev Pointer to interface struct.
2343 * @return 0 (this function currently never fails).
2346 ctc_open(struct net_device * dev)
2348 DBF_TEXT(trace, 5, __FUNCTION__);
2349 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2354 * Close an interface.
2355 * Called from generic network layer when ifconfig down is run.
2357 * @param dev Pointer to interface struct.
2359 * @return 0 (this function currently never fails).
2362 ctc_close(struct net_device * dev)
2364 DBF_TEXT(trace, 5, __FUNCTION__);
2365 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
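/*
 * Note: ctc_open() and ctc_close() merely post DEV_EVENT_START/STOP to the
 * interface statemachine; the actual channel setup and teardown is done by
 * dev_action_start()/dev_action_stop() via the dev_fsm[] table, and its
 * completion is reported asynchronously through the channel statemachines.
 */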
2370 * Start transmission of a packet.
2371 * Called from generic network device layer.
2373 * @param skb Pointer to buffer containing the packet.
2374 * @param dev Pointer to interface struct.
2376 * @return 0 if packet consumed, !0 if packet rejected.
2377 * Note: If we return !0, then the packet is free'd by
2378 * the generic network layer.
2381 ctc_tx(struct sk_buff *skb, struct net_device * dev)
2384 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2386 DBF_TEXT(trace, 5, __FUNCTION__);
2388 * Some sanity checks ...
2391 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2392 privptr->stats.tx_dropped++;
2395 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2396 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2397 dev->name, LL_HEADER_LENGTH + 2);
2399 privptr->stats.tx_dropped++;
2404 * If channels are not running, try to restart them
2405 * and throw away packet.
2407 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2408 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
2409 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2412 privptr->stats.tx_dropped++;
2413 privptr->stats.tx_errors++;
2414 privptr->stats.tx_carrier_errors++;
2418 if (ctc_test_and_set_busy(dev))
2421 dev->trans_start = jiffies;
2422 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2424 ctc_clear_busy(dev);
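/*
 * Serialization note: ctc_test_and_set_busy()/ctc_clear_busy() are helpers
 * defined earlier in this driver; they are assumed here to atomically set
 * and clear a per-device "transmitter busy" flag so that only one packet at
 * a time goes through transmit_skb(). If the flag is already set, ctc_tx()
 * rejects the packet (see the return-value note in the function comment
 * above).
 */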
2429 * Sets MTU of an interface.
2431 * @param dev Pointer to interface struct.
2432 * @param new_mtu The new MTU to use for this interface.
2434 * @return 0 on success, -EINVAL if MTU is out of valid range.
2435 * (valid range is 576 .. 65527). If VM is on the
2436 * remote side, the maximum MTU is 32760; however, this is
2437 * <em>not</em> checked here.
2440 ctc_change_mtu(struct net_device * dev, int new_mtu)
2442 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2444 DBF_TEXT(trace, 3, __FUNCTION__);
2445 if ((new_mtu < 576) || (new_mtu > 65527) ||
2446 (new_mtu > (privptr->channel[READ]->max_bufsize -
2447 LL_HEADER_LENGTH - 2)))
2448 return -EINVAL;
2449 dev->mtu = new_mtu;
2450 dev->hard_header_len = LL_HEADER_LENGTH + 2;
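/*
 * The upper bound of 65527 follows from the largest possible channel buffer
 * of 65535 bytes minus the 2-byte block length field minus LL_HEADER_LENGTH,
 * assuming the 6-byte ll_header defined earlier in this driver:
 * 65535 - 2 - 6 = 65527. The per-device check above applies the same
 * relation to the actual max_bufsize of the READ channel.
 */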
2455 * Returns interface statistics of a device.
2457 * @param dev Pointer to interface struct.
2459 * @return Pointer to stats struct of this interface.
2461 static struct net_device_stats *
2462 ctc_stats(struct net_device * dev)
2464 return &((struct ctc_priv *) dev->priv)->stats;
2472 buffer_show(struct device *dev, char *buf)
2474 struct ctc_priv *priv;
2476 priv = dev->driver_data;
2479 return sprintf(buf, "%d\n",
2484 buffer_write(struct device *dev, const char *buf, size_t count)
2486 struct ctc_priv *priv;
2487 struct net_device *ndev;
2491 DBF_TEXT(trace, 3, __FUNCTION__);
2492 DBF_TEXT(trace, 3, buf);
2493 priv = dev->driver_data;
2495 DBF_TEXT(trace, 3, "bfnopriv");
2499 sscanf(buf, "%u", &bs1);
2500 if (bs1 > CTC_BUFSIZE_LIMIT)
2502 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2504 priv->buffer_size = bs1; /* just to overwrite the default */
2506 ndev = priv->channel[READ]->netdev;
2508 DBF_TEXT(trace, 3, "bfnondev");
2512 if ((ndev->flags & IFF_RUNNING) &&
2513 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2516 priv->channel[READ]->max_bufsize = bs1;
2517 priv->channel[WRITE]->max_bufsize = bs1;
2518 if (!(ndev->flags & IFF_RUNNING))
2519 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2520 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2521 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2523 sprintf(buffer, "%d",priv->buffer_size);
2524 DBF_TEXT(trace, 3, buffer);
2528 DBF_TEXT(trace, 3, "buff_err");
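/*
 * The helper below is an illustrative sketch only (not part of the driver);
 * it merely restates the constraints enforced by buffer_write() above. Note
 * that if the interface is not running, the MTU is additionally adjusted to
 * bs - LL_HEADER_LENGTH - 2 to match the new buffer size.
 */
#if 0
static int
ctc_bufsize_ok(unsigned int bs, struct net_device *ndev)
{
	if (bs > CTC_BUFSIZE_LIMIT)
		return 0;	/* larger than the channel buffer limit */
	if (bs < (576 + LL_HEADER_LENGTH + 2))
		return 0;	/* below the conventional 576-byte IP minimum */
	if ((ndev->flags & IFF_RUNNING) &&
	    (bs < (ndev->mtu + LL_HEADER_LENGTH + 2)))
		return 0;	/* would truncate the current MTU */
	return 1;		/* acceptable */
}
#endif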
2533 loglevel_show(struct device *dev, char *buf)
2535 return sprintf(buf, "%d\n", loglevel);
2539 loglevel_write(struct device *dev, const char *buf, size_t count)
2543 DBF_TEXT(trace, 5, __FUNCTION__);
2544 sscanf(buf, "%i", &ll1);
2546 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
2553 ctc_print_statistics(struct ctc_priv *priv)
2558 DBF_TEXT(trace, 4, __FUNCTION__);
2561 sbuf = (char *)kmalloc(2048, GFP_KERNEL);
2566 p += sprintf(p, " Device FSM state: %s\n",
2567 fsm_getstate_str(priv->fsm));
2568 p += sprintf(p, " RX channel FSM state: %s\n",
2569 fsm_getstate_str(priv->channel[READ]->fsm));
2570 p += sprintf(p, " TX channel FSM state: %s\n",
2571 fsm_getstate_str(priv->channel[WRITE]->fsm));
2572 p += sprintf(p, " Max. TX buffer used: %ld\n",
2573 priv->channel[WRITE]->prof.maxmulti);
2574 p += sprintf(p, " Max. chained SKBs: %ld\n",
2575 priv->channel[WRITE]->prof.maxcqueue);
2576 p += sprintf(p, " TX single write ops: %ld\n",
2577 priv->channel[WRITE]->prof.doios_single);
2578 p += sprintf(p, " TX multi write ops: %ld\n",
2579 priv->channel[WRITE]->prof.doios_multi);
2580 p += sprintf(p, " Netto bytes written: %ld\n",
2581 priv->channel[WRITE]->prof.txlen);
2582 p += sprintf(p, " Max. TX IO-time: %ld\n",
2583 priv->channel[WRITE]->prof.tx_time);
2585 ctc_pr_debug("Statistics for %s:\n%s",
2586 priv->channel[WRITE]->netdev->name, sbuf);
2592 stats_show(struct device *dev, char *buf)
2594 struct ctc_priv *priv = dev->driver_data;
2597 ctc_print_statistics(priv);
2598 return sprintf(buf, "0\n");
2602 stats_write(struct device *dev, const char *buf, size_t count)
2604 struct ctc_priv *priv = dev->driver_data;
2607 /* Reset statistics */
2608 memset(&priv->channel[WRITE]->prof, 0,
2609 sizeof(priv->channel[WRITE]->prof));
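/*
 * Usage note: reading the "stats" attribute dumps the statistics via
 * ctc_print_statistics() to the kernel log and returns just "0\n" to the
 * reader; writing any value resets the profiling counters of the WRITE
 * channel.
 */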
2615 ctc_netdev_unregister(struct net_device * dev)
2617 struct ctc_priv *privptr;
2621 privptr = (struct ctc_priv *) dev->priv;
2622 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2623 unregister_netdev(dev);
2625 ctc_tty_unregister_netdev(dev);
2629 ctc_netdev_register(struct net_device * dev)
2631 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2632 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2633 return register_netdev(dev);
2635 return ctc_tty_register_netdev(dev);
2639 ctc_free_netdevice(struct net_device * dev, int free_dev)
2641 struct ctc_priv *privptr;
2644 privptr = dev->priv;
2647 kfree_fsm(privptr->fsm);
2657 ctc_proto_show(struct device *dev, char *buf)
2659 struct ctc_priv *priv;
2661 priv = dev->driver_data;
2665 return sprintf(buf, "%d\n", priv->protocol);
2669 ctc_proto_store(struct device *dev, const char *buf, size_t count)
2671 struct ctc_priv *priv;
2674 DBF_TEXT(trace, 3, __FUNCTION__);
2675 pr_debug("%s() called\n", __FUNCTION__);
2677 priv = dev->driver_data;
2680 sscanf(buf, "%u", &value);
2681 if ((value < 0) || (value > CTC_PROTO_MAX))
2683 priv->protocol = value;
2690 ctc_type_show(struct device *dev, char *buf)
2692 struct ccwgroup_device *cgdev;
2694 cgdev = to_ccwgroupdev(dev);
2698 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2701 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2702 static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
2703 static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
2705 static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2706 static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
2708 static struct attribute *ctc_attr[] = {
2709 &dev_attr_protocol.attr,
2710 &dev_attr_type.attr,
2711 &dev_attr_buffer.attr,
2715 static struct attribute_group ctc_attr_group = {
2720 ctc_add_attributes(struct device *dev)
2722 device_create_file(dev, &dev_attr_loglevel);
2723 device_create_file(dev, &dev_attr_stats);
2728 ctc_remove_attributes(struct device *dev)
2730 device_remove_file(dev, &dev_attr_stats);
2731 device_remove_file(dev, &dev_attr_loglevel);
2735 ctc_add_files(struct device *dev)
2737 pr_debug("%s() called\n", __FUNCTION__);
2739 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
2743 ctc_remove_files(struct device *dev)
2745 pr_debug("%s() called\n", __FUNCTION__);
2747 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
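/*
 * Two tiers of sysfs attributes are used: "buffer", "protocol" and "type"
 * are created as a group by ctc_add_files() when the ccwgroup device is
 * probed, while "loglevel" and "stats" are added individually by
 * ctc_add_attributes() once the interface has been set up, and removed
 * again on shutdown.
 */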
2751 * Add ctc specific attributes.
2752 * Add ctc private data.
2754 * @param cgdev pointer to ccwgroup_device just added
2756 * @returns 0 on success, !0 on failure.
2759 ctc_probe_device(struct ccwgroup_device *cgdev)
2761 struct ctc_priv *priv;
2765 pr_debug("%s() called\n", __FUNCTION__);
2766 DBF_TEXT(setup, 3, __FUNCTION__);
2768 if (!get_device(&cgdev->dev))
2771 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
2773 ctc_pr_err("%s: Out of memory\n", __func__);
2774 put_device(&cgdev->dev);
2778 memset(priv, 0, sizeof (struct ctc_priv));
2779 rc = ctc_add_files(&cgdev->dev);
2782 put_device(&cgdev->dev);
2785 priv->buffer_size = CTC_BUFSIZE_DEFAULT;
2786 cgdev->cdev[0]->handler = ctc_irq_handler;
2787 cgdev->cdev[1]->handler = ctc_irq_handler;
2788 cgdev->dev.driver_data = priv;
2790 sprintf(buffer, "%p", priv);
2791 DBF_TEXT(data, 3, buffer);
2793 sprintf(buffer, "%u", (unsigned int)sizeof(struct ctc_priv));
2794 DBF_TEXT(data, 3, buffer);
2796 sprintf(buffer, "%p", &channels);
2797 DBF_TEXT(data, 3, buffer);
2799 sprintf(buffer, "%u", (unsigned int)sizeof(struct channel));
2800 DBF_TEXT(data, 3, buffer);
2806 * Initialize everything of the net device except the name and the
2809 static struct net_device *
2810 ctc_init_netdevice(struct net_device * dev, int alloc_device,
2811 struct ctc_priv *privptr)
2816 DBF_TEXT(setup, 3, __FUNCTION__);
2819 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
2822 memset(dev, 0, sizeof (struct net_device));
2825 dev->priv = privptr;
2826 privptr->fsm = init_fsm("ctcdev", dev_state_names,
2827 dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS,
2828 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2829 if (privptr->fsm == NULL) {
2834 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2835 fsm_settimer(privptr->fsm, &privptr->restart_timer);
2837 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
2838 dev->hard_start_xmit = ctc_tx;
2839 dev->open = ctc_open;
2840 dev->stop = ctc_close;
2841 dev->get_stats = ctc_stats;
2842 dev->change_mtu = ctc_change_mtu;
2843 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2845 dev->type = ARPHRD_SLIP;
2846 dev->tx_queue_len = 100;
2847 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2848 SET_MODULE_OWNER(dev);
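/*
 * The interface is a point-to-point link without a MAC layer, hence
 * ARPHRD_SLIP and IFF_POINTOPOINT | IFF_NOARP. The default MTU mirrors the
 * relation used in ctc_change_mtu(): CTC_BUFSIZE_DEFAULT minus the 2-byte
 * block length field and the link-level header.
 */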
2855 * Setup an interface.
2857 * @param cgdev Device to be setup.
2859 * @returns 0 on success, !0 on failure.
2862 ctc_new_device(struct ccwgroup_device *cgdev)
2864 char read_id[CTC_ID_SIZE];
2865 char write_id[CTC_ID_SIZE];
2867 enum channel_types type;
2868 struct ctc_priv *privptr;
2869 struct net_device *dev;
2873 pr_debug("%s() called\n", __FUNCTION__);
2874 DBF_TEXT(setup, 3, __FUNCTION__);
2876 privptr = cgdev->dev.driver_data;
2880 sprintf(buffer, "%d", privptr->buffer_size);
2881 DBF_TEXT(setup, 3, buffer);
2883 type = get_channel_type(&cgdev->cdev[0]->id);
2885 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
2886 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
2888 if (add_channel(cgdev->cdev[0], type))
2890 if (add_channel(cgdev->cdev[1], type))
2893 ret = ccw_device_set_online(cgdev->cdev[0]);
2896 "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
2899 ret = ccw_device_set_online(cgdev->cdev[1]);
2902 "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
2905 dev = ctc_init_netdevice(NULL, 1, privptr);
2908 ctc_pr_warn("ctc_init_netdevice failed\n");
2912 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2913 strlcpy(dev->name, "ctctty%d", IFNAMSIZ);
2915 strlcpy(dev->name, "ctc%d", IFNAMSIZ);
2917 for (direction = READ; direction <= WRITE; direction++) {
2918 privptr->channel[direction] =
2919 channel_get(type, direction == READ ? read_id : write_id,
2921 if (privptr->channel[direction] == NULL) {
2922 if (direction == WRITE)
2923 channel_free(privptr->channel[READ]);
2925 ctc_free_netdevice(dev, 1);
2928 privptr->channel[direction]->netdev = dev;
2929 privptr->channel[direction]->protocol = privptr->protocol;
2930 privptr->channel[direction]->max_bufsize = privptr->buffer_size;
2933 SET_NETDEV_DEV(dev, &cgdev->dev);
2935 if (ctc_netdev_register(dev) != 0) {
2936 ctc_free_netdevice(dev, 1);
2940 ctc_add_attributes(&cgdev->dev);
2942 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
2946 ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
2947 dev->name, privptr->channel[READ]->id,
2948 privptr->channel[WRITE]->id, privptr->protocol);
2952 ccw_device_set_offline(cgdev->cdev[1]);
2953 ccw_device_set_offline(cgdev->cdev[0]);
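/*
 * Summary of the online sequence above: both subchannels are added as
 * channels and set online, a net_device is allocated and initialized, the
 * READ and WRITE channels (looked up by their "ch-<bus_id>" names) are
 * bound to it, and finally the net_device is registered and the per-device
 * attributes are created. Any failure rolls back by setting the subchannels
 * offline again.
 */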
2959 * Shutdown an interface.
2961 * @param cgdev Device to be shut down.
2963 * @returns 0 on success, !0 on failure.
2966 ctc_shutdown_device(struct ccwgroup_device *cgdev)
2968 struct ctc_priv *priv;
2969 struct net_device *ndev;
2971 DBF_TEXT(setup, 3, __FUNCTION__);
2972 pr_debug("%s() called\n", __FUNCTION__);
2975 priv = cgdev->dev.driver_data;
2980 if (priv->channel[READ]) {
2981 ndev = priv->channel[READ]->netdev;
2983 /* Close the device */
2985 ndev->flags &= ~IFF_RUNNING;
2987 ctc_remove_attributes(&cgdev->dev);
2989 channel_free(priv->channel[READ]);
2991 if (priv->channel[WRITE])
2992 channel_free(priv->channel[WRITE]);
2995 ctc_netdev_unregister(ndev);
2997 ctc_free_netdevice(ndev, 1);
3001 kfree_fsm(priv->fsm);
3003 ccw_device_set_offline(cgdev->cdev[1]);
3004 ccw_device_set_offline(cgdev->cdev[0]);
3006 if (priv->channel[READ])
3007 channel_remove(priv->channel[READ]);
3008 if (priv->channel[WRITE])
3009 channel_remove(priv->channel[WRITE]);
3010 priv->channel[READ] = priv->channel[WRITE] = NULL;
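/*
 * Shutdown reverses the setup done in ctc_new_device(): close the
 * net_device, remove the per-device attributes, free both channels,
 * unregister and free the net_device and its FSM, set the subchannels
 * offline and remove the channel objects. The ccwgroup device itself is
 * only released in ctc_remove_device(), which drops the reference taken in
 * ctc_probe_device().
 */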
3017 ctc_remove_device(struct ccwgroup_device *cgdev)
3019 struct ctc_priv *priv;
3021 pr_debug("%s() called\n", __FUNCTION__);
3022 DBF_TEXT(setup, 3, __FUNCTION__);
3024 priv = cgdev->dev.driver_data;
3027 if (cgdev->state == CCWGROUP_ONLINE)
3028 ctc_shutdown_device(cgdev);
3029 ctc_remove_files(&cgdev->dev);
3030 cgdev->dev.driver_data = NULL;
3032 put_device(&cgdev->dev);
3035 static struct ccwgroup_driver ctc_group_driver = {
3036 .owner = THIS_MODULE,
3039 .driver_id = 0xC3E3C3,
3040 .probe = ctc_probe_device,
3041 .remove = ctc_remove_device,
3042 .set_online = ctc_new_device,
3043 .set_offline = ctc_shutdown_device,
3047 * Module related routines
3048 *****************************************************************************/
3051 * Prepare to be unloaded. Free IRQ's and release all resources.
3052 * This is called just before this module is unloaded. It is
3053 * <em>not</em> called if the usage count is !0, so we don't need to check for that.
3059 DBF_TEXT(setup, 3, __FUNCTION__);
3060 unregister_cu3088_discipline(&ctc_group_driver);
3062 ctc_unregister_dbf_views();
3063 ctc_pr_info("CTC driver unloaded\n");
3067 * Initialize module.
3068 * This is called just after the module is loaded.
3070 * @return 0 on success, !0 on error.
3077 loglevel = CTC_LOGLEVEL_DEFAULT;
3079 DBF_TEXT(setup, 3, __FUNCTION__);
3083 ret = ctc_register_dbf_views();
3085 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3089 ret = register_cu3088_discipline(&ctc_group_driver);
3092 ctc_unregister_dbf_views();
3097 module_init(ctc_init);
3098 module_exit(ctc_exit);
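/*
 * Module bring-up order: the debug feature views are registered first so
 * that all later setup paths can log via DBF_TEXT; registering the cu3088
 * discipline then hooks ctc_group_driver's probe/set_online callbacks into
 * the ccwgroup bus, after which devices can be grouped and set online from
 * user space.
 */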
3100 /* --- This is the END my friend --- */