2 * CTC / ESCON network driver
4 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
7 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
8 *		  Peter Tiedemann (ptiedem@de.ibm.com)
9 * Driver Model stuff by : Cornelia Huck <huckc@de.ibm.com>
12 * - Principles of Operation (IBM doc#: SA22-7201-06)
13 * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
14 * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
15 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
16 * - ESCON I/O Interface (IBM doc#: SA22-7202-02)
18 * and the source of the original CTC driver by:
19 * Dieter Wellerdiek (wel@de.ibm.com)
20 * Martin Schwidefsky (schwidefsky@de.ibm.com)
21 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
22 * Jochen Röhrig (roehrig@de.ibm.com)
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License as published by
26 * the Free Software Foundation; either version 2, or (at your option)
29 * This program is distributed in the hope that it will be useful,
30 * but WITHOUT ANY WARRANTY; without even the implied warranty of
31 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
32 * GNU General Public License for more details.
34 * You should have received a copy of the GNU General Public License
35 * along with this program; if not, write to the Free Software
36 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/kernel.h>
43 #include <linux/slab.h>
44 #include <linux/errno.h>
45 #include <linux/types.h>
46 #include <linux/interrupt.h>
47 #include <linux/timer.h>
48 #include <linux/sched.h>
49 #include <linux/bitops.h>
51 #include <linux/signal.h>
52 #include <linux/string.h>
55 #include <linux/if_arp.h>
56 #include <linux/tcp.h>
57 #include <linux/skbuff.h>
58 #include <linux/ctype.h>
62 #include <asm/ccwdev.h>
63 #include <asm/ccwgroup.h>
64 #include <asm/uaccess.h>
66 #include <asm/idals.h>
75 MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
76 MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
77 MODULE_LICENSE("GPL");
79 * States of the interface statemachine.
83 DEV_STATE_STARTWAIT_RXTX,
84 DEV_STATE_STARTWAIT_RX,
85 DEV_STATE_STARTWAIT_TX,
86 DEV_STATE_STOPWAIT_RXTX,
87 DEV_STATE_STOPWAIT_RX,
88 DEV_STATE_STOPWAIT_TX,
91 * MUST always be the last element!
96 static const char *dev_state_names[] = {
108 * Events of the interface statemachine.
119 * MUST always be the last element!
124 static const char *dev_event_names[] = {
135 * Events of the channel statemachine
139 * Events, representing return code of
140 * I/O operations (ccw_device_start, ccw_device_halt et al.)
153 * Events, representing unit-check
157 CH_EVENT_UC_TXTIMEOUT,
158 CH_EVENT_UC_TXPARITY,
160 CH_EVENT_UC_RXPARITY,
165 * Events, representing subchannel-check
170 * Events, representing machine checks
176 * Event, representing normal IRQ
182 * Event, representing timer expiry.
187 * Events, representing commands from upper levels.
193 * MUST always be the last element!
199 * States of the channel statemachine.
203 * Channel not assigned to any device,
204 * initial state, direction invalid
209 * Channel assigned but not operating
228 * MUST always be the last element!
233 static int loglevel = CTC_LOGLEVEL_DEFAULT;
236 * Linked list of all detected channels.
238 static struct channel *channels = NULL;
246 static int printed = 0;
251 printk(KERN_INFO "CTC driver initialized\n");
256 * Return type of a detected device.
258 static enum channel_types
259 get_channel_type(struct ccw_device_id *id)
261 enum channel_types type = (enum channel_types) id->driver_info;
263 if (type == channel_type_ficon)
264 type = channel_type_escon;
269 static const char *ch_event_names[] = {
270 "ccw_device success",
274 "ccw_device unknown",
276 "Status ATTN & BUSY",
280 "Unit check remote reset",
281 "Unit check remote system reset",
282 "Unit check TX timeout",
283 "Unit check TX parity",
284 "Unit check Hardware failure",
285 "Unit check RX parity",
287 "Unit check Unknown",
289 "SubChannel check Unknown",
291 "Machine check failure",
292 "Machine check operational",
303 static const char *ch_state_names[] = {
324 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
326 * @param skb The sk_buff to dump.
327 * @param offset Offset relative to skb->data, where to start the dump.
330 ctc_dump_skb(struct sk_buff *skb, int offset)
332 unsigned char *p = skb->data;
334 struct ll_header *header;
337 if (!(loglevel & CTC_LOGLEVEL_DEBUG))
342 header = (struct ll_header *) p;
345 printk(KERN_DEBUG "dump:\n");
346 printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
348 printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
350 printk(KERN_DEBUG "h->type=%04x\n", header->type);
351 printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
354 printk(KERN_DEBUG "data: ");
355 for (i = 0; i < bl; i++)
356 printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
361 ctc_dump_skb(struct sk_buff *skb, int offset)
367 * Unpack a just received skb and hand it over to upper layers.
370 * @param ch The channel where this skb has been received.
371 * @param pskb The received skb.
373 static __inline__ void
374 ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
376 struct net_device *dev = ch->netdev;
377 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
378 __u16 len = *((__u16 *) pskb->data);
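/*
 * Assumed on-wire layout (struct ll_header itself is defined in the driver's
 * header file, not shown in this excerpt):
 *
 *   | block length (2 bytes) | ll_header | payload | ll_header | payload | ...
 *
 * The leading 2-byte length covers the whole block; the code below walks the
 * contained packets and hands each one to the stack.
 */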
380 DBF_TEXT(trace, 4, __FUNCTION__);
381 skb_put(pskb, 2 + LL_HEADER_LENGTH);
384 pskb->ip_summed = CHECKSUM_UNNECESSARY;
387 struct ll_header *header = (struct ll_header *) pskb->data;
389 skb_pull(pskb, LL_HEADER_LENGTH);
390 if ((ch->protocol == CTC_PROTO_S390) &&
391 (header->type != ETH_P_IP)) {
394 if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
397 * Check the packet type only if we stick strictly
398 * to the S/390 (OS/390) protocol, which supports
399 * IP only. Otherwise allow any packet type.
403 "%s Illegal packet type 0x%04x received, dropping\n",
404 dev->name, header->type);
405 ch->logflags |= LOG_FLAG_ILLEGALPKT;
410 ctc_dump_skb(pskb, -6);
412 privptr->stats.rx_dropped++;
413 privptr->stats.rx_frame_errors++;
416 pskb->protocol = ntohs(header->type);
417 if (header->length <= LL_HEADER_LENGTH) {
419 if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
422 "%s Illegal packet size %d "
423 "received (MTU=%d blocklen=%d), "
424 "dropping\n", dev->name, header->length,
426 ch->logflags |= LOG_FLAG_ILLEGALSIZE;
431 ctc_dump_skb(pskb, -6);
433 privptr->stats.rx_dropped++;
434 privptr->stats.rx_length_errors++;
437 header->length -= LL_HEADER_LENGTH;
438 len -= LL_HEADER_LENGTH;
439 if ((header->length > skb_tailroom(pskb)) ||
440 (header->length > len)) {
442 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
445 "%s Illegal packet size %d "
446 "(beyond the end of received data), "
447 "dropping\n", dev->name, header->length);
448 ch->logflags |= LOG_FLAG_OVERRUN;
453 ctc_dump_skb(pskb, -6);
455 privptr->stats.rx_dropped++;
456 privptr->stats.rx_length_errors++;
459 skb_put(pskb, header->length);
460 pskb->mac.raw = pskb->data;
461 len -= header->length;
462 skb = dev_alloc_skb(pskb->len);
465 if (!(ch->logflags & LOG_FLAG_NOMEM)) {
468 "%s Out of memory in ctc_unpack_skb\n",
470 ch->logflags |= LOG_FLAG_NOMEM;
474 privptr->stats.rx_dropped++;
477 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
478 skb->mac.raw = skb->data;
479 skb->dev = pskb->dev;
480 skb->protocol = pskb->protocol;
481 pskb->ip_summed = CHECKSUM_UNNECESSARY;
482 if (ch->protocol == CTC_PROTO_LINUX_TTY)
483 ctc_tty_netif_rx(skb);
487 * Successful rx; reset logflags
490 dev->last_rx = jiffies;
491 privptr->stats.rx_packets++;
492 privptr->stats.rx_bytes += skb->len;
494 skb_pull(pskb, header->length);
495 if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
497 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
500 "%s Overrun in ctc_unpack_skb\n",
502 ch->logflags |= LOG_FLAG_OVERRUN;
508 skb_put(pskb, LL_HEADER_LENGTH);
514 * Check return code of a preceding ccw_device call, halt_IO etc...
516 * @param ch The channel, the error belongs to.
517 * @param return_code The error code to inspect.
520 ccw_check_return_code(struct channel *ch, int return_code, char *msg)
522 DBF_TEXT(trace, 5, __FUNCTION__);
523 switch (return_code) {
525 fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
528 ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
529 fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
532 ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
534 fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
537 ctc_pr_emerg("%s (%s): Status pending... \n",
539 fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
542 ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
543 ch->id, msg, return_code);
544 fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
549 * Check sense of a unit check.
551 * @param ch The channel, the sense code belongs to.
552 * @param sense The sense code to inspect.
555 ccw_unit_check(struct channel *ch, unsigned char sense)
557 DBF_TEXT(trace, 5, __FUNCTION__);
558 if (sense & SNS0_INTERVENTION_REQ) {
560 if (ch->protocol != CTC_PROTO_LINUX_TTY)
561 ctc_pr_debug("%s: Interface disc. or Sel. reset "
562 "(remote)\n", ch->id);
563 fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
565 ctc_pr_debug("%s: System reset (remote)\n", ch->id);
566 fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
568 } else if (sense & SNS0_EQUIPMENT_CHECK) {
569 if (sense & SNS0_BUS_OUT_CHECK) {
570 ctc_pr_warn("%s: Hardware malfunction (remote)\n",
572 fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
574 ctc_pr_warn("%s: Read-data parity error (remote)\n",
576 fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
578 } else if (sense & SNS0_BUS_OUT_CHECK) {
580 ctc_pr_warn("%s: Data-streaming timeout\n", ch->id);
581 fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
583 ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
584 fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
586 } else if (sense & SNS0_CMD_REJECT) {
587 ctc_pr_warn("%s: Command reject\n", ch->id);
588 } else if (sense == 0) {
589 ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
590 fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
592 ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
594 fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
599 ctc_purge_skb_queue(struct sk_buff_head *q)
603 DBF_TEXT(trace, 5, __FUNCTION__);
605 while ((skb = skb_dequeue(q))) {
606 atomic_dec(&skb->users);
607 dev_kfree_skb_irq(skb);
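/*
 * (Re)allocate the channel's transfer buffer: if trans_skb is missing or the
 * configured buffer size was changed at runtime, free the old skb, allocate
 * a fresh DMA-capable one of max_bufsize bytes and map it into ccw[1] via
 * set_normalized_cda().
 */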
611 static __inline__ int
612 ctc_checkalloc_buffer(struct channel *ch, int warn)
614 DBF_TEXT(trace, 5, __FUNCTION__);
615 if ((ch->trans_skb == NULL) ||
616 (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
617 if (ch->trans_skb != NULL)
618 dev_kfree_skb(ch->trans_skb);
619 clear_normalized_cda(&ch->ccw[1]);
620 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
621 GFP_ATOMIC | GFP_DMA);
622 if (ch->trans_skb == NULL) {
625 "%s: Couldn't alloc %s trans_skb\n",
627 (CHANNEL_DIRECTION(ch->flags) == READ) ?
631 ch->ccw[1].count = ch->max_bufsize;
632 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
633 dev_kfree_skb(ch->trans_skb);
634 ch->trans_skb = NULL;
637 "%s: set_normalized_cda for %s "
638 "trans_skb failed, dropping packets\n",
640 (CHANNEL_DIRECTION(ch->flags) == READ) ?
644 ch->ccw[1].count = 0;
645 ch->trans_skb_data = ch->trans_skb->data;
646 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
652 * Dummy NOP action for statemachines
655 fsm_action_nop(fsm_instance * fi, int event, void *arg)
660 * Actions for channel - statemachines.
661 *****************************************************************************/
664 * Normal data has been sent. Free the corresponding
665 * skb (it's in io_queue), reset dev->tbusy and
666 * revert to idle state.
668 * @param fi An instance of a channel statemachine.
669 * @param event The event that just happened.
670 * @param arg Generic pointer, cast from channel * upon call.
673 ch_action_txdone(fsm_instance * fi, int event, void *arg)
675 struct channel *ch = (struct channel *) arg;
676 struct net_device *dev = ch->netdev;
677 struct ctc_priv *privptr = dev->priv;
681 unsigned long duration;
682 struct timespec done_stamp = xtime;
684 DBF_TEXT(trace, 4, __FUNCTION__);
687 (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
688 (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
689 if (duration > ch->prof.tx_time)
690 ch->prof.tx_time = duration;
692 if (ch->irb->scsw.count != 0)
693 ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
694 dev->name, ch->irb->scsw.count);
695 fsm_deltimer(&ch->timer);
696 while ((skb = skb_dequeue(&ch->io_queue))) {
697 privptr->stats.tx_packets++;
698 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
700 privptr->stats.tx_bytes += 2;
703 atomic_dec(&skb->users);
704 dev_kfree_skb_irq(skb);
706 spin_lock(&ch->collect_lock);
707 clear_normalized_cda(&ch->ccw[4]);
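/*
 * Packets that were queued on the collect queue while this write was in
 * flight are now merged into trans_skb and sent as one chained write;
 * if nothing was collected, simply fall back to TXIDLE.
 */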
708 if (ch->collect_len > 0) {
711 if (ctc_checkalloc_buffer(ch, 1)) {
712 spin_unlock(&ch->collect_lock);
715 ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
716 ch->trans_skb->len = 0;
717 if (ch->prof.maxmulti < (ch->collect_len + 2))
718 ch->prof.maxmulti = ch->collect_len + 2;
719 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
720 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
721 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
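/* collect_len + 2 is the total block length, including this 2-byte length field itself. */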
723 while ((skb = skb_dequeue(&ch->collect_queue))) {
724 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
726 privptr->stats.tx_packets++;
727 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
728 atomic_dec(&skb->users);
729 dev_kfree_skb_irq(skb);
733 spin_unlock(&ch->collect_lock);
734 ch->ccw[1].count = ch->trans_skb->len;
735 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
736 ch->prof.send_stamp = xtime;
737 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
738 (unsigned long) ch, 0xff, 0);
739 ch->prof.doios_multi++;
741 privptr->stats.tx_dropped += i;
742 privptr->stats.tx_errors += i;
743 fsm_deltimer(&ch->timer);
744 ccw_check_return_code(ch, rc, "chained TX");
747 spin_unlock(&ch->collect_lock);
748 fsm_newstate(fi, CH_STATE_TXIDLE);
754 * Initial data is sent.
755 * Notify the device statemachine that we are up and running.
758 * @param fi An instance of a channel statemachine.
759 * @param event The event that just happened.
760 * @param arg Generic pointer, cast from channel * upon call.
763 ch_action_txidle(fsm_instance * fi, int event, void *arg)
765 struct channel *ch = (struct channel *) arg;
767 DBF_TEXT(trace, 4, __FUNCTION__);
768 fsm_deltimer(&ch->timer);
769 fsm_newstate(fi, CH_STATE_TXIDLE);
770 fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
775 * Got normal data, check for sanity, queue it up, allocate a new buffer,
776 * trigger bottom half, and initiate next read.
778 * @param fi An instance of a channel statemachine.
779 * @param event The event that just happened.
780 * @param arg Generic pointer, cast from channel * upon call.
783 ch_action_rx(fsm_instance * fi, int event, void *arg)
785 struct channel *ch = (struct channel *) arg;
786 struct net_device *dev = ch->netdev;
787 struct ctc_priv *privptr = dev->priv;
788 int len = ch->max_bufsize - ch->irb->scsw.count;
789 struct sk_buff *skb = ch->trans_skb;
790 __u16 block_len = *((__u16 *) skb->data);
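/*
 * len is what the channel actually transferred (max_bufsize minus the
 * residual count from the SCSW); block_len is what the peer announced in the
 * leading 2-byte length field. The two are cross-checked below before the
 * block is unpacked.
 */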
794 DBF_TEXT(trace, 4, __FUNCTION__);
795 fsm_deltimer(&ch->timer);
797 ctc_pr_debug("%s: got packet with length %d < 8\n",
799 privptr->stats.rx_dropped++;
800 privptr->stats.rx_length_errors++;
803 if (len > ch->max_bufsize) {
804 ctc_pr_debug("%s: got packet with length %d > %d\n",
805 dev->name, len, ch->max_bufsize);
806 privptr->stats.rx_dropped++;
807 privptr->stats.rx_length_errors++;
812 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
814 switch (ch->protocol) {
816 case CTC_PROTO_OS390:
817 check_len = block_len + 2;
820 check_len = block_len;
823 if ((len < block_len) || (len > check_len)) {
824 ctc_pr_debug("%s: got block length %d != rx length %d\n",
825 dev->name, block_len, len);
827 ctc_dump_skb(skb, 0);
829 *((__u16 *) skb->data) = len;
830 privptr->stats.rx_dropped++;
831 privptr->stats.rx_length_errors++;
836 *((__u16 *) skb->data) = block_len;
837 ctc_unpack_skb(ch, skb);
840 skb->data = skb->tail = ch->trans_skb_data;
842 if (ctc_checkalloc_buffer(ch, 1))
844 ch->ccw[1].count = ch->max_bufsize;
845 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
847 ccw_check_return_code(ch, rc, "normal RX");
850 static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
853 * Initialize the connection by sending the initial 2-byte block header (CTC_INITIAL_BLOCKLEN).
855 * @param fi An instance of a channel statemachine.
856 * @param event The event that just happened.
857 * @param arg Generic pointer, cast from channel * upon call.
860 ch_action_firstio(fsm_instance * fi, int event, void *arg)
862 struct channel *ch = (struct channel *) arg;
865 DBF_TEXT(trace, 4, __FUNCTION__);
867 if (fsm_getstate(fi) == CH_STATE_TXIDLE)
868 ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
869 fsm_deltimer(&ch->timer);
870 if (ctc_checkalloc_buffer(ch, 1))
872 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
873 (ch->protocol == CTC_PROTO_OS390)) {
874 /* OS/390 or z/OS peer */
875 if (CHANNEL_DIRECTION(ch->flags) == READ) {
876 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
877 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
879 ch_action_rxidle(fi, event, arg);
881 struct net_device *dev = ch->netdev;
882 fsm_newstate(fi, CH_STATE_TXIDLE);
883 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
884 DEV_EVENT_TXUP, dev);
890 * Don't set up a timer for receiving the initial RX frame
891 * if in compatibility mode, since VM TCP delays the initial
892 * frame until it has some data to send.
894 if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
895 (ch->protocol != CTC_PROTO_S390))
896 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
898 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
899 ch->ccw[1].count = 2; /* Transfer only length */
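/*
 * The handshake frame is just the 2-byte block header with no payload; the
 * peer is expected to answer in kind, and ch_action_rxidle() checks the
 * reply against CTC_INITIAL_BLOCKLEN.
 */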
901 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
902 ? CH_STATE_RXINIT : CH_STATE_TXINIT);
903 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
905 fsm_deltimer(&ch->timer);
906 fsm_newstate(fi, CH_STATE_SETUPWAIT);
907 ccw_check_return_code(ch, rc, "init IO");
910 * In compatibility mode, since we don't set up a timer, we
911 * also signal the RX channel up immediately. This enables us
912 * to send packets early, which in turn usually triggers some
913 * reply from VM TCP that brings the RX channel up to its final state.
916 if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
917 (ch->protocol == CTC_PROTO_S390)) {
918 struct net_device *dev = ch->netdev;
919 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
925 * Got initial data, check it. If OK,
926 * notify the device statemachine that we are up and running.
929 * @param fi An instance of a channel statemachine.
930 * @param event The event that just happened.
931 * @param arg Generic pointer, cast from channel * upon call.
934 ch_action_rxidle(fsm_instance * fi, int event, void *arg)
936 struct channel *ch = (struct channel *) arg;
937 struct net_device *dev = ch->netdev;
941 DBF_TEXT(trace, 4, __FUNCTION__);
942 fsm_deltimer(&ch->timer);
943 buflen = *((__u16 *) ch->trans_skb->data);
945 ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
947 if (buflen >= CTC_INITIAL_BLOCKLEN) {
948 if (ctc_checkalloc_buffer(ch, 1))
950 ch->ccw[1].count = ch->max_bufsize;
951 fsm_newstate(fi, CH_STATE_RXIDLE);
952 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
953 (unsigned long) ch, 0xff, 0);
955 fsm_newstate(fi, CH_STATE_RXINIT);
956 ccw_check_return_code(ch, rc, "initial RX");
958 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
959 DEV_EVENT_RXUP, dev);
961 ctc_pr_debug("%s: Initial RX count %d not %d\n",
962 dev->name, buflen, CTC_INITIAL_BLOCKLEN);
963 ch_action_firstio(fi, event, arg);
968 * Set channel into extended mode.
970 * @param fi An instance of a channel statemachine.
971 * @param event The event that just happened.
972 * @param arg Generic pointer, cast from channel * upon call.
975 ch_action_setmode(fsm_instance * fi, int event, void *arg)
977 struct channel *ch = (struct channel *) arg;
979 unsigned long saveflags;
981 DBF_TEXT(trace, 4, __FUNCTION__);
982 fsm_deltimer(&ch->timer);
983 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
984 fsm_newstate(fi, CH_STATE_SETUPWAIT);
985 saveflags = 0; /* avoids compiler warning with
986 spin_unlock_irqrestore */
987 if (event == CH_EVENT_TIMER) // only for timer not yet locked
988 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
989 rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
990 if (event == CH_EVENT_TIMER)
991 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
993 fsm_deltimer(&ch->timer);
994 fsm_newstate(fi, CH_STATE_STARTWAIT);
995 ccw_check_return_code(ch, rc, "set Mode");
1003 * @param fi An instance of a channel statemachine.
1004 * @param event The event that just happened.
1005 * @param arg Generic pointer, cast from channel * upon call.
1008 ch_action_start(fsm_instance * fi, int event, void *arg)
1010 struct channel *ch = (struct channel *) arg;
1011 unsigned long saveflags;
1013 struct net_device *dev;
1015 DBF_TEXT(trace, 4, __FUNCTION__);
1017 ctc_pr_warn("ch_action_start ch=NULL\n");
1020 if (ch->netdev == NULL) {
1021 ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
1027 ctc_pr_debug("%s: %s channel start\n", dev->name,
1028 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1031 if (ch->trans_skb != NULL) {
1032 clear_normalized_cda(&ch->ccw[1]);
1033 dev_kfree_skb(ch->trans_skb);
1034 ch->trans_skb = NULL;
1036 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1037 ch->ccw[1].cmd_code = CCW_CMD_READ;
1038 ch->ccw[1].flags = CCW_FLAG_SLI;
1039 ch->ccw[1].count = 0;
1041 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
1042 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1043 ch->ccw[1].count = 0;
1045 if (ctc_checkalloc_buffer(ch, 0)) {
1047 "%s: Could not allocate %s trans_skb, delaying "
1048 "allocation until first transfer\n",
1050 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1053 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
1054 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1055 ch->ccw[0].count = 0;
1057 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
1058 ch->ccw[2].flags = CCW_FLAG_SLI;
1059 ch->ccw[2].count = 0;
1061 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
1063 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
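/*
 * ccw[3..5] start out as a copy of the generic program above; ccw[4] gets
 * its own CDA/IDAL per packet later on, in transmit_skb() and
 * ch_action_txretry().
 */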
1065 fsm_newstate(fi, CH_STATE_STARTWAIT);
1066 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1067 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1068 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1069 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1072 fsm_deltimer(&ch->timer);
1073 ccw_check_return_code(ch, rc, "initial HaltIO");
1076 ctc_pr_debug("ctc: %s(): leaving\n", __func__);
1081 * Shut down a channel.
1083 * @param fi An instance of a channel statemachine.
1084 * @param event The event that just happened.
1085 * @param arg Generic pointer, cast from channel * upon call.
1088 ch_action_haltio(fsm_instance * fi, int event, void *arg)
1090 struct channel *ch = (struct channel *) arg;
1091 unsigned long saveflags;
1095 DBF_TEXT(trace, 3, __FUNCTION__);
1096 fsm_deltimer(&ch->timer);
1097 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1098 saveflags = 0;	/* avoids compiler warning with
1099 spin_unlock_irqrestore */
1100 if (event == CH_EVENT_STOP) // only for STOP not yet locked
1101 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1102 oldstate = fsm_getstate(fi);
1103 fsm_newstate(fi, CH_STATE_TERM);
1104 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1105 if (event == CH_EVENT_STOP)
1106 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1109 fsm_deltimer(&ch->timer);
1110 fsm_newstate(fi, oldstate);
1112 ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
1117 * A channel has successfully been halted.
1118 * Clean up its queue and notify the interface statemachine.
1120 * @param fi An instance of a channel statemachine.
1121 * @param event The event that just happened.
1122 * @param arg Generic pointer, cast from channel * upon call.
1125 ch_action_stopped(fsm_instance * fi, int event, void *arg)
1127 struct channel *ch = (struct channel *) arg;
1128 struct net_device *dev = ch->netdev;
1130 DBF_TEXT(trace, 3, __FUNCTION__);
1131 fsm_deltimer(&ch->timer);
1132 fsm_newstate(fi, CH_STATE_STOPPED);
1133 if (ch->trans_skb != NULL) {
1134 clear_normalized_cda(&ch->ccw[1]);
1135 dev_kfree_skb(ch->trans_skb);
1136 ch->trans_skb = NULL;
1138 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1139 skb_queue_purge(&ch->io_queue);
1140 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1141 DEV_EVENT_RXDOWN, dev);
1143 ctc_purge_skb_queue(&ch->io_queue);
1144 spin_lock(&ch->collect_lock);
1145 ctc_purge_skb_queue(&ch->collect_queue);
1146 ch->collect_len = 0;
1147 spin_unlock(&ch->collect_lock);
1148 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1149 DEV_EVENT_TXDOWN, dev);
1154 * A stop command from the device statemachine arrived while we are
1155 * not operational. Set state to stopped.
1157 * @param fi An instance of a channel statemachine.
1158 * @param event The event that just happened.
1159 * @param arg Generic pointer, cast from channel * upon call.
1162 ch_action_stop(fsm_instance * fi, int event, void *arg)
1164 fsm_newstate(fi, CH_STATE_STOPPED);
1168 * A machine check for no path, not-operational status or a gone device has
1170 * occurred. Clean up the queue and notify the interface statemachine.
1172 * @param fi An instance of a channel statemachine.
1173 * @param event The event that just happened.
1174 * @param arg Generic pointer, cast from channel * upon call.
1177 ch_action_fail(fsm_instance * fi, int event, void *arg)
1179 struct channel *ch = (struct channel *) arg;
1180 struct net_device *dev = ch->netdev;
1182 DBF_TEXT(trace, 3, __FUNCTION__);
1183 fsm_deltimer(&ch->timer);
1184 fsm_newstate(fi, CH_STATE_NOTOP);
1185 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1186 skb_queue_purge(&ch->io_queue);
1187 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1188 DEV_EVENT_RXDOWN, dev);
1190 ctc_purge_skb_queue(&ch->io_queue);
1191 spin_lock(&ch->collect_lock);
1192 ctc_purge_skb_queue(&ch->collect_queue);
1193 ch->collect_len = 0;
1194 spin_unlock(&ch->collect_lock);
1195 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1196 DEV_EVENT_TXDOWN, dev);
1201 * Handle error during setup of channel.
1203 * @param fi An instance of a channel statemachine.
1204 * @param event The event that just happened.
1205 * @param arg Generic pointer, cast from channel * upon call.
1208 ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1210 struct channel *ch = (struct channel *) arg;
1211 struct net_device *dev = ch->netdev;
1213 DBF_TEXT(setup, 3, __FUNCTION__);
1215 * Special case: Got UC_RCRESET on setmode.
1216 * This means that the remote side isn't set up yet. In this case
1217 * simply retry when the timer armed below (CTC_TIMEOUT_5SEC) expires...
1219 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1220 ((event == CH_EVENT_UC_RCRESET) ||
1221 (event == CH_EVENT_UC_RSRESET))) {
1222 fsm_newstate(fi, CH_STATE_STARTRETRY);
1223 fsm_deltimer(&ch->timer);
1224 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1225 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1226 int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1228 ccw_check_return_code(
1229 ch, rc, "HaltIO in ch_action_setuperr");
1234 ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
1235 dev->name, ch_event_names[event],
1236 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1237 fsm_getstate_str(fi));
1238 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1239 fsm_newstate(fi, CH_STATE_RXERR);
1240 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1241 DEV_EVENT_RXDOWN, dev);
1243 fsm_newstate(fi, CH_STATE_TXERR);
1244 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1245 DEV_EVENT_TXDOWN, dev);
1250 * Restart a channel after an error.
1252 * @param fi An instance of a channel statemachine.
1253 * @param event The event that just happened.
1254 * @param arg Generic pointer, cast from channel * upon call.
1257 ch_action_restart(fsm_instance * fi, int event, void *arg)
1259 unsigned long saveflags;
1263 struct channel *ch = (struct channel *) arg;
1264 struct net_device *dev = ch->netdev;
1266 DBF_TEXT(trace, 3, __FUNCTION__);
1267 fsm_deltimer(&ch->timer);
1268 ctc_pr_debug("%s: %s channel restart\n", dev->name,
1269 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1270 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1271 oldstate = fsm_getstate(fi);
1272 fsm_newstate(fi, CH_STATE_STARTWAIT);
1273 saveflags = 0; /* avoids compiler warning with
1274 spin_unlock_irqrestore */
1275 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1276 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1277 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1278 if (event == CH_EVENT_TIMER)
1279 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1282 fsm_deltimer(&ch->timer);
1283 fsm_newstate(fi, oldstate);
1285 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1290 * Handle error during RX initial handshake (exchange of
1291 * 0-length block header)
1293 * @param fi An instance of a channel statemachine.
1294 * @param event The event that just happened.
1295 * @param arg Generic pointer, cast from channel * upon call.
1298 ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1300 struct channel *ch = (struct channel *) arg;
1301 struct net_device *dev = ch->netdev;
1303 DBF_TEXT(setup, 3, __FUNCTION__);
1304 if (event == CH_EVENT_TIMER) {
1305 fsm_deltimer(&ch->timer);
1306 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
1307 if (ch->retry++ < 3)
1308 ch_action_restart(fi, event, arg);
1310 fsm_newstate(fi, CH_STATE_RXERR);
1311 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1312 DEV_EVENT_RXDOWN, dev);
1315 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1319 * Notify the device statemachine if we gave up initialization of the RX channel.
1322 * @param fi An instance of a channel statemachine.
1323 * @param event The event that just happened.
1324 * @param arg Generic pointer, cast from channel * upon call.
1327 ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1329 struct channel *ch = (struct channel *) arg;
1330 struct net_device *dev = ch->netdev;
1332 DBF_TEXT(setup, 3, __FUNCTION__);
1333 fsm_newstate(fi, CH_STATE_RXERR);
1334 ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1335 ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1336 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1340 * Handle RX Unit check remote reset (remote disconnected)
1342 * @param fi An instance of a channel statemachine.
1343 * @param event The event that just happened.
1344 * @param arg Generic pointer, cast from channel * upon call.
1347 ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1349 struct channel *ch = (struct channel *) arg;
1350 struct channel *ch2;
1351 struct net_device *dev = ch->netdev;
1353 DBF_TEXT(trace, 3, __FUNCTION__);
1354 fsm_deltimer(&ch->timer);
1355 ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1359 * Notify device statemachine
1361 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1362 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
1364 fsm_newstate(fi, CH_STATE_DTERM);
1365 ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1366 fsm_newstate(ch2->fsm, CH_STATE_DTERM);
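/*
 * Halt both subchannels and wait in DTERM; the resulting final-status
 * interrupts drive ch_action_setmode() (see the DTERM/FINSTAT entry in
 * ch_fsm) and re-establish the link.
 */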
1368 ccw_device_halt(ch->cdev, (unsigned long) ch);
1369 ccw_device_halt(ch2->cdev, (unsigned long) ch2);
1373 * Handle error during TX channel initialization.
1375 * @param fi An instance of a channel statemachine.
1376 * @param event The event that just happened.
1377 * @param arg Generic pointer, cast from channel * upon call.
1380 ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1382 struct channel *ch = (struct channel *) arg;
1383 struct net_device *dev = ch->netdev;
1385 DBF_TEXT(setup, 2, __FUNCTION__);
1386 if (event == CH_EVENT_TIMER) {
1387 fsm_deltimer(&ch->timer);
1388 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
1389 if (ch->retry++ < 3)
1390 ch_action_restart(fi, event, arg);
1392 fsm_newstate(fi, CH_STATE_TXERR);
1393 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1394 DEV_EVENT_TXDOWN, dev);
1397 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
1401 * Handle TX timeout by retrying operation.
1403 * @param fi An instance of a channel statemachine.
1404 * @param event The event that just happened.
1405 * @param arg Generic pointer, cast from channel * upon call.
1408 ch_action_txretry(fsm_instance * fi, int event, void *arg)
1410 struct channel *ch = (struct channel *) arg;
1411 struct net_device *dev = ch->netdev;
1412 unsigned long saveflags;
1414 DBF_TEXT(trace, 4, __FUNCTION__);
1415 fsm_deltimer(&ch->timer);
1416 if (ch->retry++ > 3) {
1417 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1419 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1420 DEV_EVENT_TXDOWN, dev);
1421 ch_action_restart(fi, event, arg);
1423 struct sk_buff *skb;
1425 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
1426 if ((skb = skb_peek(&ch->io_queue))) {
1429 clear_normalized_cda(&ch->ccw[4]);
1430 ch->ccw[4].count = skb->len;
1431 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1433 "%s: IDAL alloc failed, chan restart\n",
1435 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1436 DEV_EVENT_TXDOWN, dev);
1437 ch_action_restart(fi, event, arg);
1440 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1441 saveflags = 0; /* avoids compiler warning with
1442 spin_unlock_irqrestore */
1443 if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1444 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1446 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1447 (unsigned long) ch, 0xff, 0);
1448 if (event == CH_EVENT_TIMER)
1449 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1452 fsm_deltimer(&ch->timer);
1453 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1454 ctc_purge_skb_queue(&ch->io_queue);
1462 * Handle fatal errors during an I/O command.
1464 * @param fi An instance of a channel statemachine.
1465 * @param event The event that just happened.
1466 * @param arg Generic pointer, cast from channel * upon call.
1469 ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1471 struct channel *ch = (struct channel *) arg;
1472 struct net_device *dev = ch->netdev;
1474 DBF_TEXT(trace, 3, __FUNCTION__);
1475 fsm_deltimer(&ch->timer);
1476 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1477 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1478 fsm_newstate(fi, CH_STATE_RXERR);
1479 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1480 DEV_EVENT_RXDOWN, dev);
1482 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1483 fsm_newstate(fi, CH_STATE_TXERR);
1484 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1485 DEV_EVENT_TXDOWN, dev);
1490 ch_action_reinit(fsm_instance *fi, int event, void *arg)
1492 struct channel *ch = (struct channel *)arg;
1493 struct net_device *dev = ch->netdev;
1494 struct ctc_priv *privptr = dev->priv;
1496 DBF_TEXT(trace, 4, __FUNCTION__);
1497 ch_action_iofatal(fi, event, arg);
1498 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1502 * The statemachine for a channel.
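 * Each entry is a {state, event, action} triple; add_channel() hands this
 * table to init_fsm() together with the state and event name arrays to
 * build the dispatch matrix.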
1504 static const fsm_node ch_fsm[] = {
1505 {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
1506 {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
1507 {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
1508 {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
1510 {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
1511 {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
1512 {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
1513 {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
1514 {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
1516 {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
1517 {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
1518 {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
1519 {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
1520 {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1521 {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1522 {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1524 {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
1525 {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
1526 {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
1527 {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
1529 {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
1530 {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
1531 {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
1532 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
1533 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
1534 {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
1535 {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1536 {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1537 {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1539 {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
1540 {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
1541 {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
1542 {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
1543 {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
1544 {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
1545 {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
1546 {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1547 {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1548 {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
1549 {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1551 {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
1552 {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
1553 {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
1554 {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
1555 // {CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
1556 {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1557 {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1558 {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1559 {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
1561 {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
1562 {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
1563 {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
1564 {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
1565 {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
1566 {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
1567 {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1568 {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1569 {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1571 {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
1572 {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
1573 {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
1574 {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
1575 {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
1576 {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1577 {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1578 {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1580 {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
1581 {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
1582 {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
1583 {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1584 {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1585 {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
1587 {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
1588 {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
1589 {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
1590 {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1591 {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1592 {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
1594 {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
1595 {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
1596 {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
1597 {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
1598 {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
1599 {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
1600 {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1601 {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
1602 {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
1604 {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
1605 {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
1606 {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1607 {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1610 static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
1613 * Functions related to setup and device detection.
1614 *****************************************************************************/
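/*
 * Compare two channel IDs field by field as hexadecimal numbers; used by
 * add_channel() below to keep the global channel list sorted. (The IDs are
 * the "ch-<bus_id>" strings built in add_channel().)
 */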
1617 less_than(char *id1, char *id2)
1621 for (i = 0; i < 5; i++) {
1625 dev1 = simple_strtoul(id1, &id1, 16);
1626 dev2 = simple_strtoul(id2, &id2, 16);
1628 return (dev1 < dev2);
1632 * Add a new channel to the list of channels.
1633 * Keeps the channel list sorted.
1635 * @param cdev The ccw_device to be added.
1636 * @param type The type class of the new channel.
1638 * @return 0 on success, !0 on error.
1641 add_channel(struct ccw_device *cdev, enum channel_types type)
1643 struct channel **c = &channels;
1646 DBF_TEXT(trace, 2, __FUNCTION__);
1648 (struct channel *) kmalloc(sizeof (struct channel),
1649 GFP_KERNEL)) == NULL) {
1650 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1653 memset(ch, 0, sizeof (struct channel));
1654 if ((ch->ccw = (struct ccw1 *) kmalloc(8*sizeof(struct ccw1),
1655 GFP_KERNEL | GFP_DMA)) == NULL) {
1657 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1661 memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // ensure all flags and counters are reset
1664 * "static" ccws are used in the following way:
1666 * ccw[0..2] (Channel program for generic I/O):
1668 * 1: read or write (depending on direction) with fixed
1669 * buffer (idal allocated once when buffer is allocated)
1671 * ccw[3..5] (Channel program for direct write of packets)
1673 * 4: write (idal allocated on every write).
1675 * ccw[6..7] (Channel program for initial channel setup):
1676 * 6: set extended mode
1679 * ch->ccw[0..5] are initialized in ch_action_start because
1680 * the channel's direction is yet unknown here.
1682 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1683 ch->ccw[6].flags = CCW_FLAG_SLI;
1685 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1686 ch->ccw[7].flags = CCW_FLAG_SLI;
1689 snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1691 ch->fsm = init_fsm(ch->id, ch_state_names,
1692 ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1693 ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1694 if (ch->fsm == NULL) {
1695 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1700 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1701 if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb),
1702 GFP_KERNEL)) == NULL) {
1703 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1709 memset(ch->irb, 0, sizeof (struct irb));
1710 while (*c && less_than((*c)->id, ch->id))
1712 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
1714 "ctc: add_channel: device %s already in list, "
1715 "using old entry\n", (*c)->id);
1722 fsm_settimer(ch->fsm, &ch->timer);
1723 skb_queue_head_init(&ch->io_queue);
1724 skb_queue_head_init(&ch->collect_queue);
1731 * Release a specific channel in the channel list.
1733 * @param ch Pointer to channel struct to be released.
1736 channel_free(struct channel *ch)
1738 ch->flags &= ~CHANNEL_FLAGS_INUSE;
1739 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1743 * Remove a specific channel from the channel list.
1745 * @param ch Pointer to channel struct to be released.
1748 channel_remove(struct channel *ch)
1750 struct channel **c = &channels;
1752 DBF_TEXT(trace, 2, __FUNCTION__);
1760 fsm_deltimer(&ch->timer);
1762 clear_normalized_cda(&ch->ccw[4]);
1763 if (ch->trans_skb != NULL) {
1764 clear_normalized_cda(&ch->ccw[1]);
1765 dev_kfree_skb(ch->trans_skb);
1777 * Get a specific channel from the channel list.
1779 * @param type Type of channel we are interested in.
1780 * @param id Id of channel we are interested in.
1781 * @param direction Direction we want to use this channel for.
1783 * @return Pointer to a channel or NULL if no matching channel available.
1785 static struct channel
1787 channel_get(enum channel_types type, char *id, int direction)
1789 struct channel *ch = channels;
1791 DBF_TEXT(trace, 3, __FUNCTION__);
1793 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
1794 __func__, id, type);
1797 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
1799 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
1800 __func__, ch, ch->id, ch->type);
1805 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
1806 __func__, ch, ch->id, ch->type);
1809 ctc_pr_warn("ctc: %s(): channel with id %s "
1810 "and type %d not found in channel list\n",
1811 __func__, id, type);
1813 if (ch->flags & CHANNEL_FLAGS_INUSE)
1816 ch->flags |= CHANNEL_FLAGS_INUSE;
1817 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
1818 ch->flags |= (direction == WRITE)
1819 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
1820 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
1827 * Return the channel type by name.
1829 * @param name Name of network interface.
1831 * @return Type class of channel to be used for that interface.
1833 static inline enum channel_types
1834 extract_channel_media(char *name)
1836 enum channel_types ret = channel_type_unknown;
1839 if (strncmp(name, "ctc", 3) == 0)
1840 ret = channel_type_parallel;
1841 if (strncmp(name, "escon", 5) == 0)
1842 ret = channel_type_escon;
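/*
 * Called at the top of the IRQ handler: if the common I/O layer handed us an
 * error pointer instead of a real irb (I/O error or timeout on the
 * subchannel), log the reason and return the error so the interrupt is
 * ignored.
 */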
1848 __ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1853 switch (PTR_ERR(irb)) {
1855 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
1856 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1857 // CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1860 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
1861 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1862 // CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
1865 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
1867 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1868 // CTC_DBF_TEXT(trace, 2, " rc???");
1870 return PTR_ERR(irb);
1876 * @param cdev The ccw_device the interrupt is for.
1877 * @param intparm interruption parameter.
1878 * @param irb interruption response block.
1881 ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1884 struct net_device *dev;
1885 struct ctc_priv *priv;
1887 DBF_TEXT(trace, 5, __FUNCTION__);
1888 if (__ctc_check_irb_error(cdev, irb))
1891 /* Check for unsolicited interrupts. */
1892 if (!cdev->dev.driver_data) {
1893 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
1894 cdev->dev.bus_id, irb->scsw.cstat,
1899 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
1902 /* Try to extract channel from driver data. */
1903 if (priv->channel[READ]->cdev == cdev)
1904 ch = priv->channel[READ];
1905 else if (priv->channel[WRITE]->cdev == cdev)
1906 ch = priv->channel[WRITE];
1908 ctc_pr_err("ctc: Can't determine channel for interrupt, "
1909 "device %s\n", cdev->dev.bus_id);
1913 dev = (struct net_device *) (ch->netdev);
1915 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
1916 cdev->dev.bus_id, ch);
1921 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
1922 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
1925 /* Copy interruption response block. */
1926 memcpy(ch->irb, irb, sizeof(struct irb));
1928 /* Check for good subchannel return code, otherwise error message */
1929 if (ch->irb->scsw.cstat) {
1930 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
1931 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
1932 dev->name, ch->id, ch->irb->scsw.cstat,
1933 ch->irb->scsw.dstat);
1937 /* Check the reason-code of a unit check */
1938 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
1939 ccw_unit_check(ch, ch->irb->ecw[0]);
1942 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
1943 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
1944 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
1946 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
1949 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
1950 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
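/*
 * All special conditions are handled above; now classify the remaining
 * status: secondary status, status pending, or alert+pending means the
 * channel program has finished (FINSTAT); anything else is passed on as a
 * generic IRQ event.
 */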
1953 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
1954 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
1955 (ch->irb->scsw.stctl ==
1956 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1957 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
1959 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
1964 * Actions for interface - statemachine.
1965 *****************************************************************************/
1968 * Start up channels by sending CH_EVENT_START to each channel.
1970 * @param fi An instance of an interface statemachine.
1971 * @param event The event that just happened.
1972 * @param arg Generic pointer, cast from struct net_device * upon call.
1975 dev_action_start(fsm_instance * fi, int event, void *arg)
1977 struct net_device *dev = (struct net_device *) arg;
1978 struct ctc_priv *privptr = dev->priv;
1981 DBF_TEXT(setup, 3, __FUNCTION__);
1982 fsm_deltimer(&privptr->restart_timer);
1983 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
1984 for (direction = READ; direction <= WRITE; direction++) {
1985 struct channel *ch = privptr->channel[direction];
1986 fsm_event(ch->fsm, CH_EVENT_START, ch);
1991 * Shut down channels by sending CH_EVENT_STOP to each channel.
1993 * @param fi An instance of an interface statemachine.
1994 * @param event The event that just happened.
1995 * @param arg Generic pointer, cast from struct net_device * upon call.
1998 dev_action_stop(fsm_instance * fi, int event, void *arg)
2000 struct net_device *dev = (struct net_device *) arg;
2001 struct ctc_priv *privptr = dev->priv;
2004 DBF_TEXT(trace, 3, __FUNCTION__);
2005 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2006 for (direction = READ; direction <= WRITE; direction++) {
2007 struct channel *ch = privptr->channel[direction];
2008 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
2012 dev_action_restart(fsm_instance *fi, int event, void *arg)
2014 struct net_device *dev = (struct net_device *)arg;
2015 struct ctc_priv *privptr = dev->priv;
2017 DBF_TEXT(trace, 3, __FUNCTION__);
2018 ctc_pr_debug("%s: Restarting\n", dev->name);
2019 dev_action_stop(fi, event, arg);
2020 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2021 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2022 DEV_EVENT_START, dev);
2026 * Called from channel statemachine
2027 * when a channel is up and running.
2029 * @param fi An instance of an interface statemachine.
2030 * @param event The event that just happened.
2031 * @param arg Generic pointer, cast from struct net_device * upon call.
2034 dev_action_chup(fsm_instance * fi, int event, void *arg)
2036 struct net_device *dev = (struct net_device *) arg;
2037 struct ctc_priv *privptr = dev->priv;
2039 DBF_TEXT(trace, 3, __FUNCTION__);
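/*
 * One channel reports up (RXUP/TXUP). The device only reaches RUNNING once
 * both directions are up; the intermediate STARTWAIT_RX/STARTWAIT_TX states
 * remember which channel is still missing.
 */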
2040 switch (fsm_getstate(fi)) {
2041 case DEV_STATE_STARTWAIT_RXTX:
2042 if (event == DEV_EVENT_RXUP)
2043 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2045 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2047 case DEV_STATE_STARTWAIT_RX:
2048 if (event == DEV_EVENT_RXUP) {
2049 fsm_newstate(fi, DEV_STATE_RUNNING);
2050 ctc_pr_info("%s: connected with remote side\n",
2052 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2053 ctc_tty_setcarrier(dev, 1);
2054 ctc_clear_busy(dev);
2057 case DEV_STATE_STARTWAIT_TX:
2058 if (event == DEV_EVENT_TXUP) {
2059 fsm_newstate(fi, DEV_STATE_RUNNING);
2060 ctc_pr_info("%s: connected with remote side\n",
2062 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2063 ctc_tty_setcarrier(dev, 1);
2064 ctc_clear_busy(dev);
2067 case DEV_STATE_STOPWAIT_TX:
2068 if (event == DEV_EVENT_RXUP)
2069 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2071 case DEV_STATE_STOPWAIT_RX:
2072 if (event == DEV_EVENT_TXUP)
2073 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2079 * Called from channel statemachine
2080 * when a channel has been shut down.
2082 * @param fi An instance of an interface statemachine.
2083 * @param event The event that just happened.
2084 * @param arg Generic pointer, cast from struct net_device * upon call.
2087 dev_action_chdown(fsm_instance * fi, int event, void *arg)
2089 struct net_device *dev = (struct net_device *) arg;
2090 struct ctc_priv *privptr = dev->priv;
2092 DBF_TEXT(trace, 3, __FUNCTION__);
2093 switch (fsm_getstate(fi)) {
2094 case DEV_STATE_RUNNING:
2095 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2096 ctc_tty_setcarrier(dev, 0);
2097 if (event == DEV_EVENT_TXDOWN)
2098 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2100 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2102 case DEV_STATE_STARTWAIT_RX:
2103 if (event == DEV_EVENT_TXDOWN)
2104 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2106 case DEV_STATE_STARTWAIT_TX:
2107 if (event == DEV_EVENT_RXDOWN)
2108 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2110 case DEV_STATE_STOPWAIT_RXTX:
2111 if (event == DEV_EVENT_TXDOWN)
2112 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2114 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2116 case DEV_STATE_STOPWAIT_RX:
2117 if (event == DEV_EVENT_RXDOWN)
2118 fsm_newstate(fi, DEV_STATE_STOPPED);
2120 case DEV_STATE_STOPWAIT_TX:
2121 if (event == DEV_EVENT_TXDOWN)
2122 fsm_newstate(fi, DEV_STATE_STOPPED);
2127 static const fsm_node dev_fsm[] = {
2128 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2130 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2131 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2132 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2133 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2135 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2136 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2137 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2138 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2139 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2141 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2142 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2143 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2144 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2145 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2147 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2148 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2149 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2150 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2151 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2152 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2154 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2155 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2156 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2157 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2158 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2160 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2161 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2162 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2163 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2164 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2166 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2167 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2168 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2169 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2170 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2171 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2174 static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
2177 * Transmit a packet.
2178 * This is a helper function for ctc_tx().
2180 * @param ch Channel to be used for sending.
2181 * @param skb Pointer to struct sk_buff of packet to send.
2182 * The linklevel header has already been set up by ctc_tx().
2185 * @return 0 on success, -ERRNO on failure. (Never fails.)
2188 transmit_skb(struct channel *ch, struct sk_buff *skb)
2190 unsigned long saveflags;
2191 struct ll_header header;
2194 DBF_TEXT(trace, 5, __FUNCTION__);
2195 /* we need to acquire the lock for testing the state
2196 * otherwise we can have an IRQ changing the state to
2197 * TXIDLE after the test but before acquiring the lock.
2199 spin_lock_irqsave(&ch->collect_lock, saveflags);
2200 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2201 int l = skb->len + LL_HEADER_LENGTH;
2203 if (ch->collect_len + l > ch->max_bufsize - 2) {
2204 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2207 atomic_inc(&skb->users);
2209 header.type = skb->protocol;
2211 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2213 skb_queue_tail(&ch->collect_queue, skb);
2214 ch->collect_len += l;
2216 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2220 struct sk_buff *nskb;
2222 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2224 * Protect skb against beeing free'd by upper
2227 atomic_inc(&skb->users);
2228 ch->prof.txlen += skb->len;
2229 header.length = skb->len + LL_HEADER_LENGTH;
2230 header.type = skb->protocol;
2232 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2234 block_len = skb->len + 2;
2235 *((__u16 *) skb_push(skb, 2)) = block_len;
2238 * IDAL support in CTC is broken, so we have to
2239 * care about skb's above 2G ourselves.
2241 hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
2243 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2245 atomic_dec(&skb->users);
2246 skb_pull(skb, LL_HEADER_LENGTH + 2);
2247 ctc_clear_busy(ch->netdev);
2250 memcpy(skb_put(nskb, skb->len),
2251 skb->data, skb->len);
2252 atomic_inc(&nskb->users);
2253 atomic_dec(&skb->users);
2254 dev_kfree_skb_irq(skb);
2259 ch->ccw[4].count = block_len;
2260 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2262 * idal allocation failed, try via copying to
2263 * trans_skb. trans_skb usually has a pre-allocated
2266 if (ctc_checkalloc_buffer(ch, 1)) {
2268 * Remove our header. It gets added
2269 * again on retransmit.
2271 atomic_dec(&skb->users);
2272 skb_pull(skb, LL_HEADER_LENGTH + 2);
2273 ctc_clear_busy(ch->netdev);
2277 ch->trans_skb->tail = ch->trans_skb->data;
2278 ch->trans_skb->len = 0;
2279 ch->ccw[1].count = skb->len;
2280 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
2282 atomic_dec(&skb->users);
2283 dev_kfree_skb_irq(skb);
2286 skb_queue_tail(&ch->io_queue, skb);
2290 fsm_newstate(ch->fsm, CH_STATE_TX);
2291 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2292 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2293 ch->prof.send_stamp = xtime;
2294 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2295 (unsigned long) ch, 0xff, 0);
2296 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2298 ch->prof.doios_single++;
2300 fsm_deltimer(&ch->timer);
2301 ccw_check_return_code(ch, rc, "single skb TX");
2303 skb_dequeue_tail(&ch->io_queue);
2305 * Remove our header. It gets added
2306 * again on retransmit.
2308 skb_pull(skb, LL_HEADER_LENGTH + 2);
2311 struct net_device *dev = ch->netdev;
2312 struct ctc_priv *privptr = dev->priv;
2313 privptr->stats.tx_packets++;
2314 privptr->stats.tx_bytes +=
2315 skb->len - LL_HEADER_LENGTH;
2320 ctc_clear_busy(ch->netdev);
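
/*
 * Wire format produced above (a sketch based on the skb_push() calls in
 * transmit_skb(); the exact ll_header layout is defined elsewhere in this
 * driver):
 *
 *	+--------------+------------------+---------------------+
 *	| block length | ll_header        | payload             |
 *	| 2 bytes      | LL_HEADER_LENGTH | skb->len on entry   |
 *	+--------------+------------------+---------------------+
 *
 * The block length equals payload length + LL_HEADER_LENGTH + 2, i.e.
 * skb->len + 2 at the point where it is written, since the header has
 * already been pushed. On every error path the header and length word are
 * popped again (skb_pull(skb, LL_HEADER_LENGTH + 2)) so a retransmit
 * starts from a clean packet.
 */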
/*****************************************************************************
 * Interface API for upper network layers
 *****************************************************************************/
/**
 * Open an interface.
 * Called from generic network layer when ifconfig up is run.
 *
 * @param dev Pointer to interface struct.
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int
ctc_open(struct net_device * dev)
{
	DBF_TEXT(trace, 5, __FUNCTION__);
	fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
	return 0;
}
/**
 * Close an interface.
 * Called from generic network layer when ifconfig down is run.
 *
 * @param dev Pointer to interface struct.
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int
ctc_close(struct net_device * dev)
{
	DBF_TEXT(trace, 5, __FUNCTION__);
	fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
	return 0;
}
/**
 * Start transmission of a packet.
 * Called from generic network device layer.
 *
 * @param skb Pointer to buffer containing the packet.
 * @param dev Pointer to interface struct.
 *
 * @return 0 if packet consumed, !0 if packet rejected.
 *         Note: If we return !0, then the packet is freed by
 *               the generic network layer.
 */
static int
ctc_tx(struct sk_buff *skb, struct net_device * dev)
{
	int rc = 0;
	struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;

	DBF_TEXT(trace, 5, __FUNCTION__);
	/* Some sanity checks ... */
	if (skb == NULL) {
		ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
		privptr->stats.tx_dropped++;
		return 0;
	}
	if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
		ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
			    dev->name, LL_HEADER_LENGTH + 2);
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		return 0;
	}

	/*
	 * If channels are not running, try to restart them
	 * and throw away the packet.
	 */
	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
		fsm_event(privptr->fsm, DEV_EVENT_START, dev);
		if (privptr->protocol == CTC_PROTO_LINUX_TTY)
			return -EBUSY;
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		privptr->stats.tx_errors++;
		privptr->stats.tx_carrier_errors++;
		return 0;
	}

	if (ctc_test_and_set_busy(dev))
		return -EBUSY;

	dev->trans_start = jiffies;
	if (transmit_skb(privptr->channel[WRITE], skb) != 0)
		rc = 1;
	return rc;
}
/**
 * Sets MTU of an interface.
 *
 * @param dev     Pointer to interface struct.
 * @param new_mtu The new MTU to use for this interface.
 *
 * @return 0 on success, -EINVAL if MTU is out of valid range.
 *         (valid range is 576 .. 65527). If VM is on the
 *         remote side, maximum MTU is 32760, however this is
 *         <em>not</em> checked here.
 */
static int
ctc_change_mtu(struct net_device * dev, int new_mtu)
{
	struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;

	DBF_TEXT(trace, 3, __FUNCTION__);
	if ((new_mtu < 576) || (new_mtu > 65527) ||
	    (new_mtu > (privptr->channel[READ]->max_bufsize -
			LL_HEADER_LENGTH - 2)))
		return -EINVAL;
	dev->mtu = new_mtu;
	dev->hard_header_len = LL_HEADER_LENGTH + 2;
	return 0;
}
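
/*
 * Example of the relationship enforced above: every packet goes out as a
 * 2-byte block length plus an ll_header plus the payload (see
 * transmit_skb()), so the largest MTU that fits a channel buffer is
 *
 *	max_mtu = max_bufsize - LL_HEADER_LENGTH - 2
 *
 * A new_mtu above that limit, below 576 or above 65527 is rejected with
 * -EINVAL.
 */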
/**
 * Returns interface statistics of a device.
 *
 * @param dev Pointer to interface struct.
 *
 * @return Pointer to stats struct of this interface.
 */
static struct net_device_stats *
ctc_stats(struct net_device * dev)
{
	return &((struct ctc_priv *) dev->priv)->stats;
}
static ssize_t
buffer_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ctc_priv *priv;

	priv = dev->driver_data;
	if (!priv)
		return -ENODEV;
	return sprintf(buf, "%d\n", priv->buffer_size);
}

static ssize_t
buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct ctc_priv *priv;
	struct net_device *ndev;
	unsigned int bs1;
	char buffer[16];

	DBF_TEXT(trace, 3, __FUNCTION__);
	DBF_TEXT(trace, 3, buf);
	priv = dev->driver_data;
	if (!priv) {
		DBF_TEXT(trace, 3, "bfnopriv");
		return -ENODEV;
	}
	sscanf(buf, "%u", &bs1);
	if (bs1 > CTC_BUFSIZE_LIMIT)
		goto einval;
	if (bs1 < (576 + LL_HEADER_LENGTH + 2))
		goto einval;
	priv->buffer_size = bs1;	// just to overwrite the default

	ndev = priv->channel[READ]->netdev;
	if (!ndev) {
		DBF_TEXT(trace, 3, "bfnondev");
		return -ENODEV;
	}
	if ((ndev->flags & IFF_RUNNING) &&
	    (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
		goto einval;

	priv->channel[READ]->max_bufsize = bs1;
	priv->channel[WRITE]->max_bufsize = bs1;
	if (!(ndev->flags & IFF_RUNNING))
		ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
	priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
	priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;

	sprintf(buffer, "%d", priv->buffer_size);
	DBF_TEXT(trace, 3, buffer);
	return count;

einval:
	DBF_TEXT(trace, 3, "buff_err");
	return -EINVAL;
}
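
/*
 * Usage sketch for the "buffer" attribute declared further below via
 * DEVICE_ATTR(buffer, ...). The sysfs path is an assumption based on the
 * usual ccwgroup device layout and is shown for illustration only:
 *
 *	echo 32768 > /sys/bus/ccwgroup/devices/<device>/buffer
 *
 * Values are accepted between 576 + LL_HEADER_LENGTH + 2 and
 * CTC_BUFSIZE_LIMIT; while the interface is running the new size must also
 * still cover the current MTU, otherwise -EINVAL is returned. The channels
 * pick up the new size via CHANNEL_FLAGS_BUFSIZE_CHANGED (buffer
 * reallocation happens elsewhere in this driver).
 */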
static ssize_t
loglevel_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", loglevel);
}

static ssize_t
loglevel_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	int ll1;

	DBF_TEXT(trace, 5, __FUNCTION__);
	sscanf(buf, "%i", &ll1);

	if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
		return -EINVAL;
	loglevel = ll1;
	return count;
}

static void
ctc_print_statistics(struct ctc_priv *priv)
{
	char *sbuf;
	char *p;

	DBF_TEXT(trace, 4, __FUNCTION__);
	if (!priv)
		return;
	sbuf = kmalloc(2048, GFP_KERNEL);
	if (sbuf == NULL)
		return;
	p = sbuf;

	p += sprintf(p, "  Device FSM state: %s\n",
		     fsm_getstate_str(priv->fsm));
	p += sprintf(p, "  RX channel FSM state: %s\n",
		     fsm_getstate_str(priv->channel[READ]->fsm));
	p += sprintf(p, "  TX channel FSM state: %s\n",
		     fsm_getstate_str(priv->channel[WRITE]->fsm));
	p += sprintf(p, "  Max. TX buffer used: %ld\n",
		     priv->channel[WRITE]->prof.maxmulti);
	p += sprintf(p, "  Max. chained SKBs: %ld\n",
		     priv->channel[WRITE]->prof.maxcqueue);
	p += sprintf(p, "  TX single write ops: %ld\n",
		     priv->channel[WRITE]->prof.doios_single);
	p += sprintf(p, "  TX multi write ops: %ld\n",
		     priv->channel[WRITE]->prof.doios_multi);
	p += sprintf(p, "  Netto bytes written: %ld\n",
		     priv->channel[WRITE]->prof.txlen);
	p += sprintf(p, "  Max. TX IO-time: %ld\n",
		     priv->channel[WRITE]->prof.tx_time);

	ctc_pr_debug("Statistics for %s:\n%s",
		     priv->channel[WRITE]->netdev->name, sbuf);
	kfree(sbuf);
}
static ssize_t
stats_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ctc_priv *priv = dev->driver_data;

	if (!priv)
		return -ENODEV;
	ctc_print_statistics(priv);
	return sprintf(buf, "0\n");
}

static ssize_t
stats_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct ctc_priv *priv = dev->driver_data;

	if (!priv)
		return -ENODEV;
	/* Reset statistics */
	memset(&priv->channel[WRITE]->prof, 0,
	       sizeof(priv->channel[WRITE]->prof));
	return count;
}
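
/*
 * The "stats" attribute is effectively write-to-reset: reading it dumps the
 * TX profiling counters to the kernel log via ctc_print_statistics() (the
 * read itself only returns "0\n"), while writing any value clears the
 * counters. Illustrative shell usage, path assumed from the usual ccwgroup
 * sysfs layout:
 *
 *	cat /sys/bus/ccwgroup/devices/<device>/stats        # log counters
 *	echo 0 > /sys/bus/ccwgroup/devices/<device>/stats   # reset counters
 */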
static void
ctc_netdev_unregister(struct net_device * dev)
{
	struct ctc_priv *privptr;

	if (!dev)
		return;
	privptr = (struct ctc_priv *) dev->priv;
	if (privptr->protocol != CTC_PROTO_LINUX_TTY)
		unregister_netdev(dev);
	else
		ctc_tty_unregister_netdev(dev);
}

static int
ctc_netdev_register(struct net_device * dev)
{
	struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;

	if (privptr->protocol != CTC_PROTO_LINUX_TTY)
		return register_netdev(dev);
	else
		return ctc_tty_register_netdev(dev);
}

static void
ctc_free_netdevice(struct net_device * dev, int free_dev)
{
	struct ctc_priv *privptr;

	if (!dev)
		return;
	privptr = dev->priv;
	if (privptr) {
		if (privptr->fsm)
			kfree_fsm(privptr->fsm);
		kfree(privptr);
	}
	if (free_dev)
		free_netdev(dev);
}
static ssize_t
ctc_proto_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ctc_priv *priv;

	priv = dev->driver_data;
	if (!priv)
		return -ENODEV;

	return sprintf(buf, "%d\n", priv->protocol);
}

static ssize_t
ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct ctc_priv *priv;
	int value;

	DBF_TEXT(trace, 3, __FUNCTION__);
	pr_debug("%s() called\n", __FUNCTION__);

	priv = dev->driver_data;
	if (!priv)
		return -ENODEV;
	sscanf(buf, "%d", &value);
	if ((value < 0) || (value > CTC_PROTO_MAX))
		return -EINVAL;
	priv->protocol = value;

	return count;
}
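
/*
 * The "protocol" attribute selects the link-level framing (0 .. CTC_PROTO_MAX;
 * CTC_PROTO_LINUX_TTY selects the tty variant handled specially in ctc_tx()
 * and ctc_netdev_register()). A new value only takes effect the next time
 * the device is set online, because ctc_new_device() copies it into the
 * channel structs. Illustrative usage, path assumed:
 *
 *	echo 0 > /sys/bus/ccwgroup/devices/<device>/protocol
 */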
static ssize_t
ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccwgroup_device *cgdev;

	cgdev = to_ccwgroupdev(dev);
	if (!cgdev)
		return -ENODEV;

	return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
}

static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);

static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
static DEVICE_ATTR(stats, 0644, stats_show, stats_write);

static struct attribute *ctc_attr[] = {
	&dev_attr_protocol.attr,
	&dev_attr_type.attr,
	&dev_attr_buffer.attr,
	NULL,
};

static struct attribute_group ctc_attr_group = {
	.attrs = ctc_attr,
};
static int
ctc_add_attributes(struct device *dev)
{
	device_create_file(dev, &dev_attr_loglevel);
	device_create_file(dev, &dev_attr_stats);
	return 0;
}

static void
ctc_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_stats);
	device_remove_file(dev, &dev_attr_loglevel);
}

static int
ctc_add_files(struct device *dev)
{
	pr_debug("%s() called\n", __FUNCTION__);

	return sysfs_create_group(&dev->kobj, &ctc_attr_group);
}

static void
ctc_remove_files(struct device *dev)
{
	pr_debug("%s() called\n", __FUNCTION__);

	sysfs_remove_group(&dev->kobj, &ctc_attr_group);
}
/**
 * Add ctc specific attributes.
 * Add ctc private data.
 *
 * @param cgdev pointer to ccwgroup_device just added
 *
 * @returns 0 on success, !0 on failure.
 */
static int
ctc_probe_device(struct ccwgroup_device *cgdev)
{
	struct ctc_priv *priv;
	int rc;
	char buffer[16];

	pr_debug("%s() called\n", __FUNCTION__);
	DBF_TEXT(setup, 3, __FUNCTION__);

	if (!get_device(&cgdev->dev))
		return -ENODEV;

	priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
	if (!priv) {
		ctc_pr_err("%s: Out of memory\n", __func__);
		put_device(&cgdev->dev);
		return -ENOMEM;
	}

	memset(priv, 0, sizeof (struct ctc_priv));
	rc = ctc_add_files(&cgdev->dev);
	if (rc) {
		kfree(priv);
		put_device(&cgdev->dev);
		return rc;
	}
	priv->buffer_size = CTC_BUFSIZE_DEFAULT;
	cgdev->cdev[0]->handler = ctc_irq_handler;
	cgdev->cdev[1]->handler = ctc_irq_handler;
	cgdev->dev.driver_data = priv;

	sprintf(buffer, "%p", priv);
	DBF_TEXT(data, 3, buffer);

	sprintf(buffer, "%u", (unsigned int)sizeof(struct ctc_priv));
	DBF_TEXT(data, 3, buffer);

	sprintf(buffer, "%p", &channels);
	DBF_TEXT(data, 3, buffer);

	sprintf(buffer, "%u", (unsigned int)sizeof(struct channel));
	DBF_TEXT(data, 3, buffer);

	return 0;
}
/**
 * Initialize everything of the net device except the name and the
 * channel structs.
 */
static struct net_device *
ctc_init_netdevice(struct net_device * dev, int alloc_device,
		   struct ctc_priv *privptr)
{
	if (!privptr)
		return NULL;

	DBF_TEXT(setup, 3, __FUNCTION__);

	if (alloc_device) {
		dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
		if (!dev)
			return NULL;
		memset(dev, 0, sizeof (struct net_device));
	}

	dev->priv = privptr;
	privptr->fsm = init_fsm("ctcdev", dev_state_names,
				dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS,
				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
	if (privptr->fsm == NULL) {
		if (alloc_device)
			kfree(dev);
		return NULL;
	}
	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
	fsm_settimer(privptr->fsm, &privptr->restart_timer);

	dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
	dev->hard_start_xmit = ctc_tx;
	dev->open = ctc_open;
	dev->stop = ctc_close;
	dev->get_stats = ctc_stats;
	dev->change_mtu = ctc_change_mtu;
	dev->hard_header_len = LL_HEADER_LENGTH + 2;
	dev->type = ARPHRD_SLIP;
	dev->tx_queue_len = 100;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	SET_MODULE_OWNER(dev);
	return dev;
}
/**
 * Setup an interface.
 *
 * @param cgdev Device to be set up.
 *
 * @returns 0 on success, !0 on failure.
 */
static int
ctc_new_device(struct ccwgroup_device *cgdev)
{
	char read_id[CTC_ID_SIZE];
	char write_id[CTC_ID_SIZE];
	int direction;
	enum channel_types type;
	struct ctc_priv *privptr;
	struct net_device *dev;
	int ret;
	char buffer[16];

	pr_debug("%s() called\n", __FUNCTION__);
	DBF_TEXT(setup, 3, __FUNCTION__);

	privptr = cgdev->dev.driver_data;
	if (!privptr)
		return -ENODEV;

	sprintf(buffer, "%d", privptr->buffer_size);
	DBF_TEXT(setup, 3, buffer);

	type = get_channel_type(&cgdev->cdev[0]->id);

	snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
	snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);

	if (add_channel(cgdev->cdev[0], type))
		return -ENOMEM;
	if (add_channel(cgdev->cdev[1], type))
		return -ENOMEM;

	ret = ccw_device_set_online(cgdev->cdev[0]);
	if (ret != 0) {
		printk(KERN_WARNING
		       "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
	}

	ret = ccw_device_set_online(cgdev->cdev[1]);
	if (ret != 0) {
		printk(KERN_WARNING
		       "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
	}

	dev = ctc_init_netdevice(NULL, 1, privptr);
	if (!dev) {
		ctc_pr_warn("ctc_init_netdevice failed\n");
		goto out;
	}

	if (privptr->protocol == CTC_PROTO_LINUX_TTY)
		strlcpy(dev->name, "ctctty%d", IFNAMSIZ);
	else
		strlcpy(dev->name, "ctc%d", IFNAMSIZ);

	for (direction = READ; direction <= WRITE; direction++) {
		privptr->channel[direction] =
		    channel_get(type, direction == READ ? read_id : write_id,
				direction);
		if (privptr->channel[direction] == NULL) {
			if (direction == WRITE)
				channel_free(privptr->channel[READ]);

			ctc_free_netdevice(dev, 1);
			goto out;
		}
		privptr->channel[direction]->netdev = dev;
		privptr->channel[direction]->protocol = privptr->protocol;
		privptr->channel[direction]->max_bufsize = privptr->buffer_size;
	}
	SET_NETDEV_DEV(dev, &cgdev->dev);

	if (ctc_netdev_register(dev) != 0) {
		ctc_free_netdevice(dev, 1);
		goto out;
	}

	ctc_add_attributes(&cgdev->dev);

	strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));

	ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
		    dev->name, privptr->channel[READ]->id,
		    privptr->channel[WRITE]->id, privptr->protocol);

	return 0;
out:
	ccw_device_set_offline(cgdev->cdev[1]);
	ccw_device_set_offline(cgdev->cdev[0]);

	return -ENODEV;
}
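
/*
 * Typical bring-up sequence from user space that ends in ctc_new_device()
 * (paths and device numbers are illustrative assumptions):
 *
 *	echo 0.0.f000,0.0.f001 > /sys/bus/ccwgroup/drivers/ctc/group
 *	echo 1 > /sys/bus/ccwgroup/devices/0.0.f000/online
 *	ifconfig ctc0 <local_ip> pointopoint <peer_ip> up
 *
 * Grouping the read/write subchannels creates the ccwgroup device
 * (ctc_probe_device()); setting it online runs ctc_new_device(), which
 * claims both channels and registers the ctc%d network interface.
 */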
/**
 * Shutdown an interface.
 *
 * @param cgdev Device to be shut down.
 *
 * @returns 0 on success, !0 on failure.
 */
static int
ctc_shutdown_device(struct ccwgroup_device *cgdev)
{
	struct ctc_priv *priv;
	struct net_device *ndev;

	DBF_TEXT(setup, 3, __FUNCTION__);
	pr_debug("%s() called\n", __FUNCTION__);

	priv = cgdev->dev.driver_data;
	ndev = NULL;
	if (!priv)
		return -ENODEV;

	if (priv->channel[READ]) {
		ndev = priv->channel[READ]->netdev;

		/* Close the device */
		ctc_close(ndev);
		ndev->flags &= ~IFF_RUNNING;

		ctc_remove_attributes(&cgdev->dev);

		channel_free(priv->channel[READ]);
	}
	if (priv->channel[WRITE])
		channel_free(priv->channel[WRITE]);

	if (ndev) {
		ctc_netdev_unregister(ndev);
		ndev->priv = NULL;
		ctc_free_netdevice(ndev, 1);
	}

	if (priv->fsm)
		kfree_fsm(priv->fsm);

	ccw_device_set_offline(cgdev->cdev[1]);
	ccw_device_set_offline(cgdev->cdev[0]);

	if (priv->channel[READ])
		channel_remove(priv->channel[READ]);
	if (priv->channel[WRITE])
		channel_remove(priv->channel[WRITE]);
	priv->channel[READ] = priv->channel[WRITE] = NULL;

	return 0;
}
static void
ctc_remove_device(struct ccwgroup_device *cgdev)
{
	struct ctc_priv *priv;

	pr_debug("%s() called\n", __FUNCTION__);
	DBF_TEXT(setup, 3, __FUNCTION__);

	priv = cgdev->dev.driver_data;
	if (!priv)
		return;
	if (cgdev->state == CCWGROUP_ONLINE)
		ctc_shutdown_device(cgdev);
	ctc_remove_files(&cgdev->dev);
	cgdev->dev.driver_data = NULL;
	kfree(priv);
	put_device(&cgdev->dev);
}
static struct ccwgroup_driver ctc_group_driver = {
	.owner       = THIS_MODULE,
	.name        = "ctc",
	.max_slaves  = 2,
	.driver_id   = 0xC3E3C3,
	.probe       = ctc_probe_device,
	.remove      = ctc_remove_device,
	.set_online  = ctc_new_device,
	.set_offline = ctc_shutdown_device,
};
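
/*
 * Note: driver_id 0xC3E3C3 is "CTC" in EBCDIC (0xC3 = 'C', 0xE3 = 'T'),
 * so the identifier registered with the cu3088 layer spells out the
 * driver name.
 */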
/*****************************************************************************
 * Module related routines
 *****************************************************************************/

/**
 * Prepare to be unloaded. Free IRQs and release all resources.
 * This is called just before this module is unloaded. It is
 * <em>not</em> called, if the usage count is !0, so we don't need to check
 * for that.
 */
static void __exit
ctc_exit(void)
{
	DBF_TEXT(setup, 3, __FUNCTION__);
	unregister_cu3088_discipline(&ctc_group_driver);
	ctc_tty_cleanup();
	ctc_unregister_dbf_views();
	ctc_pr_info("CTC driver unloaded\n");
}
/**
 * Initialize module.
 * This is called just after the module is loaded.
 *
 * @return 0 on success, !0 on error.
 */
static int __init
ctc_init(void)
{
	int ret = 0;

	loglevel = CTC_LOGLEVEL_DEFAULT;

	DBF_TEXT(setup, 3, __FUNCTION__);

	ret = ctc_register_dbf_views();
	if (ret) {
		ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
		return ret;
	}
	ctc_tty_init();
	ret = register_cu3088_discipline(&ctc_group_driver);
	if (ret) {
		ctc_tty_cleanup();
		ctc_unregister_dbf_views();
	}
	return ret;
}

module_init(ctc_init);
module_exit(ctc_exit);

/* --- This is the END my friend --- */