 * CTC / ESCON network driver
 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
 *            Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *            Peter Tiedemann (ptiedem@de.ibm.com)
 * Driver Model stuff by : Cornelia Huck <cornelia.huck@de.ibm.com>
 * - Principles of Operation (IBM doc#: SA22-7201-06)
 * - Common I/O-Device Commands and Self Description (IBM doc#: SA22-7204-02)
 * - Common I/O-Device Commands and Self Description (IBM doc#: SN22-5535)
 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
 * - ESCON I/O Interface (IBM doc#: SA22-7202-02)
 * and the source of the original CTC driver by:
 * Dieter Wellerdiek (wel@de.ibm.com)
 * Martin Schwidefsky (schwidefsky@de.ibm.com)
 * Denis Joseph Barrow (djbarrow@de.ibm.com, barrow_dj@yahoo.com)
 * Jochen Röhrig (roehrig@de.ibm.com)
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/kernel.h>
43 #include <linux/slab.h>
44 #include <linux/errno.h>
45 #include <linux/types.h>
46 #include <linux/interrupt.h>
47 #include <linux/timer.h>
48 #include <linux/sched.h>
49 #include <linux/bitops.h>
51 #include <linux/signal.h>
52 #include <linux/string.h>
55 #include <linux/if_arp.h>
56 #include <linux/tcp.h>
57 #include <linux/skbuff.h>
58 #include <linux/ctype.h>
62 #include <asm/ccwdev.h>
63 #include <asm/ccwgroup.h>
64 #include <asm/uaccess.h>
66 #include <asm/idals.h>
74 MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
75 MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
76 MODULE_LICENSE("GPL");
78 * States of the interface statemachine.
82 DEV_STATE_STARTWAIT_RXTX,
83 DEV_STATE_STARTWAIT_RX,
84 DEV_STATE_STARTWAIT_TX,
85 DEV_STATE_STOPWAIT_RXTX,
86 DEV_STATE_STOPWAIT_RX,
87 DEV_STATE_STOPWAIT_TX,
 * MUST always be the last element!!
95 static const char *dev_state_names[] = {
107 * Events of the interface statemachine.
 * MUST always be the last element!!
123 static const char *dev_event_names[] = {
134 * Events of the channel statemachine
138 * Events, representing return code of
139 * I/O operations (ccw_device_start, ccw_device_halt et al.)
152 * Events, representing unit-check
156 CH_EVENT_UC_TXTIMEOUT,
157 CH_EVENT_UC_TXPARITY,
159 CH_EVENT_UC_RXPARITY,
164 * Events, representing subchannel-check
169 * Events, representing machine checks
175 * Event, representing normal IRQ
181 * Event, representing timer expiry.
186 * Events, representing commands from upper levels.
 * MUST always be the last element!!
198 * States of the channel statemachine.
202 * Channel not assigned to any device,
203 * initial state, direction invalid
208 * Channel assigned but not operating
 * MUST always be the last element!!
232 static int loglevel = CTC_LOGLEVEL_DEFAULT;
235 * Linked list of all detected channels.
237 static struct channel *channels = NULL;
245 static int printed = 0;
250 printk(KERN_INFO "CTC driver initialized\n");
255 * Return type of a detected device.
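 *
 * Note: the device-id table may report FICON as a distinct channel type;
 * this driver handles FICON exactly like ESCON, so that type is folded
 * to channel_type_escon below.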
257 static enum channel_types
258 get_channel_type(struct ccw_device_id *id)
260 enum channel_types type = (enum channel_types) id->driver_info;
262 if (type == channel_type_ficon)
263 type = channel_type_escon;
268 static const char *ch_event_names[] = {
269 "ccw_device success",
273 "ccw_device unknown",
275 "Status ATTN & BUSY",
279 "Unit check remote reset",
280 "Unit check remote system reset",
281 "Unit check TX timeout",
282 "Unit check TX parity",
283 "Unit check Hardware failure",
284 "Unit check RX parity",
286 "Unit check Unknown",
288 "SubChannel check Unknown",
290 "Machine check failure",
291 "Machine check operational",
302 static const char *ch_state_names[] = {
323 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
325 * @param skb The sk_buff to dump.
 * @param offset Offset relative to skb->data, where to start the dump.
329 ctc_dump_skb(struct sk_buff *skb, int offset)
331 unsigned char *p = skb->data;
333 struct ll_header *header;
336 if (!(loglevel & CTC_LOGLEVEL_DEBUG))
341 header = (struct ll_header *) p;
344 printk(KERN_DEBUG "dump:\n");
345 printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
347 printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
349 printk(KERN_DEBUG "h->type=%04x\n", header->type);
350 printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
353 printk(KERN_DEBUG "data: ");
354 for (i = 0; i < bl; i++)
355 printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
360 ctc_dump_skb(struct sk_buff *skb, int offset)
366 * Unpack a just received skb and hand it over to
369 * @param ch The channel where this skb has been received.
370 * @param pskb The received skb.
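 *
 * As far as this function is concerned, a received block consists of a
 * 2-byte block length followed by one or more packets, each preceded by
 * an ll_header (length, type, unused); every packet is copied into its
 * own skb and passed upwards.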
372 static __inline__ void
373 ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
375 struct net_device *dev = ch->netdev;
376 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
377 __u16 len = *((__u16 *) pskb->data);
379 DBF_TEXT(trace, 4, __FUNCTION__);
380 skb_put(pskb, 2 + LL_HEADER_LENGTH);
383 pskb->ip_summed = CHECKSUM_UNNECESSARY;
386 struct ll_header *header = (struct ll_header *) pskb->data;
388 skb_pull(pskb, LL_HEADER_LENGTH);
389 if ((ch->protocol == CTC_PROTO_S390) &&
390 (header->type != ETH_P_IP)) {
393 if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
 * Check the packet type only if we stick strictly
 * to the S/390 protocol (as used by OS/390), which only
 * supports IP. Otherwise allow any packet
402 "%s Illegal packet type 0x%04x received, dropping\n",
403 dev->name, header->type);
404 ch->logflags |= LOG_FLAG_ILLEGALPKT;
409 ctc_dump_skb(pskb, -6);
411 privptr->stats.rx_dropped++;
412 privptr->stats.rx_frame_errors++;
415 pskb->protocol = ntohs(header->type);
416 if (header->length <= LL_HEADER_LENGTH) {
418 if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
421 "%s Illegal packet size %d "
422 "received (MTU=%d blocklen=%d), "
423 "dropping\n", dev->name, header->length,
425 ch->logflags |= LOG_FLAG_ILLEGALSIZE;
430 ctc_dump_skb(pskb, -6);
432 privptr->stats.rx_dropped++;
433 privptr->stats.rx_length_errors++;
436 header->length -= LL_HEADER_LENGTH;
437 len -= LL_HEADER_LENGTH;
438 if ((header->length > skb_tailroom(pskb)) ||
439 (header->length > len)) {
441 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
444 "%s Illegal packet size %d "
445 "(beyond the end of received data), "
446 "dropping\n", dev->name, header->length);
447 ch->logflags |= LOG_FLAG_OVERRUN;
452 ctc_dump_skb(pskb, -6);
454 privptr->stats.rx_dropped++;
455 privptr->stats.rx_length_errors++;
458 skb_put(pskb, header->length);
459 pskb->mac.raw = pskb->data;
460 len -= header->length;
461 skb = dev_alloc_skb(pskb->len);
464 if (!(ch->logflags & LOG_FLAG_NOMEM)) {
467 "%s Out of memory in ctc_unpack_skb\n",
469 ch->logflags |= LOG_FLAG_NOMEM;
473 privptr->stats.rx_dropped++;
476 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
477 skb->mac.raw = skb->data;
478 skb->dev = pskb->dev;
479 skb->protocol = pskb->protocol;
480 pskb->ip_summed = CHECKSUM_UNNECESSARY;
483 * Successful rx; reset logflags
486 dev->last_rx = jiffies;
487 privptr->stats.rx_packets++;
488 privptr->stats.rx_bytes += skb->len;
490 skb_pull(pskb, header->length);
491 if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
493 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
496 "%s Overrun in ctc_unpack_skb\n",
498 ch->logflags |= LOG_FLAG_OVERRUN;
504 skb_put(pskb, LL_HEADER_LENGTH);
 * Check return code of a preceding ccw_device call, halt_IO etc...
512 * @param ch The channel, the error belongs to.
513 * @param return_code The error code to inspect.
516 ccw_check_return_code(struct channel *ch, int return_code, char *msg)
518 DBF_TEXT(trace, 5, __FUNCTION__);
519 switch (return_code) {
521 fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
524 ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
525 fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
528 ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
530 fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
533 ctc_pr_emerg("%s (%s): Status pending... \n",
535 fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
538 ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
539 ch->id, msg, return_code);
540 fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
545 * Check sense of a unit check.
547 * @param ch The channel, the sense code belongs to.
548 * @param sense The sense code to inspect.
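 *
 * The sense byte is mapped onto CH_EVENT_UC_* events: intervention
 * required is reported as a remote reset or remote system reset,
 * equipment check as a hardware failure or RX parity error, and a
 * bus-out check as a TX timeout or TX parity error.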
551 ccw_unit_check(struct channel *ch, unsigned char sense)
553 DBF_TEXT(trace, 5, __FUNCTION__);
554 if (sense & SNS0_INTERVENTION_REQ) {
556 ctc_pr_debug("%s: Interface disc. or Sel. reset "
557 "(remote)\n", ch->id);
558 fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
560 ctc_pr_debug("%s: System reset (remote)\n", ch->id);
561 fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
563 } else if (sense & SNS0_EQUIPMENT_CHECK) {
564 if (sense & SNS0_BUS_OUT_CHECK) {
565 ctc_pr_warn("%s: Hardware malfunction (remote)\n",
567 fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
569 ctc_pr_warn("%s: Read-data parity error (remote)\n",
571 fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
573 } else if (sense & SNS0_BUS_OUT_CHECK) {
ctc_pr_warn("%s: Data-streaming timeout\n", ch->id);
576 fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
578 ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
579 fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
581 } else if (sense & SNS0_CMD_REJECT) {
582 ctc_pr_warn("%s: Command reject\n", ch->id);
583 } else if (sense == 0) {
584 ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
585 fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
587 ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
589 fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
594 ctc_purge_skb_queue(struct sk_buff_head *q)
598 DBF_TEXT(trace, 5, __FUNCTION__);
600 while ((skb = skb_dequeue(q))) {
601 atomic_dec(&skb->users);
602 dev_kfree_skb_irq(skb);
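/**
 * (Re)allocate the channel's fixed transfer buffer.
 *
 * Allocates ch->trans_skb of max_bufsize bytes in DMA-capable memory and
 * maps it into ccw[1] via set_normalized_cda(). Called whenever the
 * buffer is missing or the buffer size has been changed.
 *
 * @return 0 on success, !0 on allocation failure.
 */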
606 static __inline__ int
607 ctc_checkalloc_buffer(struct channel *ch, int warn)
609 DBF_TEXT(trace, 5, __FUNCTION__);
610 if ((ch->trans_skb == NULL) ||
611 (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
612 if (ch->trans_skb != NULL)
613 dev_kfree_skb(ch->trans_skb);
614 clear_normalized_cda(&ch->ccw[1]);
615 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
616 GFP_ATOMIC | GFP_DMA);
617 if (ch->trans_skb == NULL) {
620 "%s: Couldn't alloc %s trans_skb\n",
622 (CHANNEL_DIRECTION(ch->flags) == READ) ?
626 ch->ccw[1].count = ch->max_bufsize;
627 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
628 dev_kfree_skb(ch->trans_skb);
629 ch->trans_skb = NULL;
632 "%s: set_normalized_cda for %s "
633 "trans_skb failed, dropping packets\n",
635 (CHANNEL_DIRECTION(ch->flags) == READ) ?
639 ch->ccw[1].count = 0;
640 ch->trans_skb_data = ch->trans_skb->data;
641 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
647 * Dummy NOP action for statemachines
650 fsm_action_nop(fsm_instance * fi, int event, void *arg)
655 * Actions for channel - statemachines.
656 *****************************************************************************/
 * Normal data has been sent. Free the corresponding
660 * skb (it's in io_queue), reset dev->tbusy and
661 * revert to idle state.
663 * @param fi An instance of a channel statemachine.
664 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
668 ch_action_txdone(fsm_instance * fi, int event, void *arg)
670 struct channel *ch = (struct channel *) arg;
671 struct net_device *dev = ch->netdev;
672 struct ctc_priv *privptr = dev->priv;
676 unsigned long duration;
677 struct timespec done_stamp = xtime;
679 DBF_TEXT(trace, 4, __FUNCTION__);
682 (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
683 (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
684 if (duration > ch->prof.tx_time)
685 ch->prof.tx_time = duration;
687 if (ch->irb->scsw.count != 0)
688 ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
689 dev->name, ch->irb->scsw.count);
690 fsm_deltimer(&ch->timer);
691 while ((skb = skb_dequeue(&ch->io_queue))) {
692 privptr->stats.tx_packets++;
693 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
695 privptr->stats.tx_bytes += 2;
698 atomic_dec(&skb->users);
699 dev_kfree_skb_irq(skb);
701 spin_lock(&ch->collect_lock);
702 clear_normalized_cda(&ch->ccw[4]);
703 if (ch->collect_len > 0) {
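/*
 * Packets queued in collect_queue while the last write was still busy:
 * gather them into trans_skb behind a fresh 2-byte block length and
 * send them all with one chained write.
 */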
706 if (ctc_checkalloc_buffer(ch, 1)) {
707 spin_unlock(&ch->collect_lock);
710 ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
711 ch->trans_skb->len = 0;
712 if (ch->prof.maxmulti < (ch->collect_len + 2))
713 ch->prof.maxmulti = ch->collect_len + 2;
714 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
715 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
716 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
718 while ((skb = skb_dequeue(&ch->collect_queue))) {
719 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
721 privptr->stats.tx_packets++;
722 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
723 atomic_dec(&skb->users);
724 dev_kfree_skb_irq(skb);
728 spin_unlock(&ch->collect_lock);
729 ch->ccw[1].count = ch->trans_skb->len;
730 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
731 ch->prof.send_stamp = xtime;
732 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
733 (unsigned long) ch, 0xff, 0);
734 ch->prof.doios_multi++;
736 privptr->stats.tx_dropped += i;
737 privptr->stats.tx_errors += i;
738 fsm_deltimer(&ch->timer);
739 ccw_check_return_code(ch, rc, "chained TX");
742 spin_unlock(&ch->collect_lock);
743 fsm_newstate(fi, CH_STATE_TXIDLE);
749 * Initial data is sent.
750 * Notify device statemachine that we are up and
753 * @param fi An instance of a channel statemachine.
754 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
758 ch_action_txidle(fsm_instance * fi, int event, void *arg)
760 struct channel *ch = (struct channel *) arg;
762 DBF_TEXT(trace, 4, __FUNCTION__);
763 fsm_deltimer(&ch->timer);
764 fsm_newstate(fi, CH_STATE_TXIDLE);
765 fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
 * Got normal data, check for sanity, queue it up, allocate a new buffer,
 * trigger the bottom half, and initiate the next read.
773 * @param fi An instance of a channel statemachine.
774 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
778 ch_action_rx(fsm_instance * fi, int event, void *arg)
780 struct channel *ch = (struct channel *) arg;
781 struct net_device *dev = ch->netdev;
782 struct ctc_priv *privptr = dev->priv;
783 int len = ch->max_bufsize - ch->irb->scsw.count;
784 struct sk_buff *skb = ch->trans_skb;
785 __u16 block_len = *((__u16 *) skb->data);
789 DBF_TEXT(trace, 4, __FUNCTION__);
790 fsm_deltimer(&ch->timer);
792 ctc_pr_debug("%s: got packet with length %d < 8\n",
794 privptr->stats.rx_dropped++;
795 privptr->stats.rx_length_errors++;
798 if (len > ch->max_bufsize) {
799 ctc_pr_debug("%s: got packet with length %d > %d\n",
800 dev->name, len, ch->max_bufsize);
801 privptr->stats.rx_dropped++;
802 privptr->stats.rx_length_errors++;
807 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
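 * Therefore the check below tolerates a receive length that exceeds the
 * announced block length by up to two bytes for the affected protocols.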
809 switch (ch->protocol) {
811 case CTC_PROTO_OS390:
812 check_len = block_len + 2;
815 check_len = block_len;
818 if ((len < block_len) || (len > check_len)) {
819 ctc_pr_debug("%s: got block length %d != rx length %d\n",
820 dev->name, block_len, len);
822 ctc_dump_skb(skb, 0);
824 *((__u16 *) skb->data) = len;
825 privptr->stats.rx_dropped++;
826 privptr->stats.rx_length_errors++;
831 *((__u16 *) skb->data) = block_len;
832 ctc_unpack_skb(ch, skb);
835 skb->data = skb->tail = ch->trans_skb_data;
837 if (ctc_checkalloc_buffer(ch, 1))
839 ch->ccw[1].count = ch->max_bufsize;
840 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
842 ccw_check_return_code(ch, rc, "normal RX");
845 static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
848 * Initialize connection by sending a __u16 of value 0.
850 * @param fi An instance of a channel statemachine.
851 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
855 ch_action_firstio(fsm_instance * fi, int event, void *arg)
857 struct channel *ch = (struct channel *) arg;
860 DBF_TEXT(trace, 4, __FUNCTION__);
862 if (fsm_getstate(fi) == CH_STATE_TXIDLE)
863 ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
864 fsm_deltimer(&ch->timer);
865 if (ctc_checkalloc_buffer(ch, 1))
867 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
868 (ch->protocol == CTC_PROTO_OS390)) {
869 /* OS/390 resp. z/OS */
870 if (CHANNEL_DIRECTION(ch->flags) == READ) {
871 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
872 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
874 ch_action_rxidle(fi, event, arg);
876 struct net_device *dev = ch->netdev;
877 fsm_newstate(fi, CH_STATE_TXIDLE);
878 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
879 DEV_EVENT_TXUP, dev);
 * Don't set up a timer for receiving the initial RX frame
886 * if in compatibility mode, since VM TCP delays the initial
887 * frame until it has some data to send.
889 if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
890 (ch->protocol != CTC_PROTO_S390))
891 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
893 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
894 ch->ccw[1].count = 2; /* Transfer only length */
896 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
897 ? CH_STATE_RXINIT : CH_STATE_TXINIT);
898 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
900 fsm_deltimer(&ch->timer);
901 fsm_newstate(fi, CH_STATE_SETUPWAIT);
902 ccw_check_return_code(ch, rc, "init IO");
 * Since we don't set up a timer in compatibility mode, we
 * also signal the RX channel up immediately. This enables us
 * to send packets early which in turn usually triggers some
 * reply from VM TCP which brings up the RX channel to its
911 if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
912 (ch->protocol == CTC_PROTO_S390)) {
913 struct net_device *dev = ch->netdev;
914 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
920 * Got initial data, check it. If OK,
921 * notify device statemachine that we are up and
924 * @param fi An instance of a channel statemachine.
925 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
929 ch_action_rxidle(fsm_instance * fi, int event, void *arg)
931 struct channel *ch = (struct channel *) arg;
932 struct net_device *dev = ch->netdev;
936 DBF_TEXT(trace, 4, __FUNCTION__);
937 fsm_deltimer(&ch->timer);
938 buflen = *((__u16 *) ch->trans_skb->data);
940 ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
942 if (buflen >= CTC_INITIAL_BLOCKLEN) {
943 if (ctc_checkalloc_buffer(ch, 1))
945 ch->ccw[1].count = ch->max_bufsize;
946 fsm_newstate(fi, CH_STATE_RXIDLE);
947 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
948 (unsigned long) ch, 0xff, 0);
950 fsm_newstate(fi, CH_STATE_RXINIT);
951 ccw_check_return_code(ch, rc, "initial RX");
953 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
954 DEV_EVENT_RXUP, dev);
956 ctc_pr_debug("%s: Initial RX count %d not %d\n",
957 dev->name, buflen, CTC_INITIAL_BLOCKLEN);
958 ch_action_firstio(fi, event, arg);
963 * Set channel into extended mode.
965 * @param fi An instance of a channel statemachine.
966 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
970 ch_action_setmode(fsm_instance * fi, int event, void *arg)
972 struct channel *ch = (struct channel *) arg;
974 unsigned long saveflags;
976 DBF_TEXT(trace, 4, __FUNCTION__);
977 fsm_deltimer(&ch->timer);
978 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
979 fsm_newstate(fi, CH_STATE_SETUPWAIT);
980 saveflags = 0; /* avoids compiler warning with
981 spin_unlock_irqrestore */
982 if (event == CH_EVENT_TIMER) // only for timer not yet locked
983 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
984 rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
985 if (event == CH_EVENT_TIMER)
986 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
988 fsm_deltimer(&ch->timer);
989 fsm_newstate(fi, CH_STATE_STARTWAIT);
990 ccw_check_return_code(ch, rc, "set Mode");
998 * @param fi An instance of a channel statemachine.
999 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
1003 ch_action_start(fsm_instance * fi, int event, void *arg)
1005 struct channel *ch = (struct channel *) arg;
1006 unsigned long saveflags;
1008 struct net_device *dev;
1010 DBF_TEXT(trace, 4, __FUNCTION__);
1012 ctc_pr_warn("ch_action_start ch=NULL\n");
1015 if (ch->netdev == NULL) {
1016 ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
1022 ctc_pr_debug("%s: %s channel start\n", dev->name,
1023 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1026 if (ch->trans_skb != NULL) {
1027 clear_normalized_cda(&ch->ccw[1]);
1028 dev_kfree_skb(ch->trans_skb);
1029 ch->trans_skb = NULL;
1031 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1032 ch->ccw[1].cmd_code = CCW_CMD_READ;
1033 ch->ccw[1].flags = CCW_FLAG_SLI;
1034 ch->ccw[1].count = 0;
1036 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
1037 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1038 ch->ccw[1].count = 0;
1040 if (ctc_checkalloc_buffer(ch, 0)) {
1042 "%s: Could not allocate %s trans_skb, delaying "
1043 "allocation until first transfer\n",
1045 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1048 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
1049 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1050 ch->ccw[0].count = 0;
1052 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
1053 ch->ccw[2].flags = CCW_FLAG_SLI;
1054 ch->ccw[2].count = 0;
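/*
 * ccw[3..5] start out as a copy of the generic program ccw[0..2]; the
 * data address of ccw[4] (and, where needed, an IDAL) is set per packet
 * in transmit_skb(), so the IDA flag must start out cleared here.
 */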
1056 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
1058 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
1060 fsm_newstate(fi, CH_STATE_STARTWAIT);
1061 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1062 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1063 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1064 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1067 fsm_deltimer(&ch->timer);
1068 ccw_check_return_code(ch, rc, "initial HaltIO");
1071 ctc_pr_debug("ctc: %s(): leaving\n", __func__);
 * Shut down a channel.
1078 * @param fi An instance of a channel statemachine.
1079 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
1083 ch_action_haltio(fsm_instance * fi, int event, void *arg)
1085 struct channel *ch = (struct channel *) arg;
1086 unsigned long saveflags;
1090 DBF_TEXT(trace, 3, __FUNCTION__);
1091 fsm_deltimer(&ch->timer);
1092 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1093 saveflags = 0; /* avoids comp warning with
1094 spin_unlock_irqrestore */
1095 if (event == CH_EVENT_STOP) // only for STOP not yet locked
1096 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1097 oldstate = fsm_getstate(fi);
1098 fsm_newstate(fi, CH_STATE_TERM);
1099 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1100 if (event == CH_EVENT_STOP)
1101 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1104 fsm_deltimer(&ch->timer);
1105 fsm_newstate(fi, oldstate);
1107 ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
1112 * A channel has successfully been halted.
 * Clean up its queue and notify the interface statemachine.
1115 * @param fi An instance of a channel statemachine.
1116 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
1120 ch_action_stopped(fsm_instance * fi, int event, void *arg)
1122 struct channel *ch = (struct channel *) arg;
1123 struct net_device *dev = ch->netdev;
1125 DBF_TEXT(trace, 3, __FUNCTION__);
1126 fsm_deltimer(&ch->timer);
1127 fsm_newstate(fi, CH_STATE_STOPPED);
1128 if (ch->trans_skb != NULL) {
1129 clear_normalized_cda(&ch->ccw[1]);
1130 dev_kfree_skb(ch->trans_skb);
1131 ch->trans_skb = NULL;
1133 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1134 skb_queue_purge(&ch->io_queue);
1135 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1136 DEV_EVENT_RXDOWN, dev);
1138 ctc_purge_skb_queue(&ch->io_queue);
1139 spin_lock(&ch->collect_lock);
1140 ctc_purge_skb_queue(&ch->collect_queue);
1141 ch->collect_len = 0;
1142 spin_unlock(&ch->collect_lock);
1143 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1144 DEV_EVENT_TXDOWN, dev);
 * A stop command from the device statemachine arrived and we are in
 * not-operational mode. Set the state to stopped.
1152 * @param fi An instance of a channel statemachine.
1153 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
1157 ch_action_stop(fsm_instance * fi, int event, void *arg)
1159 fsm_newstate(fi, CH_STATE_STOPPED);
1163 * A machine check for no path, not operational status or gone device has
1165 * Cleanup queue and notify interface statemachine.
1167 * @param fi An instance of a channel statemachine.
1168 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
1172 ch_action_fail(fsm_instance * fi, int event, void *arg)
1174 struct channel *ch = (struct channel *) arg;
1175 struct net_device *dev = ch->netdev;
1177 DBF_TEXT(trace, 3, __FUNCTION__);
1178 fsm_deltimer(&ch->timer);
1179 fsm_newstate(fi, CH_STATE_NOTOP);
1180 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1181 skb_queue_purge(&ch->io_queue);
1182 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1183 DEV_EVENT_RXDOWN, dev);
1185 ctc_purge_skb_queue(&ch->io_queue);
1186 spin_lock(&ch->collect_lock);
1187 ctc_purge_skb_queue(&ch->collect_queue);
1188 ch->collect_len = 0;
1189 spin_unlock(&ch->collect_lock);
1190 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1191 DEV_EVENT_TXDOWN, dev);
1196 * Handle error during setup of channel.
1198 * @param fi An instance of a channel statemachine.
1199 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
1203 ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1205 struct channel *ch = (struct channel *) arg;
1206 struct net_device *dev = ch->netdev;
1208 DBF_TEXT(setup, 3, __FUNCTION__);
 * Special case: Got UC_RCRESET on setmode.
 * This means that the remote side isn't set up. In this case
 * simply retry after some 10 secs...
1214 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1215 ((event == CH_EVENT_UC_RCRESET) ||
1216 (event == CH_EVENT_UC_RSRESET))) {
1217 fsm_newstate(fi, CH_STATE_STARTRETRY);
1218 fsm_deltimer(&ch->timer);
1219 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1220 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1221 int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1223 ccw_check_return_code(
1224 ch, rc, "HaltIO in ch_action_setuperr");
1229 ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
1230 dev->name, ch_event_names[event],
1231 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1232 fsm_getstate_str(fi));
1233 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1234 fsm_newstate(fi, CH_STATE_RXERR);
1235 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1236 DEV_EVENT_RXDOWN, dev);
1238 fsm_newstate(fi, CH_STATE_TXERR);
1239 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1240 DEV_EVENT_TXDOWN, dev);
1245 * Restart a channel after an error.
1247 * @param fi An instance of a channel statemachine.
1248 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
1252 ch_action_restart(fsm_instance * fi, int event, void *arg)
1254 unsigned long saveflags;
1258 struct channel *ch = (struct channel *) arg;
1259 struct net_device *dev = ch->netdev;
1261 DBF_TEXT(trace, 3, __FUNCTION__);
1262 fsm_deltimer(&ch->timer);
1263 ctc_pr_debug("%s: %s channel restart\n", dev->name,
1264 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1265 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1266 oldstate = fsm_getstate(fi);
1267 fsm_newstate(fi, CH_STATE_STARTWAIT);
1268 saveflags = 0; /* avoids compiler warning with
1269 spin_unlock_irqrestore */
1270 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1271 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1272 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1273 if (event == CH_EVENT_TIMER)
1274 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1277 fsm_deltimer(&ch->timer);
1278 fsm_newstate(fi, oldstate);
1280 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1285 * Handle error during RX initial handshake (exchange of
1286 * 0-length block header)
1288 * @param fi An instance of a channel statemachine.
1289 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
1293 ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1295 struct channel *ch = (struct channel *) arg;
1296 struct net_device *dev = ch->netdev;
1298 DBF_TEXT(setup, 3, __FUNCTION__);
1299 if (event == CH_EVENT_TIMER) {
1300 fsm_deltimer(&ch->timer);
1301 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
1302 if (ch->retry++ < 3)
1303 ch_action_restart(fi, event, arg);
1305 fsm_newstate(fi, CH_STATE_RXERR);
1306 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1307 DEV_EVENT_RXDOWN, dev);
1310 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1314 * Notify device statemachine if we gave up initialization
1317 * @param fi An instance of a channel statemachine.
1318 * @param event The event, just happened.
1319 * @param arg Generic pointer, casted from channel * upon call.
1322 ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1324 struct channel *ch = (struct channel *) arg;
1325 struct net_device *dev = ch->netdev;
1327 DBF_TEXT(setup, 3, __FUNCTION__);
1328 fsm_newstate(fi, CH_STATE_RXERR);
1329 ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1330 ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1331 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1335 * Handle RX Unit check remote reset (remote disconnected)
1337 * @param fi An instance of a channel statemachine.
1338 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
1342 ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1344 struct channel *ch = (struct channel *) arg;
1345 struct channel *ch2;
1346 struct net_device *dev = ch->netdev;
1348 DBF_TEXT(trace, 3, __FUNCTION__);
1349 fsm_deltimer(&ch->timer);
1350 ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1354 * Notify device statemachine
1356 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1357 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
1359 fsm_newstate(fi, CH_STATE_DTERM);
1360 ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1361 fsm_newstate(ch2->fsm, CH_STATE_DTERM);
1363 ccw_device_halt(ch->cdev, (unsigned long) ch);
1364 ccw_device_halt(ch2->cdev, (unsigned long) ch2);
1368 * Handle error during TX channel initialization.
1370 * @param fi An instance of a channel statemachine.
1371 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
1375 ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1377 struct channel *ch = (struct channel *) arg;
1378 struct net_device *dev = ch->netdev;
1380 DBF_TEXT(setup, 2, __FUNCTION__);
1381 if (event == CH_EVENT_TIMER) {
1382 fsm_deltimer(&ch->timer);
1383 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
1384 if (ch->retry++ < 3)
1385 ch_action_restart(fi, event, arg);
1387 fsm_newstate(fi, CH_STATE_TXERR);
1388 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1389 DEV_EVENT_TXDOWN, dev);
1392 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
1396 * Handle TX timeout by retrying operation.
1398 * @param fi An instance of a channel statemachine.
1399 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
1403 ch_action_txretry(fsm_instance * fi, int event, void *arg)
1405 struct channel *ch = (struct channel *) arg;
1406 struct net_device *dev = ch->netdev;
1407 unsigned long saveflags;
1409 DBF_TEXT(trace, 4, __FUNCTION__);
1410 fsm_deltimer(&ch->timer);
1411 if (ch->retry++ > 3) {
1412 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1414 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1415 DEV_EVENT_TXDOWN, dev);
1416 ch_action_restart(fi, event, arg);
1418 struct sk_buff *skb;
1420 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
1421 if ((skb = skb_peek(&ch->io_queue))) {
1424 clear_normalized_cda(&ch->ccw[4]);
1425 ch->ccw[4].count = skb->len;
1426 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1428 "%s: IDAL alloc failed, chan restart\n",
1430 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1431 DEV_EVENT_TXDOWN, dev);
1432 ch_action_restart(fi, event, arg);
1435 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1436 saveflags = 0; /* avoids compiler warning with
1437 spin_unlock_irqrestore */
1438 if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1439 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1441 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1442 (unsigned long) ch, 0xff, 0);
1443 if (event == CH_EVENT_TIMER)
1444 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1447 fsm_deltimer(&ch->timer);
1448 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1449 ctc_purge_skb_queue(&ch->io_queue);
1457 * Handle fatal errors during an I/O command.
1459 * @param fi An instance of a channel statemachine.
1460 * @param event The event, just happened.
 * @param arg Generic pointer, cast from channel * upon call.
1464 ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1466 struct channel *ch = (struct channel *) arg;
1467 struct net_device *dev = ch->netdev;
1469 DBF_TEXT(trace, 3, __FUNCTION__);
1470 fsm_deltimer(&ch->timer);
1471 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1472 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1473 fsm_newstate(fi, CH_STATE_RXERR);
1474 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1475 DEV_EVENT_RXDOWN, dev);
1477 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1478 fsm_newstate(fi, CH_STATE_TXERR);
1479 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1480 DEV_EVENT_TXDOWN, dev);
1485 ch_action_reinit(fsm_instance *fi, int event, void *arg)
1487 struct channel *ch = (struct channel *)arg;
1488 struct net_device *dev = ch->netdev;
1489 struct ctc_priv *privptr = dev->priv;
1491 DBF_TEXT(trace, 4, __FUNCTION__);
1492 ch_action_iofatal(fi, event, arg);
1493 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1497 * The statemachine for a channel.
1499 static const fsm_node ch_fsm[] = {
1500 {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
1501 {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
1502 {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
1503 {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
1505 {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
1506 {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
1507 {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
1508 {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
1509 {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
1511 {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
1512 {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
1513 {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
1514 {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
1515 {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1516 {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1517 {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1519 {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
1520 {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
1521 {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
1522 {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
1524 {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
1525 {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
1526 {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
1527 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
1528 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
1529 {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
1530 {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1531 {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1532 {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1534 {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
1535 {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
1536 {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
1537 {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
1538 {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
1539 {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
1540 {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
1541 {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1542 {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1543 {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
1544 {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1546 {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
1547 {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
1548 {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
1549 {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
1550 // {CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
1551 {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1552 {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1553 {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1554 {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
1556 {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
1557 {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
1558 {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
1559 {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
1560 {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
1561 {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
1562 {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1563 {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1564 {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1566 {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
1567 {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
1568 {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
1569 {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
1570 {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
1571 {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1572 {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1573 {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1575 {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
1576 {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
1577 {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
1578 {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1579 {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1580 {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
1582 {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
1583 {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
1584 {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
1585 {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1586 {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1587 {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
1589 {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
1590 {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
1591 {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
1592 {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
1593 {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
1594 {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
1595 {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1596 {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
1597 {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
1599 {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
1600 {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
1601 {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1602 {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1605 static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
1608 * Functions related to setup and device detection.
1609 *****************************************************************************/
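/**
 * Compare the device-number part of two channel IDs (parsed as hex).
 * Used to keep the channel list sorted.
 *
 * @return !0 if id1 sorts before id2.
 */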
1612 less_than(char *id1, char *id2)
1616 for (i = 0; i < 5; i++) {
1620 dev1 = simple_strtoul(id1, &id1, 16);
1621 dev2 = simple_strtoul(id2, &id2, 16);
1623 return (dev1 < dev2);
1627 * Add a new channel to the list of channels.
1628 * Keeps the channel list sorted.
1630 * @param cdev The ccw_device to be added.
1631 * @param type The type class of the new channel.
1633 * @return 0 on success, !0 on error.
1636 add_channel(struct ccw_device *cdev, enum channel_types type)
1638 struct channel **c = &channels;
1641 DBF_TEXT(trace, 2, __FUNCTION__);
1643 (struct channel *) kmalloc(sizeof (struct channel),
1644 GFP_KERNEL)) == NULL) {
1645 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1648 memset(ch, 0, sizeof (struct channel));
1649 if ((ch->ccw = (struct ccw1 *) kmalloc(8*sizeof(struct ccw1),
1650 GFP_KERNEL | GFP_DMA)) == NULL) {
1652 ctc_pr_warn("ctc: Out of memory in add_channel\n");
memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // ensure all flags and counters are reset
1659 * "static" ccws are used in the following way:
1661 * ccw[0..2] (Channel program for generic I/O):
1663 * 1: read or write (depending on direction) with fixed
1664 * buffer (idal allocated once when buffer is allocated)
1666 * ccw[3..5] (Channel program for direct write of packets)
1668 * 4: write (idal allocated on every write).
1670 * ccw[6..7] (Channel program for initial channel setup):
1671 * 6: set extended mode
1674 * ch->ccw[0..5] are initialized in ch_action_start because
1675 * the channel's direction is yet unknown here.
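 *
 * ccw[6..7] can be set up right away: ccw[6] issues SET EXTENDED mode
 * (started from ch_action_setmode()), ccw[7] is a trailing NOOP.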
1677 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1678 ch->ccw[6].flags = CCW_FLAG_SLI;
1680 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1681 ch->ccw[7].flags = CCW_FLAG_SLI;
1684 snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1686 ch->fsm = init_fsm(ch->id, ch_state_names,
1687 ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1688 ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1689 if (ch->fsm == NULL) {
1690 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1695 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1696 if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb),
1697 GFP_KERNEL)) == NULL) {
1698 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1704 memset(ch->irb, 0, sizeof (struct irb));
1705 while (*c && less_than((*c)->id, ch->id))
1707 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
1709 "ctc: add_channel: device %s already in list, "
1710 "using old entry\n", (*c)->id);
1717 fsm_settimer(ch->fsm, &ch->timer);
1718 skb_queue_head_init(&ch->io_queue);
1719 skb_queue_head_init(&ch->collect_queue);
1726 * Release a specific channel in the channel list.
1728 * @param ch Pointer to channel struct to be released.
1731 channel_free(struct channel *ch)
1733 ch->flags &= ~CHANNEL_FLAGS_INUSE;
1734 fsm_newstate(ch->fsm, CH_STATE_IDLE);
 * Remove a specific channel from the channel list.
1740 * @param ch Pointer to channel struct to be released.
1743 channel_remove(struct channel *ch)
1745 struct channel **c = &channels;
1747 DBF_TEXT(trace, 2, __FUNCTION__);
1755 fsm_deltimer(&ch->timer);
1757 clear_normalized_cda(&ch->ccw[4]);
1758 if (ch->trans_skb != NULL) {
1759 clear_normalized_cda(&ch->ccw[1]);
1760 dev_kfree_skb(ch->trans_skb);
1772 * Get a specific channel from the channel list.
1774 * @param type Type of channel we are interested in.
1775 * @param id Id of channel we are interested in.
1776 * @param direction Direction we want to use this channel for.
1778 * @return Pointer to a channel or NULL if no matching channel available.
1780 static struct channel
1782 channel_get(enum channel_types type, char *id, int direction)
1784 struct channel *ch = channels;
1786 DBF_TEXT(trace, 3, __FUNCTION__);
1788 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
1789 __func__, id, type);
1792 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
1795 __func__, ch, ch->id, ch->type);
ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
1801 __func__, ch, ch->id, ch->type);
1804 ctc_pr_warn("ctc: %s(): channel with id %s "
1805 "and type %d not found in channel list\n",
1806 __func__, id, type);
1808 if (ch->flags & CHANNEL_FLAGS_INUSE)
1811 ch->flags |= CHANNEL_FLAGS_INUSE;
1812 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
1813 ch->flags |= (direction == WRITE)
1814 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
1815 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
1822 * Return the channel type by name.
1824 * @param name Name of network interface.
1826 * @return Type class of channel to be used for that interface.
1828 static enum channel_types inline
1829 extract_channel_media(char *name)
1831 enum channel_types ret = channel_type_unknown;
1834 if (strncmp(name, "ctc", 3) == 0)
1835 ret = channel_type_parallel;
1836 if (strncmp(name, "escon", 5) == 0)
1837 ret = channel_type_escon;
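/**
 * Check whether the common I/O layer passed an error pointer instead of
 * a real irb and, if so, log the reason.
 *
 * @return The error code from the irb pointer, or 0 if the irb is valid.
 */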
1843 __ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1848 switch (PTR_ERR(irb)) {
1850 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
1851 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1852 // CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1855 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
1856 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1857 // CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
1860 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
1862 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1863 // CTC_DBF_TEXT(trace, 2, " rc???");
1865 return PTR_ERR(irb);
1871 * @param cdev The ccw_device the interrupt is for.
1872 * @param intparm interruption parameter.
1873 * @param irb interruption response block.
1876 ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1879 struct net_device *dev;
1880 struct ctc_priv *priv;
1882 DBF_TEXT(trace, 5, __FUNCTION__);
1883 if (__ctc_check_irb_error(cdev, irb))
1886 /* Check for unsolicited interrupts. */
1887 if (!cdev->dev.driver_data) {
1888 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
1889 cdev->dev.bus_id, irb->scsw.cstat,
1894 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
1897 /* Try to extract channel from driver data. */
1898 if (priv->channel[READ]->cdev == cdev)
1899 ch = priv->channel[READ];
1900 else if (priv->channel[WRITE]->cdev == cdev)
1901 ch = priv->channel[WRITE];
1903 ctc_pr_err("ctc: Can't determine channel for interrupt, "
1904 "device %s\n", cdev->dev.bus_id);
1908 dev = (struct net_device *) (ch->netdev);
1910 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
1911 cdev->dev.bus_id, ch);
1916 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
1917 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
1920 /* Copy interruption response block. */
1921 memcpy(ch->irb, irb, sizeof(struct irb));
1923 /* Check for good subchannel return code, otherwise error message */
1924 if (ch->irb->scsw.cstat) {
1925 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
1926 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
1927 dev->name, ch->id, ch->irb->scsw.cstat,
1928 ch->irb->scsw.dstat);
1932 /* Check the reason-code of a unit check */
1933 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
1934 ccw_unit_check(ch, ch->irb->ecw[0]);
1937 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
1938 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
1939 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
1941 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
1944 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
1945 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
1948 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
1949 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
1950 (ch->irb->scsw.stctl ==
1951 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1952 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
1954 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
1959 * Actions for interface - statemachine.
1960 *****************************************************************************/
 * Start up channels by sending CH_EVENT_START to each channel.
1965 * @param fi An instance of an interface statemachine.
1966 * @param event The event, just happened.
 * @param arg Generic pointer, cast from struct net_device * upon call.
1970 dev_action_start(fsm_instance * fi, int event, void *arg)
1972 struct net_device *dev = (struct net_device *) arg;
1973 struct ctc_priv *privptr = dev->priv;
1976 DBF_TEXT(setup, 3, __FUNCTION__);
1977 fsm_deltimer(&privptr->restart_timer);
1978 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
1979 for (direction = READ; direction <= WRITE; direction++) {
1980 struct channel *ch = privptr->channel[direction];
1981 fsm_event(ch->fsm, CH_EVENT_START, ch);
 * Shut down channels by sending CH_EVENT_STOP to each channel.
1988 * @param fi An instance of an interface statemachine.
1989 * @param event The event, just happened.
 * @param arg Generic pointer, cast from struct net_device * upon call.
1993 dev_action_stop(fsm_instance * fi, int event, void *arg)
1995 struct net_device *dev = (struct net_device *) arg;
1996 struct ctc_priv *privptr = dev->priv;
1999 DBF_TEXT(trace, 3, __FUNCTION__);
2000 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2001 for (direction = READ; direction <= WRITE; direction++) {
2002 struct channel *ch = privptr->channel[direction];
2003 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
2007 dev_action_restart(fsm_instance *fi, int event, void *arg)
2009 struct net_device *dev = (struct net_device *)arg;
2010 struct ctc_priv *privptr = dev->priv;
2012 DBF_TEXT(trace, 3, __FUNCTION__);
2013 ctc_pr_debug("%s: Restarting\n", dev->name);
2014 dev_action_stop(fi, event, arg);
2015 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2016 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2017 DEV_EVENT_START, dev);
2021 * Called from channel statemachine
2022 * when a channel is up and running.
2024 * @param fi An instance of an interface statemachine.
2025 * @param event The event, just happened.
 * @param arg Generic pointer, cast from struct net_device * upon call.
2029 dev_action_chup(fsm_instance * fi, int event, void *arg)
2031 struct net_device *dev = (struct net_device *) arg;
2033 DBF_TEXT(trace, 3, __FUNCTION__);
2034 switch (fsm_getstate(fi)) {
2035 case DEV_STATE_STARTWAIT_RXTX:
2036 if (event == DEV_EVENT_RXUP)
2037 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2039 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2041 case DEV_STATE_STARTWAIT_RX:
2042 if (event == DEV_EVENT_RXUP) {
2043 fsm_newstate(fi, DEV_STATE_RUNNING);
2044 ctc_pr_info("%s: connected with remote side\n",
2046 ctc_clear_busy(dev);
2049 case DEV_STATE_STARTWAIT_TX:
2050 if (event == DEV_EVENT_TXUP) {
2051 fsm_newstate(fi, DEV_STATE_RUNNING);
2052 ctc_pr_info("%s: connected with remote side\n",
2054 ctc_clear_busy(dev);
2057 case DEV_STATE_STOPWAIT_TX:
2058 if (event == DEV_EVENT_RXUP)
2059 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2061 case DEV_STATE_STOPWAIT_RX:
2062 if (event == DEV_EVENT_TXUP)
2063 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2069 * Called from channel statemachine
2070 * when a channel has been shutdown.
2072 * @param fi An instance of an interface statemachine.
2073 * @param event The event, just happened.
 * @param arg Generic pointer, cast from struct net_device * upon call.
2077 dev_action_chdown(fsm_instance * fi, int event, void *arg)
2080 DBF_TEXT(trace, 3, __FUNCTION__);
2081 switch (fsm_getstate(fi)) {
2082 case DEV_STATE_RUNNING:
2083 if (event == DEV_EVENT_TXDOWN)
2084 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2086 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2088 case DEV_STATE_STARTWAIT_RX:
2089 if (event == DEV_EVENT_TXDOWN)
2090 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2092 case DEV_STATE_STARTWAIT_TX:
2093 if (event == DEV_EVENT_RXDOWN)
2094 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2096 case DEV_STATE_STOPWAIT_RXTX:
2097 if (event == DEV_EVENT_TXDOWN)
2098 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2100 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2102 case DEV_STATE_STOPWAIT_RX:
2103 if (event == DEV_EVENT_RXDOWN)
2104 fsm_newstate(fi, DEV_STATE_STOPPED);
2106 case DEV_STATE_STOPWAIT_TX:
2107 if (event == DEV_EVENT_TXDOWN)
2108 fsm_newstate(fi, DEV_STATE_STOPPED);
2113 static const fsm_node dev_fsm[] = {
2114 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2116 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2117 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2118 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2119 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2121 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2122 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2123 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2124 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2125 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2127 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2128 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2129 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2130 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2131 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2133 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2134 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2135 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2136 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2137 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2138 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2140 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2141 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2142 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2143 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2144 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2146 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2147 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2148 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2149 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2150 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2152 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2153 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2154 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2155 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2156 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2157 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2160 static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
2163 * Transmit a packet.
2164 * This is a helper function for ctc_tx().
2166 * @param ch Channel to be used for sending.
2167 * @param skb Pointer to struct sk_buff of packet to send.
2168 * The linklevel header has already been set up
2171 * @return 0 on success, -ERRNO on failure. (Never fails.)
2174 transmit_skb(struct channel *ch, struct sk_buff *skb)
2176 unsigned long saveflags;
2177 struct ll_header header;
2180 DBF_TEXT(trace, 5, __FUNCTION__);
2181 /* we need to acquire the lock for testing the state
2182 * otherwise we can have an IRQ changing the state to
2183 * TXIDLE after the test but before acquiring the lock.
2185 spin_lock_irqsave(&ch->collect_lock, saveflags);
2186 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2187 int l = skb->len + LL_HEADER_LENGTH;
2189 if (ch->collect_len + l > ch->max_bufsize - 2) {
2190 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2193 atomic_inc(&skb->users);
2195 header.type = skb->protocol;
2197 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2199 skb_queue_tail(&ch->collect_queue, skb);
2200 ch->collect_len += l;
2202 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2206 struct sk_buff *nskb;
2208 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2210 * Protect skb against being freed by upper layers.
2213 atomic_inc(&skb->users);
2214 ch->prof.txlen += skb->len;
2215 header.length = skb->len + LL_HEADER_LENGTH;
2216 header.type = skb->protocol;
2218 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2220 block_len = skb->len + 2;
2221 *((__u16 *) skb_push(skb, 2)) = block_len;
2224 * IDAL support in CTC is broken, so we have to take
2225 * care of skbs residing above 2 GB ourselves.
2227 hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
2229 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2231 atomic_dec(&skb->users);
2232 skb_pull(skb, LL_HEADER_LENGTH + 2);
2233 ctc_clear_busy(ch->netdev);
2236 memcpy(skb_put(nskb, skb->len),
2237 skb->data, skb->len);
2238 atomic_inc(&nskb->users);
2239 atomic_dec(&skb->users);
2240 dev_kfree_skb_irq(skb);
2245 ch->ccw[4].count = block_len;
2246 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2248 * IDAL allocation failed; retry by copying into
2249 * trans_skb, which usually has a pre-allocated IDAL.
2252 if (ctc_checkalloc_buffer(ch, 1)) {
2254 * Remove our header. It gets added
2255 * again on retransmit.
2257 atomic_dec(&skb->users);
2258 skb_pull(skb, LL_HEADER_LENGTH + 2);
2259 ctc_clear_busy(ch->netdev);
2263 ch->trans_skb->tail = ch->trans_skb->data;
2264 ch->trans_skb->len = 0;
2265 ch->ccw[1].count = skb->len;
2266 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
2268 atomic_dec(&skb->users);
2269 dev_kfree_skb_irq(skb);
2272 skb_queue_tail(&ch->io_queue, skb);
2276 fsm_newstate(ch->fsm, CH_STATE_TX);
2277 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2278 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2279 ch->prof.send_stamp = xtime;
2280 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2281 (unsigned long) ch, 0xff, 0);
2282 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2284 ch->prof.doios_single++;
2286 fsm_deltimer(&ch->timer);
2287 ccw_check_return_code(ch, rc, "single skb TX");
2289 skb_dequeue_tail(&ch->io_queue);
2291 * Remove our header. It gets added
2292 * again on retransmit.
2294 skb_pull(skb, LL_HEADER_LENGTH + 2);
2297 struct net_device *dev = ch->netdev;
2298 struct ctc_priv *privptr = dev->priv;
2299 privptr->stats.tx_packets++;
2300 privptr->stats.tx_bytes +=
2301 skb->len - LL_HEADER_LENGTH;
2306 ctc_clear_busy(ch->netdev);
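/*
 * For reference, the frame layout that transmit_skb() builds in front of the
 * payload (derived from the skb_push() calls above; any ll_header fields
 * beyond the length/type members used here are defined in the driver header
 * and are not spelled out):
 *
 *   +---------------+---------------------------+--------------------+
 *   | block length  | ll_header                 | payload            |
 *   | (__u16)       | (LL_HEADER_LENGTH bytes)  | (skb->len bytes)   |
 *   +---------------+---------------------------+--------------------+
 *
 *   ll_header.length = payload length + LL_HEADER_LENGTH
 *   block length     = payload length + LL_HEADER_LENGTH + 2
 */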
2311 * Interface API for upper network layers
2312 *****************************************************************************/
2315 * Open an interface.
2316 * Called from generic network layer when ifconfig up is run.
2318 * @param dev Pointer to interface struct.
2320 * @return always 0 (opening this interface never fails).
2323 ctc_open(struct net_device * dev)
2325 DBF_TEXT(trace, 5, __FUNCTION__);
2326 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2331 * Close an interface.
2332 * Called from generic network layer when ifconfig down is run.
2334 * @param dev Pointer to interface struct.
2336 * @return always 0 (closing this interface never fails).
2339 ctc_close(struct net_device * dev)
2341 DBF_TEXT(trace, 5, __FUNCTION__);
2342 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
2347 * Start transmission of a packet.
2348 * Called from generic network device layer.
2350 * @param skb Pointer to buffer containing the packet.
2351 * @param dev Pointer to interface struct.
2353 * @return 0 if packet consumed, !0 if packet rejected.
2354 * Note: If we return !0, then the packet is freed by
2355 * the generic network layer.
2358 ctc_tx(struct sk_buff *skb, struct net_device * dev)
2361 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2363 DBF_TEXT(trace, 5, __FUNCTION__);
2365 * Some sanity checks ...
2368 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2369 privptr->stats.tx_dropped++;
2372 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2373 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2374 dev->name, LL_HEADER_LENGTH + 2);
2376 privptr->stats.tx_dropped++;
2381 * If the channels are not running, try to restart them
2382 * and drop the packet.
2384 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2385 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
2387 privptr->stats.tx_dropped++;
2388 privptr->stats.tx_errors++;
2389 privptr->stats.tx_carrier_errors++;
2393 if (ctc_test_and_set_busy(dev))
2396 dev->trans_start = jiffies;
2397 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
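/*
 * Illustrative sketch only: the ctc_test_and_set_busy()/ctc_clear_busy()
 * pair used above serializes transmissions per device.  Their real
 * definitions live in the driver headers; the hypothetical helpers below
 * merely show the kind of atomic flag they are assumed to be built on.
 */
#if 0
static inline int example_test_and_set_busy(unsigned long *busy_flag)
{
	/* Returns non-zero if a transmission was already in flight. */
	return test_and_set_bit(0, busy_flag);
}

static inline void example_clear_busy(unsigned long *busy_flag)
{
	clear_bit(0, busy_flag);
}
#endif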
2403 * Sets MTU of an interface.
2405 * @param dev Pointer to interface struct.
2406 * @param new_mtu The new MTU to use for this interface.
2408 * @return 0 on success, -EINVAL if the MTU is out of the valid
2409 * range (576..65527). If VM is on the
2410 * remote side, the maximum MTU is 32760; this is
2411 * <em>not</em> checked here, however.
2414 ctc_change_mtu(struct net_device * dev, int new_mtu)
2416 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2418 DBF_TEXT(trace, 3, __FUNCTION__);
2419 if ((new_mtu < 576) || (new_mtu > 65527) ||
2420 (new_mtu > (privptr->channel[READ]->max_bufsize -
2421 LL_HEADER_LENGTH - 2)))
2424 dev->hard_header_len = LL_HEADER_LENGTH + 2;
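/*
 * Worked example (the constants are assumptions, not quoted from this file):
 * with a 32 KiB default buffer and a 6-byte ll_header, the largest MTU the
 * check above accepts is
 *
 *	max_bufsize - LL_HEADER_LENGTH - 2 = 32768 - 6 - 2 = 32760
 *
 * which lines up with the 32760-byte limit mentioned for a VM peer in the
 * comment above.
 */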
2429 * Returns interface statistics of a device.
2431 * @param dev Pointer to interface struct.
2433 * @return Pointer to stats struct of this interface.
2435 static struct net_device_stats *
2436 ctc_stats(struct net_device * dev)
2438 return &((struct ctc_priv *) dev->priv)->stats;
2446 buffer_show(struct device *dev, struct device_attribute *attr, char *buf)
2448 struct ctc_priv *priv;
2450 priv = dev->driver_data;
2453 return sprintf(buf, "%d\n",
2458 buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2460 struct ctc_priv *priv;
2461 struct net_device *ndev;
2465 DBF_TEXT(trace, 3, __FUNCTION__);
2466 DBF_TEXT(trace, 3, buf);
2467 priv = dev->driver_data;
2469 DBF_TEXT(trace, 3, "bfnopriv");
2473 sscanf(buf, "%u", &bs1);
2474 if (bs1 > CTC_BUFSIZE_LIMIT)
2476 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2478 priv->buffer_size = bs1;   /* just to overwrite the default */
2480 ndev = priv->channel[READ]->netdev;
2482 DBF_TEXT(trace, 3, "bfnondev");
2486 if ((ndev->flags & IFF_RUNNING) &&
2487 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2490 priv->channel[READ]->max_bufsize = bs1;
2491 priv->channel[WRITE]->max_bufsize = bs1;
2492 if (!(ndev->flags & IFF_RUNNING))
2493 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2494 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2495 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2497 sprintf(buffer, "%d", priv->buffer_size);
2498 DBF_TEXT(trace, 3, buffer);
2502 DBF_TEXT(trace, 3, "buff_err");
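/*
 * Summary of the checks above: a value written to the 'buffer' attribute is
 * rejected if it exceeds CTC_BUFSIZE_LIMIT, is smaller than
 * 576 + LL_HEADER_LENGTH + 2, or (while the interface is running) smaller
 * than MTU + LL_HEADER_LENGTH + 2.  Otherwise both channels get the new
 * max_bufsize plus the CHANNEL_FLAGS_BUFSIZE_CHANGED flag, and if the
 * interface is down the MTU is recomputed as size - LL_HEADER_LENGTH - 2.
 */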
2507 loglevel_show(struct device *dev, struct device_attribute *attr, char *buf)
2509 return sprintf(buf, "%d\n", loglevel);
2513 loglevel_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2517 DBF_TEXT(trace, 5, __FUNCTION__);
2518 sscanf(buf, "%i", &ll1);
2520 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
2527 ctc_print_statistics(struct ctc_priv *priv)
2532 DBF_TEXT(trace, 4, __FUNCTION__);
2535 sbuf = kmalloc(2048, GFP_KERNEL);
2540 p += sprintf(p, " Device FSM state: %s\n",
2541 fsm_getstate_str(priv->fsm));
2542 p += sprintf(p, " RX channel FSM state: %s\n",
2543 fsm_getstate_str(priv->channel[READ]->fsm));
2544 p += sprintf(p, " TX channel FSM state: %s\n",
2545 fsm_getstate_str(priv->channel[WRITE]->fsm));
2546 p += sprintf(p, " Max. TX buffer used: %ld\n",
2547 priv->channel[WRITE]->prof.maxmulti);
2548 p += sprintf(p, " Max. chained SKBs: %ld\n",
2549 priv->channel[WRITE]->prof.maxcqueue);
2550 p += sprintf(p, " TX single write ops: %ld\n",
2551 priv->channel[WRITE]->prof.doios_single);
2552 p += sprintf(p, " TX multi write ops: %ld\n",
2553 priv->channel[WRITE]->prof.doios_multi);
2554 p += sprintf(p, " Net bytes written: %ld\n",
2555 priv->channel[WRITE]->prof.txlen);
2556 p += sprintf(p, " Max. TX IO-time: %ld\n",
2557 priv->channel[WRITE]->prof.tx_time);
2559 ctc_pr_debug("Statistics for %s:\n%s",
2560 priv->channel[WRITE]->netdev->name, sbuf);
2566 stats_show(struct device *dev, struct device_attribute *attr, char *buf)
2568 struct ctc_priv *priv = dev->driver_data;
2571 ctc_print_statistics(priv);
2572 return sprintf(buf, "0\n");
2576 stats_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2578 struct ctc_priv *priv = dev->driver_data;
2581 /* Reset statistics */
2582 memset(&priv->channel[WRITE]->prof, 0,
2583 sizeof(priv->channel[WRITE]->prof));
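/*
 * The 'stats' attribute is asymmetric: reading it logs the TX profiling
 * counters via ctc_print_statistics() and returns "0\n", while writing any
 * value simply resets the WRITE channel's profiling data, as done above.
 */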
2588 ctc_netdev_unregister(struct net_device * dev)
2590 struct ctc_priv *privptr;
2594 privptr = (struct ctc_priv *) dev->priv;
2595 unregister_netdev(dev);
2599 ctc_netdev_register(struct net_device * dev)
2601 return register_netdev(dev);
2605 ctc_free_netdevice(struct net_device * dev, int free_dev)
2607 struct ctc_priv *privptr;
2610 privptr = dev->priv;
2613 kfree_fsm(privptr->fsm);
2623 ctc_proto_show(struct device *dev, struct device_attribute *attr, char *buf)
2625 struct ctc_priv *priv;
2627 priv = dev->driver_data;
2631 return sprintf(buf, "%d\n", priv->protocol);
2635 ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2637 struct ctc_priv *priv;
2640 DBF_TEXT(trace, 3, __FUNCTION__);
2641 pr_debug("%s() called\n", __FUNCTION__);
2643 priv = dev->driver_data;
2646 sscanf(buf, "%u", &value);
2647 if (!((value == CTC_PROTO_S390) ||
2648 (value == CTC_PROTO_LINUX) ||
2649 (value == CTC_PROTO_OS390)))
2651 priv->protocol = value;
2657 ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2659 struct ccwgroup_device *cgdev;
2661 cgdev = to_ccwgroupdev(dev);
2665 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2668 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2669 static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
2670 static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
2672 static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2673 static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
2675 static struct attribute *ctc_attr[] = {
2676 &dev_attr_protocol.attr,
2677 &dev_attr_type.attr,
2678 &dev_attr_buffer.attr,
2682 static struct attribute_group ctc_attr_group = {
2687 ctc_add_attributes(struct device *dev)
2691 rc = device_create_file(dev, &dev_attr_loglevel);
2694 rc = device_create_file(dev, &dev_attr_stats);
2697 device_remove_file(dev, &dev_attr_loglevel);
2703 ctc_remove_attributes(struct device *dev)
2705 device_remove_file(dev, &dev_attr_stats);
2706 device_remove_file(dev, &dev_attr_loglevel);
2710 ctc_add_files(struct device *dev)
2712 pr_debug("%s() called\n", __FUNCTION__);
2714 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
2718 ctc_remove_files(struct device *dev)
2720 pr_debug("%s() called\n", __FUNCTION__);
2722 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
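/*
 * Note on the sysfs layout: 'protocol', 'type' and 'buffer' are exposed as a
 * group (ctc_attr_group) from ctc_probe_device() via ctc_add_files(), while
 * 'loglevel' and 'stats' are created separately by ctc_add_attributes() once
 * the interface has been set up and are removed again on shutdown.
 */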
2726 * Add ctc specific attributes.
2727 * Add ctc private data.
2729 * @param cgdev pointer to ccwgroup_device just added
2731 * @returns 0 on success, !0 on failure.
2734 ctc_probe_device(struct ccwgroup_device *cgdev)
2736 struct ctc_priv *priv;
2740 pr_debug("%s() called\n", __FUNCTION__);
2741 DBF_TEXT(setup, 3, __FUNCTION__);
2743 if (!get_device(&cgdev->dev))
2746 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
2748 ctc_pr_err("%s: Out of memory\n", __func__);
2749 put_device(&cgdev->dev);
2753 memset(priv, 0, sizeof (struct ctc_priv));
2754 rc = ctc_add_files(&cgdev->dev);
2757 put_device(&cgdev->dev);
2760 priv->buffer_size = CTC_BUFSIZE_DEFAULT;
2761 cgdev->cdev[0]->handler = ctc_irq_handler;
2762 cgdev->cdev[1]->handler = ctc_irq_handler;
2763 cgdev->dev.driver_data = priv;
2765 sprintf(buffer, "%p", priv);
2766 DBF_TEXT(data, 3, buffer);
2768 sprintf(buffer, "%u", (unsigned int)sizeof(struct ctc_priv));
2769 DBF_TEXT(data, 3, buffer);
2771 sprintf(buffer, "%p", &channels);
2772 DBF_TEXT(data, 3, buffer);
2774 sprintf(buffer, "%u", (unsigned int)sizeof(struct channel));
2775 DBF_TEXT(data, 3, buffer);
2781 * Initialize all fields of the net device except the name and the channel pointers; both are set up later in ctc_new_device().
2784 static struct net_device *
2785 ctc_init_netdevice(struct net_device * dev, int alloc_device,
2786 struct ctc_priv *privptr)
2791 DBF_TEXT(setup, 3, __FUNCTION__);
2794 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
2797 memset(dev, 0, sizeof (struct net_device));
2800 dev->priv = privptr;
2801 privptr->fsm = init_fsm("ctcdev", dev_state_names,
2802 dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS,
2803 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2804 if (privptr->fsm == NULL) {
2809 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2810 fsm_settimer(privptr->fsm, &privptr->restart_timer);
2812 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
2813 dev->hard_start_xmit = ctc_tx;
2814 dev->open = ctc_open;
2815 dev->stop = ctc_close;
2816 dev->get_stats = ctc_stats;
2817 dev->change_mtu = ctc_change_mtu;
2818 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2820 dev->type = ARPHRD_SLIP;
2821 dev->tx_queue_len = 100;
2822 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2823 SET_MODULE_OWNER(dev);
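/*
 * The result of the setup above is a point-to-point, ARP-less interface
 * (IFF_POINTOPOINT | IFF_NOARP) of type ARPHRD_SLIP whose default MTU is the
 * default buffer size minus the link-level overhead, and whose
 * open/stop/xmit/stats/change_mtu callbacks are the ctc_* handlers defined
 * earlier; "ifconfig up" and "ifconfig down" therefore end up in ctc_open()
 * and ctc_close() through these pointers.
 */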
2830 * Set up an interface.
2832 * @param cgdev Device to be set up.
2834 * @returns 0 on success, !0 on failure.
2837 ctc_new_device(struct ccwgroup_device *cgdev)
2839 char read_id[CTC_ID_SIZE];
2840 char write_id[CTC_ID_SIZE];
2842 enum channel_types type;
2843 struct ctc_priv *privptr;
2844 struct net_device *dev;
2848 pr_debug("%s() called\n", __FUNCTION__);
2849 DBF_TEXT(setup, 3, __FUNCTION__);
2851 privptr = cgdev->dev.driver_data;
2855 sprintf(buffer, "%d", privptr->buffer_size);
2856 DBF_TEXT(setup, 3, buffer);
2858 type = get_channel_type(&cgdev->cdev[0]->id);
2860 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
2861 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
2863 if (add_channel(cgdev->cdev[0], type))
2865 if (add_channel(cgdev->cdev[1], type))
2868 ret = ccw_device_set_online(cgdev->cdev[0]);
2871 "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
2874 ret = ccw_device_set_online(cgdev->cdev[1]);
2877 "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
2880 dev = ctc_init_netdevice(NULL, 1, privptr);
2883 ctc_pr_warn("ctc_init_netdevice failed\n");
2887 strlcpy(dev->name, "ctc%d", IFNAMSIZ);
2889 for (direction = READ; direction <= WRITE; direction++) {
2890 privptr->channel[direction] =
2891 channel_get(type, direction == READ ? read_id : write_id,
2893 if (privptr->channel[direction] == NULL) {
2894 if (direction == WRITE)
2895 channel_free(privptr->channel[READ]);
2897 ctc_free_netdevice(dev, 1);
2900 privptr->channel[direction]->netdev = dev;
2901 privptr->channel[direction]->protocol = privptr->protocol;
2902 privptr->channel[direction]->max_bufsize = privptr->buffer_size;
2905 SET_NETDEV_DEV(dev, &cgdev->dev);
2907 if (ctc_netdev_register(dev) != 0) {
2908 ctc_free_netdevice(dev, 1);
2912 if (ctc_add_attributes(&cgdev->dev)) {
2913 ctc_netdev_unregister(dev);
2915 ctc_free_netdevice(dev, 1);
2919 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
2923 ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
2924 dev->name, privptr->channel[READ]->id,
2925 privptr->channel[WRITE]->id, privptr->protocol);
2929 ccw_device_set_offline(cgdev->cdev[1]);
2930 ccw_device_set_offline(cgdev->cdev[0]);
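/*
 * Bring-up order used by ctc_new_device(): add both channels, set both ccw
 * devices online, allocate and initialize the net device, bind the READ and
 * WRITE channels (netdev, protocol, buffer size), register the net device,
 * add the per-device attributes and copy the interface name into the FSM
 * name.  The failure paths unwind what was done so far, ending with the
 * set_offline calls above.
 */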
2936 * Shut down an interface.
2938 * @param cgdev Device to be shut down.
2940 * @returns 0 on success, !0 on failure.
2943 ctc_shutdown_device(struct ccwgroup_device *cgdev)
2945 struct ctc_priv *priv;
2946 struct net_device *ndev;
2948 DBF_TEXT(setup, 3, __FUNCTION__);
2949 pr_debug("%s() called\n", __FUNCTION__);
2952 priv = cgdev->dev.driver_data;
2957 if (priv->channel[READ]) {
2958 ndev = priv->channel[READ]->netdev;
2960 /* Close the device */
2962 ndev->flags &= ~IFF_RUNNING;
2964 ctc_remove_attributes(&cgdev->dev);
2966 channel_free(priv->channel[READ]);
2968 if (priv->channel[WRITE])
2969 channel_free(priv->channel[WRITE]);
2972 ctc_netdev_unregister(ndev);
2974 ctc_free_netdevice(ndev, 1);
2978 kfree_fsm(priv->fsm);
2980 ccw_device_set_offline(cgdev->cdev[1]);
2981 ccw_device_set_offline(cgdev->cdev[0]);
2983 if (priv->channel[READ])
2984 channel_remove(priv->channel[READ]);
2985 if (priv->channel[WRITE])
2986 channel_remove(priv->channel[WRITE]);
2987 priv->channel[READ] = priv->channel[WRITE] = NULL;
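/*
 * Teardown mirrors bring-up: close the device and clear IFF_RUNNING, remove
 * the per-device attributes, free both channels, unregister and free the net
 * device, free the device FSM, set both ccw devices offline and finally
 * remove the channels and clear the pointers.
 */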
2994 ctc_remove_device(struct ccwgroup_device *cgdev)
2996 struct ctc_priv *priv;
2998 pr_debug("%s() called\n", __FUNCTION__);
2999 DBF_TEXT(setup, 3, __FUNCTION__);
3001 priv = cgdev->dev.driver_data;
3004 if (cgdev->state == CCWGROUP_ONLINE)
3005 ctc_shutdown_device(cgdev);
3006 ctc_remove_files(&cgdev->dev);
3007 cgdev->dev.driver_data = NULL;
3009 put_device(&cgdev->dev);
3012 static struct ccwgroup_driver ctc_group_driver = {
3013 .owner = THIS_MODULE,
3016 .driver_id = 0xC3E3C3,
3017 .probe = ctc_probe_device,
3018 .remove = ctc_remove_device,
3019 .set_online = ctc_new_device,
3020 .set_offline = ctc_shutdown_device,
3024 * Module related routines
3025 *****************************************************************************/
3028 * Prepare to be unloaded. Free IRQs and release all resources.
3029 * This is called just before this module is unloaded. It is
3030 * <em>not</em> called if the usage count is !0, so we don't need to check for that.
3036 DBF_TEXT(setup, 3, __FUNCTION__);
3037 unregister_cu3088_discipline(&ctc_group_driver);
3038 ctc_unregister_dbf_views();
3039 ctc_pr_info("CTC driver unloaded\n");
3043 * Initialize module.
3044 * This is called just after the module is loaded.
3046 * @return 0 on success, !0 on error.
3053 loglevel = CTC_LOGLEVEL_DEFAULT;
3055 DBF_TEXT(setup, 3, __FUNCTION__);
3059 ret = ctc_register_dbf_views();
3061 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3064 ret = register_cu3088_discipline(&ctc_group_driver);
3066 ctc_unregister_dbf_views();
3071 module_init(ctc_init);
3072 module_exit(ctc_exit);
3074 /* --- This is the END my friend --- */