r6040: cleanups
[linux-2.6] / drivers / s390 / net / ctcmain.c
1 /*
2  * CTC / ESCON network driver
3  *
4  * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5  * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6  * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
7  *            Arnaldo Carvalho de Melo <acme@conectiva.com.br>
8               Peter Tiedemann (ptiedem@de.ibm.com)
9  * Driver Model stuff by : Cornelia Huck <cornelia.huck@de.ibm.com>
10  *
11  * Documentation used:
12  *  - Principles of Operation (IBM doc#: SA22-7201-06)
13  *  - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
14  *  - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
15  *  - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
16  *  - ESCON I/O Interface (IBM doc#: SA22-7202-029
17  *
18  * and the source of the original CTC driver by:
19  *  Dieter Wellerdiek (wel@de.ibm.com)
20  *  Martin Schwidefsky (schwidefsky@de.ibm.com)
21  *  Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
22  *  Jochen Röhrig (roehrig@de.ibm.com)
23  *
24  * This program is free software; you can redistribute it and/or modify
25  * it under the terms of the GNU General Public License as published by
26  * the Free Software Foundation; either version 2, or (at your option)
27  * any later version.
28  *
29  * This program is distributed in the hope that it will be useful,
30  * but WITHOUT ANY WARRANTY; without even the implied warranty of
31  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
32  * GNU General Public License for more details.
33  *
34  * You should have received a copy of the GNU General Public License
35  * along with this program; if not, write to the Free Software
36  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
37  *
38  */
39 #undef DEBUG
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/kernel.h>
43 #include <linux/slab.h>
44 #include <linux/errno.h>
45 #include <linux/types.h>
46 #include <linux/interrupt.h>
47 #include <linux/timer.h>
48 #include <linux/bitops.h>
49
50 #include <linux/signal.h>
51 #include <linux/string.h>
52
53 #include <linux/ip.h>
54 #include <linux/if_arp.h>
55 #include <linux/tcp.h>
56 #include <linux/skbuff.h>
57 #include <linux/ctype.h>
58 #include <net/dst.h>
59
60 #include <asm/io.h>
61 #include <asm/ccwdev.h>
62 #include <asm/ccwgroup.h>
63 #include <asm/uaccess.h>
64
65 #include <asm/idals.h>
66
67 #include "fsm.h"
68 #include "cu3088.h"
69
70 #include "ctcdbug.h"
71 #include "ctcmain.h"
72
73 MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
74 MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
75 MODULE_LICENSE("GPL");
76 /**
77  * States of the interface statemachine.
78  */
79 enum dev_states {
80         DEV_STATE_STOPPED,
81         DEV_STATE_STARTWAIT_RXTX,
82         DEV_STATE_STARTWAIT_RX,
83         DEV_STATE_STARTWAIT_TX,
84         DEV_STATE_STOPWAIT_RXTX,
85         DEV_STATE_STOPWAIT_RX,
86         DEV_STATE_STOPWAIT_TX,
87         DEV_STATE_RUNNING,
88         /**
89          * MUST be always the last element!!
90          */
91         CTC_NR_DEV_STATES
92 };
93
94 static const char *dev_state_names[] = {
95         "Stopped",
96         "StartWait RXTX",
97         "StartWait RX",
98         "StartWait TX",
99         "StopWait RXTX",
100         "StopWait RX",
101         "StopWait TX",
102         "Running",
103 };
104
105 /**
106  * Events of the interface statemachine.
107  */
108 enum dev_events {
109         DEV_EVENT_START,
110         DEV_EVENT_STOP,
111         DEV_EVENT_RXUP,
112         DEV_EVENT_TXUP,
113         DEV_EVENT_RXDOWN,
114         DEV_EVENT_TXDOWN,
115         DEV_EVENT_RESTART,
116         /**
117          * MUST be always the last element!!
118          */
119         CTC_NR_DEV_EVENTS
120 };
121
122 static const char *dev_event_names[] = {
123         "Start",
124         "Stop",
125         "RX up",
126         "TX up",
127         "RX down",
128         "TX down",
129         "Restart",
130 };
131
132 /**
133  * Events of the channel statemachine
134  */
135 enum ch_events {
136         /**
137          * Events, representing return code of
138          * I/O operations (ccw_device_start, ccw_device_halt et al.)
139          */
140         CH_EVENT_IO_SUCCESS,
141         CH_EVENT_IO_EBUSY,
142         CH_EVENT_IO_ENODEV,
143         CH_EVENT_IO_EIO,
144         CH_EVENT_IO_UNKNOWN,
145
146         CH_EVENT_ATTNBUSY,
147         CH_EVENT_ATTN,
148         CH_EVENT_BUSY,
149
150         /**
151          * Events, representing unit-check
152          */
153         CH_EVENT_UC_RCRESET,
154         CH_EVENT_UC_RSRESET,
155         CH_EVENT_UC_TXTIMEOUT,
156         CH_EVENT_UC_TXPARITY,
157         CH_EVENT_UC_HWFAIL,
158         CH_EVENT_UC_RXPARITY,
159         CH_EVENT_UC_ZERO,
160         CH_EVENT_UC_UNKNOWN,
161
162         /**
163          * Events, representing subchannel-check
164          */
165         CH_EVENT_SC_UNKNOWN,
166
167         /**
168          * Events, representing machine checks
169          */
170         CH_EVENT_MC_FAIL,
171         CH_EVENT_MC_GOOD,
172
173         /**
174          * Event, representing normal IRQ
175          */
176         CH_EVENT_IRQ,
177         CH_EVENT_FINSTAT,
178
179         /**
180          * Event, representing timer expiry.
181          */
182         CH_EVENT_TIMER,
183
184         /**
185          * Events, representing commands from upper levels.
186          */
187         CH_EVENT_START,
188         CH_EVENT_STOP,
189
190         /**
191          * MUST be always the last element!!
192          */
193         NR_CH_EVENTS,
194 };
195
196 /**
197  * States of the channel statemachine.
198  */
199 enum ch_states {
200         /**
201          * Channel not assigned to any device,
202          * initial state, direction invalid
203          */
204         CH_STATE_IDLE,
205
206         /**
207          * Channel assigned but not operating
208          */
209         CH_STATE_STOPPED,
210         CH_STATE_STARTWAIT,
211         CH_STATE_STARTRETRY,
212         CH_STATE_SETUPWAIT,
213         CH_STATE_RXINIT,
214         CH_STATE_TXINIT,
215         CH_STATE_RX,
216         CH_STATE_TX,
217         CH_STATE_RXIDLE,
218         CH_STATE_TXIDLE,
219         CH_STATE_RXERR,
220         CH_STATE_TXERR,
221         CH_STATE_TERM,
222         CH_STATE_DTERM,
223         CH_STATE_NOTOP,
224
225         /**
226          * MUST be always the last element!!
227          */
228         NR_CH_STATES,
229 };
230
231 static int loglevel = CTC_LOGLEVEL_DEFAULT;
232
233 /**
234  * Linked list of all detected channels.
235  */
236 static struct channel *channels = NULL;
237
238 /**
239  * Print Banner.
240  */
241 static void
242 print_banner(void)
243 {
244         static int printed = 0;
245
246         if (printed)
247                 return;
248
249         printk(KERN_INFO "CTC driver initialized\n");
250         printed = 1;
251 }
252
253 /**
254  * Return type of a detected device.
255  */
256 static enum channel_types
257 get_channel_type(struct ccw_device_id *id)
258 {
259         enum channel_types type = (enum channel_types) id->driver_info;
260
261         if (type == channel_type_ficon)
262                 type = channel_type_escon;
263
264         return type;
265 }
266
267 static const char *ch_event_names[] = {
268         "ccw_device success",
269         "ccw_device busy",
270         "ccw_device enodev",
271         "ccw_device ioerr",
272         "ccw_device unknown",
273
274         "Status ATTN & BUSY",
275         "Status ATTN",
276         "Status BUSY",
277
278         "Unit check remote reset",
279         "Unit check remote system reset",
280         "Unit check TX timeout",
281         "Unit check TX parity",
282         "Unit check Hardware failure",
283         "Unit check RX parity",
284         "Unit check ZERO",
285         "Unit check Unknown",
286
287         "SubChannel check Unknown",
288
289         "Machine check failure",
290         "Machine check operational",
291
292         "IRQ normal",
293         "IRQ final",
294
295         "Timer",
296
297         "Start",
298         "Stop",
299 };
300
301 static const char *ch_state_names[] = {
302         "Idle",
303         "Stopped",
304         "StartWait",
305         "StartRetry",
306         "SetupWait",
307         "RX init",
308         "TX init",
309         "RX",
310         "TX",
311         "RX idle",
312         "TX idle",
313         "RX error",
314         "TX error",
315         "Terminating",
316         "Restarting",
317         "Not operational",
318 };
319
320 #ifdef DEBUG
321 /**
322  * Dump header and first 16 bytes of an sk_buff for debugging purposes.
323  *
324  * @param skb    The sk_buff to dump.
325  * @param offset Offset relative to skb-data, where to start the dump.
326  */
327 static void
328 ctc_dump_skb(struct sk_buff *skb, int offset)
329 {
330         unsigned char *p = skb->data;
331         __u16 bl;
332         struct ll_header *header;
333         int i;
334
335         if (!(loglevel & CTC_LOGLEVEL_DEBUG))
336                 return;
337         p += offset;
338         bl = *((__u16 *) p);
339         p += 2;
340         header = (struct ll_header *) p;
341         p -= 2;
342
343         printk(KERN_DEBUG "dump:\n");
344         printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
345
346         printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
347                header->length);
348         printk(KERN_DEBUG "h->type=%04x\n", header->type);
349         printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
350         if (bl > 16)
351                 bl = 16;
352         printk(KERN_DEBUG "data: ");
353         for (i = 0; i < bl; i++)
354                 printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
355         printk("\n");
356 }
357 #else
358 static inline void
359 ctc_dump_skb(struct sk_buff *skb, int offset)
360 {
361 }
362 #endif
363
364 /**
365  * Unpack a just received skb and hand it over to
366  * upper layers.
367  *
368  * @param ch The channel where this skb has been received.
369  * @param pskb The received skb.
370  */
371 static void
372 ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
373 {
374         struct net_device *dev = ch->netdev;
375         struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
376         __u16 len = *((__u16 *) pskb->data);
377
378         DBF_TEXT(trace, 4, __FUNCTION__);
379         skb_put(pskb, 2 + LL_HEADER_LENGTH);
380         skb_pull(pskb, 2);
381         pskb->dev = dev;
382         pskb->ip_summed = CHECKSUM_UNNECESSARY;
383         while (len > 0) {
384                 struct sk_buff *skb;
385                 struct ll_header *header = (struct ll_header *) pskb->data;
386
387                 skb_pull(pskb, LL_HEADER_LENGTH);
388                 if ((ch->protocol == CTC_PROTO_S390) &&
389                     (header->type != ETH_P_IP)) {
390
391 #ifndef DEBUG
392                         if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
393 #endif
394                                 /**
395                                  * Check packet type only if we stick strictly
396                                  * to S/390's protocol of OS390. This only
397                                  * supports IP. Otherwise allow any packet
398                                  * type.
399                                  */
400                                 ctc_pr_warn(
401                                         "%s Illegal packet type 0x%04x received, dropping\n",
402                                         dev->name, header->type);
403                                 ch->logflags |= LOG_FLAG_ILLEGALPKT;
404 #ifndef DEBUG
405                         }
406 #endif
407 #ifdef DEBUG
408                         ctc_dump_skb(pskb, -6);
409 #endif
410                         privptr->stats.rx_dropped++;
411                         privptr->stats.rx_frame_errors++;
412                         return;
413                 }
414                 pskb->protocol = ntohs(header->type);
415                 if (header->length <= LL_HEADER_LENGTH) {
416 #ifndef DEBUG
417                         if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
418 #endif
419                                 ctc_pr_warn(
420                                        "%s Illegal packet size %d "
421                                        "received (MTU=%d blocklen=%d), "
422                                        "dropping\n", dev->name, header->length,
423                                        dev->mtu, len);
424                                 ch->logflags |= LOG_FLAG_ILLEGALSIZE;
425 #ifndef DEBUG
426                         }
427 #endif
428 #ifdef DEBUG
429                         ctc_dump_skb(pskb, -6);
430 #endif
431                         privptr->stats.rx_dropped++;
432                         privptr->stats.rx_length_errors++;
433                         return;
434                 }
435                 header->length -= LL_HEADER_LENGTH;
436                 len -= LL_HEADER_LENGTH;
437                 if ((header->length > skb_tailroom(pskb)) ||
438                     (header->length > len)) {
439 #ifndef DEBUG
440                         if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
441 #endif
442                                 ctc_pr_warn(
443                                         "%s Illegal packet size %d "
444                                         "(beyond the end of received data), "
445                                         "dropping\n", dev->name, header->length);
446                                 ch->logflags |= LOG_FLAG_OVERRUN;
447 #ifndef DEBUG
448                         }
449 #endif
450 #ifdef DEBUG
451                         ctc_dump_skb(pskb, -6);
452 #endif
453                         privptr->stats.rx_dropped++;
454                         privptr->stats.rx_length_errors++;
455                         return;
456                 }
457                 skb_put(pskb, header->length);
458                 skb_reset_mac_header(pskb);
459                 len -= header->length;
460                 skb = dev_alloc_skb(pskb->len);
461                 if (!skb) {
462 #ifndef DEBUG
463                         if (!(ch->logflags & LOG_FLAG_NOMEM)) {
464 #endif
465                                 ctc_pr_warn(
466                                         "%s Out of memory in ctc_unpack_skb\n",
467                                         dev->name);
468                                 ch->logflags |= LOG_FLAG_NOMEM;
469 #ifndef DEBUG
470                         }
471 #endif
472                         privptr->stats.rx_dropped++;
473                         return;
474                 }
475                 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
476                                           pskb->len);
477                 skb_reset_mac_header(skb);
478                 skb->dev = pskb->dev;
479                 skb->protocol = pskb->protocol;
480                 pskb->ip_summed = CHECKSUM_UNNECESSARY;
481                 /**
482                  * reset logflags
483                  */
484                 ch->logflags = 0;
485                 privptr->stats.rx_packets++;
486                 privptr->stats.rx_bytes += skb->len;
487                 netif_rx_ni(skb);
488                 dev->last_rx = jiffies;
489                 if (len > 0) {
490                         skb_pull(pskb, header->length);
491                         if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
492 #ifndef DEBUG
493                                 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
494 #endif
495                                         ctc_pr_warn(
496                                                 "%s Overrun in ctc_unpack_skb\n",
497                                                 dev->name);
498                                         ch->logflags |= LOG_FLAG_OVERRUN;
499 #ifndef DEBUG
500                                 }
501 #endif
502                                 return;
503                         }
504                         skb_put(pskb, LL_HEADER_LENGTH);
505                 }
506         }
507 }
508
509 /**
510  * Check return code of a preceeding ccw_device call, halt_IO etc...
511  *
512  * @param ch          The channel, the error belongs to.
513  * @param return_code The error code to inspect.
514  */
515 static void
516 ccw_check_return_code(struct channel *ch, int return_code, char *msg)
517 {
518         DBF_TEXT(trace, 5, __FUNCTION__);
519         switch (return_code) {
520                 case 0:
521                         fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
522                         break;
523                 case -EBUSY:
524                         ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
525                         fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
526                         break;
527                 case -ENODEV:
528                         ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
529                                      ch->id, msg);
530                         fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
531                         break;
532                 case -EIO:
533                         ctc_pr_emerg("%s (%s): Status pending... \n",
534                                      ch->id, msg);
535                         fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
536                         break;
537                 default:
538                         ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
539                                      ch->id, msg, return_code);
540                         fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
541         }
542 }
543
544 /**
545  * Check sense of a unit check.
546  *
547  * @param ch    The channel, the sense code belongs to.
548  * @param sense The sense code to inspect.
549  */
550 static void
551 ccw_unit_check(struct channel *ch, unsigned char sense)
552 {
553         DBF_TEXT(trace, 5, __FUNCTION__);
554         if (sense & SNS0_INTERVENTION_REQ) {
555                 if (sense & 0x01) {
556                         ctc_pr_debug("%s: Interface disc. or Sel. reset "
557                                         "(remote)\n", ch->id);
558                         fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
559                 } else {
560                         ctc_pr_debug("%s: System reset (remote)\n", ch->id);
561                         fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
562                 }
563         } else if (sense & SNS0_EQUIPMENT_CHECK) {
564                 if (sense & SNS0_BUS_OUT_CHECK) {
565                         ctc_pr_warn("%s: Hardware malfunction (remote)\n",
566                                     ch->id);
567                         fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
568                 } else {
569                         ctc_pr_warn("%s: Read-data parity error (remote)\n",
570                                     ch->id);
571                         fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
572                 }
573         } else if (sense & SNS0_BUS_OUT_CHECK) {
574                 if (sense & 0x04) {
575                         ctc_pr_warn("%s: Data-streaming timeout)\n", ch->id);
576                         fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
577                 } else {
578                         ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
579                         fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
580                 }
581         } else if (sense & SNS0_CMD_REJECT) {
582                 ctc_pr_warn("%s: Command reject\n", ch->id);
583         } else if (sense == 0) {
584                 ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
585                 fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
586         } else {
587                 ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
588                             ch->id, sense);
589                 fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
590         }
591 }
592
593 static void
594 ctc_purge_skb_queue(struct sk_buff_head *q)
595 {
596         struct sk_buff *skb;
597
598         DBF_TEXT(trace, 5, __FUNCTION__);
599
600         while ((skb = skb_dequeue(q))) {
601                 atomic_dec(&skb->users);
602                 dev_kfree_skb_irq(skb);
603         }
604 }
605
606 static int
607 ctc_checkalloc_buffer(struct channel *ch, int warn)
608 {
609         DBF_TEXT(trace, 5, __FUNCTION__);
610         if ((ch->trans_skb == NULL) ||
611             (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
612                 if (ch->trans_skb != NULL)
613                         dev_kfree_skb(ch->trans_skb);
614                 clear_normalized_cda(&ch->ccw[1]);
615                 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
616                                                 GFP_ATOMIC | GFP_DMA);
617                 if (ch->trans_skb == NULL) {
618                         if (warn)
619                                 ctc_pr_warn(
620                                         "%s: Couldn't alloc %s trans_skb\n",
621                                         ch->id,
622                                         (CHANNEL_DIRECTION(ch->flags) == READ) ?
623                                         "RX" : "TX");
624                         return -ENOMEM;
625                 }
626                 ch->ccw[1].count = ch->max_bufsize;
627                 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
628                         dev_kfree_skb(ch->trans_skb);
629                         ch->trans_skb = NULL;
630                         if (warn)
631                                 ctc_pr_warn(
632                                         "%s: set_normalized_cda for %s "
633                                         "trans_skb failed, dropping packets\n",
634                                         ch->id,
635                                         (CHANNEL_DIRECTION(ch->flags) == READ) ?
636                                         "RX" : "TX");
637                         return -ENOMEM;
638                 }
639                 ch->ccw[1].count = 0;
640                 ch->trans_skb_data = ch->trans_skb->data;
641                 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
642         }
643         return 0;
644 }
645
646 /**
647  * Dummy NOP action for statemachines
648  */
649 static void
650 fsm_action_nop(fsm_instance * fi, int event, void *arg)
651 {
652 }
653
654 /**
655  * Actions for channel - statemachines.
656  *****************************************************************************/
657
658 /**
659  * Normal data has been send. Free the corresponding
660  * skb (it's in io_queue), reset dev->tbusy and
661  * revert to idle state.
662  *
663  * @param fi    An instance of a channel statemachine.
664  * @param event The event, just happened.
665  * @param arg   Generic pointer, casted from channel * upon call.
666  */
667 static void
668 ch_action_txdone(fsm_instance * fi, int event, void *arg)
669 {
670         struct channel *ch = (struct channel *) arg;
671         struct net_device *dev = ch->netdev;
672         struct ctc_priv *privptr = dev->priv;
673         struct sk_buff *skb;
674         int first = 1;
675         int i;
676         unsigned long duration;
677         struct timespec done_stamp = current_kernel_time();
678
679         DBF_TEXT(trace, 4, __FUNCTION__);
680
681         duration =
682             (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
683             (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
684         if (duration > ch->prof.tx_time)
685                 ch->prof.tx_time = duration;
686
687         if (ch->irb->scsw.count != 0)
688                 ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
689                              dev->name, ch->irb->scsw.count);
690         fsm_deltimer(&ch->timer);
691         while ((skb = skb_dequeue(&ch->io_queue))) {
692                 privptr->stats.tx_packets++;
693                 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
694                 if (first) {
695                         privptr->stats.tx_bytes += 2;
696                         first = 0;
697                 }
698                 atomic_dec(&skb->users);
699                 dev_kfree_skb_irq(skb);
700         }
701         spin_lock(&ch->collect_lock);
702         clear_normalized_cda(&ch->ccw[4]);
703         if (ch->collect_len > 0) {
704                 int rc;
705
706                 if (ctc_checkalloc_buffer(ch, 1)) {
707                         spin_unlock(&ch->collect_lock);
708                         return;
709                 }
710                 ch->trans_skb->data = ch->trans_skb_data;
711                 skb_reset_tail_pointer(ch->trans_skb);
712                 ch->trans_skb->len = 0;
713                 if (ch->prof.maxmulti < (ch->collect_len + 2))
714                         ch->prof.maxmulti = ch->collect_len + 2;
715                 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
716                         ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
717                 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
718                 i = 0;
719                 while ((skb = skb_dequeue(&ch->collect_queue))) {
720                         skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
721                                                                skb->len),
722                                                   skb->len);
723                         privptr->stats.tx_packets++;
724                         privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
725                         atomic_dec(&skb->users);
726                         dev_kfree_skb_irq(skb);
727                         i++;
728                 }
729                 ch->collect_len = 0;
730                 spin_unlock(&ch->collect_lock);
731                 ch->ccw[1].count = ch->trans_skb->len;
732                 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
733                 ch->prof.send_stamp = current_kernel_time();
734                 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
735                                       (unsigned long) ch, 0xff, 0);
736                 ch->prof.doios_multi++;
737                 if (rc != 0) {
738                         privptr->stats.tx_dropped += i;
739                         privptr->stats.tx_errors += i;
740                         fsm_deltimer(&ch->timer);
741                         ccw_check_return_code(ch, rc, "chained TX");
742                 }
743         } else {
744                 spin_unlock(&ch->collect_lock);
745                 fsm_newstate(fi, CH_STATE_TXIDLE);
746         }
747         ctc_clear_busy(dev);
748 }
749
750 /**
751  * Initial data is sent.
752  * Notify device statemachine that we are up and
753  * running.
754  *
755  * @param fi    An instance of a channel statemachine.
756  * @param event The event, just happened.
757  * @param arg   Generic pointer, casted from channel * upon call.
758  */
759 static void
760 ch_action_txidle(fsm_instance * fi, int event, void *arg)
761 {
762         struct channel *ch = (struct channel *) arg;
763
764         DBF_TEXT(trace, 4, __FUNCTION__);
765         fsm_deltimer(&ch->timer);
766         fsm_newstate(fi, CH_STATE_TXIDLE);
767         fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
768                   ch->netdev);
769 }
770
771 /**
772  * Got normal data, check for sanity, queue it up, allocate new buffer
773  * trigger bottom half, and initiate next read.
774  *
775  * @param fi    An instance of a channel statemachine.
776  * @param event The event, just happened.
777  * @param arg   Generic pointer, casted from channel * upon call.
778  */
779 static void
780 ch_action_rx(fsm_instance * fi, int event, void *arg)
781 {
782         struct channel *ch = (struct channel *) arg;
783         struct net_device *dev = ch->netdev;
784         struct ctc_priv *privptr = dev->priv;
785         int len = ch->max_bufsize - ch->irb->scsw.count;
786         struct sk_buff *skb = ch->trans_skb;
787         __u16 block_len = *((__u16 *) skb->data);
788         int check_len;
789         int rc;
790
791         DBF_TEXT(trace, 4, __FUNCTION__);
792         fsm_deltimer(&ch->timer);
793         if (len < 8) {
794                 ctc_pr_debug("%s: got packet with length %d < 8\n",
795                              dev->name, len);
796                 privptr->stats.rx_dropped++;
797                 privptr->stats.rx_length_errors++;
798                 goto again;
799         }
800         if (len > ch->max_bufsize) {
801                 ctc_pr_debug("%s: got packet with length %d > %d\n",
802                              dev->name, len, ch->max_bufsize);
803                 privptr->stats.rx_dropped++;
804                 privptr->stats.rx_length_errors++;
805                 goto again;
806         }
807
808         /**
809          * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
810          */
811         switch (ch->protocol) {
812                 case CTC_PROTO_S390:
813                 case CTC_PROTO_OS390:
814                         check_len = block_len + 2;
815                         break;
816                 default:
817                         check_len = block_len;
818                         break;
819         }
820         if ((len < block_len) || (len > check_len)) {
821                 ctc_pr_debug("%s: got block length %d != rx length %d\n",
822                              dev->name, block_len, len);
823 #ifdef DEBUG
824                 ctc_dump_skb(skb, 0);
825 #endif
826                 *((__u16 *) skb->data) = len;
827                 privptr->stats.rx_dropped++;
828                 privptr->stats.rx_length_errors++;
829                 goto again;
830         }
831         block_len -= 2;
832         if (block_len > 0) {
833                 *((__u16 *) skb->data) = block_len;
834                 ctc_unpack_skb(ch, skb);
835         }
836  again:
837         skb->data = ch->trans_skb_data;
838         skb_reset_tail_pointer(skb);
839         skb->len = 0;
840         if (ctc_checkalloc_buffer(ch, 1))
841                 return;
842         ch->ccw[1].count = ch->max_bufsize;
843         rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
844         if (rc != 0)
845                 ccw_check_return_code(ch, rc, "normal RX");
846 }
847
848 static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
849
850 /**
851  * Initialize connection by sending a __u16 of value 0.
852  *
853  * @param fi    An instance of a channel statemachine.
854  * @param event The event, just happened.
855  * @param arg   Generic pointer, casted from channel * upon call.
856  */
857 static void
858 ch_action_firstio(fsm_instance * fi, int event, void *arg)
859 {
860         struct channel *ch = (struct channel *) arg;
861         int rc;
862
863         DBF_TEXT(trace, 4, __FUNCTION__);
864
865         if (fsm_getstate(fi) == CH_STATE_TXIDLE)
866                 ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
867         fsm_deltimer(&ch->timer);
868         if (ctc_checkalloc_buffer(ch, 1))
869                 return;
870         if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
871             (ch->protocol == CTC_PROTO_OS390)) {
872                 /* OS/390 resp. z/OS */
873                 if (CHANNEL_DIRECTION(ch->flags) == READ) {
874                         *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
875                         fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
876                                      CH_EVENT_TIMER, ch);
877                         ch_action_rxidle(fi, event, arg);
878                 } else {
879                         struct net_device *dev = ch->netdev;
880                         fsm_newstate(fi, CH_STATE_TXIDLE);
881                         fsm_event(((struct ctc_priv *) dev->priv)->fsm,
882                                   DEV_EVENT_TXUP, dev);
883                 }
884                 return;
885         }
886
887         /**
888          * Don't setup a timer for receiving the initial RX frame
889          * if in compatibility mode, since VM TCP delays the initial
890          * frame until it has some data to send.
891          */
892         if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
893             (ch->protocol != CTC_PROTO_S390))
894                 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
895
896         *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
897         ch->ccw[1].count = 2;   /* Transfer only length */
898
899         fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
900                      ? CH_STATE_RXINIT : CH_STATE_TXINIT);
901         rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
902         if (rc != 0) {
903                 fsm_deltimer(&ch->timer);
904                 fsm_newstate(fi, CH_STATE_SETUPWAIT);
905                 ccw_check_return_code(ch, rc, "init IO");
906         }
907         /**
908          * If in compatibility mode since we don't setup a timer, we
909          * also signal RX channel up immediately. This enables us
910          * to send packets early which in turn usually triggers some
911          * reply from VM TCP which brings up the RX channel to it's
912          * final state.
913          */
914         if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
915             (ch->protocol == CTC_PROTO_S390)) {
916                 struct net_device *dev = ch->netdev;
917                 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
918                           dev);
919         }
920 }
921
922 /**
923  * Got initial data, check it. If OK,
924  * notify device statemachine that we are up and
925  * running.
926  *
927  * @param fi    An instance of a channel statemachine.
928  * @param event The event, just happened.
929  * @param arg   Generic pointer, casted from channel * upon call.
930  */
931 static void
932 ch_action_rxidle(fsm_instance * fi, int event, void *arg)
933 {
934         struct channel *ch = (struct channel *) arg;
935         struct net_device *dev = ch->netdev;
936         __u16 buflen;
937         int rc;
938
939         DBF_TEXT(trace, 4, __FUNCTION__);
940         fsm_deltimer(&ch->timer);
941         buflen = *((__u16 *) ch->trans_skb->data);
942 #ifdef DEBUG
943         ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
944 #endif
945         if (buflen >= CTC_INITIAL_BLOCKLEN) {
946                 if (ctc_checkalloc_buffer(ch, 1))
947                         return;
948                 ch->ccw[1].count = ch->max_bufsize;
949                 fsm_newstate(fi, CH_STATE_RXIDLE);
950                 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
951                                       (unsigned long) ch, 0xff, 0);
952                 if (rc != 0) {
953                         fsm_newstate(fi, CH_STATE_RXINIT);
954                         ccw_check_return_code(ch, rc, "initial RX");
955                 } else
956                         fsm_event(((struct ctc_priv *) dev->priv)->fsm,
957                                   DEV_EVENT_RXUP, dev);
958         } else {
959                 ctc_pr_debug("%s: Initial RX count %d not %d\n",
960                              dev->name, buflen, CTC_INITIAL_BLOCKLEN);
961                 ch_action_firstio(fi, event, arg);
962         }
963 }
964
965 /**
966  * Set channel into extended mode.
967  *
968  * @param fi    An instance of a channel statemachine.
969  * @param event The event, just happened.
970  * @param arg   Generic pointer, casted from channel * upon call.
971  */
972 static void
973 ch_action_setmode(fsm_instance * fi, int event, void *arg)
974 {
975         struct channel *ch = (struct channel *) arg;
976         int rc;
977         unsigned long saveflags;
978
979         DBF_TEXT(trace, 4, __FUNCTION__);
980         fsm_deltimer(&ch->timer);
981         fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
982         fsm_newstate(fi, CH_STATE_SETUPWAIT);
983         saveflags = 0;  /* avoids compiler warning with
984                            spin_unlock_irqrestore */
985         if (event == CH_EVENT_TIMER)    // only for timer not yet locked
986                 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
987         rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
988         if (event == CH_EVENT_TIMER)
989                 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
990         if (rc != 0) {
991                 fsm_deltimer(&ch->timer);
992                 fsm_newstate(fi, CH_STATE_STARTWAIT);
993                 ccw_check_return_code(ch, rc, "set Mode");
994         } else
995                 ch->retry = 0;
996 }
997
998 /**
999  * Setup channel.
1000  *
1001  * @param fi    An instance of a channel statemachine.
1002  * @param event The event, just happened.
1003  * @param arg   Generic pointer, casted from channel * upon call.
1004  */
1005 static void
1006 ch_action_start(fsm_instance * fi, int event, void *arg)
1007 {
1008         struct channel *ch = (struct channel *) arg;
1009         unsigned long saveflags;
1010         int rc;
1011         struct net_device *dev;
1012
1013         DBF_TEXT(trace, 4, __FUNCTION__);
1014         if (ch == NULL) {
1015                 ctc_pr_warn("ch_action_start ch=NULL\n");
1016                 return;
1017         }
1018         if (ch->netdev == NULL) {
1019                 ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
1020                 return;
1021         }
1022         dev = ch->netdev;
1023
1024 #ifdef DEBUG
1025         ctc_pr_debug("%s: %s channel start\n", dev->name,
1026                      (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1027 #endif
1028
1029         if (ch->trans_skb != NULL) {
1030                 clear_normalized_cda(&ch->ccw[1]);
1031                 dev_kfree_skb(ch->trans_skb);
1032                 ch->trans_skb = NULL;
1033         }
1034         if (CHANNEL_DIRECTION(ch->flags) == READ) {
1035                 ch->ccw[1].cmd_code = CCW_CMD_READ;
1036                 ch->ccw[1].flags = CCW_FLAG_SLI;
1037                 ch->ccw[1].count = 0;
1038         } else {
1039                 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
1040                 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1041                 ch->ccw[1].count = 0;
1042         }
1043         if (ctc_checkalloc_buffer(ch, 0)) {
1044                 ctc_pr_notice(
1045                         "%s: Could not allocate %s trans_skb, delaying "
1046                         "allocation until first transfer\n",
1047                         dev->name,
1048                         (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1049         }
1050
1051         ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
1052         ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1053         ch->ccw[0].count = 0;
1054         ch->ccw[0].cda = 0;
1055         ch->ccw[2].cmd_code = CCW_CMD_NOOP;     /* jointed CE + DE */
1056         ch->ccw[2].flags = CCW_FLAG_SLI;
1057         ch->ccw[2].count = 0;
1058         ch->ccw[2].cda = 0;
1059         memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
1060         ch->ccw[4].cda = 0;
1061         ch->ccw[4].flags &= ~CCW_FLAG_IDA;
1062
1063         fsm_newstate(fi, CH_STATE_STARTWAIT);
1064         fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1065         spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1066         rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1067         spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1068         if (rc != 0) {
1069                 if (rc != -EBUSY)
1070                     fsm_deltimer(&ch->timer);
1071                 ccw_check_return_code(ch, rc, "initial HaltIO");
1072         }
1073 #ifdef DEBUG
1074         ctc_pr_debug("ctc: %s(): leaving\n", __func__);
1075 #endif
1076 }
1077
1078 /**
1079  * Shutdown a channel.
1080  *
1081  * @param fi    An instance of a channel statemachine.
1082  * @param event The event, just happened.
1083  * @param arg   Generic pointer, casted from channel * upon call.
1084  */
1085 static void
1086 ch_action_haltio(fsm_instance * fi, int event, void *arg)
1087 {
1088         struct channel *ch = (struct channel *) arg;
1089         unsigned long saveflags;
1090         int rc;
1091         int oldstate;
1092
1093         DBF_TEXT(trace, 3, __FUNCTION__);
1094         fsm_deltimer(&ch->timer);
1095         fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1096         saveflags = 0;  /* avoids comp warning with
1097                            spin_unlock_irqrestore */
1098         if (event == CH_EVENT_STOP)     // only for STOP not yet locked
1099                 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1100         oldstate = fsm_getstate(fi);
1101         fsm_newstate(fi, CH_STATE_TERM);
1102         rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1103         if (event == CH_EVENT_STOP)
1104                 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1105         if (rc != 0) {
1106                 if (rc != -EBUSY) {
1107                     fsm_deltimer(&ch->timer);
1108                     fsm_newstate(fi, oldstate);
1109                 }
1110                 ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
1111         }
1112 }
1113
1114 /**
1115  * A channel has successfully been halted.
1116  * Cleanup it's queue and notify interface statemachine.
1117  *
1118  * @param fi    An instance of a channel statemachine.
1119  * @param event The event, just happened.
1120  * @param arg   Generic pointer, casted from channel * upon call.
1121  */
1122 static void
1123 ch_action_stopped(fsm_instance * fi, int event, void *arg)
1124 {
1125         struct channel *ch = (struct channel *) arg;
1126         struct net_device *dev = ch->netdev;
1127
1128         DBF_TEXT(trace, 3, __FUNCTION__);
1129         fsm_deltimer(&ch->timer);
1130         fsm_newstate(fi, CH_STATE_STOPPED);
1131         if (ch->trans_skb != NULL) {
1132                 clear_normalized_cda(&ch->ccw[1]);
1133                 dev_kfree_skb(ch->trans_skb);
1134                 ch->trans_skb = NULL;
1135         }
1136         if (CHANNEL_DIRECTION(ch->flags) == READ) {
1137                 skb_queue_purge(&ch->io_queue);
1138                 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1139                           DEV_EVENT_RXDOWN, dev);
1140         } else {
1141                 ctc_purge_skb_queue(&ch->io_queue);
1142                 spin_lock(&ch->collect_lock);
1143                 ctc_purge_skb_queue(&ch->collect_queue);
1144                 ch->collect_len = 0;
1145                 spin_unlock(&ch->collect_lock);
1146                 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1147                           DEV_EVENT_TXDOWN, dev);
1148         }
1149 }
1150
1151 /**
1152  * A stop command from device statemachine arrived and we are in
1153  * not operational mode. Set state to stopped.
1154  *
1155  * @param fi    An instance of a channel statemachine.
1156  * @param event The event, just happened.
1157  * @param arg   Generic pointer, casted from channel * upon call.
1158  */
1159 static void
1160 ch_action_stop(fsm_instance * fi, int event, void *arg)
1161 {
1162         fsm_newstate(fi, CH_STATE_STOPPED);
1163 }
1164
1165 /**
1166  * A machine check for no path, not operational status or gone device has
1167  * happened.
1168  * Cleanup queue and notify interface statemachine.
1169  *
1170  * @param fi    An instance of a channel statemachine.
1171  * @param event The event, just happened.
1172  * @param arg   Generic pointer, casted from channel * upon call.
1173  */
1174 static void
1175 ch_action_fail(fsm_instance * fi, int event, void *arg)
1176 {
1177         struct channel *ch = (struct channel *) arg;
1178         struct net_device *dev = ch->netdev;
1179
1180         DBF_TEXT(trace, 3, __FUNCTION__);
1181         fsm_deltimer(&ch->timer);
1182         fsm_newstate(fi, CH_STATE_NOTOP);
1183         if (CHANNEL_DIRECTION(ch->flags) == READ) {
1184                 skb_queue_purge(&ch->io_queue);
1185                 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1186                           DEV_EVENT_RXDOWN, dev);
1187         } else {
1188                 ctc_purge_skb_queue(&ch->io_queue);
1189                 spin_lock(&ch->collect_lock);
1190                 ctc_purge_skb_queue(&ch->collect_queue);
1191                 ch->collect_len = 0;
1192                 spin_unlock(&ch->collect_lock);
1193                 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1194                           DEV_EVENT_TXDOWN, dev);
1195         }
1196 }
1197
1198 /**
1199  * Handle error during setup of channel.
1200  *
1201  * @param fi    An instance of a channel statemachine.
1202  * @param event The event, just happened.
1203  * @param arg   Generic pointer, casted from channel * upon call.
1204  */
1205 static void
1206 ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1207 {
1208         struct channel *ch = (struct channel *) arg;
1209         struct net_device *dev = ch->netdev;
1210
1211         DBF_TEXT(setup, 3, __FUNCTION__);
1212         /**
1213          * Special case: Got UC_RCRESET on setmode.
1214          * This means that remote side isn't setup. In this case
1215          * simply retry after some 10 secs...
1216          */
1217         if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1218             ((event == CH_EVENT_UC_RCRESET) ||
1219              (event == CH_EVENT_UC_RSRESET))) {
1220                 fsm_newstate(fi, CH_STATE_STARTRETRY);
1221                 fsm_deltimer(&ch->timer);
1222                 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1223                 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1224                         int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1225                         if (rc != 0)
1226                                 ccw_check_return_code(
1227                                         ch, rc, "HaltIO in ch_action_setuperr");
1228                 }
1229                 return;
1230         }
1231
1232         ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
1233                      dev->name, ch_event_names[event],
1234                      (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1235                      fsm_getstate_str(fi));
1236         if (CHANNEL_DIRECTION(ch->flags) == READ) {
1237                 fsm_newstate(fi, CH_STATE_RXERR);
1238                 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1239                           DEV_EVENT_RXDOWN, dev);
1240         } else {
1241                 fsm_newstate(fi, CH_STATE_TXERR);
1242                 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1243                           DEV_EVENT_TXDOWN, dev);
1244         }
1245 }
1246
1247 /**
1248  * Restart a channel after an error.
1249  *
1250  * @param fi    An instance of a channel statemachine.
1251  * @param event The event, just happened.
1252  * @param arg   Generic pointer, casted from channel * upon call.
1253  */
1254 static void
1255 ch_action_restart(fsm_instance * fi, int event, void *arg)
1256 {
1257         unsigned long saveflags;
1258         int oldstate;
1259         int rc;
1260
1261         struct channel *ch = (struct channel *) arg;
1262         struct net_device *dev = ch->netdev;
1263
1264         DBF_TEXT(trace, 3, __FUNCTION__);
1265         fsm_deltimer(&ch->timer);
1266         ctc_pr_debug("%s: %s channel restart\n", dev->name,
1267                      (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1268         fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1269         oldstate = fsm_getstate(fi);
1270         fsm_newstate(fi, CH_STATE_STARTWAIT);
1271         saveflags = 0;  /* avoids compiler warning with
1272                            spin_unlock_irqrestore */
1273         if (event == CH_EVENT_TIMER)    // only for timer not yet locked
1274                 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1275         rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1276         if (event == CH_EVENT_TIMER)
1277                 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1278         if (rc != 0) {
1279                 if (rc != -EBUSY) {
1280                     fsm_deltimer(&ch->timer);
1281                     fsm_newstate(fi, oldstate);
1282                 }
1283                 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1284         }
1285 }
1286
1287 /**
1288  * Handle error during RX initial handshake (exchange of
1289  * 0-length block header)
1290  *
1291  * @param fi    An instance of a channel statemachine.
1292  * @param event The event, just happened.
1293  * @param arg   Generic pointer, casted from channel * upon call.
1294  */
1295 static void
1296 ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1297 {
1298         struct channel *ch = (struct channel *) arg;
1299         struct net_device *dev = ch->netdev;
1300
1301         DBF_TEXT(setup, 3, __FUNCTION__);
1302         if (event == CH_EVENT_TIMER) {
1303                 fsm_deltimer(&ch->timer);
1304                 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
1305                 if (ch->retry++ < 3)
1306                         ch_action_restart(fi, event, arg);
1307                 else {
1308                         fsm_newstate(fi, CH_STATE_RXERR);
1309                         fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1310                                   DEV_EVENT_RXDOWN, dev);
1311                 }
1312         } else
1313                 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1314 }
1315
1316 /**
1317  * Notify device statemachine if we gave up initialization
1318  * of RX channel.
1319  *
1320  * @param fi    An instance of a channel statemachine.
1321  * @param event The event, just happened.
1322  * @param arg   Generic pointer, casted from channel * upon call.
1323  */
1324 static void
1325 ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1326 {
1327         struct channel *ch = (struct channel *) arg;
1328         struct net_device *dev = ch->netdev;
1329
1330         DBF_TEXT(setup, 3, __FUNCTION__);
1331         fsm_newstate(fi, CH_STATE_RXERR);
1332         ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1333         ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1334         fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1335 }
1336
1337 /**
1338  * Handle RX Unit check remote reset (remote disconnected)
1339  *
1340  * @param fi    An instance of a channel statemachine.
1341  * @param event The event, just happened.
1342  * @param arg   Generic pointer, casted from channel * upon call.
1343  */
1344 static void
1345 ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1346 {
1347         struct channel *ch = (struct channel *) arg;
1348         struct channel *ch2;
1349         struct net_device *dev = ch->netdev;
1350
1351         DBF_TEXT(trace, 3, __FUNCTION__);
1352         fsm_deltimer(&ch->timer);
1353         ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1354                      dev->name);
1355
1356         /**
1357          * Notify device statemachine
1358          */
1359         fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1360         fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
1361
1362         fsm_newstate(fi, CH_STATE_DTERM);
1363         ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1364         fsm_newstate(ch2->fsm, CH_STATE_DTERM);
1365
1366         ccw_device_halt(ch->cdev, (unsigned long) ch);
1367         ccw_device_halt(ch2->cdev, (unsigned long) ch2);
1368 }
1369
1370 /**
1371  * Handle error during TX channel initialization.
1372  *
1373  * @param fi    An instance of a channel statemachine.
1374  * @param event The event, just happened.
1375  * @param arg   Generic pointer, casted from channel * upon call.
1376  */
1377 static void
1378 ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1379 {
1380         struct channel *ch = (struct channel *) arg;
1381         struct net_device *dev = ch->netdev;
1382
1383         DBF_TEXT(setup, 2, __FUNCTION__);
1384         if (event == CH_EVENT_TIMER) {
1385                 fsm_deltimer(&ch->timer);
1386                 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
1387                 if (ch->retry++ < 3)
1388                         ch_action_restart(fi, event, arg);
1389                 else {
1390                         fsm_newstate(fi, CH_STATE_TXERR);
1391                         fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1392                                   DEV_EVENT_TXDOWN, dev);
1393                 }
1394         } else
1395                 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
1396 }
1397
1398 /**
1399  * Handle TX timeout by retrying operation.
1400  *
1401  * @param fi    An instance of a channel statemachine.
1402  * @param event The event, just happened.
1403  * @param arg   Generic pointer, casted from channel * upon call.
1404  */
1405 static void
1406 ch_action_txretry(fsm_instance * fi, int event, void *arg)
1407 {
1408         struct channel *ch = (struct channel *) arg;
1409         struct net_device *dev = ch->netdev;
1410         unsigned long saveflags;
1411
1412         DBF_TEXT(trace, 4, __FUNCTION__);
1413         fsm_deltimer(&ch->timer);
1414         if (ch->retry++ > 3) {
1415                 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1416                              dev->name);
1417                 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1418                           DEV_EVENT_TXDOWN, dev);
1419                 ch_action_restart(fi, event, arg);
1420         } else {
1421                 struct sk_buff *skb;
1422
1423                 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
1424                 if ((skb = skb_peek(&ch->io_queue))) {
1425                         int rc = 0;
1426
1427                         clear_normalized_cda(&ch->ccw[4]);
1428                         ch->ccw[4].count = skb->len;
1429                         if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1430                                 ctc_pr_debug(
1431                                         "%s: IDAL alloc failed, chan restart\n",
1432                                         dev->name);
1433                                 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1434                                           DEV_EVENT_TXDOWN, dev);
1435                                 ch_action_restart(fi, event, arg);
1436                                 return;
1437                         }
1438                         fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1439                         saveflags = 0;  /* avoids compiler warning with
1440                                            spin_unlock_irqrestore */
1441                         if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1442                                 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1443                                                   saveflags);
1444                         rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1445                                               (unsigned long) ch, 0xff, 0);
1446                         if (event == CH_EVENT_TIMER)
1447                                 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1448                                                        saveflags);
1449                         if (rc != 0) {
1450                                 fsm_deltimer(&ch->timer);
1451                                 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1452                                 ctc_purge_skb_queue(&ch->io_queue);
1453                         }
1454                 }
1455         }
1456
1457 }
1458
1459 /**
1460  * Handle fatal errors during an I/O command.
1461  *
1462  * @param fi    An instance of a channel statemachine.
1463  * @param event The event, just happened.
1464  * @param arg   Generic pointer, casted from channel * upon call.
1465  */
1466 static void
1467 ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1468 {
1469         struct channel *ch = (struct channel *) arg;
1470         struct net_device *dev = ch->netdev;
1471
1472         DBF_TEXT(trace, 3, __FUNCTION__);
1473         fsm_deltimer(&ch->timer);
1474         if (CHANNEL_DIRECTION(ch->flags) == READ) {
1475                 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1476                 fsm_newstate(fi, CH_STATE_RXERR);
1477                 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1478                           DEV_EVENT_RXDOWN, dev);
1479         } else {
1480                 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1481                 fsm_newstate(fi, CH_STATE_TXERR);
1482                 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1483                           DEV_EVENT_TXDOWN, dev);
1484         }
1485 }
1486
1487 static void
1488 ch_action_reinit(fsm_instance *fi, int event, void *arg)
1489 {
1490         struct channel *ch = (struct channel *)arg;
1491         struct net_device *dev = ch->netdev;
1492         struct ctc_priv *privptr = dev->priv;
1493
1494         DBF_TEXT(trace, 4, __FUNCTION__);
1495         ch_action_iofatal(fi, event, arg);
1496         fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1497 }
1498
1499 /**
1500  * The statemachine for a channel.
1501  */
1502 static const fsm_node ch_fsm[] = {
1503         {CH_STATE_STOPPED,    CH_EVENT_STOP,       fsm_action_nop       },
1504         {CH_STATE_STOPPED,    CH_EVENT_START,      ch_action_start      },
1505         {CH_STATE_STOPPED,    CH_EVENT_FINSTAT,    fsm_action_nop       },
1506         {CH_STATE_STOPPED,    CH_EVENT_MC_FAIL,    fsm_action_nop       },
1507
1508         {CH_STATE_NOTOP,      CH_EVENT_STOP,       ch_action_stop       },
1509         {CH_STATE_NOTOP,      CH_EVENT_START,      fsm_action_nop       },
1510         {CH_STATE_NOTOP,      CH_EVENT_FINSTAT,    fsm_action_nop       },
1511         {CH_STATE_NOTOP,      CH_EVENT_MC_FAIL,    fsm_action_nop       },
1512         {CH_STATE_NOTOP,      CH_EVENT_MC_GOOD,    ch_action_start      },
1513
1514         {CH_STATE_STARTWAIT,  CH_EVENT_STOP,       ch_action_haltio     },
1515         {CH_STATE_STARTWAIT,  CH_EVENT_START,      fsm_action_nop       },
1516         {CH_STATE_STARTWAIT,  CH_EVENT_FINSTAT,    ch_action_setmode    },
1517         {CH_STATE_STARTWAIT,  CH_EVENT_TIMER,      ch_action_setuperr   },
1518         {CH_STATE_STARTWAIT,  CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
1519         {CH_STATE_STARTWAIT,  CH_EVENT_IO_EIO,     ch_action_reinit     },
1520         {CH_STATE_STARTWAIT,  CH_EVENT_MC_FAIL,    ch_action_fail       },
1521
1522         {CH_STATE_STARTRETRY, CH_EVENT_STOP,       ch_action_haltio     },
1523         {CH_STATE_STARTRETRY, CH_EVENT_TIMER,      ch_action_setmode    },
1524         {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT,    fsm_action_nop       },
1525         {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL,    ch_action_fail       },
1526
1527         {CH_STATE_SETUPWAIT,  CH_EVENT_STOP,       ch_action_haltio     },
1528         {CH_STATE_SETUPWAIT,  CH_EVENT_START,      fsm_action_nop       },
1529         {CH_STATE_SETUPWAIT,  CH_EVENT_FINSTAT,    ch_action_firstio    },
1530         {CH_STATE_SETUPWAIT,  CH_EVENT_UC_RCRESET, ch_action_setuperr   },
1531         {CH_STATE_SETUPWAIT,  CH_EVENT_UC_RSRESET, ch_action_setuperr   },
1532         {CH_STATE_SETUPWAIT,  CH_EVENT_TIMER,      ch_action_setmode    },
1533         {CH_STATE_SETUPWAIT,  CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
1534         {CH_STATE_SETUPWAIT,  CH_EVENT_IO_EIO,     ch_action_reinit     },
1535         {CH_STATE_SETUPWAIT,  CH_EVENT_MC_FAIL,    ch_action_fail       },
1536
1537         {CH_STATE_RXINIT,     CH_EVENT_STOP,       ch_action_haltio     },
1538         {CH_STATE_RXINIT,     CH_EVENT_START,      fsm_action_nop       },
1539         {CH_STATE_RXINIT,     CH_EVENT_FINSTAT,    ch_action_rxidle     },
1540         {CH_STATE_RXINIT,     CH_EVENT_UC_RCRESET, ch_action_rxiniterr  },
1541         {CH_STATE_RXINIT,     CH_EVENT_UC_RSRESET, ch_action_rxiniterr  },
1542         {CH_STATE_RXINIT,     CH_EVENT_TIMER,      ch_action_rxiniterr  },
1543         {CH_STATE_RXINIT,     CH_EVENT_ATTNBUSY,   ch_action_rxinitfail },
1544         {CH_STATE_RXINIT,     CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
1545         {CH_STATE_RXINIT,     CH_EVENT_IO_EIO,     ch_action_reinit     },
1546         {CH_STATE_RXINIT,     CH_EVENT_UC_ZERO,    ch_action_firstio    },
1547         {CH_STATE_RXINIT,     CH_EVENT_MC_FAIL,    ch_action_fail       },
1548
1549         {CH_STATE_RXIDLE,     CH_EVENT_STOP,       ch_action_haltio     },
1550         {CH_STATE_RXIDLE,     CH_EVENT_START,      fsm_action_nop       },
1551         {CH_STATE_RXIDLE,     CH_EVENT_FINSTAT,    ch_action_rx         },
1552         {CH_STATE_RXIDLE,     CH_EVENT_UC_RCRESET, ch_action_rxdisc     },
1553 //      {CH_STATE_RXIDLE,     CH_EVENT_UC_RSRESET, ch_action_rxretry    },
1554         {CH_STATE_RXIDLE,     CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
1555         {CH_STATE_RXIDLE,     CH_EVENT_IO_EIO,     ch_action_reinit     },
1556         {CH_STATE_RXIDLE,     CH_EVENT_MC_FAIL,    ch_action_fail       },
1557         {CH_STATE_RXIDLE,     CH_EVENT_UC_ZERO,    ch_action_rx         },
1558
1559         {CH_STATE_TXINIT,     CH_EVENT_STOP,       ch_action_haltio     },
1560         {CH_STATE_TXINIT,     CH_EVENT_START,      fsm_action_nop       },
1561         {CH_STATE_TXINIT,     CH_EVENT_FINSTAT,    ch_action_txidle     },
1562         {CH_STATE_TXINIT,     CH_EVENT_UC_RCRESET, ch_action_txiniterr  },
1563         {CH_STATE_TXINIT,     CH_EVENT_UC_RSRESET, ch_action_txiniterr  },
1564         {CH_STATE_TXINIT,     CH_EVENT_TIMER,      ch_action_txiniterr  },
1565         {CH_STATE_TXINIT,     CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
1566         {CH_STATE_TXINIT,     CH_EVENT_IO_EIO,     ch_action_reinit     },
1567         {CH_STATE_TXINIT,     CH_EVENT_MC_FAIL,    ch_action_fail       },
1568
1569         {CH_STATE_TXIDLE,     CH_EVENT_STOP,       ch_action_haltio     },
1570         {CH_STATE_TXIDLE,     CH_EVENT_START,      fsm_action_nop       },
1571         {CH_STATE_TXIDLE,     CH_EVENT_FINSTAT,    ch_action_firstio    },
1572         {CH_STATE_TXIDLE,     CH_EVENT_UC_RCRESET, fsm_action_nop       },
1573         {CH_STATE_TXIDLE,     CH_EVENT_UC_RSRESET, fsm_action_nop       },
1574         {CH_STATE_TXIDLE,     CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
1575         {CH_STATE_TXIDLE,     CH_EVENT_IO_EIO,     ch_action_reinit     },
1576         {CH_STATE_TXIDLE,     CH_EVENT_MC_FAIL,    ch_action_fail       },
1577
1578         {CH_STATE_TERM,       CH_EVENT_STOP,       fsm_action_nop       },
1579         {CH_STATE_TERM,       CH_EVENT_START,      ch_action_restart    },
1580         {CH_STATE_TERM,       CH_EVENT_FINSTAT,    ch_action_stopped    },
1581         {CH_STATE_TERM,       CH_EVENT_UC_RCRESET, fsm_action_nop       },
1582         {CH_STATE_TERM,       CH_EVENT_UC_RSRESET, fsm_action_nop       },
1583         {CH_STATE_TERM,       CH_EVENT_MC_FAIL,    ch_action_fail       },
1584
1585         {CH_STATE_DTERM,      CH_EVENT_STOP,       ch_action_haltio     },
1586         {CH_STATE_DTERM,      CH_EVENT_START,      ch_action_restart    },
1587         {CH_STATE_DTERM,      CH_EVENT_FINSTAT,    ch_action_setmode    },
1588         {CH_STATE_DTERM,      CH_EVENT_UC_RCRESET, fsm_action_nop       },
1589         {CH_STATE_DTERM,      CH_EVENT_UC_RSRESET, fsm_action_nop       },
1590         {CH_STATE_DTERM,      CH_EVENT_MC_FAIL,    ch_action_fail       },
1591
1592         {CH_STATE_TX,         CH_EVENT_STOP,       ch_action_haltio     },
1593         {CH_STATE_TX,         CH_EVENT_START,      fsm_action_nop       },
1594         {CH_STATE_TX,         CH_EVENT_FINSTAT,    ch_action_txdone     },
1595         {CH_STATE_TX,         CH_EVENT_UC_RCRESET, ch_action_txretry    },
1596         {CH_STATE_TX,         CH_EVENT_UC_RSRESET, ch_action_txretry    },
1597         {CH_STATE_TX,         CH_EVENT_TIMER,      ch_action_txretry    },
1598         {CH_STATE_TX,         CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
1599         {CH_STATE_TX,         CH_EVENT_IO_EIO,     ch_action_reinit     },
1600         {CH_STATE_TX,         CH_EVENT_MC_FAIL,    ch_action_fail       },
1601
1602         {CH_STATE_RXERR,      CH_EVENT_STOP,       ch_action_haltio     },
1603         {CH_STATE_TXERR,      CH_EVENT_STOP,       ch_action_haltio     },
1604         {CH_STATE_TXERR,      CH_EVENT_MC_FAIL,    ch_action_fail       },
1605         {CH_STATE_RXERR,      CH_EVENT_MC_FAIL,    ch_action_fail       },
1606 };
1607
1608 static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
1609
1610 /**
1611  * Functions related to setup and device detection.
1612  *****************************************************************************/
1613
1614 static inline int
1615 less_than(char *id1, char *id2)
1616 {
1617         int dev1, dev2, i;
1618
1619         for (i = 0; i < 5; i++) {
1620                 id1++;
1621                 id2++;
1622         }
1623         dev1 = simple_strtoul(id1, &id1, 16);
1624         dev2 = simple_strtoul(id2, &id2, 16);
1625
1626         return (dev1 < dev2);
1627 }
1628
1629 /**
1630  * Add a new channel to the list of channels.
1631  * Keeps the channel list sorted.
1632  *
1633  * @param cdev  The ccw_device to be added.
1634  * @param type  The type class of the new channel.
1635  *
1636  * @return 0 on success, !0 on error.
1637  */
1638 static int
1639 add_channel(struct ccw_device *cdev, enum channel_types type)
1640 {
1641         struct channel **c = &channels;
1642         struct channel *ch;
1643
1644         DBF_TEXT(trace, 2, __FUNCTION__);
1645         ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1646         if (!ch) {
1647                 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1648                 return -1;
1649         }
1650         /* assure all flags and counters are reset */
1651         ch->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1652         if (!ch->ccw) {
1653                 kfree(ch);
1654                 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1655                 return -1;
1656         }
1657
1658
1659         /**
1660          * "static" ccws are used in the following way:
1661          *
1662          * ccw[0..2] (Channel program for generic I/O):
1663          *           0: prepare
1664          *           1: read or write (depending on direction) with fixed
1665          *              buffer (idal allocated once when buffer is allocated)
1666          *           2: nop
1667          * ccw[3..5] (Channel program for direct write of packets)
1668          *           3: prepare
1669          *           4: write (idal allocated on every write).
1670          *           5: nop
1671          * ccw[6..7] (Channel program for initial channel setup):
1672          *           6: set extended mode
1673          *           7: nop
1674          *
1675          * ch->ccw[0..5] are initialized in ch_action_start because
1676          * the channel's direction is yet unknown here.
1677          */
1678         ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1679         ch->ccw[6].flags = CCW_FLAG_SLI;
1680
1681         ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1682         ch->ccw[7].flags = CCW_FLAG_SLI;
1683
1684         ch->cdev = cdev;
1685         snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1686         ch->type = type;
1687         ch->fsm = init_fsm(ch->id, ch_state_names,
1688                            ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1689                            ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1690         if (ch->fsm == NULL) {
1691                 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1692                 kfree(ch->ccw);
1693                 kfree(ch);
1694                 return -1;
1695         }
1696         fsm_newstate(ch->fsm, CH_STATE_IDLE);
1697         ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
1698         if (!ch->irb) {
1699                 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1700                 kfree_fsm(ch->fsm);
1701                 kfree(ch->ccw);
1702                 kfree(ch);
1703                 return -1;
1704         }
1705         while (*c && less_than((*c)->id, ch->id))
1706                 c = &(*c)->next;
1707         if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
1708                 ctc_pr_debug(
1709                         "ctc: add_channel: device %s already in list, "
1710                         "using old entry\n", (*c)->id);
1711                 kfree(ch->irb);
1712                 kfree_fsm(ch->fsm);
1713                 kfree(ch->ccw);
1714                 kfree(ch);
1715                 return 0;
1716         }
1717
1718         spin_lock_init(&ch->collect_lock);
1719
1720         fsm_settimer(ch->fsm, &ch->timer);
1721         skb_queue_head_init(&ch->io_queue);
1722         skb_queue_head_init(&ch->collect_queue);
1723         ch->next = *c;
1724         *c = ch;
1725         return 0;
1726 }
1727
1728 /**
1729  * Release a specific channel in the channel list.
1730  *
1731  * @param ch Pointer to channel struct to be released.
1732  */
1733 static void
1734 channel_free(struct channel *ch)
1735 {
1736         ch->flags &= ~CHANNEL_FLAGS_INUSE;
1737         fsm_newstate(ch->fsm, CH_STATE_IDLE);
1738 }
1739
1740 /**
1741  * Remove a specific channel in the channel list.
1742  *
1743  * @param ch Pointer to channel struct to be released.
1744  */
1745 static void
1746 channel_remove(struct channel *ch)
1747 {
1748         struct channel **c = &channels;
1749
1750         DBF_TEXT(trace, 2, __FUNCTION__);
1751         if (ch == NULL)
1752                 return;
1753
1754         channel_free(ch);
1755         while (*c) {
1756                 if (*c == ch) {
1757                         *c = ch->next;
1758                         fsm_deltimer(&ch->timer);
1759                         kfree_fsm(ch->fsm);
1760                         clear_normalized_cda(&ch->ccw[4]);
1761                         if (ch->trans_skb != NULL) {
1762                                 clear_normalized_cda(&ch->ccw[1]);
1763                                 dev_kfree_skb(ch->trans_skb);
1764                         }
1765                         kfree(ch->ccw);
1766                         kfree(ch->irb);
1767                         kfree(ch);
1768                         return;
1769                 }
1770                 c = &((*c)->next);
1771         }
1772 }
1773
1774 /**
1775  * Get a specific channel from the channel list.
1776  *
1777  * @param type Type of channel we are interested in.
1778  * @param id Id of channel we are interested in.
1779  * @param direction Direction we want to use this channel for.
1780  *
1781  * @return Pointer to a channel or NULL if no matching channel available.
1782  */
1783 static struct channel
1784 *
1785 channel_get(enum channel_types type, char *id, int direction)
1786 {
1787         struct channel *ch = channels;
1788
1789         DBF_TEXT(trace, 3, __FUNCTION__);
1790 #ifdef DEBUG
1791         ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
1792                      __func__, id, type);
1793 #endif
1794
1795         while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
1796 #ifdef DEBUG
1797                 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d\n",
1798                              __func__, ch, ch->id, ch->type);
1799 #endif
1800                 ch = ch->next;
1801         }
1802 #ifdef DEBUG
1803         ctc_pr_debug("ctc: %s(): ch=0x%pq (id=%s, type=%d\n",
1804                      __func__, ch, ch->id, ch->type);
1805 #endif
1806         if (!ch) {
1807                 ctc_pr_warn("ctc: %s(): channel with id %s "
1808                             "and type %d not found in channel list\n",
1809                             __func__, id, type);
1810         } else {
1811                 if (ch->flags & CHANNEL_FLAGS_INUSE)
1812                         ch = NULL;
1813                 else {
1814                         ch->flags |= CHANNEL_FLAGS_INUSE;
1815                         ch->flags &= ~CHANNEL_FLAGS_RWMASK;
1816                         ch->flags |= (direction == WRITE)
1817                             ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
1818                         fsm_newstate(ch->fsm, CH_STATE_STOPPED);
1819                 }
1820         }
1821         return ch;
1822 }
1823
1824 /**
1825  * Return the channel type by name.
1826  *
1827  * @param name Name of network interface.
1828  *
1829  * @return Type class of channel to be used for that interface.
1830  */
1831 static enum channel_types inline
1832 extract_channel_media(char *name)
1833 {
1834         enum channel_types ret = channel_type_unknown;
1835
1836         if (name != NULL) {
1837                 if (strncmp(name, "ctc", 3) == 0)
1838                         ret = channel_type_parallel;
1839                 if (strncmp(name, "escon", 5) == 0)
1840                         ret = channel_type_escon;
1841         }
1842         return ret;
1843 }
1844
1845 static long
1846 __ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1847 {
1848         if (!IS_ERR(irb))
1849                 return 0;
1850
1851         switch (PTR_ERR(irb)) {
1852         case -EIO:
1853                 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
1854 //              CTC_DBF_TEXT(trace, 2, "ckirberr");
1855 //              CTC_DBF_TEXT_(trace, 2, "  rc%d", -EIO);
1856                 break;
1857         case -ETIMEDOUT:
1858                 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
1859 //              CTC_DBF_TEXT(trace, 2, "ckirberr");
1860 //              CTC_DBF_TEXT_(trace, 2, "  rc%d", -ETIMEDOUT);
1861                 break;
1862         default:
1863                 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
1864                            cdev->dev.bus_id);
1865 //              CTC_DBF_TEXT(trace, 2, "ckirberr");
1866 //              CTC_DBF_TEXT(trace, 2, "  rc???");
1867         }
1868         return PTR_ERR(irb);
1869 }
1870
1871 /**
1872  * Main IRQ handler.
1873  *
1874  * @param cdev    The ccw_device the interrupt is for.
1875  * @param intparm interruption parameter.
1876  * @param irb     interruption response block.
1877  */
1878 static void
1879 ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1880 {
1881         struct channel *ch;
1882         struct net_device *dev;
1883         struct ctc_priv *priv;
1884
1885         DBF_TEXT(trace, 5, __FUNCTION__);
1886         if (__ctc_check_irb_error(cdev, irb))
1887                 return;
1888
1889         /* Check for unsolicited interrupts. */
1890         if (!cdev->dev.driver_data) {
1891                 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
1892                             cdev->dev.bus_id, irb->scsw.cstat,
1893                             irb->scsw.dstat);
1894                 return;
1895         }
1896
1897         priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
1898                 ->dev.driver_data;
1899
1900         /* Try to extract channel from driver data. */
1901         if (priv->channel[READ]->cdev == cdev)
1902                 ch = priv->channel[READ];
1903         else if (priv->channel[WRITE]->cdev == cdev)
1904                 ch = priv->channel[WRITE];
1905         else {
1906                 ctc_pr_err("ctc: Can't determine channel for interrupt, "
1907                            "device %s\n", cdev->dev.bus_id);
1908                 return;
1909         }
1910
1911         dev = (struct net_device *) (ch->netdev);
1912         if (dev == NULL) {
1913                 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
1914                             cdev->dev.bus_id, ch);
1915                 return;
1916         }
1917
1918 #ifdef DEBUG
1919         ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
1920                      dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
1921 #endif
1922
1923         /* Copy interruption response block. */
1924         memcpy(ch->irb, irb, sizeof(struct irb));
1925
1926         /* Check for good subchannel return code, otherwise error message */
1927         if (ch->irb->scsw.cstat) {
1928                 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
1929                 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
1930                             dev->name, ch->id, ch->irb->scsw.cstat,
1931                             ch->irb->scsw.dstat);
1932                 return;
1933         }
1934
1935         /* Check the reason-code of a unit check */
1936         if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
1937                 ccw_unit_check(ch, ch->irb->ecw[0]);
1938                 return;
1939         }
1940         if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
1941                 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
1942                         fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
1943                 else
1944                         fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
1945                 return;
1946         }
1947         if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
1948                 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
1949                 return;
1950         }
1951         if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
1952             (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
1953             (ch->irb->scsw.stctl ==
1954              (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1955                 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
1956         else
1957                 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
1958
1959 }
1960
1961 /**
1962  * Actions for interface - statemachine.
1963  *****************************************************************************/
1964
1965 /**
1966  * Startup channels by sending CH_EVENT_START to each channel.
1967  *
1968  * @param fi    An instance of an interface statemachine.
1969  * @param event The event, just happened.
1970  * @param arg   Generic pointer, casted from struct net_device * upon call.
1971  */
1972 static void
1973 dev_action_start(fsm_instance * fi, int event, void *arg)
1974 {
1975         struct net_device *dev = (struct net_device *) arg;
1976         struct ctc_priv *privptr = dev->priv;
1977         int direction;
1978
1979         DBF_TEXT(setup, 3, __FUNCTION__);
1980         fsm_deltimer(&privptr->restart_timer);
1981         fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
1982         for (direction = READ; direction <= WRITE; direction++) {
1983                 struct channel *ch = privptr->channel[direction];
1984                 fsm_event(ch->fsm, CH_EVENT_START, ch);
1985         }
1986 }
1987
1988 /**
1989  * Shutdown channels by sending CH_EVENT_STOP to each channel.
1990  *
1991  * @param fi    An instance of an interface statemachine.
1992  * @param event The event, just happened.
1993  * @param arg   Generic pointer, casted from struct net_device * upon call.
1994  */
1995 static void
1996 dev_action_stop(fsm_instance * fi, int event, void *arg)
1997 {
1998         struct net_device *dev = (struct net_device *) arg;
1999         struct ctc_priv *privptr = dev->priv;
2000         int direction;
2001
2002         DBF_TEXT(trace, 3, __FUNCTION__);
2003         fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2004         for (direction = READ; direction <= WRITE; direction++) {
2005                 struct channel *ch = privptr->channel[direction];
2006                 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
2007         }
2008 }
2009 static void
2010 dev_action_restart(fsm_instance *fi, int event, void *arg)
2011 {
2012         struct net_device *dev = (struct net_device *)arg;
2013         struct ctc_priv *privptr = dev->priv;
2014
2015         DBF_TEXT(trace, 3, __FUNCTION__);
2016         ctc_pr_debug("%s: Restarting\n", dev->name);
2017         dev_action_stop(fi, event, arg);
2018         fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2019         fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2020                      DEV_EVENT_START, dev);
2021 }
2022
2023 /**
2024  * Called from channel statemachine
2025  * when a channel is up and running.
2026  *
2027  * @param fi    An instance of an interface statemachine.
2028  * @param event The event, just happened.
2029  * @param arg   Generic pointer, casted from struct net_device * upon call.
2030  */
2031 static void
2032 dev_action_chup(fsm_instance * fi, int event, void *arg)
2033 {
2034         struct net_device *dev = (struct net_device *) arg;
2035
2036         DBF_TEXT(trace, 3, __FUNCTION__);
2037         switch (fsm_getstate(fi)) {
2038                 case DEV_STATE_STARTWAIT_RXTX:
2039                         if (event == DEV_EVENT_RXUP)
2040                                 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2041                         else
2042                                 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2043                         break;
2044                 case DEV_STATE_STARTWAIT_RX:
2045                         if (event == DEV_EVENT_RXUP) {
2046                                 fsm_newstate(fi, DEV_STATE_RUNNING);
2047                                 ctc_pr_info("%s: connected with remote side\n",
2048                                             dev->name);
2049                                 ctc_clear_busy(dev);
2050                         }
2051                         break;
2052                 case DEV_STATE_STARTWAIT_TX:
2053                         if (event == DEV_EVENT_TXUP) {
2054                                 fsm_newstate(fi, DEV_STATE_RUNNING);
2055                                 ctc_pr_info("%s: connected with remote side\n",
2056                                             dev->name);
2057                                 ctc_clear_busy(dev);
2058                         }
2059                         break;
2060                 case DEV_STATE_STOPWAIT_TX:
2061                         if (event == DEV_EVENT_RXUP)
2062                                 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2063                         break;
2064                 case DEV_STATE_STOPWAIT_RX:
2065                         if (event == DEV_EVENT_TXUP)
2066                                 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2067                         break;
2068         }
2069 }
2070
2071 /**
2072  * Called from channel statemachine
2073  * when a channel has been shutdown.
2074  *
2075  * @param fi    An instance of an interface statemachine.
2076  * @param event The event, just happened.
2077  * @param arg   Generic pointer, casted from struct net_device * upon call.
2078  */
2079 static void
2080 dev_action_chdown(fsm_instance * fi, int event, void *arg)
2081 {
2082
2083         DBF_TEXT(trace, 3, __FUNCTION__);
2084         switch (fsm_getstate(fi)) {
2085                 case DEV_STATE_RUNNING:
2086                         if (event == DEV_EVENT_TXDOWN)
2087                                 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2088                         else
2089                                 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2090                         break;
2091                 case DEV_STATE_STARTWAIT_RX:
2092                         if (event == DEV_EVENT_TXDOWN)
2093                                 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2094                         break;
2095                 case DEV_STATE_STARTWAIT_TX:
2096                         if (event == DEV_EVENT_RXDOWN)
2097                                 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2098                         break;
2099                 case DEV_STATE_STOPWAIT_RXTX:
2100                         if (event == DEV_EVENT_TXDOWN)
2101                                 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2102                         else
2103                                 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2104                         break;
2105                 case DEV_STATE_STOPWAIT_RX:
2106                         if (event == DEV_EVENT_RXDOWN)
2107                                 fsm_newstate(fi, DEV_STATE_STOPPED);
2108                         break;
2109                 case DEV_STATE_STOPWAIT_TX:
2110                         if (event == DEV_EVENT_TXDOWN)
2111                                 fsm_newstate(fi, DEV_STATE_STOPPED);
2112                         break;
2113         }
2114 }
2115
2116 static const fsm_node dev_fsm[] = {
2117         {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2118
2119         {DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_START,   dev_action_start   },
2120         {DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RXDOWN,  dev_action_chdown  },
2121         {DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_TXDOWN,  dev_action_chdown  },
2122         {DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RESTART, dev_action_restart },
2123
2124         {DEV_STATE_STOPWAIT_RX,    DEV_EVENT_START,   dev_action_start   },
2125         {DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXUP,    dev_action_chup    },
2126         {DEV_STATE_STOPWAIT_RX,    DEV_EVENT_TXUP,    dev_action_chup    },
2127         {DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXDOWN,  dev_action_chdown  },
2128         {DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RESTART, dev_action_restart },
2129
2130         {DEV_STATE_STOPWAIT_TX,    DEV_EVENT_START,   dev_action_start   },
2131         {DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RXUP,    dev_action_chup    },
2132         {DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXUP,    dev_action_chup    },
2133         {DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXDOWN,  dev_action_chdown  },
2134         {DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RESTART, dev_action_restart },
2135
2136         {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP,    dev_action_stop    },
2137         {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP,    dev_action_chup    },
2138         {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP,    dev_action_chup    },
2139         {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN,  dev_action_chdown  },
2140         {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN,  dev_action_chdown  },
2141         {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2142
2143         {DEV_STATE_STARTWAIT_TX,   DEV_EVENT_STOP,    dev_action_stop    },
2144         {DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXUP,    dev_action_chup    },
2145         {DEV_STATE_STARTWAIT_TX,   DEV_EVENT_TXUP,    dev_action_chup    },
2146         {DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXDOWN,  dev_action_chdown  },
2147         {DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RESTART, dev_action_restart },
2148
2149         {DEV_STATE_STARTWAIT_RX,   DEV_EVENT_STOP,    dev_action_stop    },
2150         {DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RXUP,    dev_action_chup    },
2151         {DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXUP,    dev_action_chup    },
2152         {DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXDOWN,  dev_action_chdown  },
2153         {DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RESTART, dev_action_restart },
2154
2155         {DEV_STATE_RUNNING,        DEV_EVENT_STOP,    dev_action_stop    },
2156         {DEV_STATE_RUNNING,        DEV_EVENT_RXDOWN,  dev_action_chdown  },
2157         {DEV_STATE_RUNNING,        DEV_EVENT_TXDOWN,  dev_action_chdown  },
2158         {DEV_STATE_RUNNING,        DEV_EVENT_TXUP,    fsm_action_nop     },
2159         {DEV_STATE_RUNNING,        DEV_EVENT_RXUP,    fsm_action_nop     },
2160         {DEV_STATE_RUNNING,        DEV_EVENT_RESTART, dev_action_restart },
2161 };
2162
2163 static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
2164
2165 /**
2166  * Transmit a packet.
2167  * This is a helper function for ctc_tx().
2168  *
2169  * @param ch Channel to be used for sending.
2170  * @param skb Pointer to struct sk_buff of packet to send.
2171  *            The linklevel header has already been set up
2172  *            by ctc_tx().
2173  *
2174  * @return 0 on success, -ERRNO on failure. (Never fails.)
2175  */
2176 static int
2177 transmit_skb(struct channel *ch, struct sk_buff *skb)
2178 {
2179         unsigned long saveflags;
2180         struct ll_header header;
2181         int rc = 0;
2182
2183         DBF_TEXT(trace, 5, __FUNCTION__);
2184         /* we need to acquire the lock for testing the state
2185          * otherwise we can have an IRQ changing the state to
2186          * TXIDLE after the test but before acquiring the lock.
2187          */
2188         spin_lock_irqsave(&ch->collect_lock, saveflags);
2189         if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2190                 int l = skb->len + LL_HEADER_LENGTH;
2191
2192                 if (ch->collect_len + l > ch->max_bufsize - 2) {
2193                         spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2194                         return -EBUSY;
2195                 } else {
2196                         atomic_inc(&skb->users);
2197                         header.length = l;
2198                         header.type = skb->protocol;
2199                         header.unused = 0;
2200                         memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2201                                LL_HEADER_LENGTH);
2202                         skb_queue_tail(&ch->collect_queue, skb);
2203                         ch->collect_len += l;
2204                 }
2205                 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2206         } else {
2207                 __u16 block_len;
2208                 int ccw_idx;
2209                 struct sk_buff *nskb;
2210                 unsigned long hi;
2211                 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2212                 /**
2213                  * Protect skb against beeing free'd by upper
2214                  * layers.
2215                  */
2216                 atomic_inc(&skb->users);
2217                 ch->prof.txlen += skb->len;
2218                 header.length = skb->len + LL_HEADER_LENGTH;
2219                 header.type = skb->protocol;
2220                 header.unused = 0;
2221                 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2222                        LL_HEADER_LENGTH);
2223                 block_len = skb->len + 2;
2224                 *((__u16 *) skb_push(skb, 2)) = block_len;
2225
2226                 /**
2227                  * IDAL support in CTC is broken, so we have to
2228                  * care about skb's above 2G ourselves.
2229                  */
2230                 hi = ((unsigned long)skb_tail_pointer(skb) +
2231                       LL_HEADER_LENGTH) >> 31;
2232                 if (hi) {
2233                         nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2234                         if (!nskb) {
2235                                 atomic_dec(&skb->users);
2236                                 skb_pull(skb, LL_HEADER_LENGTH + 2);
2237                                 ctc_clear_busy(ch->netdev);
2238                                 return -ENOMEM;
2239                         } else {
2240                                 memcpy(skb_put(nskb, skb->len),
2241                                        skb->data, skb->len);
2242                                 atomic_inc(&nskb->users);
2243                                 atomic_dec(&skb->users);
2244                                 dev_kfree_skb_irq(skb);
2245                                 skb = nskb;
2246                         }
2247                 }
2248
2249                 ch->ccw[4].count = block_len;
2250                 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2251                         /**
2252                          * idal allocation failed, try via copying to
2253                          * trans_skb. trans_skb usually has a pre-allocated
2254                          * idal.
2255                          */
2256                         if (ctc_checkalloc_buffer(ch, 1)) {
2257                                 /**
2258                                  * Remove our header. It gets added
2259                                  * again on retransmit.
2260                                  */
2261                                 atomic_dec(&skb->users);
2262                                 skb_pull(skb, LL_HEADER_LENGTH + 2);
2263                                 ctc_clear_busy(ch->netdev);
2264                                 return -EBUSY;
2265                         }
2266
2267                         skb_reset_tail_pointer(ch->trans_skb);
2268                         ch->trans_skb->len = 0;
2269                         ch->ccw[1].count = skb->len;
2270                         skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
2271                                                                skb->len),
2272                                                   skb->len);
2273                         atomic_dec(&skb->users);
2274                         dev_kfree_skb_irq(skb);
2275                         ccw_idx = 0;
2276                 } else {
2277                         skb_queue_tail(&ch->io_queue, skb);
2278                         ccw_idx = 3;
2279                 }
2280                 ch->retry = 0;
2281                 fsm_newstate(ch->fsm, CH_STATE_TX);
2282                 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2283                 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2284                 ch->prof.send_stamp = current_kernel_time();
2285                 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2286                                       (unsigned long) ch, 0xff, 0);
2287                 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2288                 if (ccw_idx == 3)
2289                         ch->prof.doios_single++;
2290                 if (rc != 0) {
2291                         fsm_deltimer(&ch->timer);
2292                         ccw_check_return_code(ch, rc, "single skb TX");
2293                         if (ccw_idx == 3)
2294                                 skb_dequeue_tail(&ch->io_queue);
2295                         /**
2296                          * Remove our header. It gets added
2297                          * again on retransmit.
2298                          */
2299                         skb_pull(skb, LL_HEADER_LENGTH + 2);
2300                 } else {
2301                         if (ccw_idx == 0) {
2302                                 struct net_device *dev = ch->netdev;
2303                                 struct ctc_priv *privptr = dev->priv;
2304                                 privptr->stats.tx_packets++;
2305                                 privptr->stats.tx_bytes +=
2306                                     skb->len - LL_HEADER_LENGTH;
2307                         }
2308                 }
2309         }
2310
2311         ctc_clear_busy(ch->netdev);
2312         return rc;
2313 }
2314
2315 /**
2316  * Interface API for upper network layers
2317  *****************************************************************************/
2318
2319 /**
2320  * Open an interface.
2321  * Called from generic network layer when ifconfig up is run.
2322  *
2323  * @param dev Pointer to interface struct.
2324  *
2325  * @return 0 on success, -ERRNO on failure. (Never fails.)
2326  */
2327 static int
2328 ctc_open(struct net_device * dev)
2329 {
2330         DBF_TEXT(trace, 5, __FUNCTION__);
2331         fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2332         return 0;
2333 }
2334
2335 /**
2336  * Close an interface.
2337  * Called from generic network layer when ifconfig down is run.
2338  *
2339  * @param dev Pointer to interface struct.
2340  *
2341  * @return 0 on success, -ERRNO on failure. (Never fails.)
2342  */
2343 static int
2344 ctc_close(struct net_device * dev)
2345 {
2346         DBF_TEXT(trace, 5, __FUNCTION__);
2347         fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
2348         return 0;
2349 }
2350
2351 /**
2352  * Start transmission of a packet.
2353  * Called from generic network device layer.
2354  *
2355  * @param skb Pointer to buffer containing the packet.
2356  * @param dev Pointer to interface struct.
2357  *
2358  * @return 0 if packet consumed, !0 if packet rejected.
2359  *         Note: If we return !0, then the packet is free'd by
2360  *               the generic network layer.
2361  */
2362 static int
2363 ctc_tx(struct sk_buff *skb, struct net_device * dev)
2364 {
2365         int rc = 0;
2366         struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2367
2368         DBF_TEXT(trace, 5, __FUNCTION__);
2369         /**
2370          * Some sanity checks ...
2371          */
2372         if (skb == NULL) {
2373                 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2374                 privptr->stats.tx_dropped++;
2375                 return 0;
2376         }
2377         if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2378                 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2379                             dev->name, LL_HEADER_LENGTH + 2);
2380                 dev_kfree_skb(skb);
2381                 privptr->stats.tx_dropped++;
2382                 return 0;
2383         }
2384
2385         /**
2386          * If channels are not running, try to restart them
2387          * and throw away packet.
2388          */
2389         if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2390                 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
2391                 dev_kfree_skb(skb);
2392                 privptr->stats.tx_dropped++;
2393                 privptr->stats.tx_errors++;
2394                 privptr->stats.tx_carrier_errors++;
2395                 return 0;
2396         }
2397
2398         if (ctc_test_and_set_busy(dev))
2399                 return -EBUSY;
2400
2401         dev->trans_start = jiffies;
2402         if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2403                 rc = 1;
2404         return rc;
2405 }
2406
2407 /**
2408  * Sets MTU of an interface.
2409  *
2410  * @param dev     Pointer to interface struct.
2411  * @param new_mtu The new MTU to use for this interface.
2412  *
2413  * @return 0 on success, -EINVAL if MTU is out of valid range.
2414  *         (valid range is 576 .. 65527). If VM is on the
2415  *         remote side, maximum MTU is 32760, however this is
2416  *         <em>not</em> checked here.
2417  */
2418 static int
2419 ctc_change_mtu(struct net_device * dev, int new_mtu)
2420 {
2421         struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2422
2423         DBF_TEXT(trace, 3, __FUNCTION__);
2424         if ((new_mtu < 576) || (new_mtu > 65527) ||
2425             (new_mtu > (privptr->channel[READ]->max_bufsize -
2426                         LL_HEADER_LENGTH - 2)))
2427                 return -EINVAL;
2428         dev->mtu = new_mtu;
2429         dev->hard_header_len = LL_HEADER_LENGTH + 2;
2430         return 0;
2431 }
2432
2433 /**
2434  * Returns interface statistics of a device.
2435  *
2436  * @param dev Pointer to interface struct.
2437  *
2438  * @return Pointer to stats struct of this interface.
2439  */
2440 static struct net_device_stats *
2441 ctc_stats(struct net_device * dev)
2442 {
2443         return &((struct ctc_priv *) dev->priv)->stats;
2444 }
2445
2446 /*
2447  * sysfs attributes
2448  */
2449
2450 static ssize_t
2451 buffer_show(struct device *dev, struct device_attribute *attr, char *buf)
2452 {
2453         struct ctc_priv *priv;
2454
2455         priv = dev->driver_data;
2456         if (!priv)
2457                 return -ENODEV;
2458         return sprintf(buf, "%d\n",
2459                         priv->buffer_size);
2460 }
2461
2462 static ssize_t
2463 buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2464 {
2465         struct ctc_priv *priv;
2466         struct net_device *ndev;
2467         int bs1;
2468         char buffer[16];
2469
2470         DBF_TEXT(trace, 3, __FUNCTION__);
2471         DBF_TEXT(trace, 3, buf);
2472         priv = dev->driver_data;
2473         if (!priv) {
2474                 DBF_TEXT(trace, 3, "bfnopriv");
2475                 return -ENODEV;
2476         }
2477
2478         sscanf(buf, "%u", &bs1);
2479         if (bs1 > CTC_BUFSIZE_LIMIT)
2480                 goto einval;
2481         if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2482                 goto einval;
2483         priv->buffer_size = bs1;        // just to overwrite the default
2484
2485         ndev = priv->channel[READ]->netdev;
2486         if (!ndev) {
2487                 DBF_TEXT(trace, 3, "bfnondev");
2488                 return -ENODEV;
2489         }
2490
2491         if ((ndev->flags & IFF_RUNNING) &&
2492             (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2493                 goto einval;
2494
2495         priv->channel[READ]->max_bufsize = bs1;
2496         priv->channel[WRITE]->max_bufsize = bs1;
2497         if (!(ndev->flags & IFF_RUNNING))
2498                 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2499         priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2500         priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2501
2502         sprintf(buffer, "%d",priv->buffer_size);
2503         DBF_TEXT(trace, 3, buffer);
2504         return count;
2505
2506 einval:
2507         DBF_TEXT(trace, 3, "buff_err");
2508         return -EINVAL;
2509 }
2510
2511 static ssize_t
2512 loglevel_show(struct device *dev, struct device_attribute *attr, char *buf)
2513 {
2514         return sprintf(buf, "%d\n", loglevel);
2515 }
2516
2517 static ssize_t
2518 loglevel_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2519 {
2520         int ll1;
2521
2522         DBF_TEXT(trace, 5, __FUNCTION__);
2523         sscanf(buf, "%i", &ll1);
2524
2525         if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
2526                 return -EINVAL;
2527         loglevel = ll1;
2528         return count;
2529 }
2530
2531 static void
2532 ctc_print_statistics(struct ctc_priv *priv)
2533 {
2534         char *sbuf;
2535         char *p;
2536
2537         DBF_TEXT(trace, 4, __FUNCTION__);
2538         if (!priv)
2539                 return;
2540         sbuf = kmalloc(2048, GFP_KERNEL);
2541         if (sbuf == NULL)
2542                 return;
2543         p = sbuf;
2544
2545         p += sprintf(p, "  Device FSM state: %s\n",
2546                      fsm_getstate_str(priv->fsm));
2547         p += sprintf(p, "  RX channel FSM state: %s\n",
2548                      fsm_getstate_str(priv->channel[READ]->fsm));
2549         p += sprintf(p, "  TX channel FSM state: %s\n",
2550                      fsm_getstate_str(priv->channel[WRITE]->fsm));
2551         p += sprintf(p, "  Max. TX buffer used: %ld\n",
2552                      priv->channel[WRITE]->prof.maxmulti);
2553         p += sprintf(p, "  Max. chained SKBs: %ld\n",
2554                      priv->channel[WRITE]->prof.maxcqueue);
2555         p += sprintf(p, "  TX single write ops: %ld\n",
2556                      priv->channel[WRITE]->prof.doios_single);
2557         p += sprintf(p, "  TX multi write ops: %ld\n",
2558                      priv->channel[WRITE]->prof.doios_multi);
2559         p += sprintf(p, "  Netto bytes written: %ld\n",
2560                      priv->channel[WRITE]->prof.txlen);
2561         p += sprintf(p, "  Max. TX IO-time: %ld\n",
2562                      priv->channel[WRITE]->prof.tx_time);
2563
2564         ctc_pr_debug("Statistics for %s:\n%s",
2565                      priv->channel[WRITE]->netdev->name, sbuf);
2566         kfree(sbuf);
2567         return;
2568 }
2569
2570 static ssize_t
2571 stats_show(struct device *dev, struct device_attribute *attr, char *buf)
2572 {
2573         struct ctc_priv *priv = dev->driver_data;
2574         if (!priv)
2575                 return -ENODEV;
2576         ctc_print_statistics(priv);
2577         return sprintf(buf, "0\n");
2578 }
2579
2580 static ssize_t
2581 stats_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2582 {
2583         struct ctc_priv *priv = dev->driver_data;
2584         if (!priv)
2585                 return -ENODEV;
2586         /* Reset statistics */
2587         memset(&priv->channel[WRITE]->prof, 0,
2588                         sizeof(priv->channel[WRITE]->prof));
2589         return count;
2590 }
2591
2592 static void
2593 ctc_netdev_unregister(struct net_device * dev)
2594 {
2595         struct ctc_priv *privptr;
2596
2597         if (!dev)
2598                 return;
2599         privptr = (struct ctc_priv *) dev->priv;
2600         unregister_netdev(dev);
2601 }
2602
2603 static int
2604 ctc_netdev_register(struct net_device * dev)
2605 {
2606         return register_netdev(dev);
2607 }
2608
2609 static void
2610 ctc_free_netdevice(struct net_device * dev, int free_dev)
2611 {
2612         struct ctc_priv *privptr;
2613         if (!dev)
2614                 return;
2615         privptr = dev->priv;
2616         if (privptr) {
2617                 if (privptr->fsm)
2618                         kfree_fsm(privptr->fsm);
2619                 kfree(privptr);
2620         }
2621 #ifdef MODULE
2622         if (free_dev)
2623                 free_netdev(dev);
2624 #endif
2625 }
2626
2627 static ssize_t
2628 ctc_proto_show(struct device *dev, struct device_attribute *attr, char *buf)
2629 {
2630         struct ctc_priv *priv;
2631
2632         priv = dev->driver_data;
2633         if (!priv)
2634                 return -ENODEV;
2635
2636         return sprintf(buf, "%d\n", priv->protocol);
2637 }
2638
2639 static ssize_t
2640 ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2641 {
2642         struct ctc_priv *priv;
2643         int value;
2644
2645         DBF_TEXT(trace, 3, __FUNCTION__);
2646         pr_debug("%s() called\n", __FUNCTION__);
2647
2648         priv = dev->driver_data;
2649         if (!priv)
2650                 return -ENODEV;
2651         sscanf(buf, "%u", &value);
2652         if (!((value == CTC_PROTO_S390)  ||
2653               (value == CTC_PROTO_LINUX) ||
2654               (value == CTC_PROTO_OS390)))
2655                 return -EINVAL;
2656         priv->protocol = value;
2657
2658         return count;
2659 }
2660
2661 static ssize_t
2662 ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2663 {
2664         struct ccwgroup_device *cgdev;
2665
2666         cgdev = to_ccwgroupdev(dev);
2667         if (!cgdev)
2668                 return -ENODEV;
2669
2670         return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2671 }
2672
2673 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2674 static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
2675 static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
2676
2677 static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2678 static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
2679
2680 static struct attribute *ctc_attr[] = {
2681         &dev_attr_protocol.attr,
2682         &dev_attr_type.attr,
2683         &dev_attr_buffer.attr,
2684         NULL,
2685 };
2686
2687 static struct attribute_group ctc_attr_group = {
2688         .attrs = ctc_attr,
2689 };
2690
2691 static int
2692 ctc_add_attributes(struct device *dev)
2693 {
2694         int rc;
2695
2696         rc = device_create_file(dev, &dev_attr_loglevel);
2697         if (rc)
2698                 goto out;
2699         rc = device_create_file(dev, &dev_attr_stats);
2700         if (!rc)
2701                 goto out;
2702         device_remove_file(dev, &dev_attr_loglevel);
2703 out:
2704         return rc;
2705 }
2706
2707 static void
2708 ctc_remove_attributes(struct device *dev)
2709 {
2710         device_remove_file(dev, &dev_attr_stats);
2711         device_remove_file(dev, &dev_attr_loglevel);
2712 }
2713
2714 static int
2715 ctc_add_files(struct device *dev)
2716 {
2717         pr_debug("%s() called\n", __FUNCTION__);
2718
2719         return sysfs_create_group(&dev->kobj, &ctc_attr_group);
2720 }
2721
2722 static void
2723 ctc_remove_files(struct device *dev)
2724 {
2725         pr_debug("%s() called\n", __FUNCTION__);
2726
2727         sysfs_remove_group(&dev->kobj, &ctc_attr_group);
2728 }
2729
2730 /**
2731  * Add ctc specific attributes.
2732  * Add ctc private data.
2733  *
2734  * @param cgdev pointer to ccwgroup_device just added
2735  *
2736  * @returns 0 on success, !0 on failure.
2737  */
2738 static int
2739 ctc_probe_device(struct ccwgroup_device *cgdev)
2740 {
2741         struct ctc_priv *priv;
2742         int rc;
2743         char buffer[16];
2744
2745         pr_debug("%s() called\n", __FUNCTION__);
2746         DBF_TEXT(setup, 3, __FUNCTION__);
2747
2748         if (!get_device(&cgdev->dev))
2749                 return -ENODEV;
2750
2751         priv = kzalloc(sizeof(struct ctc_priv), GFP_KERNEL);
2752         if (!priv) {
2753                 ctc_pr_err("%s: Out of memory\n", __func__);
2754                 put_device(&cgdev->dev);
2755                 return -ENOMEM;
2756         }
2757
2758         rc = ctc_add_files(&cgdev->dev);
2759         if (rc) {
2760                 kfree(priv);
2761                 put_device(&cgdev->dev);
2762                 return rc;
2763         }
2764         priv->buffer_size = CTC_BUFSIZE_DEFAULT;
2765         cgdev->cdev[0]->handler = ctc_irq_handler;
2766         cgdev->cdev[1]->handler = ctc_irq_handler;
2767         cgdev->dev.driver_data = priv;
2768
2769         sprintf(buffer, "%p", priv);
2770         DBF_TEXT(data, 3, buffer);
2771
2772         sprintf(buffer, "%u", (unsigned int)sizeof(struct ctc_priv));
2773         DBF_TEXT(data, 3, buffer);
2774
2775         sprintf(buffer, "%p", &channels);
2776         DBF_TEXT(data, 3, buffer);
2777
2778         sprintf(buffer, "%u", (unsigned int)sizeof(struct channel));
2779         DBF_TEXT(data, 3, buffer);
2780
2781         return 0;
2782 }
2783
2784 /**
2785  * Device setup function called by alloc_netdev().
2786  *
2787  * @param dev  Device to be setup.
2788  */
2789 void ctc_init_netdevice(struct net_device * dev)
2790 {
2791         DBF_TEXT(setup, 3, __FUNCTION__);
2792
2793         if (dev->mtu == 0)
2794                 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
2795         dev->hard_start_xmit = ctc_tx;
2796         dev->open = ctc_open;
2797         dev->stop = ctc_close;
2798         dev->get_stats = ctc_stats;
2799         dev->change_mtu = ctc_change_mtu;
2800         dev->hard_header_len = LL_HEADER_LENGTH + 2;
2801         dev->addr_len = 0;
2802         dev->type = ARPHRD_SLIP;
2803         dev->tx_queue_len = 100;
2804         dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2805 }
2806
2807
2808 /**
2809  *
2810  * Setup an interface.
2811  *
2812  * @param cgdev  Device to be setup.
2813  *
2814  * @returns 0 on success, !0 on failure.
2815  */
2816 static int
2817 ctc_new_device(struct ccwgroup_device *cgdev)
2818 {
2819         char read_id[CTC_ID_SIZE];
2820         char write_id[CTC_ID_SIZE];
2821         int direction;
2822         enum channel_types type;
2823         struct ctc_priv *privptr;
2824         struct net_device *dev;
2825         int ret;
2826         char buffer[16];
2827
2828         pr_debug("%s() called\n", __FUNCTION__);
2829         DBF_TEXT(setup, 3, __FUNCTION__);
2830
2831         privptr = cgdev->dev.driver_data;
2832         if (!privptr)
2833                 return -ENODEV;
2834
2835         sprintf(buffer, "%d", privptr->buffer_size);
2836         DBF_TEXT(setup, 3, buffer);
2837
2838         type = get_channel_type(&cgdev->cdev[0]->id);
2839
2840         snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
2841         snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
2842
2843         if (add_channel(cgdev->cdev[0], type))
2844                 return -ENOMEM;
2845         if (add_channel(cgdev->cdev[1], type))
2846                 return -ENOMEM;
2847
2848         ret = ccw_device_set_online(cgdev->cdev[0]);
2849         if (ret != 0) {
2850                         printk(KERN_WARNING
2851                         "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
2852         }
2853
2854         ret = ccw_device_set_online(cgdev->cdev[1]);
2855         if (ret != 0) {
2856                         printk(KERN_WARNING
2857                         "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
2858         }
2859
2860         dev = alloc_netdev(0, "ctc%d", ctc_init_netdevice);
2861         if (!dev) {
2862                 ctc_pr_warn("ctc_init_netdevice failed\n");
2863                 goto out;
2864         }
2865         dev->priv = privptr;
2866
2867         privptr->fsm = init_fsm("ctcdev", dev_state_names,
2868                         dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS,
2869                         dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2870         if (privptr->fsm == NULL) {
2871                 free_netdev(dev);
2872                 goto out;
2873         }
2874         fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2875         fsm_settimer(privptr->fsm, &privptr->restart_timer);
2876
2877         for (direction = READ; direction <= WRITE; direction++) {
2878                 privptr->channel[direction] =
2879                     channel_get(type, direction == READ ? read_id : write_id,
2880                                 direction);
2881                 if (privptr->channel[direction] == NULL) {
2882                         if (direction == WRITE)
2883                                 channel_free(privptr->channel[READ]);
2884
2885                         ctc_free_netdevice(dev, 1);
2886                         goto out;
2887                 }
2888                 privptr->channel[direction]->netdev = dev;
2889                 privptr->channel[direction]->protocol = privptr->protocol;
2890                 privptr->channel[direction]->max_bufsize = privptr->buffer_size;
2891         }
2892         /* sysfs magic */
2893         SET_NETDEV_DEV(dev, &cgdev->dev);
2894
2895         if (ctc_netdev_register(dev) != 0) {
2896                 ctc_free_netdevice(dev, 1);
2897                 goto out;
2898         }
2899
2900         if (ctc_add_attributes(&cgdev->dev)) {
2901                 ctc_netdev_unregister(dev);
2902                 dev->priv = NULL;
2903                 ctc_free_netdevice(dev, 1);
2904                 goto out;
2905         }
2906
2907         strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
2908
2909         print_banner();
2910
2911         ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
2912                     dev->name, privptr->channel[READ]->id,
2913                     privptr->channel[WRITE]->id, privptr->protocol);
2914
2915         return 0;
2916 out:
2917         ccw_device_set_offline(cgdev->cdev[1]);
2918         ccw_device_set_offline(cgdev->cdev[0]);
2919
2920         return -ENODEV;
2921 }
2922
2923 /**
2924  * Shutdown an interface.
2925  *
2926  * @param cgdev  Device to be shut down.
2927  *
2928  * @returns 0 on success, !0 on failure.
2929  */
2930 static int
2931 ctc_shutdown_device(struct ccwgroup_device *cgdev)
2932 {
2933         struct ctc_priv *priv;
2934         struct net_device *ndev;
2935
2936         DBF_TEXT(setup, 3, __FUNCTION__);
2937         pr_debug("%s() called\n", __FUNCTION__);
2938
2939
2940         priv = cgdev->dev.driver_data;
2941         ndev = NULL;
2942         if (!priv)
2943                 return -ENODEV;
2944
2945         if (priv->channel[READ]) {
2946                 ndev = priv->channel[READ]->netdev;
2947
2948                 /* Close the device */
2949                 ctc_close(ndev);
2950                 ndev->flags &=~IFF_RUNNING;
2951
2952                 ctc_remove_attributes(&cgdev->dev);
2953
2954                 channel_free(priv->channel[READ]);
2955         }
2956         if (priv->channel[WRITE])
2957                 channel_free(priv->channel[WRITE]);
2958
2959         if (ndev) {
2960                 ctc_netdev_unregister(ndev);
2961                 ndev->priv = NULL;
2962                 ctc_free_netdevice(ndev, 1);
2963         }
2964
2965         if (priv->fsm)
2966                 kfree_fsm(priv->fsm);
2967
2968         ccw_device_set_offline(cgdev->cdev[1]);
2969         ccw_device_set_offline(cgdev->cdev[0]);
2970
2971         if (priv->channel[READ])
2972                 channel_remove(priv->channel[READ]);
2973         if (priv->channel[WRITE])
2974                 channel_remove(priv->channel[WRITE]);
2975         priv->channel[READ] = priv->channel[WRITE] = NULL;
2976
2977         return 0;
2978
2979 }
2980
2981 static void
2982 ctc_remove_device(struct ccwgroup_device *cgdev)
2983 {
2984         struct ctc_priv *priv;
2985
2986         pr_debug("%s() called\n", __FUNCTION__);
2987         DBF_TEXT(setup, 3, __FUNCTION__);
2988
2989         priv = cgdev->dev.driver_data;
2990         if (!priv)
2991                 return;
2992         if (cgdev->state == CCWGROUP_ONLINE)
2993                 ctc_shutdown_device(cgdev);
2994         ctc_remove_files(&cgdev->dev);
2995         cgdev->dev.driver_data = NULL;
2996         kfree(priv);
2997         put_device(&cgdev->dev);
2998 }
2999
3000 static struct ccwgroup_driver ctc_group_driver = {
3001         .owner       = THIS_MODULE,
3002         .name        = "ctc",
3003         .max_slaves  = 2,
3004         .driver_id   = 0xC3E3C3,
3005         .probe       = ctc_probe_device,
3006         .remove      = ctc_remove_device,
3007         .set_online  = ctc_new_device,
3008         .set_offline = ctc_shutdown_device,
3009 };
3010
3011 /**
3012  * Module related routines
3013  *****************************************************************************/
3014
3015 /**
3016  * Prepare to be unloaded. Free IRQ's and release all resources.
3017  * This is called just before this module is unloaded. It is
3018  * <em>not</em> called, if the usage count is !0, so we don't need to check
3019  * for that.
3020  */
3021 static void __exit
3022 ctc_exit(void)
3023 {
3024         DBF_TEXT(setup, 3, __FUNCTION__);
3025         unregister_cu3088_discipline(&ctc_group_driver);
3026         ctc_unregister_dbf_views();
3027         ctc_pr_info("CTC driver unloaded\n");
3028 }
3029
3030 /**
3031  * Initialize module.
3032  * This is called just after the module is loaded.
3033  *
3034  * @return 0 on success, !0 on error.
3035  */
3036 static int __init
3037 ctc_init(void)
3038 {
3039         int ret = 0;
3040
3041         loglevel = CTC_LOGLEVEL_DEFAULT;
3042
3043         DBF_TEXT(setup, 3, __FUNCTION__);
3044
3045         print_banner();
3046
3047         ret = ctc_register_dbf_views();
3048         if (ret){
3049                 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3050                 return ret;
3051         }
3052         ret = register_cu3088_discipline(&ctc_group_driver);
3053         if (ret) {
3054                 ctc_unregister_dbf_views();
3055         }
3056         return ret;
3057 }
3058
3059 module_init(ctc_init);
3060 module_exit(ctc_exit);
3061
3062 /* --- This is the END my friend --- */