1 /*
2  * IUCV network driver
3  *
4  * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5  * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6  *
7  * Sysfs integration and all bugs therein by Cornelia Huck
8  * (cornelia.huck@de.ibm.com)
9  *
10  * Documentation used:
11  *  the source of the original IUCV driver by:
12  *    Stefan Hegewald <hegewald@de.ibm.com>
13  *    Hartmut Penner <hpenner@de.ibm.com>
14  *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
15  *    Martin Schwidefsky (schwidefsky@de.ibm.com)
16  *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
17  *
18  * This program is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License as published by
20  * the Free Software Foundation; either version 2, or (at your option)
21  * any later version.
22  *
23  * This program is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
26  * GNU General Public License for more details.
27  *
28  * You should have received a copy of the GNU General Public License
29  * along with this program; if not, write to the Free Software
30  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31  *
32  */
33
34 #define KMSG_COMPONENT "netiucv"
35 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
36
37 #undef DEBUG
38
39 #include <linux/module.h>
40 #include <linux/init.h>
41 #include <linux/kernel.h>
42 #include <linux/slab.h>
43 #include <linux/errno.h>
44 #include <linux/types.h>
45 #include <linux/interrupt.h>
46 #include <linux/timer.h>
47 #include <linux/bitops.h>
48
49 #include <linux/signal.h>
50 #include <linux/string.h>
51 #include <linux/device.h>
52
53 #include <linux/ip.h>
54 #include <linux/if_arp.h>
55 #include <linux/tcp.h>
56 #include <linux/skbuff.h>
57 #include <linux/ctype.h>
58 #include <net/dst.h>
59
60 #include <asm/io.h>
61 #include <asm/uaccess.h>
62
63 #include <net/iucv/iucv.h>
64 #include "fsm.h"
65
66 MODULE_AUTHOR
67     ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
68 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
69
70 /**
71  * Debug Facility stuff
72  */
73 #define IUCV_DBF_SETUP_NAME "iucv_setup"
74 #define IUCV_DBF_SETUP_LEN 32
75 #define IUCV_DBF_SETUP_PAGES 2
76 #define IUCV_DBF_SETUP_NR_AREAS 1
77 #define IUCV_DBF_SETUP_LEVEL 3
78
79 #define IUCV_DBF_DATA_NAME "iucv_data"
80 #define IUCV_DBF_DATA_LEN 128
81 #define IUCV_DBF_DATA_PAGES 2
82 #define IUCV_DBF_DATA_NR_AREAS 1
83 #define IUCV_DBF_DATA_LEVEL 2
84
85 #define IUCV_DBF_TRACE_NAME "iucv_trace"
86 #define IUCV_DBF_TRACE_LEN 16
87 #define IUCV_DBF_TRACE_PAGES 4
88 #define IUCV_DBF_TRACE_NR_AREAS 1
89 #define IUCV_DBF_TRACE_LEVEL 3
90
91 #define IUCV_DBF_TEXT(name,level,text) \
92         do { \
93                 debug_text_event(iucv_dbf_##name,level,text); \
94         } while (0)
95
96 #define IUCV_DBF_HEX(name,level,addr,len) \
97         do { \
98                 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
99         } while (0)
100
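/*
 * Per-CPU scratch buffer used by IUCV_DBF_TEXT_ below to format debug
 * messages before handing them to the s390 debug feature.
 */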
101 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
102
103 /* Sort out low debug levels early to avoid wasted sprintfs */
104 static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
105 {
106         return (level <= dbf_grp->level);
107 }
108
109 #define IUCV_DBF_TEXT_(name, level, text...) \
110         do { \
111                 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
112                         char* iucv_dbf_txt_buf = \
113                                         get_cpu_var(iucv_dbf_txt_buf); \
114                         sprintf(iucv_dbf_txt_buf, text); \
115                         debug_text_event(iucv_dbf_##name, level, \
116                                                 iucv_dbf_txt_buf); \
117                         put_cpu_var(iucv_dbf_txt_buf); \
118                 } \
119         } while (0)
120
121 #define IUCV_DBF_SPRINTF(name,level,text...) \
122         do { \
123                 debug_sprintf_event(iucv_dbf_trace, level, text); \
125         } while (0)
126
127 /**
128  * some more debug stuff
129  */
130 #define IUCV_HEXDUMP16(importance,header,ptr) \
131 PRINT_##importance(header "%02x %02x %02x %02x  %02x %02x %02x %02x  " \
132                    "%02x %02x %02x %02x  %02x %02x %02x %02x\n", \
133                    *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
134                    *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
135                    *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
136                    *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
137                    *(((char*)ptr)+12),*(((char*)ptr)+13), \
138                    *(((char*)ptr)+14),*(((char*)ptr)+15)); \
139 PRINT_##importance(header "%02x %02x %02x %02x  %02x %02x %02x %02x  " \
140                    "%02x %02x %02x %02x  %02x %02x %02x %02x\n", \
141                    *(((char*)ptr)+16),*(((char*)ptr)+17), \
142                    *(((char*)ptr)+18),*(((char*)ptr)+19), \
143                    *(((char*)ptr)+20),*(((char*)ptr)+21), \
144                    *(((char*)ptr)+22),*(((char*)ptr)+23), \
145                    *(((char*)ptr)+24),*(((char*)ptr)+25), \
146                    *(((char*)ptr)+26),*(((char*)ptr)+27), \
147                    *(((char*)ptr)+28),*(((char*)ptr)+29), \
148                    *(((char*)ptr)+30),*(((char*)ptr)+31));
149
150 #define PRINTK_HEADER " iucv: "       /* for debugging */
151
152 static struct device_driver netiucv_driver = {
153         .owner = THIS_MODULE,
154         .name = "netiucv",
155         .bus  = &iucv_bus,
156 };
157
158 static int netiucv_callback_connreq(struct iucv_path *,
159                                     u8 ipvmid[8], u8 ipuser[16]);
160 static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
161 static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
162 static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
163 static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
164 static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
165 static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
166
167 static struct iucv_handler netiucv_handler = {
168         .path_pending     = netiucv_callback_connreq,
169         .path_complete    = netiucv_callback_connack,
170         .path_severed     = netiucv_callback_connrej,
171         .path_quiesced    = netiucv_callback_connsusp,
172         .path_resumed     = netiucv_callback_connres,
173         .message_pending  = netiucv_callback_rx,
174         .message_complete = netiucv_callback_txdone
175 };
176
177 /**
178  * Per connection profiling data
179  */
180 struct connection_profile {
181         unsigned long maxmulti;
182         unsigned long maxcqueue;
183         unsigned long doios_single;
184         unsigned long doios_multi;
185         unsigned long txlen;
186         unsigned long tx_time;
187         struct timespec send_stamp;
188         unsigned long tx_pending;
189         unsigned long tx_max_pending;
190 };
191
192 /**
193  * Representation of one iucv connection
194  */
195 struct iucv_connection {
196         struct list_head          list;
197         struct iucv_path          *path;
198         struct sk_buff            *rx_buff;
199         struct sk_buff            *tx_buff;
200         struct sk_buff_head       collect_queue;
201         struct sk_buff_head       commit_queue;
202         spinlock_t                collect_lock;
203         int                       collect_len;
204         int                       max_buffsize;
205         fsm_timer                 timer;
206         fsm_instance              *fsm;
207         struct net_device         *netdev;
208         struct connection_profile prof;
209         char                      userid[9];
210 };
211
212 /**
213  * Linked list of all connection structs.
214  */
215 static LIST_HEAD(iucv_connection_list);
216 static DEFINE_RWLOCK(iucv_connection_rwlock);
217
218 /**
219  * Representation of event-data for the
220  * connection state machine.
221  */
222 struct iucv_event {
223         struct iucv_connection *conn;
224         void                   *data;
225 };
226
227 /**
228  * Private part of the network device structure
229  */
230 struct netiucv_priv {
231         struct net_device_stats stats;
232         unsigned long           tbusy;
233         fsm_instance            *fsm;
234         struct iucv_connection  *conn;
235         struct device           *dev;
236 };
237
238 /**
239  * Link level header for a packet.
240  */
241 struct ll_header {
242         u16 next;
243 };
244
245 #define NETIUCV_HDRLEN           (sizeof(struct ll_header))
246 #define NETIUCV_BUFSIZE_MAX      32768
247 #define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
248 #define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
249 #define NETIUCV_MTU_DEFAULT      9216
250 #define NETIUCV_QUEUELEN_DEFAULT 50
251 #define NETIUCV_TIMEOUT_5SEC     5000
252
253 /**
254  * Compatibility helpers for busy handling
255  * of network devices.
256  */
257 static inline void netiucv_clear_busy(struct net_device *dev)
258 {
259         struct netiucv_priv *priv = netdev_priv(dev);
260         clear_bit(0, &priv->tbusy);
261         netif_wake_queue(dev);
262 }
263
264 static inline int netiucv_test_and_set_busy(struct net_device *dev)
265 {
266         struct netiucv_priv *priv = netdev_priv(dev);
267         netif_stop_queue(dev);
268         return test_and_set_bit(0, &priv->tbusy);
269 }
270
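/**
 * 16 bytes of IUCV user data carried on every netiucv path: EBCDIC '0'
 * followed by blanks, repeated twice. Incoming path requests are only
 * accepted if their ipuser field matches this pattern.
 */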
271 static u8 iucvMagic[16] = {
272         0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
273         0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
274 };
275
276 /**
277  * Convert an iucv userId to its printable
278  * form (strip whitespace at end).
279  *
280  * @param name  An iucv userId
281  *
282  * @returns The printable string (static data!!)
283  */
284 static char *netiucv_printname(char *name)
285 {
286         static char tmp[9];
287         char *p = tmp;
288         memcpy(tmp, name, 8);
289         tmp[8] = '\0';
290         while (*p && (!isspace(*p)))
291                 p++;
292         *p = '\0';
293         return tmp;
294 }
295
296 /**
297  * States of the interface statemachine.
298  */
299 enum dev_states {
300         DEV_STATE_STOPPED,
301         DEV_STATE_STARTWAIT,
302         DEV_STATE_STOPWAIT,
303         DEV_STATE_RUNNING,
304         /**
305          * MUST always be the last element!!
306          */
307         NR_DEV_STATES
308 };
309
310 static const char *dev_state_names[] = {
311         "Stopped",
312         "StartWait",
313         "StopWait",
314         "Running",
315 };
316
317 /**
318  * Events of the interface statemachine.
319  */
320 enum dev_events {
321         DEV_EVENT_START,
322         DEV_EVENT_STOP,
323         DEV_EVENT_CONUP,
324         DEV_EVENT_CONDOWN,
325         /**
326          * MUST always be the last element!!
327          */
328         NR_DEV_EVENTS
329 };
330
331 static const char *dev_event_names[] = {
332         "Start",
333         "Stop",
334         "Connection up",
335         "Connection down",
336 };
337
338 /**
339  * Events of the connection statemachine
340  */
341 enum conn_events {
342         /**
343          * Events representing callbacks from
344          * the lowlevel iucv layer
345          */
346         CONN_EVENT_CONN_REQ,
347         CONN_EVENT_CONN_ACK,
348         CONN_EVENT_CONN_REJ,
349         CONN_EVENT_CONN_SUS,
350         CONN_EVENT_CONN_RES,
351         CONN_EVENT_RX,
352         CONN_EVENT_TXDONE,
353
354         /**
355          * Events representing error return codes from
356          * calls to the lowlevel iucv layer
357          */
358
359         /**
360          * Event, representing timer expiry.
361          */
362         CONN_EVENT_TIMER,
363
364         /**
365          * Events, representing commands from upper levels.
366          */
367         CONN_EVENT_START,
368         CONN_EVENT_STOP,
369
370         /**
371          * MUST always be the last element!!
372          */
373         NR_CONN_EVENTS,
374 };
375
376 static const char *conn_event_names[] = {
377         "Remote connection request",
378         "Remote connection acknowledge",
379         "Remote connection reject",
380         "Connection suspended",
381         "Connection resumed",
382         "Data received",
383         "Data sent",
384
385         "Timer",
386
387         "Start",
388         "Stop",
389 };
390
391 /**
392  * States of the connection statemachine.
393  */
394 enum conn_states {
395         /**
396          * Connection not assigned to any device,
397          * initial state, invalid
398          */
399         CONN_STATE_INVALID,
400
401         /**
402          * Userid assigned but not operating
403          */
404         CONN_STATE_STOPPED,
405
406         /**
407          * Connection registered,
408          * no connection request sent yet,
409          * no connection request received
410          */
411         CONN_STATE_STARTWAIT,
412
413         /**
414          * Connection registered and connection request sent,
415          * no acknowledge and no connection request received yet.
416          */
417         CONN_STATE_SETUPWAIT,
418
419         /**
420          * Connection up and running idle
421          */
422         CONN_STATE_IDLE,
423
424         /**
425          * Data sent, awaiting CONN_EVENT_TXDONE
426          */
427         CONN_STATE_TX,
428
429         /**
430          * Error during registration.
431          */
432         CONN_STATE_REGERR,
433
434         /**
435          * Error during connection setup.
436          */
437         CONN_STATE_CONNERR,
438
439         /**
440          * MUST always be the last element!!
441          */
442         NR_CONN_STATES,
443 };
444
445 static const char *conn_state_names[] = {
446         "Invalid",
447         "Stopped",
448         "StartWait",
449         "SetupWait",
450         "Idle",
451         "TX",
453         "Registration error",
454         "Connect error",
455 };
456
457
458 /**
459  * Debug Facility Stuff
460  */
461 static debug_info_t *iucv_dbf_setup = NULL;
462 static debug_info_t *iucv_dbf_data = NULL;
463 static debug_info_t *iucv_dbf_trace = NULL;
464
465 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
466
467 static void iucv_unregister_dbf_views(void)
468 {
469         if (iucv_dbf_setup)
470                 debug_unregister(iucv_dbf_setup);
471         if (iucv_dbf_data)
472                 debug_unregister(iucv_dbf_data);
473         if (iucv_dbf_trace)
474                 debug_unregister(iucv_dbf_trace);
475 }
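/*
 * Allocate the three s390 debug feature areas (setup, data, trace) and
 * attach a hex/ascii view with its default level to each of them.
 */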
476 static int iucv_register_dbf_views(void)
477 {
478         iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
479                                         IUCV_DBF_SETUP_PAGES,
480                                         IUCV_DBF_SETUP_NR_AREAS,
481                                         IUCV_DBF_SETUP_LEN);
482         iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
483                                        IUCV_DBF_DATA_PAGES,
484                                        IUCV_DBF_DATA_NR_AREAS,
485                                        IUCV_DBF_DATA_LEN);
486         iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
487                                         IUCV_DBF_TRACE_PAGES,
488                                         IUCV_DBF_TRACE_NR_AREAS,
489                                         IUCV_DBF_TRACE_LEN);
490
491         if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
492             (iucv_dbf_trace == NULL)) {
493                 iucv_unregister_dbf_views();
494                 return -ENOMEM;
495         }
496         debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
497         debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
498
499         debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
500         debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
501
502         debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
503         debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
504
505         return 0;
506 }
507
508 /*
509  * Callback-wrappers, called from lowlevel iucv layer.
510  */
511
512 static void netiucv_callback_rx(struct iucv_path *path,
513                                 struct iucv_message *msg)
514 {
515         struct iucv_connection *conn = path->private;
516         struct iucv_event ev;
517
518         ev.conn = conn;
519         ev.data = msg;
520         fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
521 }
522
523 static void netiucv_callback_txdone(struct iucv_path *path,
524                                     struct iucv_message *msg)
525 {
526         struct iucv_connection *conn = path->private;
527         struct iucv_event ev;
528
529         ev.conn = conn;
530         ev.data = msg;
531         fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
532 }
533
534 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
535 {
536         struct iucv_connection *conn = path->private;
537
538         fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
539 }
540
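/**
 * Handle a pending path request from a peer: the request is accepted
 * only if the user data matches iucvMagic and a connection has been
 * configured for the originating z/VM user ID; in that case the path is
 * handed to the matching connection's state machine as CONN_EVENT_CONN_REQ.
 */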
541 static int netiucv_callback_connreq(struct iucv_path *path,
542                                     u8 ipvmid[8], u8 ipuser[16])
543 {
544         struct iucv_connection *conn = path->private;
545         struct iucv_event ev;
546         int rc;
547
548         if (memcmp(iucvMagic, ipuser, sizeof(iucvMagic)))
549                 /* ipuser must match iucvMagic. */
550                 return -EINVAL;
551         rc = -EINVAL;
552         read_lock_bh(&iucv_connection_rwlock);
553         list_for_each_entry(conn, &iucv_connection_list, list) {
554                 if (strncmp(ipvmid, conn->userid, 8))
555                         continue;
556                 /* Found a matching connection for this path. */
557                 conn->path = path;
558                 ev.conn = conn;
559                 ev.data = path;
560                 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
561                 rc = 0;
562         }
563         read_unlock_bh(&iucv_connection_rwlock);
564         return rc;
565 }
566
567 static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
568 {
569         struct iucv_connection *conn = path->private;
570
571         fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
572 }
573
574 static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
575 {
576         struct iucv_connection *conn = path->private;
577
578         fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
579 }
580
581 static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
582 {
583         struct iucv_connection *conn = path->private;
584
585         fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
586 }
587
588 /**
589  * NOP action for statemachines
590  */
591 static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
592 {
593 }
594
595 /*
596  * Actions of the connection statemachine
597  */
598
599 /**
600  * netiucv_unpack_skb
601  * @conn: The connection where this skb has been received.
602  * @pskb: The received skb.
603  *
604  * Unpack a just received skb and hand it over to upper layers.
605  * Helper function for conn_action_rx.
606  */
607 static void netiucv_unpack_skb(struct iucv_connection *conn,
608                                struct sk_buff *pskb)
609 {
610         struct net_device     *dev = conn->netdev;
611         struct netiucv_priv   *privptr = netdev_priv(dev);
612         u16 offset = 0;
613
614         skb_put(pskb, NETIUCV_HDRLEN);
615         pskb->dev = dev;
616         pskb->ip_summed = CHECKSUM_NONE;
617         pskb->protocol = ntohs(ETH_P_IP);
618
619         while (1) {
620                 struct sk_buff *skb;
621                 struct ll_header *header = (struct ll_header *) pskb->data;
622
623                 if (!header->next)
624                         break;
625
626                 skb_pull(pskb, NETIUCV_HDRLEN);
627                 header->next -= offset;
628                 offset += header->next;
629                 header->next -= NETIUCV_HDRLEN;
630                 if (skb_tailroom(pskb) < header->next) {
631                         IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
632                                 header->next, skb_tailroom(pskb));
633                         return;
634                 }
635                 skb_put(pskb, header->next);
636                 skb_reset_mac_header(pskb);
637                 skb = dev_alloc_skb(pskb->len);
638                 if (!skb) {
639                         IUCV_DBF_TEXT(data, 2,
640                                 "Out of memory in netiucv_unpack_skb\n");
641                         privptr->stats.rx_dropped++;
642                         return;
643                 }
644                 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
645                                           pskb->len);
646                 skb_reset_mac_header(skb);
647                 skb->dev = pskb->dev;
648                 skb->protocol = pskb->protocol;
649                 pskb->ip_summed = CHECKSUM_UNNECESSARY;
650                 privptr->stats.rx_packets++;
651                 privptr->stats.rx_bytes += skb->len;
652                 /*
653                  * Since receiving is always initiated from a tasklet (in iucv.c),
654                  * we must use netif_rx_ni() instead of netif_rx()
655                  */
656                 netif_rx_ni(skb);
657                 dev->last_rx = jiffies;
658                 skb_pull(pskb, header->next);
659                 skb_put(pskb, NETIUCV_HDRLEN);
660         }
661 }
662
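/**
 * conn_action_rx
 * Receive one IUCV message into the connection's preallocated receive
 * buffer and hand it to netiucv_unpack_skb(). Messages for unlinked
 * connections or messages larger than max_buffsize are rejected.
 */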
663 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
664 {
665         struct iucv_event *ev = arg;
666         struct iucv_connection *conn = ev->conn;
667         struct iucv_message *msg = ev->data;
668         struct netiucv_priv *privptr = netdev_priv(conn->netdev);
669         int rc;
670
671         IUCV_DBF_TEXT(trace, 4, __func__);
672
673         if (!conn->netdev) {
674                 iucv_message_reject(conn->path, msg);
675                 IUCV_DBF_TEXT(data, 2,
676                               "Received data for unlinked connection\n");
677                 return;
678         }
679         if (msg->length > conn->max_buffsize) {
680                 iucv_message_reject(conn->path, msg);
681                 privptr->stats.rx_dropped++;
682                 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
683                                msg->length, conn->max_buffsize);
684                 return;
685         }
686         conn->rx_buff->data = conn->rx_buff->head;
687         skb_reset_tail_pointer(conn->rx_buff);
688         conn->rx_buff->len = 0;
689         rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
690                                   msg->length, NULL);
691         if (rc || msg->length < 5) {
692                 privptr->stats.rx_errors++;
693                 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
694                 return;
695         }
696         netiucv_unpack_skb(conn, conn->rx_buff);
697 }
698
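/**
 * conn_action_txdone
 * A transmission completed: account for and free the skb of a single
 * send, then flush everything gathered on the collect queue as one
 * multi-packet message. If nothing was collected, return to idle.
 */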
699 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
700 {
701         struct iucv_event *ev = arg;
702         struct iucv_connection *conn = ev->conn;
703         struct iucv_message *msg = ev->data;
704         struct iucv_message txmsg;
705         struct netiucv_priv *privptr = NULL;
706         u32 single_flag = msg->tag;
707         u32 txbytes = 0;
708         u32 txpackets = 0;
709         u32 stat_maxcq = 0;
710         struct sk_buff *skb;
711         unsigned long saveflags;
712         struct ll_header header;
713         int rc;
714
715         IUCV_DBF_TEXT(trace, 4, __func__);
716
717         if (conn && conn->netdev)
718                 privptr = netdev_priv(conn->netdev);
719         conn->prof.tx_pending--;
720         if (single_flag) {
721                 if ((skb = skb_dequeue(&conn->commit_queue))) {
722                         atomic_dec(&skb->users);
723                         dev_kfree_skb_any(skb);
724                         if (privptr) {
725                                 privptr->stats.tx_packets++;
726                                 privptr->stats.tx_bytes +=
727                                         (skb->len - NETIUCV_HDRLEN
728                                                   - NETIUCV_HDRLEN);
729                         }
730                 }
731         }
732         conn->tx_buff->data = conn->tx_buff->head;
733         skb_reset_tail_pointer(conn->tx_buff);
734         conn->tx_buff->len = 0;
735         spin_lock_irqsave(&conn->collect_lock, saveflags);
736         while ((skb = skb_dequeue(&conn->collect_queue))) {
737                 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
738                 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
739                        NETIUCV_HDRLEN);
740                 skb_copy_from_linear_data(skb,
741                                           skb_put(conn->tx_buff, skb->len),
742                                           skb->len);
743                 txbytes += skb->len;
744                 txpackets++;
745                 stat_maxcq++;
746                 atomic_dec(&skb->users);
747                 dev_kfree_skb_any(skb);
748         }
749         if (conn->collect_len > conn->prof.maxmulti)
750                 conn->prof.maxmulti = conn->collect_len;
751         conn->collect_len = 0;
752         spin_unlock_irqrestore(&conn->collect_lock, saveflags);
753         if (conn->tx_buff->len == 0) {
754                 fsm_newstate(fi, CONN_STATE_IDLE);
755                 return;
756         }
757
758         header.next = 0;
759         memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
760         conn->prof.send_stamp = current_kernel_time();
761         txmsg.class = 0;
762         txmsg.tag = 0;
763         rc = iucv_message_send(conn->path, &txmsg, 0, 0,
764                                conn->tx_buff->data, conn->tx_buff->len);
765         conn->prof.doios_multi++;
766         conn->prof.txlen += conn->tx_buff->len;
767         conn->prof.tx_pending++;
768         if (conn->prof.tx_pending > conn->prof.tx_max_pending)
769                 conn->prof.tx_max_pending = conn->prof.tx_pending;
770         if (rc) {
771                 conn->prof.tx_pending--;
772                 fsm_newstate(fi, CONN_STATE_IDLE);
773                 if (privptr)
774                         privptr->stats.tx_errors += txpackets;
775                 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
776         } else {
777                 if (privptr) {
778                         privptr->stats.tx_packets += txpackets;
779                         privptr->stats.tx_bytes += txbytes;
780                 }
781                 if (stat_maxcq > conn->prof.maxcqueue)
782                         conn->prof.maxcqueue = stat_maxcq;
783         }
784 }
785
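/**
 * conn_action_connaccept
 * Accept an incoming path request on a configured connection, take the
 * path's message limit as tx_queue_len and report DEV_EVENT_CONUP to the
 * interface state machine.
 */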
786 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
787 {
788         struct iucv_event *ev = arg;
789         struct iucv_connection *conn = ev->conn;
790         struct iucv_path *path = ev->data;
791         struct net_device *netdev = conn->netdev;
792         struct netiucv_priv *privptr = netdev_priv(netdev);
793         int rc;
794
795         IUCV_DBF_TEXT(trace, 3, __func__);
796
797         conn->path = path;
798         path->msglim = NETIUCV_QUEUELEN_DEFAULT;
799         path->flags = 0;
800         rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
801         if (rc) {
802                 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
803                 return;
804         }
805         fsm_newstate(fi, CONN_STATE_IDLE);
806         netdev->tx_queue_len = conn->path->msglim;
807         fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
808 }
809
810 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
811 {
812         struct iucv_event *ev = arg;
813         struct iucv_path *path = ev->data;
814
815         IUCV_DBF_TEXT(trace, 3, __func__);
816         iucv_path_sever(path, NULL);
817 }
818
819 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
820 {
821         struct iucv_connection *conn = arg;
822         struct net_device *netdev = conn->netdev;
823         struct netiucv_priv *privptr = netdev_priv(netdev);
824
825         IUCV_DBF_TEXT(trace, 3, __func__);
826         fsm_deltimer(&conn->timer);
827         fsm_newstate(fi, CONN_STATE_IDLE);
828         netdev->tx_queue_len = conn->path->msglim;
829         fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
830 }
831
832 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
833 {
834         struct iucv_connection *conn = arg;
835
836         IUCV_DBF_TEXT(trace, 3, __func__);
837         fsm_deltimer(&conn->timer);
838         iucv_path_sever(conn->path, NULL);
839         fsm_newstate(fi, CONN_STATE_STARTWAIT);
840 }
841
842 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
843 {
844         struct iucv_connection *conn = arg;
845         struct net_device *netdev = conn->netdev;
846         struct netiucv_priv *privptr = netdev_priv(netdev);
847
848         IUCV_DBF_TEXT(trace, 3, __func__);
849
850         fsm_deltimer(&conn->timer);
851         iucv_path_sever(conn->path, NULL);
852         dev_info(privptr->dev, "The peer interface of the IUCV device"
853                 " has closed the connection\n");
854         IUCV_DBF_TEXT(data, 2,
855                       "conn_action_connsever: Remote dropped connection\n");
856         fsm_newstate(fi, CONN_STATE_STARTWAIT);
857         fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
858 }
859
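/**
 * conn_action_start
 * Allocate a path and try to connect to the configured peer. The state
 * is switched to SETUPWAIT before iucv_path_connect() because callbacks
 * may arrive at any time afterwards; non-zero IUCV return codes are
 * mapped to operator messages and leave the connection in STARTWAIT or
 * CONNERR.
 */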
860 static void conn_action_start(fsm_instance *fi, int event, void *arg)
861 {
862         struct iucv_connection *conn = arg;
863         struct net_device *netdev = conn->netdev;
864         struct netiucv_priv *privptr = netdev_priv(netdev);
865         int rc;
866
867         IUCV_DBF_TEXT(trace, 3, __func__);
868
869         fsm_newstate(fi, CONN_STATE_STARTWAIT);
870         IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
871                 netdev->name, conn->userid);
872
873         /*
874          * We must set the state before calling iucv_connect because the
875          * callback handler could be called at any point after the connection
876          * request is sent
877          */
878
879         fsm_newstate(fi, CONN_STATE_SETUPWAIT);
880         conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
881         rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
882                                NULL, iucvMagic, conn);
883         switch (rc) {
884         case 0:
885                 netdev->tx_queue_len = conn->path->msglim;
886                 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
887                              CONN_EVENT_TIMER, conn);
888                 return;
889         case 11:
890                 dev_warn(privptr->dev,
891                         "The IUCV device failed to connect to z/VM guest %s\n",
892                         netiucv_printname(conn->userid));
893                 fsm_newstate(fi, CONN_STATE_STARTWAIT);
894                 break;
895         case 12:
896                 dev_warn(privptr->dev,
897                         "The IUCV device failed to connect to the peer on z/VM"
898                         " guest %s\n", netiucv_printname(conn->userid));
899                 fsm_newstate(fi, CONN_STATE_STARTWAIT);
900                 break;
901         case 13:
902                 dev_err(privptr->dev,
903                         "Connecting the IUCV device would exceed the maximum"
904                         " number of IUCV connections\n");
905                 fsm_newstate(fi, CONN_STATE_CONNERR);
906                 break;
907         case 14:
908                 dev_err(privptr->dev,
909                         "z/VM guest %s has too many IUCV connections"
910                         " to connect with the IUCV device\n",
911                         netiucv_printname(conn->userid));
912                 fsm_newstate(fi, CONN_STATE_CONNERR);
913                 break;
914         case 15:
915                 dev_err(privptr->dev,
916                         "The IUCV device cannot connect to a z/VM guest with no"
917                         " IUCV authorization\n");
918                 fsm_newstate(fi, CONN_STATE_CONNERR);
919                 break;
920         default:
921                 dev_err(privptr->dev,
922                         "Connecting the IUCV device failed with error %d\n",
923                         rc);
924                 fsm_newstate(fi, CONN_STATE_CONNERR);
925                 break;
926         }
927         IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
928         kfree(conn->path);
929         conn->path = NULL;
930 }
931
932 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
933 {
934         struct sk_buff *skb;
935
936         while ((skb = skb_dequeue(q))) {
937                 atomic_dec(&skb->users);
938                 dev_kfree_skb_any(skb);
939         }
940 }
941
942 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
943 {
944         struct iucv_event *ev = arg;
945         struct iucv_connection *conn = ev->conn;
946         struct net_device *netdev = conn->netdev;
947         struct netiucv_priv *privptr = netdev_priv(netdev);
948
949         IUCV_DBF_TEXT(trace, 3, __func__);
950
951         fsm_deltimer(&conn->timer);
952         fsm_newstate(fi, CONN_STATE_STOPPED);
953         netiucv_purge_skb_queue(&conn->collect_queue);
954         if (conn->path) {
955                 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
956                 iucv_path_sever(conn->path, iucvMagic);
957                 kfree(conn->path);
958                 conn->path = NULL;
959         }
960         netiucv_purge_skb_queue(&conn->commit_queue);
961         fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
962 }
963
964 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
965 {
966         struct iucv_connection *conn = arg;
967         struct net_device *netdev = conn->netdev;
968
969         IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
970                 netdev->name, conn->userid);
971 }
972
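/**
 * Transition table of the connection statemachine:
 * { state, event, action }
 */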
973 static const fsm_node conn_fsm[] = {
974         { CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
975         { CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
976
977         { CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
978         { CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
979         { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
980         { CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
981         { CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
982         { CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
983         { CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
984
985         { CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
986         { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
987         { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
988         { CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
989         { CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
990
991         { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
992         { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
993
994         { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
995         { CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
996         { CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
997
998         { CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
999         { CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
1000
1001         { CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
1002         { CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
1003 };
1004
1005 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1006
1007
1008 /*
1009  * Actions for interface - statemachine.
1010  */
1011
1012 /**
1013  * dev_action_start
1014  * @fi: An instance of an interface statemachine.
1015  * @event: The event that just happened.
1016  * @arg: Generic pointer, cast from struct net_device * upon call.
1017  *
1018  * Startup connection by sending CONN_EVENT_START to it.
1019  */
1020 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1021 {
1022         struct net_device   *dev = arg;
1023         struct netiucv_priv *privptr = netdev_priv(dev);
1024
1025         IUCV_DBF_TEXT(trace, 3, __func__);
1026
1027         fsm_newstate(fi, DEV_STATE_STARTWAIT);
1028         fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1029 }
1030
1031 /**
1032  * Shutdown connection by sending CONN_EVENT_STOP to it.
1033  *
1034  * @param fi    An instance of an interface statemachine.
1035  * @param event The event that just happened.
1036  * @param arg   Generic pointer, cast from struct net_device * upon call.
1037  */
1038 static void
1039 dev_action_stop(fsm_instance *fi, int event, void *arg)
1040 {
1041         struct net_device   *dev = arg;
1042         struct netiucv_priv *privptr = netdev_priv(dev);
1043         struct iucv_event   ev;
1044
1045         IUCV_DBF_TEXT(trace, 3, __func__);
1046
1047         ev.conn = privptr->conn;
1048
1049         fsm_newstate(fi, DEV_STATE_STOPWAIT);
1050         fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1051 }
1052
1053 /**
1054  * Called from connection statemachine
1055  * when a connection is up and running.
1056  *
1057  * @param fi    An instance of an interface statemachine.
1058  * @param event The event that just happened.
1059  * @param arg   Generic pointer, cast from struct net_device * upon call.
1060  */
1061 static void
1062 dev_action_connup(fsm_instance *fi, int event, void *arg)
1063 {
1064         struct net_device   *dev = arg;
1065         struct netiucv_priv *privptr = netdev_priv(dev);
1066
1067         IUCV_DBF_TEXT(trace, 3, __func__);
1068
1069         switch (fsm_getstate(fi)) {
1070                 case DEV_STATE_STARTWAIT:
1071                         fsm_newstate(fi, DEV_STATE_RUNNING);
1072                         dev_info(privptr->dev,
1073                                 "The IUCV device has been connected"
1074                                 " successfully to %s\n", privptr->conn->userid);
1075                         IUCV_DBF_TEXT(setup, 3,
1076                                 "connection is up and running\n");
1077                         break;
1078                 case DEV_STATE_STOPWAIT:
1079                         IUCV_DBF_TEXT(data, 2,
1080                                 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1081                         break;
1082         }
1083 }
1084
1085 /**
1086  * Called from connection statemachine
1087  * when a connection has been shutdown.
1088  *
1089  * @param fi    An instance of an interface statemachine.
1090  * @param event The event that just happened.
1091  * @param arg   Generic pointer, cast from struct net_device * upon call.
1092  */
1093 static void
1094 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1095 {
1096         IUCV_DBF_TEXT(trace, 3, __func__);
1097
1098         switch (fsm_getstate(fi)) {
1099                 case DEV_STATE_RUNNING:
1100                         fsm_newstate(fi, DEV_STATE_STARTWAIT);
1101                         break;
1102                 case DEV_STATE_STOPWAIT:
1103                         fsm_newstate(fi, DEV_STATE_STOPPED);
1104                         IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1105                         break;
1106         }
1107 }
1108
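/**
 * Transition table of the interface statemachine:
 * { state, event, action }
 */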
1109 static const fsm_node dev_fsm[] = {
1110         { DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
1111
1112         { DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
1113         { DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
1114
1115         { DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
1116         { DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
1117
1118         { DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
1119         { DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
1120         { DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
1121 };
1122
1123 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1124
1125 /**
1126  * Transmit a packet.
1127  * This is a helper function for netiucv_tx().
1128  *
1129  * @param conn Connection to be used for sending.
1130  * @param skb Pointer to struct sk_buff of packet to send.
1131  *            The linklevel header has already been set up
1132  *            by netiucv_tx().
1133  *
1134  * @return 0 on success, non-zero (e.g. -EBUSY, -ENOMEM) on failure.
1135  */
1136 static int netiucv_transmit_skb(struct iucv_connection *conn,
1137                                 struct sk_buff *skb)
1138 {
1139         struct iucv_message msg;
1140         unsigned long saveflags;
1141         struct ll_header header;
1142         int rc;
1143
1144         if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1145                 int l = skb->len + NETIUCV_HDRLEN;
1146
1147                 spin_lock_irqsave(&conn->collect_lock, saveflags);
1148                 if (conn->collect_len + l >
1149                     (conn->max_buffsize - NETIUCV_HDRLEN)) {
1150                         rc = -EBUSY;
1151                         IUCV_DBF_TEXT(data, 2,
1152                                       "EBUSY from netiucv_transmit_skb\n");
1153                 } else {
1154                         atomic_inc(&skb->users);
1155                         skb_queue_tail(&conn->collect_queue, skb);
1156                         conn->collect_len += l;
1157                         rc = 0;
1158                 }
1159                 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1160         } else {
1161                 struct sk_buff *nskb = skb;
1162                 /**
1163                  * Copy the skb to a new allocated skb in lowmem only if the
1164                  * data is located above 2G in memory or tailroom is < 2.
1165                  */
1166                 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1167                                     NETIUCV_HDRLEN)) >> 31;
1168                 int copied = 0;
1169                 if (hi || (skb_tailroom(skb) < 2)) {
1170                         nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1171                                          NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1172                         if (!nskb) {
1173                                 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1174                                 rc = -ENOMEM;
1175                                 return rc;
1176                         } else {
1177                                 skb_reserve(nskb, NETIUCV_HDRLEN);
1178                                 memcpy(skb_put(nskb, skb->len),
1179                                        skb->data, skb->len);
1180                         }
1181                         copied = 1;
1182                 }
1183                 /**
1184                  * skb now is below 2G and has enough room. Add headers.
1185                  */
1186                 header.next = nskb->len + NETIUCV_HDRLEN;
1187                 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1188                 header.next = 0;
1189                 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header,  NETIUCV_HDRLEN);
1190
1191                 fsm_newstate(conn->fsm, CONN_STATE_TX);
1192                 conn->prof.send_stamp = current_kernel_time();
1193
1194                 msg.tag = 1;
1195                 msg.class = 0;
1196                 rc = iucv_message_send(conn->path, &msg, 0, 0,
1197                                        nskb->data, nskb->len);
1198                 conn->prof.doios_single++;
1199                 conn->prof.txlen += skb->len;
1200                 conn->prof.tx_pending++;
1201                 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1202                         conn->prof.tx_max_pending = conn->prof.tx_pending;
1203                 if (rc) {
1204                         struct netiucv_priv *privptr;
1205                         fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1206                         conn->prof.tx_pending--;
1207                         privptr = netdev_priv(conn->netdev);
1208                         if (privptr)
1209                                 privptr->stats.tx_errors++;
1210                         if (copied)
1211                                 dev_kfree_skb(nskb);
1212                         else {
1213                                 /**
1214                                  * Remove our headers. They get added
1215                                  * again on retransmit.
1216                                  */
1217                                 skb_pull(skb, NETIUCV_HDRLEN);
1218                                 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1219                         }
1220                         IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1221                 } else {
1222                         if (copied)
1223                                 dev_kfree_skb(skb);
1224                         atomic_inc(&nskb->users);
1225                         skb_queue_tail(&conn->commit_queue, nskb);
1226                 }
1227         }
1228
1229         return rc;
1230 }
1231
1232 /*
1233  * Interface API for upper network layers
1234  */
1235
1236 /**
1237  * Open an interface.
1238  * Called from generic network layer when ifconfig up is run.
1239  *
1240  * @param dev Pointer to interface struct.
1241  *
1242  * @return 0 (this function never fails).
1243  */
1244 static int netiucv_open(struct net_device *dev)
1245 {
1246         struct netiucv_priv *priv = netdev_priv(dev);
1247
1248         fsm_event(priv->fsm, DEV_EVENT_START, dev);
1249         return 0;
1250 }
1251
1252 /**
1253  * Close an interface.
1254  * Called from generic network layer when ifconfig down is run.
1255  *
1256  * @param dev Pointer to interface struct.
1257  *
1258  * @return 0 (this function never fails).
1259  */
1260 static int netiucv_close(struct net_device *dev)
1261 {
1262         struct netiucv_priv *priv = netdev_priv(dev);
1263
1264         fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1265         return 0;
1266 }
1267
1268 /**
1269  * Start transmission of a packet.
1270  * Called from generic network device layer.
1271  *
1272  * @param skb Pointer to buffer containing the packet.
1273  * @param dev Pointer to interface struct.
1274  *
1275  * @return 0 if packet consumed, !0 if packet rejected.
1276  *         Note: If we return !0, then the packet is freed by
1277  *               the generic network layer.
1278  */
1279 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1280 {
1281         struct netiucv_priv *privptr = netdev_priv(dev);
1282         int rc;
1283
1284         IUCV_DBF_TEXT(trace, 4, __func__);
1285         /**
1286          * Some sanity checks ...
1287          */
1288         if (skb == NULL) {
1289                 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1290                 privptr->stats.tx_dropped++;
1291                 return 0;
1292         }
1293         if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1294                 IUCV_DBF_TEXT(data, 2,
1295                         "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1296                 dev_kfree_skb(skb);
1297                 privptr->stats.tx_dropped++;
1298                 return 0;
1299         }
1300
1301         /**
1302          * If connection is not running, try to restart it
1303          * and throw away packet.
1304          */
1305         if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1306                 dev_kfree_skb(skb);
1307                 privptr->stats.tx_dropped++;
1308                 privptr->stats.tx_errors++;
1309                 privptr->stats.tx_carrier_errors++;
1310                 return 0;
1311         }
1312
1313         if (netiucv_test_and_set_busy(dev)) {
1314                 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1315                 return -EBUSY;
1316         }
1317         dev->trans_start = jiffies;
1318         rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
1319         netiucv_clear_busy(dev);
1320         return rc;
1321 }
1322
1323 /**
1324  * netiucv_stats
1325  * @dev: Pointer to interface struct.
1326  *
1327  * Returns interface statistics of a device.
1328  *
1329  * Returns pointer to stats struct of this interface.
1330  */
1331 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1332 {
1333         struct netiucv_priv *priv = netdev_priv(dev);
1334
1335         IUCV_DBF_TEXT(trace, 5, __func__);
1336         return &priv->stats;
1337 }
1338
1339 /**
1340  * netiucv_change_mtu
1341  * @dev: Pointer to interface struct.
1342  * @new_mtu: The new MTU to use for this interface.
1343  *
1344  * Sets MTU of an interface.
1345  *
1346  * Returns 0 on success, -EINVAL if MTU is out of valid range.
1347  *         (valid range is 576 .. NETIUCV_MTU_MAX).
1348  */
1349 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1350 {
1351         IUCV_DBF_TEXT(trace, 3, __func__);
1352         if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1353                 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1354                 return -EINVAL;
1355         }
1356         dev->mtu = new_mtu;
1357         return 0;
1358 }
1359
1360 /*
1361  * attributes in sysfs
1362  */
1363
1364 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1365                          char *buf)
1366 {
1367         struct netiucv_priv *priv = dev->driver_data;
1368
1369         IUCV_DBF_TEXT(trace, 5, __func__);
1370         return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1371 }
1372
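/*
 * Set the peer z/VM user ID. The input is upper-cased and padded with
 * blanks to 8 characters. Changing the ID is refused while the interface
 * is up, and refused with -EEXIST if another interface already connects
 * to that peer.
 */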
1373 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1374                           const char *buf, size_t count)
1375 {
1376         struct netiucv_priv *priv = dev->driver_data;
1377         struct net_device *ndev = priv->conn->netdev;
1378         char    *p;
1379         char    *tmp;
1380         char    username[9];
1381         int     i;
1382         struct iucv_connection *cp;
1383
1384         IUCV_DBF_TEXT(trace, 3, __func__);
1385         if (count > 9) {
1386                 IUCV_DBF_TEXT_(setup, 2,
1387                                "username too long: %d characters\n", (int) count);
1388                 return -EINVAL;
1389         }
1390
1391         tmp = strsep((char **) &buf, "\n");
1392         for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
1393                 if (isalnum(*p) || (*p == '$')) {
1394                         username[i]= toupper(*p);
1395                         continue;
1396                 }
1397                 if (*p == '\n') {
1398                         /* trailing lf, grr */
1399                         break;
1400                 }
1401                 IUCV_DBF_TEXT_(setup, 2,
1402                                "username: invalid character %c\n", *p);
1403                 return -EINVAL;
1404         }
1405         while (i < 8)
1406                 username[i++] = ' ';
1407         username[8] = '\0';
1408
1409         if (memcmp(username, priv->conn->userid, 9) &&
1410             (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1411                 /* username changed while the interface is active. */
1412                 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1413                 return -EPERM;
1414         }
1415         read_lock_bh(&iucv_connection_rwlock);
1416         list_for_each_entry(cp, &iucv_connection_list, list) {
1417                 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1418                         read_unlock_bh(&iucv_connection_rwlock);
1419                         IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
1420                                 "to %s already exists\n", username);
1421                         return -EEXIST;
1422                 }
1423         }
1424         read_unlock_bh(&iucv_connection_rwlock);
1425         memcpy(priv->conn->userid, username, 9);
1426         return count;
1427 }
1428
1429 static DEVICE_ATTR(user, 0644, user_show, user_write);
1430
1431 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1432                             char *buf)
1433 {
        struct netiucv_priv *priv = dev->driver_data;
1434
1435         IUCV_DBF_TEXT(trace, 5, __func__);
1436         return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1437 }
1438
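/*
 * Set the maximum IUCV buffer size. The value must not exceed
 * NETIUCV_BUFSIZE_MAX, must leave room for the current MTU plus header
 * while the interface is running, and must be at least 576 bytes plus
 * two link-level headers. While the interface is down, the MTU is
 * adjusted to match the new buffer size.
 */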
1439 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1440                              const char *buf, size_t count)
1441 {
1442         struct netiucv_priv *priv = dev->driver_data;
1443         struct net_device *ndev = priv->conn->netdev;
1444         char         *e;
1445         int          bs1;
1446
1447         IUCV_DBF_TEXT(trace, 3, __func__);
1448         if (count >= 39)
1449                 return -EINVAL;
1450
1451         bs1 = simple_strtoul(buf, &e, 0);
1452
1453         if (e && (!isspace(*e))) {
1454                 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1455                 return -EINVAL;
1456         }
1457         if (bs1 > NETIUCV_BUFSIZE_MAX) {
1458                 IUCV_DBF_TEXT_(setup, 2,
1459                         "buffer_write: buffer size %d too large\n",
1460                         bs1);
1461                 return -EINVAL;
1462         }
1463         if ((ndev->flags & IFF_RUNNING) &&
1464             (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1465                 IUCV_DBF_TEXT_(setup, 2,
1466                         "buffer_write: buffer size %d too small\n",
1467                         bs1);
1468                 return -EINVAL;
1469         }
1470         if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1471                 IUCV_DBF_TEXT_(setup, 2,
1472                         "buffer_write: buffer size %d too small\n",
1473                         bs1);
1474                 return -EINVAL;
1475         }
1476
1477         priv->conn->max_buffsize = bs1;
1478         if (!(ndev->flags & IFF_RUNNING))
1479                 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1480
1481         return count;
1482
1483 }
1484
1485 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1486
1487 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1488                              char *buf)
1489 {
1490         struct netiucv_priv *priv = dev->driver_data;
1491
1492         IUCV_DBF_TEXT(trace, 5, __func__);
1493         return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1494 }
1495
1496 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1497
1498 static ssize_t conn_fsm_show (struct device *dev,
1499                               struct device_attribute *attr, char *buf)
1500 {
1501         struct netiucv_priv *priv = dev->driver_data;
1502
1503         IUCV_DBF_TEXT(trace, 5, __func__);
1504         return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1505 }
1506
1507 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1508
1509 static ssize_t maxmulti_show (struct device *dev,
1510                               struct device_attribute *attr, char *buf)
1511 {
1512         struct netiucv_priv *priv = dev->driver_data;
1513
1514         IUCV_DBF_TEXT(trace, 5, __func__);
1515         return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1516 }
1517
1518 static ssize_t maxmulti_write (struct device *dev,
1519                                struct device_attribute *attr,
1520                                const char *buf, size_t count)
1521 {
1522         struct netiucv_priv *priv = dev->driver_data;
1523
1524         IUCV_DBF_TEXT(trace, 4, __func__);
1525         priv->conn->prof.maxmulti = 0;
1526         return count;
1527 }
1528
1529 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1530
1531 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1532                            char *buf)
1533 {
1534         struct netiucv_priv *priv = dev->driver_data;
1535
1536         IUCV_DBF_TEXT(trace, 5, __func__);
1537         return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1538 }
1539
1540 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1541                             const char *buf, size_t count)
1542 {
1543         struct netiucv_priv *priv = dev->driver_data;
1544
1545         IUCV_DBF_TEXT(trace, 4, __func__);
1546         priv->conn->prof.maxcqueue = 0;
1547         return count;
1548 }
1549
1550 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1551
1552 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1553                            char *buf)
1554 {
1555         struct netiucv_priv *priv = dev->driver_data;
1556
1557         IUCV_DBF_TEXT(trace, 5, __func__);
1558         return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1559 }
1560
1561 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1562                             const char *buf, size_t count)
1563 {
1564         struct netiucv_priv *priv = dev->driver_data;
1565
1566         IUCV_DBF_TEXT(trace, 4, __func__);
1567         priv->conn->prof.doios_single = 0;
1568         return count;
1569 }
1570
1571 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1572
1573 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1574                            char *buf)
1575 {
1576         struct netiucv_priv *priv = dev->driver_data;
1577
1578         IUCV_DBF_TEXT(trace, 5, __func__);
1579         return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1580 }
1581
1582 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1583                             const char *buf, size_t count)
1584 {
1585         struct netiucv_priv *priv = dev->driver_data;
1586
1587         IUCV_DBF_TEXT(trace, 4, __func__);
1588         priv->conn->prof.doios_multi = 0;
1589         return count;
1590 }
1591
1592 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1593
1594 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1595                            char *buf)
1596 {
1597         struct netiucv_priv *priv = dev->driver_data;
1598
1599         IUCV_DBF_TEXT(trace, 5, __func__);
1600         return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1601 }
1602
1603 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1604                             const char *buf, size_t count)
1605 {
1606         struct netiucv_priv *priv = dev->driver_data;
1607
1608         IUCV_DBF_TEXT(trace, 4, __func__);
1609         priv->conn->prof.txlen = 0;
1610         return count;
1611 }
1612
1613 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1614
1615 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1616                             char *buf)
1617 {
1618         struct netiucv_priv *priv = dev->driver_data;
1619
1620         IUCV_DBF_TEXT(trace, 5, __func__);
1621         return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1622 }
1623
1624 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1625                              const char *buf, size_t count)
1626 {
1627         struct netiucv_priv *priv = dev->driver_data;
1628
1629         IUCV_DBF_TEXT(trace, 4, __func__);
1630         priv->conn->prof.tx_time = 0;
1631         return count;
1632 }
1633
1634 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1635
1636 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1637                             char *buf)
1638 {
1639         struct netiucv_priv *priv = dev->driver_data;
1640
1641         IUCV_DBF_TEXT(trace, 5, __func__);
1642         return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1643 }
1644
1645 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1646                              const char *buf, size_t count)
1647 {
1648         struct netiucv_priv *priv = dev->driver_data;
1649
1650         IUCV_DBF_TEXT(trace, 4, __func__);
1651         priv->conn->prof.tx_pending = 0;
1652         return count;
1653 }
1654
1655 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1656
1657 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1658                             char *buf)
1659 {
1660         struct netiucv_priv *priv = dev->driver_data;
1661
1662         IUCV_DBF_TEXT(trace, 5, __func__);
1663         return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1664 }
1665
1666 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1667                              const char *buf, size_t count)
1668 {
1669         struct netiucv_priv *priv = dev->driver_data;
1670
1671         IUCV_DBF_TEXT(trace, 4, __func__);
1672         priv->conn->prof.tx_max_pending = 0;
1673         return count;
1674 }
1675
1676 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1677
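/*
 * sysfs layout: the "user" and "buffer" attributes live directly in the
 * device directory, while the statistics attributes are grouped in a
 * "stats" subdirectory (see the .name member of the group below).
 */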
1678 static struct attribute *netiucv_attrs[] = {
1679         &dev_attr_buffer.attr,
1680         &dev_attr_user.attr,
1681         NULL,
1682 };
1683
1684 static struct attribute_group netiucv_attr_group = {
1685         .attrs = netiucv_attrs,
1686 };
1687
1688 static struct attribute *netiucv_stat_attrs[] = {
1689         &dev_attr_device_fsm_state.attr,
1690         &dev_attr_connection_fsm_state.attr,
1691         &dev_attr_max_tx_buffer_used.attr,
1692         &dev_attr_max_chained_skbs.attr,
1693         &dev_attr_tx_single_write_ops.attr,
1694         &dev_attr_tx_multi_write_ops.attr,
1695         &dev_attr_netto_bytes.attr,
1696         &dev_attr_max_tx_io_time.attr,
1697         &dev_attr_tx_pending.attr,
1698         &dev_attr_tx_max_pending.attr,
1699         NULL,
1700 };
1701
1702 static struct attribute_group netiucv_stat_attr_group = {
1703         .name  = "stats",
1704         .attrs = netiucv_stat_attrs,
1705 };
1706
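/**
 * Create both attribute groups for a device; if the stats group cannot
 * be created, the already created base group is removed again.
 */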
1707 static int netiucv_add_files(struct device *dev)
1708 {
1709         int ret;
1710
1711         IUCV_DBF_TEXT(trace, 3, __func__);
1712         ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1713         if (ret)
1714                 return ret;
1715         ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1716         if (ret)
1717                 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1718         return ret;
1719 }
1720
1721 static void netiucv_remove_files(struct device *dev)
1722 {
1723         IUCV_DBF_TEXT(trace, 3, __func__);
1724         sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1725         sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1726 }
1727
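/**
 * Allocate a pseudo device on the IUCV bus for the given net_device,
 * register it and attach the sysfs attribute groups defined above.
 * On success the device and the private data are linked via priv->dev
 * and dev->driver_data.
 */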
1728 static int netiucv_register_device(struct net_device *ndev)
1729 {
1730         struct netiucv_priv *priv = netdev_priv(ndev);
1731         struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1732         int ret;
1733
1734
1735         IUCV_DBF_TEXT(trace, 3, __func__);
1736
1737         if (dev) {
1738                 dev_set_name(dev, "net%s", ndev->name);
1739                 dev->bus = &iucv_bus;
1740                 dev->parent = iucv_root;
1741                 /*
1742                  * The release function could be called after the
1743                  * module has been unloaded. Its _only_ task is to
1744                  * free the struct. Therefore, we specify kfree()
1745                  * directly here. (Probably a little bit obscure,
1746                  * but legitimate ...).
1747                  */
1748                 dev->release = (void (*)(struct device *))kfree;
1749                 dev->driver = &netiucv_driver;
1750         } else
1751                 return -ENOMEM;
1752
1753         ret = device_register(dev);
1754
1755         if (ret)
1756                 return ret;
1757         ret = netiucv_add_files(dev);
1758         if (ret)
1759                 goto out_unreg;
1760         priv->dev = dev;
1761         dev->driver_data = priv;
1762         return 0;
1763
1764 out_unreg:
1765         device_unregister(dev);
1766         return ret;
1767 }
1768
1769 static void netiucv_unregister_device(struct device *dev)
1770 {
1771         IUCV_DBF_TEXT(trace, 3, __func__);
1772         netiucv_remove_files(dev);
1773         device_unregister(dev);
1774 }
1775
1776 /**
1777  * Allocate and initialize a new connection structure.
1778  * Add it to the list of netiucv connections.
1779  */
1780 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1781                                                       char *username)
1782 {
1783         struct iucv_connection *conn;
1784
1785         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1786         if (!conn)
1787                 goto out;
1788         skb_queue_head_init(&conn->collect_queue);
1789         skb_queue_head_init(&conn->commit_queue);
1790         spin_lock_init(&conn->collect_lock);
1791         conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1792         conn->netdev = dev;
1793
1794         conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1795         if (!conn->rx_buff)
1796                 goto out_conn;
1797         conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1798         if (!conn->tx_buff)
1799                 goto out_rx;
1800         conn->fsm = init_fsm("netiucvconn", conn_state_names,
1801                              conn_event_names, NR_CONN_STATES,
1802                              NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1803                              GFP_KERNEL);
1804         if (!conn->fsm)
1805                 goto out_tx;
1806
1807         fsm_settimer(conn->fsm, &conn->timer);
1808         fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1809
1810         if (username) {
1811                 memcpy(conn->userid, username, 9);
1812                 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1813         }
1814
1815         write_lock_bh(&iucv_connection_rwlock);
1816         list_add_tail(&conn->list, &iucv_connection_list);
1817         write_unlock_bh(&iucv_connection_rwlock);
1818         return conn;
1819
1820 out_tx:
1821         kfree_skb(conn->tx_buff);
1822 out_rx:
1823         kfree_skb(conn->rx_buff);
1824 out_conn:
1825         kfree(conn);
1826 out:
1827         return NULL;
1828 }
1829
1830 /**
1831  * Release a connection structure and remove it from the
1832  * list of netiucv connections.
1833  */
1834 static void netiucv_remove_connection(struct iucv_connection *conn)
1835 {
1836         IUCV_DBF_TEXT(trace, 3, __func__);
1837         write_lock_bh(&iucv_connection_rwlock);
1838         list_del_init(&conn->list);
1839         write_unlock_bh(&iucv_connection_rwlock);
1840         fsm_deltimer(&conn->timer);
1841         netiucv_purge_skb_queue(&conn->collect_queue);
1842         if (conn->path) {
1843                 iucv_path_sever(conn->path, iucvMagic);
1844                 kfree(conn->path);
1845                 conn->path = NULL;
1846         }
1847         netiucv_purge_skb_queue(&conn->commit_queue);
1848         kfree_fsm(conn->fsm);
1849         kfree_skb(conn->rx_buff);
1850         kfree_skb(conn->tx_buff);
1851 }
1852
1853 /**
1854  * Release everything of a net device.
1855  */
1856 static void netiucv_free_netdevice(struct net_device *dev)
1857 {
1858         struct netiucv_priv *privptr = netdev_priv(dev);
1859
1860         IUCV_DBF_TEXT(trace, 3, __func__);
1861
1862         if (!dev)
1863                 return;
1864
1865         if (privptr) {
1866                 if (privptr->conn)
1867                         netiucv_remove_connection(privptr->conn);
1868                 if (privptr->fsm)
1869                         kfree_fsm(privptr->fsm);
1870                 privptr->conn = NULL; privptr->fsm = NULL;
1871                 /* privptr gets freed by free_netdev() */
1872         }
1873         free_netdev(dev);
1874 }
1875
1876 /**
1877  * Initialize a net device. (Called from kernel in alloc_netdev())
1878  */
1879 static void netiucv_setup_netdevice(struct net_device *dev)
1880 {
1881         dev->mtu                 = NETIUCV_MTU_DEFAULT;
1882         dev->hard_start_xmit     = netiucv_tx;
1883         dev->open                = netiucv_open;
1884         dev->stop                = netiucv_close;
1885         dev->get_stats           = netiucv_stats;
1886         dev->change_mtu          = netiucv_change_mtu;
1887         dev->destructor          = netiucv_free_netdevice;
1888         dev->hard_header_len     = NETIUCV_HDRLEN;
1889         dev->addr_len            = 0;
1890         dev->type                = ARPHRD_SLIP;
1891         dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
1892         dev->flags               = IFF_POINTOPOINT | IFF_NOARP;
1893 }
1894
1895 /**
1896  * Allocate and initialize everything of a net device.
1897  */
1898 static struct net_device *netiucv_init_netdevice(char *username)
1899 {
1900         struct netiucv_priv *privptr;
1901         struct net_device *dev;
1902
1903         dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1904                            netiucv_setup_netdevice);
1905         if (!dev)
1906                 return NULL;
1907         if (dev_alloc_name(dev, dev->name) < 0)
1908                 goto out_netdev;
1909
1910         privptr = netdev_priv(dev);
1911         privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1912                                 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1913                                 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1914         if (!privptr->fsm)
1915                 goto out_netdev;
1916
1917         privptr->conn = netiucv_new_connection(dev, username);
1918         if (!privptr->conn) {
1919                 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1920                 goto out_fsm;
1921         }
1922         fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1923         return dev;
1924
1925 out_fsm:
1926         kfree_fsm(privptr->fsm);
1927 out_netdev:
1928         free_netdev(dev);
1929         return NULL;
1930 }
1931
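/**
 * Write handler for the driver's "connection" attribute. The buffer is
 * expected to hold a z/VM user ID of up to 8 characters (alphanumeric
 * or '$', converted to upper case and padded with blanks); a new IUCV
 * interface connected to that peer is then created, unless a connection
 * to the same user ID already exists. With the usual sysfs layout this
 * is driven from user space roughly like
 *
 *   echo PEERID > /sys/bus/iucv/drivers/netiucv/connection
 *
 * where the exact path depends on the driver name registered elsewhere
 * in this file.
 */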
1932 static ssize_t conn_write(struct device_driver *drv,
1933                           const char *buf, size_t count)
1934 {
1935         const char *p;
1936         char username[9];
1937         int i, rc;
1938         struct net_device *dev;
1939         struct netiucv_priv *priv;
1940         struct iucv_connection *cp;
1941
1942         IUCV_DBF_TEXT(trace, 3, __func__);
1943         if (count > 9) {
1944                 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1945                 return -EINVAL;
1946         }
1947
1948         for (i = 0, p = buf; i < 8 && *p; i++, p++) {
1949                 if (isalnum(*p) || *p == '$') {
1950                         username[i] = toupper(*p);
1951                         continue;
1952                 }
1953                 if (*p == '\n')
1954                         /* trailing lf, grr */
1955                         break;
1956                 IUCV_DBF_TEXT_(setup, 2,
1957                                "conn_write: invalid character %c\n", *p);
1958                 return -EINVAL;
1959         }
1960         while (i < 8)
1961                 username[i++] = ' ';
1962         username[8] = '\0';
1963
1964         read_lock_bh(&iucv_connection_rwlock);
1965         list_for_each_entry(cp, &iucv_connection_list, list) {
1966                 if (!strncmp(username, cp->userid, 9)) {
1967                         read_unlock_bh(&iucv_connection_rwlock);
1968                         IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
1969                                 "to %s already exists\n", username);
1970                         return -EEXIST;
1971                 }
1972         }
1973         read_unlock_bh(&iucv_connection_rwlock);
1974
1975         dev = netiucv_init_netdevice(username);
1976         if (!dev) {
1977                 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
1978                 return -ENODEV;
1979         }
1980
1981         rc = netiucv_register_device(dev);
1982         if (rc) {
1983                 IUCV_DBF_TEXT_(setup, 2,
1984                         "ret %d from netiucv_register_device\n", rc);
1985                 goto out_free_ndev;
1986         }
1987
1988         /* sysfs magic */
1989         priv = netdev_priv(dev);
1990         SET_NETDEV_DEV(dev, priv->dev);
1991
1992         rc = register_netdev(dev);
1993         if (rc)
1994                 goto out_unreg;
1995
1996         dev_info(priv->dev, "The IUCV interface to %s has been"
1997                 " established successfully\n", netiucv_printname(username));
1998
1999         return count;
2000
2001 out_unreg:
2002         netiucv_unregister_device(priv->dev);
2003 out_free_ndev:
2004         netiucv_free_netdevice(dev);
2005         return rc;
2006 }
2007
2008 static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2009
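/**
 * Write handler for the driver's "remove" attribute. The buffer is
 * interpreted as an interface name; the matching connection is looked
 * up and, provided the interface is neither up nor running, its
 * net_device and pseudo device are unregistered. Removing an active
 * interface fails with -EPERM, an unknown name with -EINVAL.
 */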
2010 static ssize_t remove_write (struct device_driver *drv,
2011                              const char *buf, size_t count)
2012 {
2013         struct iucv_connection *cp;
2014         struct net_device *ndev;
2015         struct netiucv_priv *priv;
2016         struct device *dev;
2017         char name[IFNAMSIZ];
2018         const char *p;
2019         int i;
2020
2021         IUCV_DBF_TEXT(trace, 3, __func__);
2022
2023         if (count >= IFNAMSIZ)
2024                 count = IFNAMSIZ - 1;
2025
2026         for (i = 0, p = buf; i < count && *p; i++, p++) {
2027                 if (*p == '\n' || *p == ' ')
2028                         /* trailing lf, grr */
2029                         break;
2030                 name[i] = *p;
2031         }
2032         name[i] = '\0';
2033
2034         read_lock_bh(&iucv_connection_rwlock);
2035         list_for_each_entry(cp, &iucv_connection_list, list) {
2036                 ndev = cp->netdev;
2037                 priv = netdev_priv(ndev);
2038                 dev = priv->dev;
2039                 if (strncmp(name, ndev->name, count))
2040                         continue;
2041                 read_unlock_bh(&iucv_connection_rwlock);
2042                 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2043                         dev_warn(dev, "The IUCV device is connected"
2044                                 " to %s and cannot be removed\n",
2045                                 priv->conn->userid);
2046                         IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2047                         return -EPERM;
2048                 }
2049                 unregister_netdev(ndev);
2050                 netiucv_unregister_device(dev);
2051                 return count;
2052         }
2053         read_unlock_bh(&iucv_connection_rwlock);
2054         IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2055         return -EINVAL;
2056 }
2057
2058 static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2059
2060 static struct attribute * netiucv_drv_attrs[] = {
2061         &driver_attr_connection.attr,
2062         &driver_attr_remove.attr,
2063         NULL,
2064 };
2065
2066 static struct attribute_group netiucv_drv_attr_group = {
2067         .attrs = netiucv_drv_attrs,
2068 };
2069
2070 static struct attribute_group *netiucv_drv_attr_groups[] = {
2071         &netiucv_drv_attr_group,
2072         NULL,
2073 };
2074
2075 static void netiucv_banner(void)
2076 {
2077         pr_info("driver initialized\n");
2078 }
2079
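/**
 * Module unload: tear down every remaining connection (net_device plus
 * pseudo device), then unregister the driver, the IUCV handler and the
 * debug facility views, i.e. the reverse order of netiucv_init().
 */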
2080 static void __exit netiucv_exit(void)
2081 {
2082         struct iucv_connection *cp;
2083         struct net_device *ndev;
2084         struct netiucv_priv *priv;
2085         struct device *dev;
2086
2087         IUCV_DBF_TEXT(trace, 3, __func__);
2088         while (!list_empty(&iucv_connection_list)) {
2089                 cp = list_entry(iucv_connection_list.next,
2090                                 struct iucv_connection, list);
2091                 ndev = cp->netdev;
2092                 priv = netdev_priv(ndev);
2093                 dev = priv->dev;
2094
2095                 unregister_netdev(ndev);
2096                 netiucv_unregister_device(dev);
2097         }
2098
2099         driver_unregister(&netiucv_driver);
2100         iucv_unregister(&netiucv_handler, 1);
2101         iucv_unregister_dbf_views();
2102
2103         pr_info("driver unloaded\n");
2104         return;
2105 }
2106
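/**
 * Module load: register the debug facility views and the IUCV handler
 * first, then register the driver with its attribute groups; each step
 * is rolled back if a later one fails.
 */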
2107 static int __init netiucv_init(void)
2108 {
2109         int rc;
2110
2111         rc = iucv_register_dbf_views();
2112         if (rc)
2113                 goto out;
2114         rc = iucv_register(&netiucv_handler, 1);
2115         if (rc)
2116                 goto out_dbf;
2117         IUCV_DBF_TEXT(trace, 3, __func__);
2118         netiucv_driver.groups = netiucv_drv_attr_groups;
2119         rc = driver_register(&netiucv_driver);
2120         if (rc) {
2121                 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2122                 goto out_iucv;
2123         }
2124
2125         netiucv_banner();
2126         return rc;
2127
2128 out_iucv:
2129         iucv_unregister(&netiucv_handler, 1);
2130 out_dbf:
2131         iucv_unregister_dbf_views();
2132 out:
2133         return rc;
2134 }
2135
2136 module_init(netiucv_init);
2137 module_exit(netiucv_exit);
2138 MODULE_LICENSE("GPL");