/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define CONFIG_IUCV_SOCK_DEBUG 1

#define IPRMDATA 0x80
#define VERSION "1.0"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
        .name           = "AF_IUCV",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct iucv_sock),
};

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
                                 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
        .lock = RW_LOCK_UNLOCKED,
        .autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
        .path_pending     = iucv_callback_connreq,
        .path_complete    = iucv_callback_connack,
        .path_severed     = iucv_callback_connrej,
        .message_pending  = iucv_callback_rx,
        .message_complete = iucv_callback_txdone
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
        memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
        memcpy(&dst[8], src, 8);
}

/* Timers */
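/*
 * iucv_sock_timeout() runs in timer (softirq) context, which is why the
 * socket is taken with bh_lock_sock() rather than lock_sock(). It marks
 * the socket with ETIMEDOUT, wakes any sleepers and drops the socket
 * reference that was taken when the timer was armed.
 */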
static void iucv_sock_timeout(unsigned long arg)
{
        struct sock *sk = (struct sock *)arg;

        bh_lock_sock(sk);
        sk->sk_err = ETIMEDOUT;
        sk->sk_state_change(sk);
        bh_unlock_sock(sk);

        iucv_sock_kill(sk);
        sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
        sk_stop_timer(sk, &sk->sk_timer);
}

static void iucv_sock_init_timer(struct sock *sk)
{
        init_timer(&sk->sk_timer);
        sk->sk_timer.function = iucv_sock_timeout;
        sk->sk_timer.data = (unsigned long)sk;
}

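/*
 * Look up a bound socket by its 8-byte IUCV application name.
 * Callers must hold iucv_sk_list.lock.
 */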
static struct sock *__iucv_get_sock_by_name(char *nm)
{
        struct sock *sk;
        struct hlist_node *node;

        sk_for_each(sk, node, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;

        return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
        struct sock *sk;

        /* Close non-accepted connections */
        while ((sk = iucv_accept_dequeue(parent, NULL))) {
                iucv_sock_close(sk);
                iucv_sock_kill(sk);
        }

        parent->sk_state = IUCV_CLOSED;
        sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
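/*
 * The socket is only destroyed here once it is both zapped and no
 * longer attached to a user-space socket (sk->sk_socket == NULL);
 * until then the call is a no-op and a later caller performs the
 * final sock_put().
 */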
static void iucv_sock_kill(struct sock *sk)
{
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                return;

        iucv_sock_unlink(&iucv_sk_list, sk);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
}

/* Close an IUCV socket */
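/*
 * Closing a connected socket first waits, bounded by SO_LINGER or
 * IUCV_DISCONN_TIMEOUT, for the send queue to drain (the txdone
 * callback moves the socket to IUCV_CLOSED once it is empty), then
 * severs the IUCV path with the connection's user data and purges
 * all remaining queues.
 */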
static void iucv_sock_close(struct sock *sk)
{
        unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        int err;
        unsigned long timeo;

        iucv_sock_clear_timer(sk);
        lock_sock(sk);

        switch (sk->sk_state) {
        case IUCV_LISTEN:
                iucv_sock_cleanup_listen(sk);
                break;

        case IUCV_CONNECTED:
        case IUCV_DISCONN:
                err = 0;

                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);

                if (!skb_queue_empty(&iucv->send_skb_q)) {
                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                timeo = sk->sk_lingertime;
                        else
                                timeo = IUCV_DISCONN_TIMEOUT;
                        err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
                }

                sk->sk_state = IUCV_CLOSED;
                sk->sk_state_change(sk);

                if (iucv->path) {
                        low_nmcpy(user_data, iucv->src_name);
                        high_nmcpy(user_data, iucv->dst_name);
                        ASCEBC(user_data, sizeof(user_data));
                        err = iucv_path_sever(iucv->path, user_data);
                        iucv_path_free(iucv->path);
                        iucv->path = NULL;
                }

                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);

                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);

                sock_set_flag(sk, SOCK_ZAPPED);
                break;

        default:
                sock_set_flag(sk, SOCK_ZAPPED);
                break;
        }

        release_sock(sk);
        iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
        if (parent)
                sk->sk_type = parent->sk_type;
}

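/*
 * Allocate and initialize a new IUCV sock. sk_allocation is set to
 * GFP_DMA, presumably because IUCV data buffers must be 31-bit
 * addressable (below 2 GB), which on s390 corresponds to ZONE_DMA.
 */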
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
        struct sock *sk;

        sk = sk_alloc(PF_IUCV, prio, &iucv_proto, 1);
        if (!sk)
                return NULL;

        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
        skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
        skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
        iucv_sk(sk)->send_tag = 0;

        sk->sk_destruct = iucv_sock_destruct;
        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
        sk->sk_allocation = GFP_DMA;

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = proto;
        sk->sk_state    = IUCV_OPEN;

        iucv_sock_init_timer(sk);

        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct socket *sock, int protocol)
{
        struct sock *sk;

        if (sock->type != SOCK_STREAM)
                return -ESOCKTNOSUPPORT;

        sock->state = SS_UNCONNECTED;
        sock->ops = &iucv_sock_ops;

        sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
        if (!sk)
                return -ENOMEM;

        iucv_sock_init(sk, NULL);

        return 0;
}

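/* Add and remove sockets on the global socket list (bh-safe writers). */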
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_add_node(sk, &l->head);
        write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l->lock);
}

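/*
 * Accept queue handling: enqueuing takes a reference on the child and
 * counts it against the parent's accept backlog; unlinking reverses
 * both.
 */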
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
        sock_hold(sk);
        list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
        iucv_sk(sk)->parent = parent;
        parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
        list_del_init(&iucv_sk(sk)->accept_q);
        iucv_sk(sk)->parent->sk_ack_backlog--;
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
}

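/*
 * Scan the parent's accept queue: children that closed in the meantime
 * are dropped, and the first connected (or severed) child is handed to
 * the caller, grafted onto newsock if one was supplied.
 */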
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
                lock_sock(sk);

                if (sk->sk_state == IUCV_CLOSED) {
                        release_sock(sk);
                        iucv_accept_unlink(sk);
                        continue;
                }

                if (sk->sk_state == IUCV_CONNECTED ||
                    sk->sk_state == IUCV_SEVERED ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);

                        if (sk->sk_state == IUCV_SEVERED)
                                sk->sk_state = IUCV_DISCONN;

                        release_sock(sk);
                        return sk;
                }

                release_sock(sk);
        }
        return NULL;
}

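/*
 * Sleep until sk->sk_state reaches one of the two given states, a
 * timeout or signal occurs, or a socket error is raised. The socket
 * lock is dropped while sleeping and re-taken afterwards.
 */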
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
                         unsigned long timeo)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        add_wait_queue(sk->sk_sleep, &wait);
        while (sk->sk_state != state && sk->sk_state != state2) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);

                err = sock_error(sk);
                if (err)
                        break;
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);
        return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        int err;

        /* Verify the input sockaddr */
        if (!addr || addr_len < sizeof(struct sockaddr_iucv) ||
            addr->sa_family != AF_IUCV)
                return -EINVAL;

        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
                goto done;
        }

        write_lock_bh(&iucv_sk_list.lock);

        iucv = iucv_sk(sk);
        if (__iucv_get_sock_by_name(sa->siucv_name)) {
                err = -EADDRINUSE;
                goto done_unlock;
        }
        if (iucv->path) {
                err = 0;
                goto done_unlock;
        }

        /* Bind the socket */
        memcpy(iucv->src_name, sa->siucv_name, 8);

        /* Copy the user id */
        memcpy(iucv->src_user_id, iucv_userid, 8);
        sk->sk_state = IUCV_BOUND;
        err = 0;

done_unlock:
        /* Release the socket list lock */
        write_unlock_bh(&iucv_sk_list.lock);
done:
        release_sock(sk);
        return err;
}

/* Automatically bind an unbound socket */
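/*
 * The z/VM user ID of the guest is obtained with the CP command
 * "QUERY USERID"; the application name is generated from a global
 * counter and re-rolled until it is unique in the socket list.
 */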
static int iucv_sock_autobind(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        char query_buffer[80];
        char name[12];
        int err = 0;

        /* Set the userid and name */
        cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
        if (unlikely(err))
                return -EPROTO;

        memcpy(iucv->src_user_id, query_buffer, 8);

        write_lock_bh(&iucv_sk_list.lock);

        sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
        while (__iucv_get_sock_by_name(name)) {
                sprintf(name, "%08x",
                        atomic_inc_return(&iucv_sk_list.autobind_name));
        }

        write_unlock_bh(&iucv_sk_list.lock);

        memcpy(&iucv->src_name, name, 8);

        return err;
}

/* Connect an unconnected socket */
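/*
 * Connecting autobinds the socket if necessary, builds the 16 bytes of
 * IUCV user data from the destination and source application names
 * (converted to EBCDIC), allocates and connects a path, and waits
 * until the path_complete callback moves the socket to IUCV_CONNECTED.
 */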
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                             int alen, int flags)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        unsigned char user_data[16];
        int err;

        if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
                return -EINVAL;

        if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
                return -EBADFD;

        if (sk->sk_type != SOCK_STREAM)
                return -EINVAL;

        iucv = iucv_sk(sk);

        if (sk->sk_state == IUCV_OPEN) {
                err = iucv_sock_autobind(sk);
                if (unlikely(err))
                        return err;
        }

        lock_sock(sk);

        /* Set the destination information */
        memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
        memcpy(iucv->dst_name, sa->siucv_name, 8);

        high_nmcpy(user_data, sa->siucv_name);
        low_nmcpy(user_data, iucv->src_name);
        ASCEBC(user_data, sizeof(user_data));

        /* Create path. */
        iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
                                     IPRMDATA, GFP_KERNEL);
        if (!iucv->path) {
                err = -ENOMEM;
                goto done;
        }
        err = iucv_path_connect(iucv->path, &af_iucv_handler,
                                sa->siucv_user_id, NULL, user_data, sk);
        if (err) {
                iucv_path_free(iucv->path);
                iucv->path = NULL;
                err = -ECONNREFUSED;
                goto done;
        }

        if (sk->sk_state != IUCV_CONNECTED) {
                err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
                                sock_sndtimeo(sk, flags & O_NONBLOCK));
        }

        if (sk->sk_state == IUCV_DISCONN) {
                release_sock(sk);
                return -ECONNREFUSED;
        }
done:
        release_sock(sk);
        return err;
}


/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
                goto done;

        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = IUCV_LISTEN;
        err = 0;

done:
        release_sock(sk);
        return err;
}

/* Accept a pending connection */
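/*
 * Blocks (subject to O_NONBLOCK and the receive timeout) until
 * iucv_accept_dequeue() yields a child socket that the path_pending
 * callback queued and marked IUCV_CONNECTED.
 */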
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
                            int flags)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;

        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (sk->sk_state != IUCV_LISTEN) {
                err = -EBADFD;
                goto done;
        }

        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

        /* Wait for an incoming connection */
        add_wait_queue_exclusive(sk->sk_sleep, &wait);
        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

                if (sk->sk_state != IUCV_LISTEN) {
                        err = -EBADFD;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);

        if (err)
                goto done;

        newsock->state = SS_CONNECTED;

done:
        release_sock(sk);
        return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
                             int *len, int peer)
{
        struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;

        addr->sa_family = AF_IUCV;
        *len = sizeof(struct sockaddr_iucv);

        if (peer) {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
                memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
        } else {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
                memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
        }
        memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
        memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
        memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

        return 0;
}

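/*
 * Transmit path: the entire message is copied into a single skb, the
 * send tag is stored in skb->cb and the skb is kept on send_skb_q
 * until the message_complete callback confirms delivery.
 */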
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct iucv_message txmsg;
        int err;

        err = sock_error(sk);
        if (err)
                return err;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        lock_sock(sk);

        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
                goto out;
        }

        if (sk->sk_state == IUCV_CONNECTED) {
                if (!(skb = sock_alloc_send_skb(sk, len,
                                                msg->msg_flags & MSG_DONTWAIT,
                                                &err)))
                        goto out;

                if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                        err = -EFAULT;
                        goto fail;
                }

                txmsg.class = 0;
                txmsg.tag = iucv->send_tag++;
                memcpy(skb->cb, &txmsg.tag, 4);
                skb_queue_tail(&iucv->send_skb_q, skb);
                err = iucv_message_send(iucv->path, &txmsg, 0, 0,
                                        (void *) skb->data, skb->len);
                if (err) {
                        if (err == 3)
                                printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
                        skb_unlink(skb, &iucv->send_skb_q);
                        err = -EPIPE;
                        goto fail;
                }

        } else {
                err = -ENOTCONN;
                goto out;
        }

        release_sock(sk);
        return len;

fail:
        kfree_skb(skb);
out:
        release_sock(sk);
        return err;
}

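/*
 * Receive path: one skb is consumed per call; a partially read skb is
 * pushed back onto the receive queue, and the receive queue is
 * refilled from backlog_skb_q as space becomes available.
 */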
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int target, copied = 0;
        struct sk_buff *skb, *rskb, *cskb;
        int err = 0;

        if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
            skb_queue_empty(&sk->sk_receive_queue))
                return 0;

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
        }

        copied = min_t(unsigned int, skb->len, len);

        cskb = skb;
        if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
                skb_queue_head(&sk->sk_receive_queue, skb);
                if (copied == 0)
                        return -EFAULT;
                goto done;
        }

        len -= copied;

        /* Mark read part of skb as used */
        if (!(flags & MSG_PEEK)) {
                skb_pull(skb, copied);

                if (skb->len) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        goto done;
                }

                kfree_skb(skb);

                /* Queue backlog skbs */
                rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
                while (rskb) {
                        if (sock_queue_rcv_skb(sk, rskb)) {
                                skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
                                               rskb);
                                break;
                        } else {
                                rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
                        }
                }
        } else
                skb_queue_head(&sk->sk_receive_queue, skb);

done:
        return err ? : copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;

                if (sk->sk_state == IUCV_CONNECTED)
                        return POLLIN | POLLRDNORM;
        }

        return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                            poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask = 0;

        poll_wait(file, sk->sk_sleep, wait);

        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);

        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;

        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        if (sk->sk_state == IUCV_CLOSED)
                mask |= POLLHUP;

        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
                mask |= POLLIN;

        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}

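/*
 * The shutdown 'how' argument (SHUT_RD=0, SHUT_WR=1, SHUT_RDWR=2) is
 * incremented to map onto RCV_SHUTDOWN (1), SEND_SHUTDOWN (2) and
 * SHUTDOWN_MASK (3). Shutting down the send side transmits an 8-byte
 * in-parameter-list (IPRMDATA) message, which the receive callback
 * turns into a zero-length skb so the peer sees end-of-stream.
 */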
static int iucv_sock_shutdown(struct socket *sock, int how)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_message txmsg;
        int err = 0;
        u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

        how++;

        if ((how & ~SHUTDOWN_MASK) || !how)
                return -EINVAL;

        lock_sock(sk);
        switch (sk->sk_state) {
        case IUCV_CLOSED:
                err = -ENOTCONN;
                goto fail;

        default:
                sk->sk_shutdown |= how;
                break;
        }

        if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
                txmsg.class = 0;
                txmsg.tag = 0;
                err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
                                        (void *) prmmsg, 8);
                if (err) {
                        switch (err) {
                        case 1:
                                err = -ENOTCONN;
                                break;
                        case 2:
                                err = -ECONNRESET;
                                break;
                        default:
                                err = -ENOTCONN;
                                break;
                        }
                }
        }

        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
                err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
                if (err)
                        err = -ENOTCONN;

                skb_queue_purge(&sk->sk_receive_queue);
        }

        /* Wake up anyone sleeping in poll */
        sk->sk_state_change(sk);

fail:
        release_sock(sk);
        return err;
}

static int iucv_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        int err = 0;

        if (!sk)
                return 0;

        iucv_sock_close(sk);

        /* Unregister with IUCV base support */
        if (iucv_sk(sk)->path) {
                iucv_path_sever(iucv_sk(sk)->path, NULL);
                iucv_path_free(iucv_sk(sk)->path);
                iucv_sk(sk)->path = NULL;
        }

        sock_orphan(sk);
        iucv_sock_kill(sk);
        return err;
}

/* Callback wrappers - called from iucv base support */
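/*
 * These callbacks run out of the IUCV base code's interrupt handling
 * (a tasklet), i.e. in softirq context: sockets are taken with
 * bh_lock_sock() and all allocations use GFP_ATOMIC.
 */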
static int iucv_callback_connreq(struct iucv_path *path,
                                 u8 ipvmid[8], u8 ipuser[16])
{
        unsigned char user_data[16];
        unsigned char nuser_data[16];
        unsigned char src_name[8];
        struct hlist_node *node;
        struct sock *sk, *nsk;
        struct iucv_sock *iucv, *niucv;
        int err;

        memcpy(src_name, ipuser, 8);
        EBCASC(src_name, 8);
        /* Find out if this path belongs to af_iucv. */
        read_lock(&iucv_sk_list.lock);
        iucv = NULL;
        sk_for_each(sk, node, &iucv_sk_list.head)
                if (sk->sk_state == IUCV_LISTEN &&
                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
                        /*
                         * Found a listening socket with
                         * src_name == ipuser[0-7].
                         */
                        iucv = iucv_sk(sk);
                        break;
                }
        read_unlock(&iucv_sk_list.lock);
        if (!iucv)
                /* No socket found, not one of our paths. */
                return -EINVAL;

        bh_lock_sock(sk);

        /* Check if parent socket is listening */
        low_nmcpy(user_data, iucv->src_name);
        high_nmcpy(user_data, iucv->dst_name);
        ASCEBC(user_data, sizeof(user_data));
        if (sk->sk_state != IUCV_LISTEN) {
                err = iucv_path_sever(path, user_data);
                goto fail;
        }

        /* Check for backlog size */
        if (sk_acceptq_is_full(sk)) {
                err = iucv_path_sever(path, user_data);
                goto fail;
        }

        /* Create the new socket */
        nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
        if (!nsk) {
                err = iucv_path_sever(path, user_data);
                goto fail;
        }

        niucv = iucv_sk(nsk);
        iucv_sock_init(nsk, sk);

        /* Set the new iucv_sock */
        memcpy(niucv->dst_name, ipuser + 8, 8);
        EBCASC(niucv->dst_name, 8);
        memcpy(niucv->dst_user_id, ipvmid, 8);
        memcpy(niucv->src_name, iucv->src_name, 8);
        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
        niucv->path = path;

        /* Call iucv_accept */
        high_nmcpy(nuser_data, ipuser + 8);
        memcpy(nuser_data + 8, niucv->src_name, 8);
        ASCEBC(nuser_data + 8, 8);

        path->msglim = IUCV_QUEUELEN_DEFAULT;
        err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
        if (err) {
                err = iucv_path_sever(path, user_data);
                goto fail;
        }

        iucv_accept_enqueue(sk, nsk);

        /* Wake up accept */
        nsk->sk_state = IUCV_CONNECTED;
        sk->sk_data_ready(sk, 1);
        err = 0;
fail:
        bh_unlock_sock(sk);
        return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        sk->sk_state = IUCV_CONNECTED;
        sk->sk_state_change(sk);
}

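/*
 * Split a received message that exceeds a quarter of sk_rcvbuf into
 * smaller skbs so that a single large IUCV message cannot swallow the
 * socket's receive budget in one piece.
 */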
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
                             struct sk_buff_head *fragmented_skb_q)
{
        int dataleft, size, copied = 0;
        struct sk_buff *nskb;

        dataleft = len;
        while (dataleft) {
                if (dataleft >= sk->sk_rcvbuf / 4)
                        size = sk->sk_rcvbuf / 4;
                else
                        size = dataleft;

                nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
                if (!nskb)
                        return -ENOMEM;

                /* skb_put() keeps nskb->len and nskb->tail consistent */
                memcpy(skb_put(nskb, size), skb->data + copied, size);
                copied += size;
                dataleft -= size;

                skb_reset_transport_header(nskb);
                skb_reset_network_header(nskb);

                skb_queue_tail(fragmented_skb_q, nskb);
        }

        return 0;
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb, *fskb;
        struct sk_buff_head fragmented_skb_q;
        int rc;

        skb_queue_head_init(&fragmented_skb_q);

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                return;

        skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
        if (!skb) {
                iucv_path_sever(path, NULL);
                return;
        }

        if (msg->flags & IPRMDATA) {
                skb->data = NULL;
                skb->len = 0;
        } else {
                rc = iucv_message_receive(path, msg, 0, skb->data,
                                          msg->length, NULL);
                if (rc) {
                        kfree_skb(skb);
                        return;
                }
                if (skb->truesize >= sk->sk_rcvbuf / 4) {
                        rc = iucv_fragment_skb(sk, skb, msg->length,
                                               &fragmented_skb_q);
                        kfree_skb(skb);
                        skb = NULL;
                        if (rc) {
                                iucv_path_sever(path, NULL);
                                return;
                        }
                } else {
                        skb_reset_transport_header(skb);
                        skb_reset_network_header(skb);
                        skb->len = msg->length;
                }
        }
        /* Queue the fragmented skb */
        fskb = skb_dequeue(&fragmented_skb_q);
        while (fskb) {
                if (!skb_queue_empty(&iucv->backlog_skb_q))
                        skb_queue_tail(&iucv->backlog_skb_q, fskb);
                else if (sock_queue_rcv_skb(sk, fskb))
                        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb);
                fskb = skb_dequeue(&fragmented_skb_q);
        }

        /* Queue the original skb if it exists (was not fragmented) */
        if (skb) {
                if (!skb_queue_empty(&iucv->backlog_skb_q))
                        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
                else if (sock_queue_rcv_skb(sk, skb))
                        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
        }
}

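/*
 * message_complete callback: find the skb on send_skb_q whose tag
 * (saved in skb->cb at send time) matches the completed message, free
 * it, and finish a pending close once the send queue drains. If no
 * tag matches, the completion is ignored.
 */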
static void iucv_callback_txdone(struct iucv_path *path,
                                 struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct sk_buff *this = NULL;
        struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
        struct sk_buff *list_skb;
        unsigned long flags;

        if (!skb_queue_empty(list)) {
                spin_lock_irqsave(&list->lock, flags);

                /* Only unlink an skb whose tag actually matches; the old
                 * do/while freed the last queue entry on a miss. */
                list_skb = list->next;
                while (list_skb != (struct sk_buff *) list) {
                        if (!memcmp(&msg->tag, list_skb->cb, 4)) {
                                this = list_skb;
                                break;
                        }
                        list_skb = list_skb->next;
                }
                if (this)
                        __skb_unlink(this, list);

                spin_unlock_irqrestore(&list->lock, flags);

                kfree_skb(this);
        }

        if (sk->sk_state == IUCV_CLOSING) {
                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
                        sk->sk_state = IUCV_CLOSED;
                        sk->sk_state_change(sk);
                }
        }
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        if (!list_empty(&iucv_sk(sk)->accept_q))
                sk->sk_state = IUCV_SEVERED;
        else
                sk->sk_state = IUCV_DISCONN;

        sk->sk_state_change(sk);
}

static struct proto_ops iucv_sock_ops = {
        .family         = PF_IUCV,
        .owner          = THIS_MODULE,
        .release        = iucv_sock_release,
        .bind           = iucv_sock_bind,
        .connect        = iucv_sock_connect,
        .listen         = iucv_sock_listen,
        .accept         = iucv_sock_accept,
        .getname        = iucv_sock_getname,
        .sendmsg        = iucv_sock_sendmsg,
        .recvmsg        = iucv_sock_recvmsg,
        .poll           = iucv_sock_poll,
        .ioctl          = sock_no_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
        .shutdown       = iucv_sock_shutdown,
        .setsockopt     = sock_no_setsockopt,
        .getsockopt     = sock_no_getsockopt
};

static struct net_proto_family iucv_sock_family_ops = {
        .family = AF_IUCV,
        .owner  = THIS_MODULE,
        .create = iucv_sock_create,
};

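/*
 * AF_IUCV is only available on Linux guests running under z/VM, since
 * IUCV is a z/VM communication service; initialization therefore bails
 * out early when MACHINE_IS_VM is false.
 */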
static int __init afiucv_init(void)
{
        int err;

        if (!MACHINE_IS_VM) {
                printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
                err = -EPROTONOSUPPORT;
                goto out;
        }
        cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
        if (unlikely(err)) {
                printk(KERN_ERR "AF_IUCV needs the VM userid\n");
                err = -EPROTONOSUPPORT;
                goto out;
        }

        err = iucv_register(&af_iucv_handler, 0);
        if (err)
                goto out;
        err = proto_register(&iucv_proto, 0);
        if (err)
                goto out_iucv;
        err = sock_register(&iucv_sock_family_ops);
        if (err)
                goto out_proto;
        printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
        return 0;

out_proto:
        proto_unregister(&iucv_proto);
out_iucv:
        iucv_unregister(&af_iucv_handler, 0);
out:
        return err;
}

static void __exit afiucv_exit(void)
{
        sock_unregister(PF_IUCV);
        proto_unregister(&iucv_proto);
        iucv_unregister(&af_iucv_handler, 0);

        printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);