/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define CONFIG_IUCV_SOCK_DEBUG 1

#define IPRMDATA 0x80
#define VERSION "1.0"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
        .name           = "AF_IUCV",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct iucv_sock),
};

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
                                 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);

/* iucv_sock_cleanup_listen() calls these before they are defined. */
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

static struct iucv_sock_list iucv_sk_list = {
        .lock = RW_LOCK_UNLOCKED,
        .autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
        .path_pending     = iucv_callback_connreq,
        .path_complete    = iucv_callback_connack,
        .path_severed     = iucv_callback_connrej,
        .message_pending  = iucv_callback_rx,
        .message_complete = iucv_callback_txdone
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
        memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
        memcpy(&dst[8], src, 8);
}

/* Timers */
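/* Connection timeout handler: flag the socket with ETIMEDOUT, wake up
 * anyone waiting on a state change, and drop the timer's reference. */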
static void iucv_sock_timeout(unsigned long arg)
{
        struct sock *sk = (struct sock *)arg;

        bh_lock_sock(sk);
        sk->sk_err = ETIMEDOUT;
        sk->sk_state_change(sk);
        bh_unlock_sock(sk);

        iucv_sock_kill(sk);
        sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
        sk_stop_timer(sk, &sk->sk_timer);
}

static void iucv_sock_init_timer(struct sock *sk)
{
        init_timer(&sk->sk_timer);
        sk->sk_timer.function = iucv_sock_timeout;
        sk->sk_timer.data = (unsigned long)sk;
}

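/* Look up a bound socket by its 8-byte application name.  The caller
 * must hold iucv_sk_list.lock. */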
static struct sock *__iucv_get_sock_by_name(char *nm)
{
        struct sock *sk;
        struct hlist_node *node;

        sk_for_each(sk, node, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;

        return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
        struct sock *sk;

        /* Close non-accepted connections */
        while ((sk = iucv_accept_dequeue(parent, NULL))) {
                iucv_sock_close(sk);
                iucv_sock_kill(sk);
        }

        parent->sk_state = IUCV_CLOSED;
        sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                return;

        iucv_sock_unlink(&iucv_sk_list, sk);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
        unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        int err;

        iucv_sock_clear_timer(sk);
        lock_sock(sk);

        switch (sk->sk_state) {
        case IUCV_LISTEN:
                iucv_sock_cleanup_listen(sk);
                break;

        case IUCV_CONNECTED:
        case IUCV_DISCONN:
                err = 0;
                if (iucv->path) {
                        low_nmcpy(user_data, iucv->src_name);
                        high_nmcpy(user_data, iucv->dst_name);
                        ASCEBC(user_data, sizeof(user_data));
                        err = iucv_path_sever(iucv->path, user_data);
                        iucv_path_free(iucv->path);
                        iucv->path = NULL;
                }

                sk->sk_state = IUCV_CLOSED;
                sk->sk_state_change(sk);
                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);

                skb_queue_purge(&iucv->send_skb_q);

                sock_set_flag(sk, SOCK_ZAPPED);
                break;

        default:
                sock_set_flag(sk, SOCK_ZAPPED);
                break;
        }

        release_sock(sk);
        iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
        if (parent)
                sk->sk_type = parent->sk_type;
}

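/* Allocate and initialize a new IUCV sock and link it into
 * iucv_sk_list.  sk_allocation is GFP_DMA because IUCV transfers data
 * only to and from buffers below 2 GB (the s390 DMA zone). */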
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
        struct sock *sk;

        sk = sk_alloc(PF_IUCV, prio, &iucv_proto, 1);
        if (!sk)
                return NULL;

        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
        skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
        iucv_sk(sk)->send_tag = 0;

        sk->sk_destruct = iucv_sock_destruct;
        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
        sk->sk_allocation = GFP_DMA;

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = proto;
        sk->sk_state    = IUCV_OPEN;

        iucv_sock_init_timer(sk);

        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct socket *sock, int protocol)
{
        struct sock *sk;

        if (sock->type != SOCK_STREAM)
                return -ESOCKTNOSUPPORT;

        sock->state = SS_UNCONNECTED;
        sock->ops = &iucv_sock_ops;

        sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
        if (!sk)
                return -ENOMEM;

        iucv_sock_init(sk, NULL);

        return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_add_node(sk, &l->head);
        write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l->lock);
}

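/* Queue a child socket on its parent's accept queue.  sock_hold()
 * keeps the child referenced until iucv_accept_unlink() drops it. */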
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
        sock_hold(sk);
        list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
        iucv_sk(sk)->parent = parent;
        parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
        list_del_init(&iucv_sk(sk)->accept_q);
        iucv_sk(sk)->parent->sk_ack_backlog--;
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
                lock_sock(sk);

                if (sk->sk_state == IUCV_CLOSED) {
                        release_sock(sk);
                        iucv_accept_unlink(sk);
                        continue;
                }

                if (sk->sk_state == IUCV_CONNECTED ||
                    sk->sk_state == IUCV_SEVERED ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);

                        if (sk->sk_state == IUCV_SEVERED)
                                sk->sk_state = IUCV_DISCONN;

                        release_sock(sk);
                        return sk;
                }

                release_sock(sk);
        }
        return NULL;
}

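/* Sleep until sk->sk_state reaches state or state2.  Returns 0 on
 * success, -EAGAIN when the timeout expires, the signal errno if a
 * signal arrives, or any pending socket error. */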
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
                         unsigned long timeo)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        add_wait_queue(sk->sk_sleep, &wait);
        while (sk->sk_state != state && sk->sk_state != state2) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);

                err = sock_error(sk);
                if (err)
                        break;
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);
        return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        int err;

        /* Verify the input sockaddr */
        if (!addr || addr->sa_family != AF_IUCV)
                return -EINVAL;

        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
                goto done;
        }

        write_lock_bh(&iucv_sk_list.lock);

        iucv = iucv_sk(sk);
        if (__iucv_get_sock_by_name(sa->siucv_name)) {
                err = -EADDRINUSE;
                goto done_unlock;
        }
        if (iucv->path) {
                err = 0;
                goto done_unlock;
        }

        /* Bind the socket */
        memcpy(iucv->src_name, sa->siucv_name, 8);

        /* Copy the user id */
        memcpy(iucv->src_user_id, iucv_userid, 8);
        sk->sk_state = IUCV_BOUND;
        err = 0;

done_unlock:
        /* Release the socket list lock */
        write_unlock_bh(&iucv_sk_list.lock);
done:
        release_sock(sk);
        return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        char query_buffer[80];
        char name[12];
        int err = 0;

        /* Set the userid and name */
        cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
        if (unlikely(err))
                return -EPROTO;

        memcpy(iucv->src_user_id, query_buffer, 8);

        write_lock_bh(&iucv_sk_list.lock);

        sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
        while (__iucv_get_sock_by_name(name)) {
                sprintf(name, "%08x",
                        atomic_inc_return(&iucv_sk_list.autobind_name));
        }

        write_unlock_bh(&iucv_sk_list.lock);

        memcpy(&iucv->src_name, name, 8);

        return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                             int alen, int flags)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        unsigned char user_data[16];
        int err;

        if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
                return -EINVAL;

        if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
                return -EBADFD;

        if (sk->sk_type != SOCK_STREAM)
                return -EINVAL;

        iucv = iucv_sk(sk);

        if (sk->sk_state == IUCV_OPEN) {
                err = iucv_sock_autobind(sk);
                if (unlikely(err))
                        return err;
        }

        lock_sock(sk);

        /* Set the destination information */
        memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
        memcpy(iucv->dst_name, sa->siucv_name, 8);

        high_nmcpy(user_data, sa->siucv_name);
        low_nmcpy(user_data, iucv->src_name);
        ASCEBC(user_data, sizeof(user_data));

        /* Create path. */
        iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
                                     IPRMDATA, GFP_KERNEL);
        if (!iucv->path) {
                err = -ENOMEM;
                goto done;
        }
        err = iucv_path_connect(iucv->path, &af_iucv_handler,
                                sa->siucv_user_id, NULL, user_data, sk);
        if (err) {
                iucv_path_free(iucv->path);
                iucv->path = NULL;
                err = -ECONNREFUSED;
                goto done;
        }

        if (sk->sk_state != IUCV_CONNECTED) {
                err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
                                           sock_sndtimeo(sk, flags & O_NONBLOCK));
        }

        if (sk->sk_state == IUCV_DISCONN) {
                release_sock(sk);
                return -ECONNREFUSED;
        }
done:
        release_sock(sk);
        return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
                goto done;

        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = IUCV_LISTEN;
        err = 0;

done:
        release_sock(sk);
        return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
                            int flags)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;

        lock_sock(sk);

        if (sk->sk_state != IUCV_LISTEN) {
                err = -EBADFD;
                goto done;
        }

        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

        /* Wait for an incoming connection */
        add_wait_queue_exclusive(sk->sk_sleep, &wait);
        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);

                if (sk->sk_state != IUCV_LISTEN) {
                        err = -EBADFD;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);

        if (err)
                goto done;

        newsock->state = SS_CONNECTED;

done:
        release_sock(sk);
        return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
                             int *len, int peer)
{
        struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;

        addr->sa_family = AF_IUCV;
        *len = sizeof(struct sockaddr_iucv);

        if (peer) {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
                memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
        } else {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
                memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
        }
        memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
        memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
        memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

        return 0;
}

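/* Send one IUCV message per sendmsg() call.  The skb is queued on
 * send_skb_q with the message tag stored in skb->cb, so that the
 * transmit-completion callback can find and free it later. */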
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct iucv_message txmsg;
        int err;

        err = sock_error(sk);
        if (err)
                return err;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        lock_sock(sk);

        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
                goto out;
        }

        if (sk->sk_state == IUCV_CONNECTED) {
                skb = sock_alloc_send_skb(sk, len,
                                          msg->msg_flags & MSG_DONTWAIT, &err);
                if (!skb)
                        /* Must not return with the socket still locked. */
                        goto out;

                if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                        err = -EFAULT;
                        goto fail;
                }

                txmsg.class = 0;
                txmsg.tag = iucv->send_tag++;
                memcpy(skb->cb, &txmsg.tag, 4);
                skb_queue_tail(&iucv->send_skb_q, skb);
                err = iucv_message_send(iucv->path, &txmsg, 0, 0,
                                        (void *) skb->data, skb->len);
                if (err) {
                        if (err == 3)
                                printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
                        skb_unlink(skb, &iucv->send_skb_q);
                        err = -EPIPE;
                        goto fail;
                }

        } else {
                err = -ENOTCONN;
                goto out;
        }

        release_sock(sk);
        return len;

fail:
        kfree_skb(skb);
out:
        release_sock(sk);
        return err;
}

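/* Receive data: datagram-style dequeue, but with stream semantics for
 * partial reads - the unread remainder of the skb is pushed back onto
 * the head of the receive queue. */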
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        int target, copied = 0;
        struct sk_buff *skb;
        int err = 0;

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
        }

        copied = min_t(unsigned int, skb->len, len);

        if (memcpy_toiovec(msg->msg_iov, skb->data, copied)) {
                skb_queue_head(&sk->sk_receive_queue, skb);
                if (copied == 0)
                        return -EFAULT;
        }

        len -= copied;

        /* Mark read part of skb as used */
        if (!(flags & MSG_PEEK)) {
                skb_pull(skb, copied);

                if (skb->len) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        goto done;
                }

                kfree_skb(skb);
        } else
                skb_queue_head(&sk->sk_receive_queue, skb);

done:
        return err ? : copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;

                if (sk->sk_state == IUCV_CONNECTED)
                        return POLLIN | POLLRDNORM;
        }

        return 0;
}

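/* poll() support: a listening socket is readable when a connected
 * child is waiting on the accept queue; otherwise report the usual
 * error, hangup, readable and writable conditions. */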
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                            poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask = 0;

        poll_wait(file, sk->sk_sleep, wait);

        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);

        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;

        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        if (sk->sk_state == IUCV_CLOSED)
                mask |= POLLHUP;

        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}

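/* shutdown(): incrementing how maps SHUT_RD/SHUT_WR/SHUT_RDWR (0/1/2)
 * onto the RCV_SHUTDOWN/SEND_SHUTDOWN bitmask.  A send shutdown is
 * signalled to the peer with an in-parameter-list (IPRMDATA) message;
 * a receive shutdown quiesces the IUCV path. */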
static int iucv_sock_shutdown(struct socket *sock, int how)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_message txmsg;
        int err = 0;
        u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

        how++;

        if ((how & ~SHUTDOWN_MASK) || !how)
                return -EINVAL;

        lock_sock(sk);
        switch (sk->sk_state) {
        case IUCV_CLOSED:
                err = -ENOTCONN;
                goto fail;

        default:
                sk->sk_shutdown |= how;
                break;
        }

        if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
                txmsg.class = 0;
                txmsg.tag = 0;
                err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
                                        (void *) prmmsg, 8);
                if (err) {
                        switch (err) {
                        case 1:
                                err = -ENOTCONN;
                                break;
                        case 2:
                                err = -ECONNRESET;
                                break;
                        default:
                                err = -ENOTCONN;
                                break;
                        }
                }
        }

        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
                err = iucv_path_quiesce(iucv->path, NULL);
                if (err)
                        err = -ENOTCONN;

                skb_queue_purge(&sk->sk_receive_queue);
        }

        /* Wake up anyone sleeping in poll */
        sk->sk_state_change(sk);

fail:
        release_sock(sk);
        return err;
}

static int iucv_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        int err = 0;

        if (!sk)
                return 0;

        iucv_sock_close(sk);

        /* Unregister with IUCV base support */
        if (iucv_sk(sk)->path) {
                iucv_path_sever(iucv_sk(sk)->path, NULL);
                iucv_path_free(iucv_sk(sk)->path);
                iucv_sk(sk)->path = NULL;
        }

        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
                lock_sock(sk);
                err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0,
                                           sk->sk_lingertime);
                release_sock(sk);
        }

        sock_orphan(sk);
        iucv_sock_kill(sk);
        return err;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
                                 u8 ipvmid[8], u8 ipuser[16])
{
        unsigned char user_data[16];
        unsigned char nuser_data[16];
        unsigned char src_name[8];
        struct hlist_node *node;
        struct sock *sk, *nsk;
        struct iucv_sock *iucv, *niucv;
        int err;

        memcpy(src_name, ipuser, 8);
        EBCASC(src_name, 8);
        /* Find out if this path belongs to af_iucv. */
        read_lock(&iucv_sk_list.lock);
        iucv = NULL;
        sk_for_each(sk, node, &iucv_sk_list.head)
                if (sk->sk_state == IUCV_LISTEN &&
                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
                        /*
                         * Found a listening socket with
                         * src_name == ipuser[0-7].
                         */
                        iucv = iucv_sk(sk);
                        break;
                }
        read_unlock(&iucv_sk_list.lock);
        if (!iucv)
                /* No socket found, not one of our paths. */
                return -EINVAL;

        bh_lock_sock(sk);

        /* Check if parent socket is listening */
        low_nmcpy(user_data, iucv->src_name);
        high_nmcpy(user_data, iucv->dst_name);
        ASCEBC(user_data, sizeof(user_data));
        if (sk->sk_state != IUCV_LISTEN) {
                err = iucv_path_sever(path, user_data);
                goto fail;
        }

        /* Check for backlog size */
        if (sk_acceptq_is_full(sk)) {
                err = iucv_path_sever(path, user_data);
                goto fail;
        }

        /* Create the new socket */
        nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
        if (!nsk) {
                err = iucv_path_sever(path, user_data);
                goto fail;
        }

        niucv = iucv_sk(nsk);
        iucv_sock_init(nsk, sk);

        /* Set the new iucv_sock */
        memcpy(niucv->dst_name, ipuser + 8, 8);
        EBCASC(niucv->dst_name, 8);
        memcpy(niucv->dst_user_id, ipvmid, 8);
        memcpy(niucv->src_name, iucv->src_name, 8);
        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
        niucv->path = path;

        /* Call iucv_accept */
        high_nmcpy(nuser_data, ipuser + 8);
        memcpy(nuser_data + 8, niucv->src_name, 8);
        ASCEBC(nuser_data + 8, 8);

        path->msglim = IUCV_QUEUELEN_DEFAULT;
        err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
        if (err) {
                err = iucv_path_sever(path, user_data);
                goto fail;
        }

        iucv_accept_enqueue(sk, nsk);

        /* Wake up accept */
        nsk->sk_state = IUCV_CONNECTED;
        sk->sk_data_ready(sk, 1);
        err = 0;
fail:
        bh_unlock_sock(sk);
        return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        sk->sk_state = IUCV_CONNECTED;
        sk->sk_state_change(sk);
}

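/* Message-pending callback: allocate an skb from atomic DMA memory,
 * pull the message payload into it (messages whose data arrived in
 * the parameter list are queued as empty skbs) and queue it on the
 * receive queue; reject the message if allocation fails. */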
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct sk_buff *skb;
        int rc;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                return;

        skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
        if (!skb) {
                iucv_message_reject(path, msg);
                return;
        }

        if (msg->flags & IPRMDATA) {
                skb->data = NULL;
                skb->len = 0;
        } else {
                rc = iucv_message_receive(path, msg, 0, skb->data,
                                          msg->length, NULL);
                if (rc) {
                        kfree_skb(skb);
                        return;
                }

                skb_reset_transport_header(skb);
                skb_reset_network_header(skb);
                skb->len = msg->length;
        }

        if (sock_queue_rcv_skb(sk, skb))
                kfree_skb(skb);
}

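/* Transmit-completion callback: find the queued skb whose tag (stored
 * in skb->cb by iucv_sock_sendmsg()) matches the completed message and
 * free it. */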
static void iucv_callback_txdone(struct iucv_path *path,
                                 struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct sk_buff *this = NULL;
        struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
        struct sk_buff *list_skb;
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);

        list_skb = list->next;
        while (list_skb != (struct sk_buff *) list) {
                if (!memcmp(&msg->tag, list_skb->cb, 4)) {
                        this = list_skb;
                        break;
                }
                list_skb = list_skb->next;
        }

        spin_unlock_irqrestore(&list->lock, flags);

        /* No matching skb found - nothing to unlink or free. */
        if (!this)
                return;

        skb_unlink(this, &iucv_sk(sk)->send_skb_q);
        kfree_skb(this);
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        if (!list_empty(&iucv_sk(sk)->accept_q))
                sk->sk_state = IUCV_SEVERED;
        else
                sk->sk_state = IUCV_DISCONN;

        sk->sk_state_change(sk);
}

static struct proto_ops iucv_sock_ops = {
        .family         = PF_IUCV,
        .owner          = THIS_MODULE,
        .release        = iucv_sock_release,
        .bind           = iucv_sock_bind,
        .connect        = iucv_sock_connect,
        .listen         = iucv_sock_listen,
        .accept         = iucv_sock_accept,
        .getname        = iucv_sock_getname,
        .sendmsg        = iucv_sock_sendmsg,
        .recvmsg        = iucv_sock_recvmsg,
        .poll           = iucv_sock_poll,
        .ioctl          = sock_no_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
        .shutdown       = iucv_sock_shutdown,
        .setsockopt     = sock_no_setsockopt,
        .getsockopt     = sock_no_getsockopt
};

static struct net_proto_family iucv_sock_family_ops = {
        .family = AF_IUCV,
        .owner  = THIS_MODULE,
        .create = iucv_sock_create,
};

static int __init afiucv_init(void)
{
        int err;

        if (!MACHINE_IS_VM) {
                printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
                err = -EPROTONOSUPPORT;
                goto out;
        }
        cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
        if (unlikely(err)) {
                printk(KERN_ERR "AF_IUCV needs the VM userid\n");
                err = -EPROTONOSUPPORT;
                goto out;
        }

        err = iucv_register(&af_iucv_handler, 0);
        if (err)
                goto out;
        err = proto_register(&iucv_proto, 0);
        if (err)
                goto out_iucv;
        err = sock_register(&iucv_sock_family_ops);
        if (err)
                goto out_proto;
        printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
        return 0;

out_proto:
        proto_unregister(&iucv_proto);
out_iucv:
        iucv_unregister(&af_iucv_handler, 0);
out:
        return err;
}

static void __exit afiucv_exit(void)
{
        sock_unregister(PF_IUCV);
        proto_unregister(&iucv_proto);
        iucv_unregister(&af_iucv_handler, 0);

        printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);