CRED: Wrap task credential accesses in the UNIX socket protocol
[linux-2.6] / net / unix / af_unix.c
1 /*
2  * NET4:        Implementation of BSD Unix domain sockets.
3  *
4  * Authors:     Alan Cox, <alan@lxorguk.ukuu.org.uk>
5  *
6  *              This program is free software; you can redistribute it and/or
7  *              modify it under the terms of the GNU General Public License
8  *              as published by the Free Software Foundation; either version
9  *              2 of the License, or (at your option) any later version.
10  *
11  * Fixes:
12  *              Linus Torvalds  :       Assorted bug cures.
13  *              Niibe Yutaka    :       async I/O support.
14  *              Carsten Paeth   :       PF_UNIX check, address fixes.
15  *              Alan Cox        :       Limit size of allocated blocks.
16  *              Alan Cox        :       Fixed the stupid socketpair bug.
17  *              Alan Cox        :       BSD compatibility fine tuning.
18  *              Alan Cox        :       Fixed a bug in connect when interrupted.
19  *              Alan Cox        :       Sorted out a proper draft version of
20  *                                      file descriptor passing hacked up from
21  *                                      Mike Shaver's work.
22  *              Marty Leisner   :       Fixes to fd passing
23  *              Nick Nevin      :       recvmsg bugfix.
24  *              Alan Cox        :       Started proper garbage collector
25  *              Heiko Eißfeldt  :       Missing verify_area check
26  *              Alan Cox        :       Started POSIXisms
27  *              Andreas Schwab  :       Replace inode by dentry for proper
28  *                                      reference counting
29  *              Kirk Petersen   :       Made this a module
30  *          Christoph Rohland   :       Elegant non-blocking accept/connect algorithm.
31  *                                      Lots of bug fixes.
32  *           Alexey Kuznetsov   :       Repaired (I hope) bugs introduced
33  *                                      by the above two patches.
34  *           Andrea Arcangeli   :       If possible we block in connect(2)
35  *                                      if the max backlog of the listen socket
36  *                                      has been reached. This won't break
37  *                                      old apps and it avoids a huge number
38  *                                      of hashed sockets (for unix_gc()
39  *                                      performance reasons).
40  *                                      Security fix that limits the max
41  *                                      number of socks to 2*max_files and
42  *                                      the number of skbs queueable in the
43  *                                      dgram receiver.
44  *              Artur Skawina   :       Hash function optimizations
45  *           Alexey Kuznetsov   :       Full scale SMP. Lot of bugs are introduced 8)
46  *            Malcolm Beattie   :       Set peercred for socketpair
47  *           Michal Ostrowski   :       Module initialization cleanup.
48  *           Arnaldo C. Melo    :       Remove MOD_{INC,DEC}_USE_COUNT,
49  *                                      the core infrastructure is doing that
50  *                                      for all net proto families now (2.5.69+)
51  *
52  *
53  * Known differences from reference BSD that was tested:
54  *
55  *      [TO FIX]
56  *      ECONNREFUSED is not returned from one end of a connected() socket to the
57  *              other the moment one end closes.
58  *      fstat() doesn't return st_dev=0, and give the blksize as high water mark
59  *              and a fake inode identifier (nor the BSD first socket fstat twice bug).
60  *      [NOT TO FIX]
61  *      accept() returns a path name even if the connecting socket has closed
62  *              in the meantime (BSD loses the path and gives up).
63  *      accept() returns 0 length path for an unbound connector. BSD returns 16
64  *              and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65  *      socketpair(...SOCK_RAW..) doesn't panic the kernel.
66  *      BSD af_unix apparently has connect forgetting to block properly.
67  *              (need to check this with the POSIX spec in detail)
68  *
69  * Differences from 2.0.0-11-... (ANK)
70  *      Bug fixes and improvements.
71  *              - client shutdown killed server socket.
72  *              - removed all useless cli/sti pairs.
73  *
74  *      Semantic changes/extensions.
75  *              - generic control message passing.
76  *              - SCM_CREDENTIALS control message.
77  *              - "Abstract" (not FS based) socket bindings.
78  *                Abstract names are sequences of bytes (not zero terminated)
79  *                starting with a zero byte, so that this name space does not
80  *                intersect with BSD (filesystem) names.
81  */
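
/*
 * Illustrative sketch (userspace, not part of this file): binding to an
 * abstract name as described above.  The descriptor "fd" and the name
 * "example" are assumptions; the point is that the name is the byte
 * sequence after the leading zero byte and the address length must cover
 * exactly those bytes.
 *
 *      struct sockaddr_un a = { .sun_family = AF_UNIX };
 *      a.sun_path[0] = '\0';                  (abstract namespace marker)
 *      memcpy(a.sun_path + 1, "example", 7);  (name bytes, no terminator)
 *      bind(fd, (struct sockaddr *)&a,
 *           offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */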
82
83 #include <linux/module.h>
84 #include <linux/kernel.h>
85 #include <linux/signal.h>
86 #include <linux/sched.h>
87 #include <linux/errno.h>
88 #include <linux/string.h>
89 #include <linux/stat.h>
90 #include <linux/dcache.h>
91 #include <linux/namei.h>
92 #include <linux/socket.h>
93 #include <linux/un.h>
94 #include <linux/fcntl.h>
95 #include <linux/termios.h>
96 #include <linux/sockios.h>
97 #include <linux/net.h>
98 #include <linux/in.h>
99 #include <linux/fs.h>
100 #include <linux/slab.h>
101 #include <asm/uaccess.h>
102 #include <linux/skbuff.h>
103 #include <linux/netdevice.h>
104 #include <net/net_namespace.h>
105 #include <net/sock.h>
106 #include <net/tcp_states.h>
107 #include <net/af_unix.h>
108 #include <linux/proc_fs.h>
109 #include <linux/seq_file.h>
110 #include <net/scm.h>
111 #include <linux/init.h>
112 #include <linux/poll.h>
113 #include <linux/rtnetlink.h>
114 #include <linux/mount.h>
115 #include <net/checksum.h>
116 #include <linux/security.h>
117
118 static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
119 static DEFINE_SPINLOCK(unix_table_lock);
120 static atomic_t unix_nr_socks = ATOMIC_INIT(0);
121
122 #define unix_sockets_unbound    (&unix_socket_table[UNIX_HASH_SIZE])
123
124 #define UNIX_ABSTRACT(sk)       (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
125
126 #ifdef CONFIG_SECURITY_NETWORK
127 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
128 {
129         memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
130 }
131
132 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
133 {
134         scm->secid = *UNIXSID(skb);
135 }
136 #else
137 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
138 { }
139
140 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
141 { }
142 #endif /* CONFIG_SECURITY_NETWORK */
143
144 /*
145  *  SMP locking strategy:
146  *    hash table is protected with spinlock unix_table_lock
147  *    each socket state is protected by a separate spinlock.
148  */
149
150 static inline unsigned unix_hash_fold(__wsum n)
151 {
152         unsigned hash = (__force unsigned)n;
153         hash ^= hash>>16;
154         hash ^= hash>>8;
155         return hash&(UNIX_HASH_SIZE-1);
156 }
157
158 #define unix_peer(sk) (unix_sk(sk)->peer)
159
160 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
161 {
162         return unix_peer(osk) == sk;
163 }
164
165 static inline int unix_may_send(struct sock *sk, struct sock *osk)
166 {
167         return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
168 }
169
170 static inline int unix_recvq_full(struct sock const *sk)
171 {
172         return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
173 }
174
175 static struct sock *unix_peer_get(struct sock *s)
176 {
177         struct sock *peer;
178
179         unix_state_lock(s);
180         peer = unix_peer(s);
181         if (peer)
182                 sock_hold(peer);
183         unix_state_unlock(s);
184         return peer;
185 }
186
187 static inline void unix_release_addr(struct unix_address *addr)
188 {
189         if (atomic_dec_and_test(&addr->refcnt))
190                 kfree(addr);
191 }
192
193 /*
194  *      Check unix socket name:
195  *              - should not be zero length.
196  *              - if it does not start with zero, it should be NUL terminated (an FS object)
197  *              - if it starts with zero, it is an abstract name.
198  */
199
200 static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
201 {
202         if (len <= sizeof(short) || len > sizeof(*sunaddr))
203                 return -EINVAL;
204         if (!sunaddr || sunaddr->sun_family != AF_UNIX)
205                 return -EINVAL;
206         if (sunaddr->sun_path[0]) {
207                 /*
208                  * This may look like an off-by-one error but it is a bit more
209                  * subtle. 108 is the longest valid AF_UNIX path for a binding.
210                  * sun_path[108] doesn't as such exist.  However in kernel space
211                  * we are guaranteed that it is a valid memory location in our
212                  * kernel address buffer.
213                  */
214                 ((char *)sunaddr)[len]=0;
215                 len = strlen(sunaddr->sun_path)+1+sizeof(short);
216                 return len;
217         }
218
219         *hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
220         return len;
221 }
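
/*
 * Illustrative examples (assumed inputs, not from this file):
 *   - a filesystem name "/tmp/s": the buffer is zero terminated at
 *     sunaddr[len] and the returned length is
 *     sizeof(short) + strlen("/tmp/s") + 1; *hashp is left untouched.
 *   - an abstract name "\0foo" with len == sizeof(short) + 4: len is
 *     returned unchanged and *hashp receives the folded checksum of
 *     those len bytes.
 */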
222
223 static void __unix_remove_socket(struct sock *sk)
224 {
225         sk_del_node_init(sk);
226 }
227
228 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
229 {
230         WARN_ON(!sk_unhashed(sk));
231         sk_add_node(sk, list);
232 }
233
234 static inline void unix_remove_socket(struct sock *sk)
235 {
236         spin_lock(&unix_table_lock);
237         __unix_remove_socket(sk);
238         spin_unlock(&unix_table_lock);
239 }
240
241 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
242 {
243         spin_lock(&unix_table_lock);
244         __unix_insert_socket(list, sk);
245         spin_unlock(&unix_table_lock);
246 }
247
248 static struct sock *__unix_find_socket_byname(struct net *net,
249                                               struct sockaddr_un *sunname,
250                                               int len, int type, unsigned hash)
251 {
252         struct sock *s;
253         struct hlist_node *node;
254
255         sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
256                 struct unix_sock *u = unix_sk(s);
257
258                 if (!net_eq(sock_net(s), net))
259                         continue;
260
261                 if (u->addr->len == len &&
262                     !memcmp(u->addr->name, sunname, len))
263                         goto found;
264         }
265         s = NULL;
266 found:
267         return s;
268 }
269
270 static inline struct sock *unix_find_socket_byname(struct net *net,
271                                                    struct sockaddr_un *sunname,
272                                                    int len, int type,
273                                                    unsigned hash)
274 {
275         struct sock *s;
276
277         spin_lock(&unix_table_lock);
278         s = __unix_find_socket_byname(net, sunname, len, type, hash);
279         if (s)
280                 sock_hold(s);
281         spin_unlock(&unix_table_lock);
282         return s;
283 }
284
285 static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
286 {
287         struct sock *s;
288         struct hlist_node *node;
289
290         spin_lock(&unix_table_lock);
291         sk_for_each(s, node,
292                     &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
293                 struct dentry *dentry = unix_sk(s)->dentry;
294
295                 if (!net_eq(sock_net(s), net))
296                         continue;
297
298                 if(dentry && dentry->d_inode == i)
299                 {
300                         sock_hold(s);
301                         goto found;
302                 }
303         }
304         s = NULL;
305 found:
306         spin_unlock(&unix_table_lock);
307         return s;
308 }
309
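/*
 * A unix socket counts as writable while no more than a quarter of its
 * send buffer is taken up by in-flight skbs (wmem_alloc * 4 <= sndbuf).
 */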
310 static inline int unix_writable(struct sock *sk)
311 {
312         return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
313 }
314
315 static void unix_write_space(struct sock *sk)
316 {
317         read_lock(&sk->sk_callback_lock);
318         if (unix_writable(sk)) {
319                 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
320                         wake_up_interruptible_sync(sk->sk_sleep);
321                 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
322         }
323         read_unlock(&sk->sk_callback_lock);
324 }
325
326 /* When a dgram socket disconnects (or changes its peer), we clear its receive
327  * queue of packets that arrived from the previous peer. First, this allows
328  * flow control based only on wmem_alloc; second, an sk connected to a peer
329  * may receive messages only from that peer. */
330 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
331 {
332         if (!skb_queue_empty(&sk->sk_receive_queue)) {
333                 skb_queue_purge(&sk->sk_receive_queue);
334                 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
335
336                 /* If one link of a bidirectional dgram pipe is disconnected,
337                  * we signal an error. Messages are lost. Do not do this
338                  * when the peer was not connected to us.
339                  */
340                 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
341                         other->sk_err = ECONNRESET;
342                         other->sk_error_report(other);
343                 }
344         }
345 }
346
347 static void unix_sock_destructor(struct sock *sk)
348 {
349         struct unix_sock *u = unix_sk(sk);
350
351         skb_queue_purge(&sk->sk_receive_queue);
352
353         WARN_ON(atomic_read(&sk->sk_wmem_alloc));
354         WARN_ON(!sk_unhashed(sk));
355         WARN_ON(sk->sk_socket);
356         if (!sock_flag(sk, SOCK_DEAD)) {
357                 printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
358                 return;
359         }
360
361         if (u->addr)
362                 unix_release_addr(u->addr);
363
364         atomic_dec(&unix_nr_socks);
365 #ifdef UNIX_REFCNT_DEBUG
366         printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
367 #endif
368 }
369
370 static int unix_release_sock (struct sock *sk, int embrion)
371 {
372         struct unix_sock *u = unix_sk(sk);
373         struct dentry *dentry;
374         struct vfsmount *mnt;
375         struct sock *skpair;
376         struct sk_buff *skb;
377         int state;
378
379         unix_remove_socket(sk);
380
381         /* Clear state */
382         unix_state_lock(sk);
383         sock_orphan(sk);
384         sk->sk_shutdown = SHUTDOWN_MASK;
385         dentry       = u->dentry;
386         u->dentry    = NULL;
387         mnt          = u->mnt;
388         u->mnt       = NULL;
389         state = sk->sk_state;
390         sk->sk_state = TCP_CLOSE;
391         unix_state_unlock(sk);
392
393         wake_up_interruptible_all(&u->peer_wait);
394
395         skpair=unix_peer(sk);
396
397         if (skpair!=NULL) {
398                 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
399                         unix_state_lock(skpair);
400                         /* No more writes */
401                         skpair->sk_shutdown = SHUTDOWN_MASK;
402                         if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
403                                 skpair->sk_err = ECONNRESET;
404                         unix_state_unlock(skpair);
405                         skpair->sk_state_change(skpair);
406                         read_lock(&skpair->sk_callback_lock);
407                         sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
408                         read_unlock(&skpair->sk_callback_lock);
409                 }
410                 sock_put(skpair); /* It may now die */
411                 unix_peer(sk) = NULL;
412         }
413
414         /* Try to flush out this socket. Throw out buffers at least */
415
416         while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
417                 if (state==TCP_LISTEN)
418                         unix_release_sock(skb->sk, 1);
419                 /* passed fds are erased in the kfree_skb hook        */
420                 kfree_skb(skb);
421         }
422
423         if (dentry) {
424                 dput(dentry);
425                 mntput(mnt);
426         }
427
428         sock_put(sk);
429
430         /* ---- Socket is dead now and most probably destroyed ---- */
431
432         /*
433          * Fixme: BSD difference: In BSD all sockets connected to us get
434          *        ECONNRESET and we die on the spot. In Linux we behave
435          *        like files and pipes do and wait for the last
436          *        dereference.
437          *
438          * Can't we simply set sock->err?
439          *
440          *        What does the above comment talk about? --ANK(980817)
441          */
442
443         if (unix_tot_inflight)
444                 unix_gc();              /* Garbage collect fds */
445
446         return 0;
447 }
448
449 static int unix_listen(struct socket *sock, int backlog)
450 {
451         int err;
452         struct sock *sk = sock->sk;
453         struct unix_sock *u = unix_sk(sk);
454
455         err = -EOPNOTSUPP;
456         if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
457                 goto out;                       /* Only stream/seqpacket sockets accept */
458         err = -EINVAL;
459         if (!u->addr)
460                 goto out;                       /* No listens on an unbound socket */
461         unix_state_lock(sk);
462         if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
463                 goto out_unlock;
464         if (backlog > sk->sk_max_ack_backlog)
465                 wake_up_interruptible_all(&u->peer_wait);
466         sk->sk_max_ack_backlog  = backlog;
467         sk->sk_state            = TCP_LISTEN;
468         /* set credentials so connect can copy them */
469         sk->sk_peercred.pid     = task_tgid_vnr(current);
470         current_euid_egid(&sk->sk_peercred.uid, &sk->sk_peercred.gid);
471         err = 0;
472
473 out_unlock:
474         unix_state_unlock(sk);
475 out:
476         return err;
477 }
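
/*
 * Illustrative sketch (userspace, not part of this file): the credentials
 * stored above are what a connected peer later reads back via SO_PEERCRED.
 * "fd" is assumed to be a connected AF_UNIX stream socket and _GNU_SOURCE
 * is assumed for struct ucred.
 *
 *      struct ucred cred;
 *      socklen_t clen = sizeof(cred);
 *      if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cred, &clen) == 0)
 *              printf("peer pid=%d uid=%u gid=%u\n",
 *                     cred.pid, cred.uid, cred.gid);
 */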
478
479 static int unix_release(struct socket *);
480 static int unix_bind(struct socket *, struct sockaddr *, int);
481 static int unix_stream_connect(struct socket *, struct sockaddr *,
482                                int addr_len, int flags);
483 static int unix_socketpair(struct socket *, struct socket *);
484 static int unix_accept(struct socket *, struct socket *, int);
485 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
486 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
487 static unsigned int unix_dgram_poll(struct file *, struct socket *,
488                                     poll_table *);
489 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
490 static int unix_shutdown(struct socket *, int);
491 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
492                                struct msghdr *, size_t);
493 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
494                                struct msghdr *, size_t, int);
495 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
496                               struct msghdr *, size_t);
497 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
498                               struct msghdr *, size_t, int);
499 static int unix_dgram_connect(struct socket *, struct sockaddr *,
500                               int, int);
501 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
502                                   struct msghdr *, size_t);
503
504 static const struct proto_ops unix_stream_ops = {
505         .family =       PF_UNIX,
506         .owner =        THIS_MODULE,
507         .release =      unix_release,
508         .bind =         unix_bind,
509         .connect =      unix_stream_connect,
510         .socketpair =   unix_socketpair,
511         .accept =       unix_accept,
512         .getname =      unix_getname,
513         .poll =         unix_poll,
514         .ioctl =        unix_ioctl,
515         .listen =       unix_listen,
516         .shutdown =     unix_shutdown,
517         .setsockopt =   sock_no_setsockopt,
518         .getsockopt =   sock_no_getsockopt,
519         .sendmsg =      unix_stream_sendmsg,
520         .recvmsg =      unix_stream_recvmsg,
521         .mmap =         sock_no_mmap,
522         .sendpage =     sock_no_sendpage,
523 };
524
525 static const struct proto_ops unix_dgram_ops = {
526         .family =       PF_UNIX,
527         .owner =        THIS_MODULE,
528         .release =      unix_release,
529         .bind =         unix_bind,
530         .connect =      unix_dgram_connect,
531         .socketpair =   unix_socketpair,
532         .accept =       sock_no_accept,
533         .getname =      unix_getname,
534         .poll =         unix_dgram_poll,
535         .ioctl =        unix_ioctl,
536         .listen =       sock_no_listen,
537         .shutdown =     unix_shutdown,
538         .setsockopt =   sock_no_setsockopt,
539         .getsockopt =   sock_no_getsockopt,
540         .sendmsg =      unix_dgram_sendmsg,
541         .recvmsg =      unix_dgram_recvmsg,
542         .mmap =         sock_no_mmap,
543         .sendpage =     sock_no_sendpage,
544 };
545
546 static const struct proto_ops unix_seqpacket_ops = {
547         .family =       PF_UNIX,
548         .owner =        THIS_MODULE,
549         .release =      unix_release,
550         .bind =         unix_bind,
551         .connect =      unix_stream_connect,
552         .socketpair =   unix_socketpair,
553         .accept =       unix_accept,
554         .getname =      unix_getname,
555         .poll =         unix_dgram_poll,
556         .ioctl =        unix_ioctl,
557         .listen =       unix_listen,
558         .shutdown =     unix_shutdown,
559         .setsockopt =   sock_no_setsockopt,
560         .getsockopt =   sock_no_getsockopt,
561         .sendmsg =      unix_seqpacket_sendmsg,
562         .recvmsg =      unix_dgram_recvmsg,
563         .mmap =         sock_no_mmap,
564         .sendpage =     sock_no_sendpage,
565 };
566
567 static struct proto unix_proto = {
568         .name     = "UNIX",
569         .owner    = THIS_MODULE,
570         .obj_size = sizeof(struct unix_sock),
571 };
572
573 /*
574  * AF_UNIX sockets do not interact with hardware, hence they
575  * don't trigger interrupts - so it's safe for them to have
576  * bh-unsafe locking for their sk_receive_queue.lock. Split off
577  * this special lock-class by reinitializing the spinlock key:
578  */
579 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
580
581 static struct sock * unix_create1(struct net *net, struct socket *sock)
582 {
583         struct sock *sk = NULL;
584         struct unix_sock *u;
585
586         atomic_inc(&unix_nr_socks);
587         if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
588                 goto out;
589
590         sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
591         if (!sk)
592                 goto out;
593
594         sock_init_data(sock,sk);
595         lockdep_set_class(&sk->sk_receive_queue.lock,
596                                 &af_unix_sk_receive_queue_lock_key);
597
598         sk->sk_write_space      = unix_write_space;
599         sk->sk_max_ack_backlog  = net->unx.sysctl_max_dgram_qlen;
600         sk->sk_destruct         = unix_sock_destructor;
601         u         = unix_sk(sk);
602         u->dentry = NULL;
603         u->mnt    = NULL;
604         spin_lock_init(&u->lock);
605         atomic_long_set(&u->inflight, 0);
606         INIT_LIST_HEAD(&u->link);
607         mutex_init(&u->readlock); /* single task reading lock */
608         init_waitqueue_head(&u->peer_wait);
609         unix_insert_socket(unix_sockets_unbound, sk);
610 out:
611         if (sk == NULL)
612                 atomic_dec(&unix_nr_socks);
613         return sk;
614 }
615
616 static int unix_create(struct net *net, struct socket *sock, int protocol)
617 {
618         if (protocol && protocol != PF_UNIX)
619                 return -EPROTONOSUPPORT;
620
621         sock->state = SS_UNCONNECTED;
622
623         switch (sock->type) {
624         case SOCK_STREAM:
625                 sock->ops = &unix_stream_ops;
626                 break;
627                 /*
628                  *      Believe it or not BSD has AF_UNIX, SOCK_RAW though
629                  *      nothing uses it.
630                  */
631         case SOCK_RAW:
632                 sock->type=SOCK_DGRAM;
633         case SOCK_DGRAM:
634                 sock->ops = &unix_dgram_ops;
635                 break;
636         case SOCK_SEQPACKET:
637                 sock->ops = &unix_seqpacket_ops;
638                 break;
639         default:
640                 return -ESOCKTNOSUPPORT;
641         }
642
643         return unix_create1(net, sock) ? 0 : -ENOMEM;
644 }
645
646 static int unix_release(struct socket *sock)
647 {
648         struct sock *sk = sock->sk;
649
650         if (!sk)
651                 return 0;
652
653         sock->sk = NULL;
654
655         return unix_release_sock (sk, 0);
656 }
657
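/*
 * Autobind gives an unbound socket an abstract address of the form
 * "\0XXXXX" (five hex digits drawn from a global counter).  It is used,
 * for example, when a SOCK_PASSCRED sender needs an address before a
 * connect or sendmsg.
 */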
658 static int unix_autobind(struct socket *sock)
659 {
660         struct sock *sk = sock->sk;
661         struct net *net = sock_net(sk);
662         struct unix_sock *u = unix_sk(sk);
663         static u32 ordernum = 1;
664         struct unix_address * addr;
665         int err;
666
667         mutex_lock(&u->readlock);
668
669         err = 0;
670         if (u->addr)
671                 goto out;
672
673         err = -ENOMEM;
674         addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
675         if (!addr)
676                 goto out;
677
678         addr->name->sun_family = AF_UNIX;
679         atomic_set(&addr->refcnt, 1);
680
681 retry:
682         addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
683         addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));
684
685         spin_lock(&unix_table_lock);
686         ordernum = (ordernum+1)&0xFFFFF;
687
688         if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
689                                       addr->hash)) {
690                 spin_unlock(&unix_table_lock);
691                 /* Sanity yield. It is an unusual case, but still... */
692                 if (!(ordernum&0xFF))
693                         yield();
694                 goto retry;
695         }
696         addr->hash ^= sk->sk_type;
697
698         __unix_remove_socket(sk);
699         u->addr = addr;
700         __unix_insert_socket(&unix_socket_table[addr->hash], sk);
701         spin_unlock(&unix_table_lock);
702         err = 0;
703
704 out:    mutex_unlock(&u->readlock);
705         return err;
706 }
707
708 static struct sock *unix_find_other(struct net *net,
709                                     struct sockaddr_un *sunname, int len,
710                                     int type, unsigned hash, int *error)
711 {
712         struct sock *u;
713         struct path path;
714         int err = 0;
715
716         if (sunname->sun_path[0]) {
717                 struct inode *inode;
718                 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
719                 if (err)
720                         goto fail;
721                 inode = path.dentry->d_inode;
722                 err = inode_permission(inode, MAY_WRITE);
723                 if (err)
724                         goto put_fail;
725
726                 err = -ECONNREFUSED;
727                 if (!S_ISSOCK(inode->i_mode))
728                         goto put_fail;
729                 u = unix_find_socket_byinode(net, inode);
730                 if (!u)
731                         goto put_fail;
732
733                 if (u->sk_type == type)
734                         touch_atime(path.mnt, path.dentry);
735
736                 path_put(&path);
737
738                 err=-EPROTOTYPE;
739                 if (u->sk_type != type) {
740                         sock_put(u);
741                         goto fail;
742                 }
743         } else {
744                 err = -ECONNREFUSED;
745                 u=unix_find_socket_byname(net, sunname, len, type, hash);
746                 if (u) {
747                         struct dentry *dentry;
748                         dentry = unix_sk(u)->dentry;
749                         if (dentry)
750                                 touch_atime(unix_sk(u)->mnt, dentry);
751                 } else
752                         goto fail;
753         }
754         return u;
755
756 put_fail:
757         path_put(&path);
758 fail:
759         *error=err;
760         return NULL;
761 }
762
763
764 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
765 {
766         struct sock *sk = sock->sk;
767         struct net *net = sock_net(sk);
768         struct unix_sock *u = unix_sk(sk);
769         struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
770         struct dentry * dentry = NULL;
771         struct nameidata nd;
772         int err;
773         unsigned hash;
774         struct unix_address *addr;
775         struct hlist_head *list;
776
777         err = -EINVAL;
778         if (sunaddr->sun_family != AF_UNIX)
779                 goto out;
780
781         if (addr_len==sizeof(short)) {
782                 err = unix_autobind(sock);
783                 goto out;
784         }
785
786         err = unix_mkname(sunaddr, addr_len, &hash);
787         if (err < 0)
788                 goto out;
789         addr_len = err;
790
791         mutex_lock(&u->readlock);
792
793         err = -EINVAL;
794         if (u->addr)
795                 goto out_up;
796
797         err = -ENOMEM;
798         addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
799         if (!addr)
800                 goto out_up;
801
802         memcpy(addr->name, sunaddr, addr_len);
803         addr->len = addr_len;
804         addr->hash = hash ^ sk->sk_type;
805         atomic_set(&addr->refcnt, 1);
806
807         if (sunaddr->sun_path[0]) {
808                 unsigned int mode;
809                 err = 0;
810                 /*
811                  * Get the parent directory, calculate the hash for the last
812                  * component.
813                  */
814                 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
815                 if (err)
816                         goto out_mknod_parent;
817
818                 dentry = lookup_create(&nd, 0);
819                 err = PTR_ERR(dentry);
820                 if (IS_ERR(dentry))
821                         goto out_mknod_unlock;
822
823                 /*
824                  * All right, let's create it.
825                  */
826                 mode = S_IFSOCK |
827                        (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
828                 err = mnt_want_write(nd.path.mnt);
829                 if (err)
830                         goto out_mknod_dput;
831                 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
832                 mnt_drop_write(nd.path.mnt);
833                 if (err)
834                         goto out_mknod_dput;
835                 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
836                 dput(nd.path.dentry);
837                 nd.path.dentry = dentry;
838
839                 addr->hash = UNIX_HASH_SIZE;
840         }
841
842         spin_lock(&unix_table_lock);
843
844         if (!sunaddr->sun_path[0]) {
845                 err = -EADDRINUSE;
846                 if (__unix_find_socket_byname(net, sunaddr, addr_len,
847                                               sk->sk_type, hash)) {
848                         unix_release_addr(addr);
849                         goto out_unlock;
850                 }
851
852                 list = &unix_socket_table[addr->hash];
853         } else {
854                 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
855                 u->dentry = nd.path.dentry;
856                 u->mnt    = nd.path.mnt;
857         }
858
859         err = 0;
860         __unix_remove_socket(sk);
861         u->addr = addr;
862         __unix_insert_socket(list, sk);
863
864 out_unlock:
865         spin_unlock(&unix_table_lock);
866 out_up:
867         mutex_unlock(&u->readlock);
868 out:
869         return err;
870
871 out_mknod_dput:
872         dput(dentry);
873 out_mknod_unlock:
874         mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
875         path_put(&nd.path);
876 out_mknod_parent:
877         if (err==-EEXIST)
878                 err=-EADDRINUSE;
879         unix_release_addr(addr);
880         goto out_up;
881 }
882
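/*
 * Lock the state of two sockets in a fixed (pointer) order so that two
 * tasks connecting the same pair in opposite directions cannot deadlock.
 * sk2 may be NULL (AF_UNSPEC disconnect), in which case only sk1 is locked.
 */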
883 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
884 {
885         if (unlikely(sk1 == sk2) || !sk2) {
886                 unix_state_lock(sk1);
887                 return;
888         }
889         if (sk1 < sk2) {
890                 unix_state_lock(sk1);
891                 unix_state_lock_nested(sk2);
892         } else {
893                 unix_state_lock(sk2);
894                 unix_state_lock_nested(sk1);
895         }
896 }
897
898 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
899 {
900         if (unlikely(sk1 == sk2) || !sk2) {
901                 unix_state_unlock(sk1);
902                 return;
903         }
904         unix_state_unlock(sk1);
905         unix_state_unlock(sk2);
906 }
907
908 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
909                               int alen, int flags)
910 {
911         struct sock *sk = sock->sk;
912         struct net *net = sock_net(sk);
913         struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr;
914         struct sock *other;
915         unsigned hash;
916         int err;
917
918         if (addr->sa_family != AF_UNSPEC) {
919                 err = unix_mkname(sunaddr, alen, &hash);
920                 if (err < 0)
921                         goto out;
922                 alen = err;
923
924                 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
925                     !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
926                         goto out;
927
928 restart:
929                 other=unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
930                 if (!other)
931                         goto out;
932
933                 unix_state_double_lock(sk, other);
934
935                 /* Apparently VFS overslept socket death. Retry. */
936                 if (sock_flag(other, SOCK_DEAD)) {
937                         unix_state_double_unlock(sk, other);
938                         sock_put(other);
939                         goto restart;
940                 }
941
942                 err = -EPERM;
943                 if (!unix_may_send(sk, other))
944                         goto out_unlock;
945
946                 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
947                 if (err)
948                         goto out_unlock;
949
950         } else {
951                 /*
952                  *      1003.1g breaking connected state with AF_UNSPEC
953                  */
954                 other = NULL;
955                 unix_state_double_lock(sk, other);
956         }
957
958         /*
959          * If it was connected, reconnect.
960          */
961         if (unix_peer(sk)) {
962                 struct sock *old_peer = unix_peer(sk);
963                 unix_peer(sk)=other;
964                 unix_state_double_unlock(sk, other);
965
966                 if (other != old_peer)
967                         unix_dgram_disconnected(sk, old_peer);
968                 sock_put(old_peer);
969         } else {
970                 unix_peer(sk)=other;
971                 unix_state_double_unlock(sk, other);
972         }
973         return 0;
974
975 out_unlock:
976         unix_state_double_unlock(sk, other);
977         sock_put(other);
978 out:
979         return err;
980 }
981
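/*
 * Called with the state lock of "other" held; drops that lock and, if the
 * peer is still alive, not shut down and its receive queue is full, sleeps
 * on the peer's peer_wait queue for at most "timeo".  Returns the
 * remaining timeout; callers recheck the conditions and retry.
 */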
982 static long unix_wait_for_peer(struct sock *other, long timeo)
983 {
984         struct unix_sock *u = unix_sk(other);
985         int sched;
986         DEFINE_WAIT(wait);
987
988         prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
989
990         sched = !sock_flag(other, SOCK_DEAD) &&
991                 !(other->sk_shutdown & RCV_SHUTDOWN) &&
992                 unix_recvq_full(other);
993
994         unix_state_unlock(other);
995
996         if (sched)
997                 timeo = schedule_timeout(timeo);
998
999         finish_wait(&u->peer_wait, &wait);
1000         return timeo;
1001 }
1002
1003 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1004                                int addr_len, int flags)
1005 {
1006         struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1007         struct sock *sk = sock->sk;
1008         struct net *net = sock_net(sk);
1009         struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1010         struct sock *newsk = NULL;
1011         struct sock *other = NULL;
1012         struct sk_buff *skb = NULL;
1013         unsigned hash;
1014         int st;
1015         int err;
1016         long timeo;
1017
1018         err = unix_mkname(sunaddr, addr_len, &hash);
1019         if (err < 0)
1020                 goto out;
1021         addr_len = err;
1022
1023         if (test_bit(SOCK_PASSCRED, &sock->flags)
1024                 && !u->addr && (err = unix_autobind(sock)) != 0)
1025                 goto out;
1026
1027         timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1028
1029         /* First of all allocate resources.
1030            If we allocate them after the state is locked,
1031            we will have to recheck everything again in any case.
1032          */
1033
1034         err = -ENOMEM;
1035
1036         /* create new sock for complete connection */
1037         newsk = unix_create1(sock_net(sk), NULL);
1038         if (newsk == NULL)
1039                 goto out;
1040
1041         /* Allocate skb for sending to listening sock */
1042         skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1043         if (skb == NULL)
1044                 goto out;
1045
1046 restart:
1047         /*  Find listening sock. */
1048         other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1049         if (!other)
1050                 goto out;
1051
1052         /* Latch state of peer */
1053         unix_state_lock(other);
1054
1055         /* Apparently VFS overslept socket death. Retry. */
1056         if (sock_flag(other, SOCK_DEAD)) {
1057                 unix_state_unlock(other);
1058                 sock_put(other);
1059                 goto restart;
1060         }
1061
1062         err = -ECONNREFUSED;
1063         if (other->sk_state != TCP_LISTEN)
1064                 goto out_unlock;
1065
1066         if (unix_recvq_full(other)) {
1067                 err = -EAGAIN;
1068                 if (!timeo)
1069                         goto out_unlock;
1070
1071                 timeo = unix_wait_for_peer(other, timeo);
1072
1073                 err = sock_intr_errno(timeo);
1074                 if (signal_pending(current))
1075                         goto out;
1076                 sock_put(other);
1077                 goto restart;
1078         }
1079
1080         /* Latch our state.
1081
1082            It is a tricky place. We need to grab our state lock and cannot
1083            drop the lock on the peer. It is dangerous because a deadlock is
1084            possible. The connect-to-self case and a simultaneous
1085            attempt to connect are eliminated by checking the socket
1086            state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
1087            check this before attempting to grab the lock.
1088
1089            Well, and we have to recheck the state after the socket is locked.
1090          */
1091         st = sk->sk_state;
1092
1093         switch (st) {
1094         case TCP_CLOSE:
1095                 /* This is ok... continue with connect */
1096                 break;
1097         case TCP_ESTABLISHED:
1098                 /* Socket is already connected */
1099                 err = -EISCONN;
1100                 goto out_unlock;
1101         default:
1102                 err = -EINVAL;
1103                 goto out_unlock;
1104         }
1105
1106         unix_state_lock_nested(sk);
1107
1108         if (sk->sk_state != st) {
1109                 unix_state_unlock(sk);
1110                 unix_state_unlock(other);
1111                 sock_put(other);
1112                 goto restart;
1113         }
1114
1115         err = security_unix_stream_connect(sock, other->sk_socket, newsk);
1116         if (err) {
1117                 unix_state_unlock(sk);
1118                 goto out_unlock;
1119         }
1120
1121         /* The way is open! Quickly set all the necessary fields... */
1122
1123         sock_hold(sk);
1124         unix_peer(newsk)        = sk;
1125         newsk->sk_state         = TCP_ESTABLISHED;
1126         newsk->sk_type          = sk->sk_type;
1127         newsk->sk_peercred.pid  = task_tgid_vnr(current);
1128         current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
1129         newu = unix_sk(newsk);
1130         newsk->sk_sleep         = &newu->peer_wait;
1131         otheru = unix_sk(other);
1132
1133         /* copy address information from listening to new sock*/
1134         if (otheru->addr) {
1135                 atomic_inc(&otheru->addr->refcnt);
1136                 newu->addr = otheru->addr;
1137         }
1138         if (otheru->dentry) {
1139                 newu->dentry    = dget(otheru->dentry);
1140                 newu->mnt       = mntget(otheru->mnt);
1141         }
1142
1143         /* Set credentials */
1144         sk->sk_peercred = other->sk_peercred;
1145
1146         sock->state     = SS_CONNECTED;
1147         sk->sk_state    = TCP_ESTABLISHED;
1148         sock_hold(newsk);
1149
1150         smp_mb__after_atomic_inc();     /* sock_hold() does an atomic_inc() */
1151         unix_peer(sk)   = newsk;
1152
1153         unix_state_unlock(sk);
1154
1155         /* take the skb and send info to the listening sock */
1156         spin_lock(&other->sk_receive_queue.lock);
1157         __skb_queue_tail(&other->sk_receive_queue, skb);
1158         spin_unlock(&other->sk_receive_queue.lock);
1159         unix_state_unlock(other);
1160         other->sk_data_ready(other, 0);
1161         sock_put(other);
1162         return 0;
1163
1164 out_unlock:
1165         if (other)
1166                 unix_state_unlock(other);
1167
1168 out:
1169         if (skb)
1170                 kfree_skb(skb);
1171         if (newsk)
1172                 unix_release_sock(newsk, 0);
1173         if (other)
1174                 sock_put(other);
1175         return err;
1176 }
1177
1178 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1179 {
1180         struct sock *ska=socka->sk, *skb = sockb->sk;
1181
1182         /* Join our sockets back to back */
1183         sock_hold(ska);
1184         sock_hold(skb);
1185         unix_peer(ska)=skb;
1186         unix_peer(skb)=ska;
1187         ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current);
1188         current_euid_egid(&skb->sk_peercred.uid, &skb->sk_peercred.gid);
1189         ska->sk_peercred.uid = skb->sk_peercred.uid;
1190         ska->sk_peercred.gid = skb->sk_peercred.gid;
1191
1192         if (ska->sk_type != SOCK_DGRAM) {
1193                 ska->sk_state = TCP_ESTABLISHED;
1194                 skb->sk_state = TCP_ESTABLISHED;
1195                 socka->state  = SS_CONNECTED;
1196                 sockb->state  = SS_CONNECTED;
1197         }
1198         return 0;
1199 }
1200
1201 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1202 {
1203         struct sock *sk = sock->sk;
1204         struct sock *tsk;
1205         struct sk_buff *skb;
1206         int err;
1207
1208         err = -EOPNOTSUPP;
1209         if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
1210                 goto out;
1211
1212         err = -EINVAL;
1213         if (sk->sk_state != TCP_LISTEN)
1214                 goto out;
1215
1216         /* If socket state is TCP_LISTEN it cannot change (for now...),
1217          * so that no locks are necessary.
1218          */
1219
1220         skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1221         if (!skb) {
1222                 /* This means receive shutdown. */
1223                 if (err == 0)
1224                         err = -EINVAL;
1225                 goto out;
1226         }
1227
1228         tsk = skb->sk;
1229         skb_free_datagram(sk, skb);
1230         wake_up_interruptible(&unix_sk(sk)->peer_wait);
1231
1232         /* attach accepted sock to socket */
1233         unix_state_lock(tsk);
1234         newsock->state = SS_CONNECTED;
1235         sock_graft(tsk, newsock);
1236         unix_state_unlock(tsk);
1237         return 0;
1238
1239 out:
1240         return err;
1241 }
1242
1243
1244 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1245 {
1246         struct sock *sk = sock->sk;
1247         struct unix_sock *u;
1248         struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1249         int err = 0;
1250
1251         if (peer) {
1252                 sk = unix_peer_get(sk);
1253
1254                 err = -ENOTCONN;
1255                 if (!sk)
1256                         goto out;
1257                 err = 0;
1258         } else {
1259                 sock_hold(sk);
1260         }
1261
1262         u = unix_sk(sk);
1263         unix_state_lock(sk);
1264         if (!u->addr) {
1265                 sunaddr->sun_family = AF_UNIX;
1266                 sunaddr->sun_path[0] = 0;
1267                 *uaddr_len = sizeof(short);
1268         } else {
1269                 struct unix_address *addr = u->addr;
1270
1271                 *uaddr_len = addr->len;
1272                 memcpy(sunaddr, addr->name, *uaddr_len);
1273         }
1274         unix_state_unlock(sk);
1275         sock_put(sk);
1276 out:
1277         return err;
1278 }
1279
1280 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1281 {
1282         int i;
1283
1284         scm->fp = UNIXCB(skb).fp;
1285         skb->destructor = sock_wfree;
1286         UNIXCB(skb).fp = NULL;
1287
1288         for (i=scm->fp->count-1; i>=0; i--)
1289                 unix_notinflight(scm->fp->fp[i]);
1290 }
1291
1292 static void unix_destruct_fds(struct sk_buff *skb)
1293 {
1294         struct scm_cookie scm;
1295         memset(&scm, 0, sizeof(scm));
1296         unix_detach_fds(&scm, skb);
1297
1298         /* Alas, it calls VFS */
1299         /* So fscking what? fput() had been SMP-safe since the last Summer */
1300         scm_destroy(&scm);
1301         sock_wfree(skb);
1302 }
1303
1304 static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1305 {
1306         int i;
1307         for (i=scm->fp->count-1; i>=0; i--)
1308                 unix_inflight(scm->fp->fp[i]);
1309         UNIXCB(skb).fp = scm->fp;
1310         skb->destructor = unix_destruct_fds;
1311         scm->fp = NULL;
1312 }
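
/*
 * Illustrative sketch (userspace, not part of this file): passing a file
 * descriptor, which the attach/detach helpers above account for on the
 * kernel side.  "sock" and "fd_to_pass" are assumptions.
 *
 *      char data = 'x';                       (send at least one data byte)
 *      struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *      char buf[CMSG_SPACE(sizeof(int))] = { 0 };
 *      struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *                            .msg_control = buf,
 *                            .msg_controllen = sizeof(buf) };
 *      struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *      cm->cmsg_level = SOL_SOCKET;
 *      cm->cmsg_type  = SCM_RIGHTS;
 *      cm->cmsg_len   = CMSG_LEN(sizeof(int));
 *      memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));
 *      sendmsg(sock, &msg, 0);
 */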
1313
1314 /*
1315  *      Send AF_UNIX data.
1316  */
1317
1318 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1319                               struct msghdr *msg, size_t len)
1320 {
1321         struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1322         struct sock *sk = sock->sk;
1323         struct net *net = sock_net(sk);
1324         struct unix_sock *u = unix_sk(sk);
1325         struct sockaddr_un *sunaddr=msg->msg_name;
1326         struct sock *other = NULL;
1327         int namelen = 0; /* fake GCC */
1328         int err;
1329         unsigned hash;
1330         struct sk_buff *skb;
1331         long timeo;
1332         struct scm_cookie tmp_scm;
1333
1334         if (NULL == siocb->scm)
1335                 siocb->scm = &tmp_scm;
1336         err = scm_send(sock, msg, siocb->scm);
1337         if (err < 0)
1338                 return err;
1339
1340         err = -EOPNOTSUPP;
1341         if (msg->msg_flags&MSG_OOB)
1342                 goto out;
1343
1344         if (msg->msg_namelen) {
1345                 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1346                 if (err < 0)
1347                         goto out;
1348                 namelen = err;
1349         } else {
1350                 sunaddr = NULL;
1351                 err = -ENOTCONN;
1352                 other = unix_peer_get(sk);
1353                 if (!other)
1354                         goto out;
1355         }
1356
1357         if (test_bit(SOCK_PASSCRED, &sock->flags)
1358                 && !u->addr && (err = unix_autobind(sock)) != 0)
1359                 goto out;
1360
1361         err = -EMSGSIZE;
1362         if (len > sk->sk_sndbuf - 32)
1363                 goto out;
1364
1365         skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1366         if (skb==NULL)
1367                 goto out;
1368
1369         memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1370         if (siocb->scm->fp)
1371                 unix_attach_fds(siocb->scm, skb);
1372         unix_get_secdata(siocb->scm, skb);
1373
1374         skb_reset_transport_header(skb);
1375         err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
1376         if (err)
1377                 goto out_free;
1378
1379         timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1380
1381 restart:
1382         if (!other) {
1383                 err = -ECONNRESET;
1384                 if (sunaddr == NULL)
1385                         goto out_free;
1386
1387                 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1388                                         hash, &err);
1389                 if (other==NULL)
1390                         goto out_free;
1391         }
1392
1393         unix_state_lock(other);
1394         err = -EPERM;
1395         if (!unix_may_send(sk, other))
1396                 goto out_unlock;
1397
1398         if (sock_flag(other, SOCK_DEAD)) {
1399                 /*
1400                  *      Check with 1003.1g - what error should a
1401                  *      datagram sender see here?
1402                  */
1403                 unix_state_unlock(other);
1404                 sock_put(other);
1405
1406                 err = 0;
1407                 unix_state_lock(sk);
1408                 if (unix_peer(sk) == other) {
1409                         unix_peer(sk)=NULL;
1410                         unix_state_unlock(sk);
1411
1412                         unix_dgram_disconnected(sk, other);
1413                         sock_put(other);
1414                         err = -ECONNREFUSED;
1415                 } else {
1416                         unix_state_unlock(sk);
1417                 }
1418
1419                 other = NULL;
1420                 if (err)
1421                         goto out_free;
1422                 goto restart;
1423         }
1424
1425         err = -EPIPE;
1426         if (other->sk_shutdown & RCV_SHUTDOWN)
1427                 goto out_unlock;
1428
1429         if (sk->sk_type != SOCK_SEQPACKET) {
1430                 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1431                 if (err)
1432                         goto out_unlock;
1433         }
1434
1435         if (unix_peer(other) != sk && unix_recvq_full(other)) {
1436                 if (!timeo) {
1437                         err = -EAGAIN;
1438                         goto out_unlock;
1439                 }
1440
1441                 timeo = unix_wait_for_peer(other, timeo);
1442
1443                 err = sock_intr_errno(timeo);
1444                 if (signal_pending(current))
1445                         goto out_free;
1446
1447                 goto restart;
1448         }
1449
1450         skb_queue_tail(&other->sk_receive_queue, skb);
1451         unix_state_unlock(other);
1452         other->sk_data_ready(other, len);
1453         sock_put(other);
1454         scm_destroy(siocb->scm);
1455         return len;
1456
1457 out_unlock:
1458         unix_state_unlock(other);
1459 out_free:
1460         kfree_skb(skb);
1461 out:
1462         if (other)
1463                 sock_put(other);
1464         scm_destroy(siocb->scm);
1465         return err;
1466 }
1467
1468
1469 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1470                                struct msghdr *msg, size_t len)
1471 {
1472         struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1473         struct sock *sk = sock->sk;
1474         struct sock *other = NULL;
1475         struct sockaddr_un *sunaddr=msg->msg_name;
1476         int err,size;
1477         struct sk_buff *skb;
1478         int sent=0;
1479         struct scm_cookie tmp_scm;
1480
1481         if (NULL == siocb->scm)
1482                 siocb->scm = &tmp_scm;
1483         err = scm_send(sock, msg, siocb->scm);
1484         if (err < 0)
1485                 return err;
1486
1487         err = -EOPNOTSUPP;
1488         if (msg->msg_flags&MSG_OOB)
1489                 goto out_err;
1490
1491         if (msg->msg_namelen) {
1492                 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1493                 goto out_err;
1494         } else {
1495                 sunaddr = NULL;
1496                 err = -ENOTCONN;
1497                 other = unix_peer(sk);
1498                 if (!other)
1499                         goto out_err;
1500         }
1501
1502         if (sk->sk_shutdown & SEND_SHUTDOWN)
1503                 goto pipe_err;
1504
1505         while(sent < len)
1506         {
1507                 /*
1508                  *      Optimisation for the fact that under 0.01% of X
1509                  *      messages typically need breaking up.
1510                  */
1511
1512                 size = len-sent;
1513
1514                 /* Keep two messages in the pipe so it schedules better */
1515                 if (size > ((sk->sk_sndbuf >> 1) - 64))
1516                         size = (sk->sk_sndbuf >> 1) - 64;
1517
1518                 if (size > SKB_MAX_ALLOC)
1519                         size = SKB_MAX_ALLOC;
1520
1521                 /*
1522                  *      Grab a buffer
1523                  */
1524
1525                 skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
1526
1527                 if (skb==NULL)
1528                         goto out_err;
1529
1530                 /*
1531                  *      If you pass two values to sock_alloc_send_skb
1532                  *      it tries to grab the large buffer with GFP_NOFS
1533                  *      (which can fail easily), and if that fails it grabs the
1534                  *      fallback-size buffer, which is under a page and will
1535                  *      succeed. [Alan]
1536                  */
1537                 size = min_t(int, size, skb_tailroom(skb));
1538
1539                 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1540                 if (siocb->scm->fp)
1541                         unix_attach_fds(siocb->scm, skb);
1542
1543                 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
1544                         kfree_skb(skb);
1545                         goto out_err;
1546                 }
1547
1548                 unix_state_lock(other);
1549
1550                 if (sock_flag(other, SOCK_DEAD) ||
1551                     (other->sk_shutdown & RCV_SHUTDOWN))
1552                         goto pipe_err_free;
1553
1554                 skb_queue_tail(&other->sk_receive_queue, skb);
1555                 unix_state_unlock(other);
1556                 other->sk_data_ready(other, size);
1557                 sent+=size;
1558         }
1559
1560         scm_destroy(siocb->scm);
1561         siocb->scm = NULL;
1562
1563         return sent;
1564
1565 pipe_err_free:
1566         unix_state_unlock(other);
1567         kfree_skb(skb);
1568 pipe_err:
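             /*
              * The peer is dead or has shut down for reading, so behave like
              * a broken pipe: raise SIGPIPE unless MSG_NOSIGNAL was set or
              * some data already went out, and return -EPIPE (or the partial
              * byte count if anything was sent).
              */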
1569         if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
1570                 send_sig(SIGPIPE,current,0);
1571         err = -EPIPE;
1572 out_err:
1573         scm_destroy(siocb->scm);
1574         siocb->scm = NULL;
1575         return sent ? : err;
1576 }
1577
1578 static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1579                                   struct msghdr *msg, size_t len)
1580 {
1581         int err;
1582         struct sock *sk = sock->sk;
1583
1584         err = sock_error(sk);
1585         if (err)
1586                 return err;
1587
1588         if (sk->sk_state != TCP_ESTABLISHED)
1589                 return -ENOTCONN;
1590
1591         if (msg->msg_namelen)
1592                 msg->msg_namelen = 0;
1593
1594         return unix_dgram_sendmsg(kiocb, sock, msg, len);
1595 }
1596
1597 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1598 {
1599         struct unix_sock *u = unix_sk(sk);
1600
1601         msg->msg_namelen = 0;
1602         if (u->addr) {
1603                 msg->msg_namelen = u->addr->len;
1604                 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1605         }
1606 }
1607
1608 static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1609                               struct msghdr *msg, size_t size,
1610                               int flags)
1611 {
1612         struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1613         struct scm_cookie tmp_scm;
1614         struct sock *sk = sock->sk;
1615         struct unix_sock *u = unix_sk(sk);
1616         int noblock = flags & MSG_DONTWAIT;
1617         struct sk_buff *skb;
1618         int err;
1619
1620         err = -EOPNOTSUPP;
1621         if (flags&MSG_OOB)
1622                 goto out;
1623
1624         msg->msg_namelen = 0;
1625
1626         mutex_lock(&u->readlock);
1627
1628         skb = skb_recv_datagram(sk, flags, noblock, &err);
1629         if (!skb) {
1630                 unix_state_lock(sk);
1631                 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1632                 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1633                     (sk->sk_shutdown & RCV_SHUTDOWN))
1634                         err = 0;
1635                 unix_state_unlock(sk);
1636                 goto out_unlock;
1637         }
1638
1639         wake_up_interruptible_sync(&u->peer_wait);
1640
1641         if (msg->msg_name)
1642                 unix_copy_addr(msg, skb->sk);
1643
1644         if (size > skb->len)
1645                 size = skb->len;
1646         else if (size < skb->len)
1647                 msg->msg_flags |= MSG_TRUNC;
1648
1649         err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1650         if (err)
1651                 goto out_free;
1652
1653         if (!siocb->scm) {
1654                 siocb->scm = &tmp_scm;
1655                 memset(&tmp_scm, 0, sizeof(tmp_scm));
1656         }
1657         siocb->scm->creds = *UNIXCREDS(skb);
1658         unix_set_secdata(siocb->scm, skb);
1659
1660         if (!(flags & MSG_PEEK))
1661         {
1662                 if (UNIXCB(skb).fp)
1663                         unix_detach_fds(siocb->scm, skb);
1664         }
1665         else
1666         {
1667                 /* It is questionable what to do on PEEK. We could:
1668                    - not return fds - good, but too simple 8)
1669                    - return fds, but not return them on the later read
1670                      (the old strategy, apparently wrong)
1671                    - clone fds (chosen here; it is the most universal
1672                      solution)
1673
1674                    POSIX 1003.1g does not actually define this clearly
1675                    at all. POSIX 1003.1g doesn't define a lot of things
1676                    clearly, however!
1677
1678                 */
1679                 if (UNIXCB(skb).fp)
1680                         siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1681         }
1682         err = size;
1683
1684         scm_recv(sock, msg, siocb->scm, flags);
1685
1686 out_free:
1687         skb_free_datagram(sk,skb);
1688 out_unlock:
1689         mutex_unlock(&u->readlock);
1690 out:
1691         return err;
1692 }
1693
1694 /*
1695  *      Sleep until data has arrived, but check for races.
1696  */
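     /*
      * Note: prepare_to_wait() marks the task interruptible and hooks it onto
      * sk_sleep before the conditions are rechecked under the state lock, so
      * a wakeup from a writer that queues data after the check is not lost;
      * the lock is dropped only around schedule_timeout().
      */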
1697
1698 static long unix_stream_data_wait(struct sock * sk, long timeo)
1699 {
1700         DEFINE_WAIT(wait);
1701
1702         unix_state_lock(sk);
1703
1704         for (;;) {
1705                 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1706
1707                 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1708                     sk->sk_err ||
1709                     (sk->sk_shutdown & RCV_SHUTDOWN) ||
1710                     signal_pending(current) ||
1711                     !timeo)
1712                         break;
1713
1714                 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1715                 unix_state_unlock(sk);
1716                 timeo = schedule_timeout(timeo);
1717                 unix_state_lock(sk);
1718                 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1719         }
1720
1721         finish_wait(sk->sk_sleep, &wait);
1722         unix_state_unlock(sk);
1723         return timeo;
1724 }
1725
1726
1727
1728 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1729                                struct msghdr *msg, size_t size,
1730                                int flags)
1731 {
1732         struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1733         struct scm_cookie tmp_scm;
1734         struct sock *sk = sock->sk;
1735         struct unix_sock *u = unix_sk(sk);
1736         struct sockaddr_un *sunaddr=msg->msg_name;
1737         int copied = 0;
1738         int check_creds = 0;
1739         int target;
1740         int err = 0;
1741         long timeo;
1742
1743         err = -EINVAL;
1744         if (sk->sk_state != TCP_ESTABLISHED)
1745                 goto out;
1746
1747         err = -EOPNOTSUPP;
1748         if (flags&MSG_OOB)
1749                 goto out;
1750
1751         target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1752         timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1753
1754         msg->msg_namelen = 0;
1755
1756         /* Lock the socket to prevent the queue from being reordered
1757          * while we sleep in memcpy_toiovec()
1758          */
1759
1760         if (!siocb->scm) {
1761                 siocb->scm = &tmp_scm;
1762                 memset(&tmp_scm, 0, sizeof(tmp_scm));
1763         }
1764
1765         mutex_lock(&u->readlock);
1766
1767         do
1768         {
1769                 int chunk;
1770                 struct sk_buff *skb;
1771
1772                 unix_state_lock(sk);
1773                 skb = skb_dequeue(&sk->sk_receive_queue);
1774                 if (skb==NULL)
1775                 {
1776                         if (copied >= target)
1777                                 goto unlock;
1778
1779                         /*
1780                          *      POSIX 1003.1g mandates this order.
1781                          */
1782
1783                         if ((err = sock_error(sk)) != 0)
1784                                 goto unlock;
1785                         if (sk->sk_shutdown & RCV_SHUTDOWN)
1786                                 goto unlock;
1787
1788                         unix_state_unlock(sk);
1789                         err = -EAGAIN;
1790                         if (!timeo)
1791                                 break;
1792                         mutex_unlock(&u->readlock);
1793
1794                         timeo = unix_stream_data_wait(sk, timeo);
1795
1796                         if (signal_pending(current)) {
1797                                 err = sock_intr_errno(timeo);
1798                                 goto out;
1799                         }
1800                         mutex_lock(&u->readlock);
1801                         continue;
1802  unlock:
1803                         unix_state_unlock(sk);
1804                         break;
1805                 }
1806                 unix_state_unlock(sk);
1807
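                     /*
                      * check_creds is set once the first chunk has been
                      * copied; if a later skb carries different passed
                      * credentials, push it back and stop so that a single
                      * recvmsg() never mixes data from two different writers.
                      */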
1808                 if (check_creds) {
1809                         /* Never glue messages from different writers */
1810                         if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
1811                                 skb_queue_head(&sk->sk_receive_queue, skb);
1812                                 break;
1813                         }
1814                 } else {
1815                         /* Copy credentials */
1816                         siocb->scm->creds = *UNIXCREDS(skb);
1817                         check_creds = 1;
1818                 }
1819
1820                 /* Copy address just once */
1821                 if (sunaddr)
1822                 {
1823                         unix_copy_addr(msg, skb->sk);
1824                         sunaddr = NULL;
1825                 }
1826
1827                 chunk = min_t(unsigned int, skb->len, size);
1828                 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1829                         skb_queue_head(&sk->sk_receive_queue, skb);
1830                         if (copied == 0)
1831                                 copied = -EFAULT;
1832                         break;
1833                 }
1834                 copied += chunk;
1835                 size -= chunk;
1836
1837                 /* Mark read part of skb as used */
1838                 if (!(flags & MSG_PEEK))
1839                 {
1840                         skb_pull(skb, chunk);
1841
1842                         if (UNIXCB(skb).fp)
1843                                 unix_detach_fds(siocb->scm, skb);
1844
1845                         /* put the skb back if we didn't use it up.. */
1846                         if (skb->len)
1847                         {
1848                                 skb_queue_head(&sk->sk_receive_queue, skb);
1849                                 break;
1850                         }
1851
1852                         kfree_skb(skb);
1853
1854                         if (siocb->scm->fp)
1855                                 break;
1856                 }
1857                 else
1858                 {
1859                         /* It is questionable, see note in unix_dgram_recvmsg.
1860                          */
1861                         if (UNIXCB(skb).fp)
1862                                 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1863
1864                         /* put message back and return */
1865                         skb_queue_head(&sk->sk_receive_queue, skb);
1866                         break;
1867                 }
1868         } while (size);
1869
1870         mutex_unlock(&u->readlock);
1871         scm_recv(sock, msg, siocb->scm, flags);
1872 out:
1873         return copied ? : err;
1874 }
1875
1876 static int unix_shutdown(struct socket *sock, int mode)
1877 {
1878         struct sock *sk = sock->sk;
1879         struct sock *other;
1880
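             /*
              * Map the shutdown(2) argument (SHUT_RD = 0, SHUT_WR = 1,
              * SHUT_RDWR = 2) onto the internal flags: adding one and masking
              * with RCV_SHUTDOWN | SEND_SHUTDOWN yields RCV_SHUTDOWN,
              * SEND_SHUTDOWN, or both, respectively.
              */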
1881         mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
1882
1883         if (mode) {
1884                 unix_state_lock(sk);
1885                 sk->sk_shutdown |= mode;
1886                 other=unix_peer(sk);
1887                 if (other)
1888                         sock_hold(other);
1889                 unix_state_unlock(sk);
1890                 sk->sk_state_change(sk);
1891
1892                 if (other &&
1893                         (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
1894
1895                         int peer_mode = 0;
1896
1897                         if (mode&RCV_SHUTDOWN)
1898                                 peer_mode |= SEND_SHUTDOWN;
1899                         if (mode&SEND_SHUTDOWN)
1900                                 peer_mode |= RCV_SHUTDOWN;
1901                         unix_state_lock(other);
1902                         other->sk_shutdown |= peer_mode;
1903                         unix_state_unlock(other);
1904                         other->sk_state_change(other);
1905                         read_lock(&other->sk_callback_lock);
1906                         if (peer_mode == SHUTDOWN_MASK)
1907                                 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
1908                         else if (peer_mode & RCV_SHUTDOWN)
1909                                 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
1910                         read_unlock(&other->sk_callback_lock);
1911                 }
1912                 if (other)
1913                         sock_put(other);
1914         }
1915         return 0;
1916 }
1917
1918 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1919 {
1920         struct sock *sk = sock->sk;
1921         long amount=0;
1922         int err;
1923
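             /*
              * SIOCOUTQ reports outstanding send-buffer memory
              * (sk_wmem_alloc), i.e. data the receiver has not yet consumed;
              * SIOCINQ reports bytes available for reading - the whole
              * receive queue for stream/seqpacket sockets, or just the next
              * datagram otherwise.  A hypothetical caller would do e.g.
              * ioctl(fd, SIOCINQ, &avail).
              */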
1924         switch(cmd)
1925         {
1926                 case SIOCOUTQ:
1927                         amount = atomic_read(&sk->sk_wmem_alloc);
1928                         err = put_user(amount, (int __user *)arg);
1929                         break;
1930                 case SIOCINQ:
1931                 {
1932                         struct sk_buff *skb;
1933
1934                         if (sk->sk_state == TCP_LISTEN) {
1935                                 err = -EINVAL;
1936                                 break;
1937                         }
1938
1939                         spin_lock(&sk->sk_receive_queue.lock);
1940                         if (sk->sk_type == SOCK_STREAM ||
1941                             sk->sk_type == SOCK_SEQPACKET) {
1942                                 skb_queue_walk(&sk->sk_receive_queue, skb)
1943                                         amount += skb->len;
1944                         } else {
1945                                 skb = skb_peek(&sk->sk_receive_queue);
1946                                 if (skb)
1947                                         amount=skb->len;
1948                         }
1949                         spin_unlock(&sk->sk_receive_queue.lock);
1950                         err = put_user(amount, (int __user *)arg);
1951                         break;
1952                 }
1953
1954                 default:
1955                         err = -ENOIOCTLCMD;
1956                         break;
1957         }
1958         return err;
1959 }
1960
1961 static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait)
1962 {
1963         struct sock *sk = sock->sk;
1964         unsigned int mask;
1965
1966         poll_wait(file, sk->sk_sleep, wait);
1967         mask = 0;
1968
1969         /* exceptional events? */
1970         if (sk->sk_err)
1971                 mask |= POLLERR;
1972         if (sk->sk_shutdown == SHUTDOWN_MASK)
1973                 mask |= POLLHUP;
1974         if (sk->sk_shutdown & RCV_SHUTDOWN)
1975                 mask |= POLLRDHUP;
1976
1977         /* readable? */
1978         if (!skb_queue_empty(&sk->sk_receive_queue) ||
1979             (sk->sk_shutdown & RCV_SHUTDOWN))
1980                 mask |= POLLIN | POLLRDNORM;
1981
1982         /* Connection-based sockets need to check for termination and startup */
1983         if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE)
1984                 mask |= POLLHUP;
1985
1986         /*
1987          * We also report the socket as writable when the other side has
1988          * shut down the connection. This prevents stuck sockets.
1989          */
1990         if (unix_writable(sk))
1991                 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1992
1993         return mask;
1994 }
1995
1996 static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
1997                                     poll_table *wait)
1998 {
1999         struct sock *sk = sock->sk, *other;
2000         unsigned int mask, writable;
2001
2002         poll_wait(file, sk->sk_sleep, wait);
2003         mask = 0;
2004
2005         /* exceptional events? */
2006         if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2007                 mask |= POLLERR;
2008         if (sk->sk_shutdown & RCV_SHUTDOWN)
2009                 mask |= POLLRDHUP;
2010         if (sk->sk_shutdown == SHUTDOWN_MASK)
2011                 mask |= POLLHUP;
2012
2013         /* readable? */
2014         if (!skb_queue_empty(&sk->sk_receive_queue) ||
2015             (sk->sk_shutdown & RCV_SHUTDOWN))
2016                 mask |= POLLIN | POLLRDNORM;
2017
2018         /* Connection-based sockets need to check for termination and startup */
2019         if (sk->sk_type == SOCK_SEQPACKET) {
2020                 if (sk->sk_state == TCP_CLOSE)
2021                         mask |= POLLHUP;
2022                 /* connection hasn't started yet? */
2023                 if (sk->sk_state == TCP_SYN_SENT)
2024                         return mask;
2025         }
2026
2027         /* writable? */
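             /*
              * A connected datagram sender can be throttled by its peer: if
              * the peer has not connected back to us, also poll on the peer's
              * peer_wait queue and report not-writable while the peer's
              * receive queue is full.
              */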
2028         writable = unix_writable(sk);
2029         if (writable) {
2030                 other = unix_peer_get(sk);
2031                 if (other) {
2032                         if (unix_peer(other) != sk) {
2033                                 poll_wait(file, &unix_sk(other)->peer_wait,
2034                                           wait);
2035                                 if (unix_recvq_full(other))
2036                                         writable = 0;
2037                         }
2038
2039                         sock_put(other);
2040                 }
2041         }
2042
2043         if (writable)
2044                 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2045         else
2046                 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2047
2048         return mask;
2049 }
2050
2051 #ifdef CONFIG_PROC_FS
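     /*
      * Iteration helpers for /proc/net/unix.  unix_socket_table has
      * UNIX_HASH_SIZE + 1 chains (the extra slot is used for sockets that
      * are not yet bound to an address), hence the inclusive <= bounds below.
      */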
2052 static struct sock *first_unix_socket(int *i)
2053 {
2054         for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
2055                 if (!hlist_empty(&unix_socket_table[*i]))
2056                         return __sk_head(&unix_socket_table[*i]);
2057         }
2058         return NULL;
2059 }
2060
2061 static struct sock *next_unix_socket(int *i, struct sock *s)
2062 {
2063         struct sock *next = sk_next(s);
2064         /* More in this chain? */
2065         if (next)
2066                 return next;
2067         /* Look for next non-empty chain. */
2068         for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
2069                 if (!hlist_empty(&unix_socket_table[*i]))
2070                         return __sk_head(&unix_socket_table[*i]);
2071         }
2072         return NULL;
2073 }
2074
2075 struct unix_iter_state {
2076         struct seq_net_private p;
2077         int i;
2078 };
2079 static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
2080 {
2081         struct unix_iter_state *iter = seq->private;
2082         loff_t off = 0;
2083         struct sock *s;
2084
2085         for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
2086                 if (sock_net(s) != seq_file_net(seq))
2087                         continue;
2088                 if (off == pos)
2089                         return s;
2090                 ++off;
2091         }
2092         return NULL;
2093 }
2094
2095
2096 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2097         __acquires(unix_table_lock)
2098 {
2099         spin_lock(&unix_table_lock);
2100         return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2101 }
2102
2103 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2104 {
2105         struct unix_iter_state *iter = seq->private;
2106         struct sock *sk = v;
2107         ++*pos;
2108
2109         if (v == SEQ_START_TOKEN)
2110                 sk = first_unix_socket(&iter->i);
2111         else
2112                 sk = next_unix_socket(&iter->i, sk);
2113         while (sk && (sock_net(sk) != seq_file_net(seq)))
2114                 sk = next_unix_socket(&iter->i, sk);
2115         return sk;
2116 }
2117
2118 static void unix_seq_stop(struct seq_file *seq, void *v)
2119         __releases(unix_table_lock)
2120 {
2121         spin_unlock(&unix_table_lock);
2122 }
2123
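     /*
      * One /proc/net/unix line per socket: kernel address, refcount,
      * protocol (always 0), flags (__SO_ACCEPTCON when listening), type,
      * state, inode and, if bound, the path ('@'-prefixed for abstract
      * names).  A hypothetical listening stream socket might show up
      * roughly as:
      *   ffff88003746d3c0: 00000002 00000000 00010000 0001 01 17608 /tmp/foo.sock
      */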
2124 static int unix_seq_show(struct seq_file *seq, void *v)
2125 {
2126
2127         if (v == SEQ_START_TOKEN)
2128                 seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
2129                          "Inode Path\n");
2130         else {
2131                 struct sock *s = v;
2132                 struct unix_sock *u = unix_sk(s);
2133                 unix_state_lock(s);
2134
2135                 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
2136                         s,
2137                         atomic_read(&s->sk_refcnt),
2138                         0,
2139                         s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2140                         s->sk_type,
2141                         s->sk_socket ?
2142                         (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2143                         (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2144                         sock_i_ino(s));
2145
2146                 if (u->addr) {
2147                         int i, len;
2148                         seq_putc(seq, ' ');
2149
2150                         i = 0;
2151                         len = u->addr->len - sizeof(short);
2152                         if (!UNIX_ABSTRACT(s))
2153                                 len--;
2154                         else {
2155                                 seq_putc(seq, '@');
2156                                 i++;
2157                         }
2158                         for ( ; i < len; i++)
2159                                 seq_putc(seq, u->addr->name->sun_path[i]);
2160                 }
2161                 unix_state_unlock(s);
2162                 seq_putc(seq, '\n');
2163         }
2164
2165         return 0;
2166 }
2167
2168 static const struct seq_operations unix_seq_ops = {
2169         .start  = unix_seq_start,
2170         .next   = unix_seq_next,
2171         .stop   = unix_seq_stop,
2172         .show   = unix_seq_show,
2173 };
2174
2175
2176 static int unix_seq_open(struct inode *inode, struct file *file)
2177 {
2178         return seq_open_net(inode, file, &unix_seq_ops,
2179                             sizeof(struct unix_iter_state));
2180 }
2181
2182 static const struct file_operations unix_seq_fops = {
2183         .owner          = THIS_MODULE,
2184         .open           = unix_seq_open,
2185         .read           = seq_read,
2186         .llseek         = seq_lseek,
2187         .release        = seq_release_net,
2188 };
2189
2190 #endif
2191
2192 static struct net_proto_family unix_family_ops = {
2193         .family = PF_UNIX,
2194         .create = unix_create,
2195         .owner  = THIS_MODULE,
2196 };
2197
2198
2199 static int unix_net_init(struct net *net)
2200 {
2201         int error = -ENOMEM;
2202
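             /*
              * Per-namespace default for how many datagrams may queue on a
              * receiving socket before further sends block or fail; tunable
              * at runtime through the net.unix.max_dgram_qlen sysctl
              * registered below.
              */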
2203         net->unx.sysctl_max_dgram_qlen = 10;
2204         if (unix_sysctl_register(net))
2205                 goto out;
2206
2207 #ifdef CONFIG_PROC_FS
2208         if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
2209                 unix_sysctl_unregister(net);
2210                 goto out;
2211         }
2212 #endif
2213         error = 0;
2214 out:
2215         return error;
2216 }
2217
2218 static void unix_net_exit(struct net *net)
2219 {
2220         unix_sysctl_unregister(net);
2221         proc_net_remove(net, "unix");
2222 }
2223
2224 static struct pernet_operations unix_net_ops = {
2225         .init = unix_net_init,
2226         .exit = unix_net_exit,
2227 };
2228
2229 static int __init af_unix_init(void)
2230 {
2231         int rc = -1;
2232         struct sk_buff *dummy_skb;
2233
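             /*
              * The per-skb UNIX state (passed credentials, attached file
              * descriptors, etc.) lives in skb->cb, so it must fit in the
              * control buffer.
              */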
2234         BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
2235
2236         rc = proto_register(&unix_proto, 1);
2237         if (rc != 0) {
2238                 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2239                        __func__);
2240                 goto out;
2241         }
2242
2243         sock_register(&unix_family_ops);
2244         register_pernet_subsys(&unix_net_ops);
2245 out:
2246         return rc;
2247 }
2248
2249 static void __exit af_unix_exit(void)
2250 {
2251         sock_unregister(PF_UNIX);
2252         proto_unregister(&unix_proto);
2253         unregister_pernet_subsys(&unix_net_ops);
2254 }
2255
2256 /* Earlier than device_initcall() so that other drivers invoking
2257    request_module() don't end up in a loop when modprobe tries
2258    to use a UNIX socket. But later than subsys_initcall() because
2259    we depend on stuff initialised there. */
2260 fs_initcall(af_unix_init);
2261 module_exit(af_unix_exit);
2262
2263 MODULE_LICENSE("GPL");
2264 MODULE_ALIAS_NETPROTO(PF_UNIX);