1 /*
2  * NET4:        Implementation of BSD Unix domain sockets.
3  *
4  * Authors:     Alan Cox, <alan.cox@linux.org>
5  *
6  *              This program is free software; you can redistribute it and/or
7  *              modify it under the terms of the GNU General Public License
8  *              as published by the Free Software Foundation; either version
9  *              2 of the License, or (at your option) any later version.
10  *
11  * Version:     $Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $
12  *
13  * Fixes:
14  *              Linus Torvalds  :       Assorted bug cures.
15  *              Niibe Yutaka    :       async I/O support.
16  *              Carsten Paeth   :       PF_UNIX check, address fixes.
17  *              Alan Cox        :       Limit size of allocated blocks.
18  *              Alan Cox        :       Fixed the stupid socketpair bug.
19  *              Alan Cox        :       BSD compatibility fine tuning.
20  *              Alan Cox        :       Fixed a bug in connect when interrupted.
21  *              Alan Cox        :       Sorted out a proper draft version of
22  *                                      file descriptor passing hacked up from
23  *                                      Mike Shaver's work.
24  *              Marty Leisner   :       Fixes to fd passing
25  *              Nick Nevin      :       recvmsg bugfix.
26  *              Alan Cox        :       Started proper garbage collector
 27  *              Heiko Eißfeldt  :       Missing verify_area check
28  *              Alan Cox        :       Started POSIXisms
29  *              Andreas Schwab  :       Replace inode by dentry for proper
30  *                                      reference counting
31  *              Kirk Petersen   :       Made this a module
32  *          Christoph Rohland   :       Elegant non-blocking accept/connect algorithm.
33  *                                      Lots of bug fixes.
 34  *           Alexey Kuznetsov   :       Repaired (I hope) bugs introduced
 35  *                                      by the above two patches.
36  *           Andrea Arcangeli   :       If possible we block in connect(2)
37  *                                      if the max backlog of the listen socket
 38  *                                      has been reached. This won't break
 39  *                                      old apps and it will avoid a huge
 40  *                                      number of socks hashed (this is for
 41  *                                      unix_gc() performance reasons).
42  *                                      Security fix that limits the max
43  *                                      number of socks to 2*max_files and
44  *                                      the number of skb queueable in the
45  *                                      dgram receiver.
46  *              Artur Skawina   :       Hash function optimizations
47  *           Alexey Kuznetsov   :       Full scale SMP. Lot of bugs are introduced 8)
48  *            Malcolm Beattie   :       Set peercred for socketpair
49  *           Michal Ostrowski   :       Module initialization cleanup.
50  *           Arnaldo C. Melo    :       Remove MOD_{INC,DEC}_USE_COUNT,
51  *                                      the core infrastructure is doing that
52  *                                      for all net proto families now (2.5.69+)
53  *
54  *
55  * Known differences from reference BSD that was tested:
56  *
57  *      [TO FIX]
 58  *      ECONNREFUSED is not returned from one end of a connected socket to the
 59  *              other the moment one end closes.
 60  *      fstat() doesn't return st_dev=0, and gives the blksize as the high water
 61  *              mark and a fake inode identifier (nor the BSD first-socket fstat-twice bug).
62  *      [NOT TO FIX]
63  *      accept() returns a path name even if the connecting socket has closed
64  *              in the meantime (BSD loses the path and gives up).
65  *      accept() returns 0 length path for an unbound connector. BSD returns 16
66  *              and a null first byte in the path (but not for gethost/peername - BSD bug ??)
67  *      socketpair(...SOCK_RAW..) doesn't panic the kernel.
 68  *      BSD af_unix's connect apparently forgets to block properly.
 69  *              (need to check this against the POSIX spec in detail)
70  *
71  * Differences from 2.0.0-11-... (ANK)
72  *      Bug fixes and improvements.
73  *              - client shutdown killed server socket.
74  *              - removed all useless cli/sti pairs.
75  *
76  *      Semantic changes/extensions.
77  *              - generic control message passing.
78  *              - SCM_CREDENTIALS control message.
79  *              - "Abstract" (not FS based) socket bindings.
80  *                Abstract names are sequences of bytes (not zero terminated)
 81  *                starting with a zero byte, so that this name space does not
 82  *                intersect with BSD names.
83  */
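
/*
 * For illustration only (a userspace sketch, not kernel code): a caller binds
 * an abstract name by leaving sun_path[0] as a zero byte and letting the
 * address length delimit the name.  Assuming fd is an AF_UNIX socket:
 *
 *      struct sockaddr_un a;
 *      memset(&a, 0, sizeof(a));
 *      a.sun_family = AF_UNIX;
 *      memcpy(a.sun_path + 1, "myname", 6);    (note: no trailing NUL)
 *      bind(fd, (struct sockaddr *)&a,
 *           offsetof(struct sockaddr_un, sun_path) + 1 + 6);
 *
 * The kernel side of this convention lives in unix_mkname() and unix_bind()
 * below.
 */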
84
85 #include <linux/module.h>
86 #include <linux/kernel.h>
87 #include <linux/signal.h>
88 #include <linux/sched.h>
89 #include <linux/errno.h>
90 #include <linux/string.h>
91 #include <linux/stat.h>
92 #include <linux/dcache.h>
93 #include <linux/namei.h>
94 #include <linux/socket.h>
95 #include <linux/un.h>
96 #include <linux/fcntl.h>
97 #include <linux/termios.h>
98 #include <linux/sockios.h>
99 #include <linux/net.h>
100 #include <linux/in.h>
101 #include <linux/fs.h>
102 #include <linux/slab.h>
103 #include <asm/uaccess.h>
104 #include <linux/skbuff.h>
105 #include <linux/netdevice.h>
106 #include <net/net_namespace.h>
107 #include <net/sock.h>
108 #include <net/tcp_states.h>
109 #include <net/af_unix.h>
110 #include <linux/proc_fs.h>
111 #include <linux/seq_file.h>
112 #include <net/scm.h>
113 #include <linux/init.h>
114 #include <linux/poll.h>
115 #include <linux/rtnetlink.h>
116 #include <linux/mount.h>
117 #include <net/checksum.h>
118 #include <linux/security.h>
119
120 int sysctl_unix_max_dgram_qlen __read_mostly = 10;
121
122 static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
123 static DEFINE_SPINLOCK(unix_table_lock);
124 static atomic_t unix_nr_socks = ATOMIC_INIT(0);
125
126 #define unix_sockets_unbound    (&unix_socket_table[UNIX_HASH_SIZE])
127
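/*
 * A socket bound to a filesystem path stores UNIX_HASH_SIZE in addr->hash
 * (see unix_bind() below), so any other value means the address is abstract
 * or was autobound.  The macro assumes the socket already has an address.
 */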
128 #define UNIX_ABSTRACT(sk)       (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
129
130 static struct sock *first_unix_socket(int *i)
131 {
132         for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
133                 if (!hlist_empty(&unix_socket_table[*i]))
134                         return __sk_head(&unix_socket_table[*i]);
135         }
136         return NULL;
137 }
138
139 static struct sock *next_unix_socket(int *i, struct sock *s)
140 {
141         struct sock *next = sk_next(s);
142         /* More in this chain? */
143         if (next)
144                 return next;
145         /* Look for next non-empty chain. */
146         for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
147                 if (!hlist_empty(&unix_socket_table[*i]))
148                         return __sk_head(&unix_socket_table[*i]);
149         }
150         return NULL;
151 }
152
153 #define forall_unix_sockets(i, s) \
154         for (s = first_unix_socket(&(i)); s; s = next_unix_socket(&(i),(s)))
155
156 #ifdef CONFIG_SECURITY_NETWORK
157 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
158 {
159         memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
160 }
161
162 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
163 {
164         scm->secid = *UNIXSID(skb);
165 }
166 #else
167 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
168 { }
169
170 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
171 { }
172 #endif /* CONFIG_SECURITY_NETWORK */
173
174 /*
175  *  SMP locking strategy:
176  *    hash table is protected with spinlock unix_table_lock
177  *    each socket state is protected by separate rwlock.
178  */
179
180 static inline unsigned unix_hash_fold(__wsum n)
181 {
182         unsigned hash = (__force unsigned)n;
183         hash ^= hash>>16;
184         hash ^= hash>>8;
185         return hash&(UNIX_HASH_SIZE-1);
186 }
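
/*
 * The folded hash is later XORed with the socket type before it is used as
 * an index into unix_socket_table (see __unix_find_socket_byname(),
 * unix_autobind() and unix_bind()), so a datagram and a stream socket bound
 * to the same abstract name land in different hash chains.
 */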
187
188 #define unix_peer(sk) (unix_sk(sk)->peer)
189
190 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
191 {
192         return unix_peer(osk) == sk;
193 }
194
195 static inline int unix_may_send(struct sock *sk, struct sock *osk)
196 {
197         return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
198 }
199
200 static struct sock *unix_peer_get(struct sock *s)
201 {
202         struct sock *peer;
203
204         unix_state_lock(s);
205         peer = unix_peer(s);
206         if (peer)
207                 sock_hold(peer);
208         unix_state_unlock(s);
209         return peer;
210 }
211
212 static inline void unix_release_addr(struct unix_address *addr)
213 {
214         if (atomic_dec_and_test(&addr->refcnt))
215                 kfree(addr);
216 }
217
218 /*
219  *      Check unix socket name:
 220  *              - should not be zero length.
 221  *              - if it does not start with a zero byte, it should be NUL terminated (FS object)
 222  *              - if it starts with a zero byte, it is an abstract name.
223  */
224
225 static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
226 {
227         if (len <= sizeof(short) || len > sizeof(*sunaddr))
228                 return -EINVAL;
229         if (!sunaddr || sunaddr->sun_family != AF_UNIX)
230                 return -EINVAL;
231         if (sunaddr->sun_path[0]) {
232                 /*
233                  * This may look like an off by one error but it is a bit more
234                  * subtle. 108 is the longest valid AF_UNIX path for a binding.
 235                  * sun_path[108] doesn't as such exist.  However in kernel space
236                  * we are guaranteed that it is a valid memory location in our
237                  * kernel address buffer.
238                  */
239                 ((char *)sunaddr)[len]=0;
240                 len = strlen(sunaddr->sun_path)+1+sizeof(short);
241                 return len;
242         }
243
244         *hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
245         return len;
246 }
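
/*
 * Worked example for unix_mkname(): for the filesystem name "/tmp/foo" it
 * NUL-terminates the copied path inside the kernel buffer and returns
 * sizeof(short) + strlen("/tmp/foo") + 1 = 11, leaving *hashp untouched;
 * for an abstract name (sun_path[0] == 0) it returns len unchanged and
 * stores the folded checksum of the whole address in *hashp.
 */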
247
248 static void __unix_remove_socket(struct sock *sk)
249 {
250         sk_del_node_init(sk);
251 }
252
253 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
254 {
255         BUG_TRAP(sk_unhashed(sk));
256         sk_add_node(sk, list);
257 }
258
259 static inline void unix_remove_socket(struct sock *sk)
260 {
261         spin_lock(&unix_table_lock);
262         __unix_remove_socket(sk);
263         spin_unlock(&unix_table_lock);
264 }
265
266 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
267 {
268         spin_lock(&unix_table_lock);
269         __unix_insert_socket(list, sk);
270         spin_unlock(&unix_table_lock);
271 }
272
273 static struct sock *__unix_find_socket_byname(struct sockaddr_un *sunname,
274                                               int len, int type, unsigned hash)
275 {
276         struct sock *s;
277         struct hlist_node *node;
278
279         sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
280                 struct unix_sock *u = unix_sk(s);
281
282                 if (u->addr->len == len &&
283                     !memcmp(u->addr->name, sunname, len))
284                         goto found;
285         }
286         s = NULL;
287 found:
288         return s;
289 }
290
291 static inline struct sock *unix_find_socket_byname(struct sockaddr_un *sunname,
292                                                    int len, int type,
293                                                    unsigned hash)
294 {
295         struct sock *s;
296
297         spin_lock(&unix_table_lock);
298         s = __unix_find_socket_byname(sunname, len, type, hash);
299         if (s)
300                 sock_hold(s);
301         spin_unlock(&unix_table_lock);
302         return s;
303 }
304
305 static struct sock *unix_find_socket_byinode(struct inode *i)
306 {
307         struct sock *s;
308         struct hlist_node *node;
309
310         spin_lock(&unix_table_lock);
311         sk_for_each(s, node,
312                     &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
313                 struct dentry *dentry = unix_sk(s)->dentry;
314
315                 if(dentry && dentry->d_inode == i)
316                 {
317                         sock_hold(s);
318                         goto found;
319                 }
320         }
321         s = NULL;
322 found:
323         spin_unlock(&unix_table_lock);
324         return s;
325 }
326
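/*
 * A socket counts as writable while less than a quarter of sk_sndbuf is
 * committed to in-flight write buffers (wmem_alloc << 2 <= sndbuf).
 */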
327 static inline int unix_writable(struct sock *sk)
328 {
329         return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
330 }
331
332 static void unix_write_space(struct sock *sk)
333 {
334         read_lock(&sk->sk_callback_lock);
335         if (unix_writable(sk)) {
336                 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
337                         wake_up_interruptible(sk->sk_sleep);
338                 sk_wake_async(sk, 2, POLL_OUT);
339         }
340         read_unlock(&sk->sk_callback_lock);
341 }
342
 343 /* When a dgram socket disconnects (or changes its peer), we clear its receive
 344  * queue of packets that arrived from the previous peer. First, this allows us
 345  * to do flow control based only on wmem_alloc; second, an sk connected to a
 346  * peer may receive messages only from that peer. */
347 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
348 {
349         if (!skb_queue_empty(&sk->sk_receive_queue)) {
350                 skb_queue_purge(&sk->sk_receive_queue);
351                 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
352
 353                 /* If one link of a bidirectional dgram pipe is disconnected,
 354                  * we signal an error. Messages are lost. Do not do this
 355                  * when the peer was not connected to us.
356                  */
357                 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
358                         other->sk_err = ECONNRESET;
359                         other->sk_error_report(other);
360                 }
361         }
362 }
363
364 static void unix_sock_destructor(struct sock *sk)
365 {
366         struct unix_sock *u = unix_sk(sk);
367
368         skb_queue_purge(&sk->sk_receive_queue);
369
370         BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
371         BUG_TRAP(sk_unhashed(sk));
372         BUG_TRAP(!sk->sk_socket);
373         if (!sock_flag(sk, SOCK_DEAD)) {
374                 printk("Attempt to release alive unix socket: %p\n", sk);
375                 return;
376         }
377
378         if (u->addr)
379                 unix_release_addr(u->addr);
380
381         atomic_dec(&unix_nr_socks);
382 #ifdef UNIX_REFCNT_DEBUG
383         printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
384 #endif
385 }
386
387 static int unix_release_sock (struct sock *sk, int embrion)
388 {
389         struct unix_sock *u = unix_sk(sk);
390         struct dentry *dentry;
391         struct vfsmount *mnt;
392         struct sock *skpair;
393         struct sk_buff *skb;
394         int state;
395
396         unix_remove_socket(sk);
397
398         /* Clear state */
399         unix_state_lock(sk);
400         sock_orphan(sk);
401         sk->sk_shutdown = SHUTDOWN_MASK;
402         dentry       = u->dentry;
403         u->dentry    = NULL;
404         mnt          = u->mnt;
405         u->mnt       = NULL;
406         state = sk->sk_state;
407         sk->sk_state = TCP_CLOSE;
408         unix_state_unlock(sk);
409
410         wake_up_interruptible_all(&u->peer_wait);
411
412         skpair=unix_peer(sk);
413
414         if (skpair!=NULL) {
415                 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
416                         unix_state_lock(skpair);
417                         /* No more writes */
418                         skpair->sk_shutdown = SHUTDOWN_MASK;
419                         if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
420                                 skpair->sk_err = ECONNRESET;
421                         unix_state_unlock(skpair);
422                         skpair->sk_state_change(skpair);
423                         read_lock(&skpair->sk_callback_lock);
424                         sk_wake_async(skpair,1,POLL_HUP);
425                         read_unlock(&skpair->sk_callback_lock);
426                 }
427                 sock_put(skpair); /* It may now die */
428                 unix_peer(sk) = NULL;
429         }
430
431         /* Try to flush out this socket. Throw out buffers at least */
432
433         while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
434                 if (state==TCP_LISTEN)
435                         unix_release_sock(skb->sk, 1);
436                 /* passed fds are erased in the kfree_skb hook        */
437                 kfree_skb(skb);
438         }
439
440         if (dentry) {
441                 dput(dentry);
442                 mntput(mnt);
443         }
444
445         sock_put(sk);
446
447         /* ---- Socket is dead now and most probably destroyed ---- */
448
449         /*
 450          * Fixme: BSD difference: In BSD all sockets connected to us get
451          *        ECONNRESET and we die on the spot. In Linux we behave
452          *        like files and pipes do and wait for the last
453          *        dereference.
454          *
455          * Can't we simply set sock->err?
456          *
 457          *        What does the above comment talk about? --ANK(980817)
458          */
459
460         if (atomic_read(&unix_tot_inflight))
461                 unix_gc();              /* Garbage collect fds */
462
463         return 0;
464 }
465
466 static int unix_listen(struct socket *sock, int backlog)
467 {
468         int err;
469         struct sock *sk = sock->sk;
470         struct unix_sock *u = unix_sk(sk);
471
472         err = -EOPNOTSUPP;
473         if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
474                 goto out;                       /* Only stream/seqpacket sockets accept */
475         err = -EINVAL;
476         if (!u->addr)
477                 goto out;                       /* No listens on an unbound socket */
478         unix_state_lock(sk);
479         if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
480                 goto out_unlock;
481         if (backlog > sk->sk_max_ack_backlog)
482                 wake_up_interruptible_all(&u->peer_wait);
483         sk->sk_max_ack_backlog  = backlog;
484         sk->sk_state            = TCP_LISTEN;
485         /* set credentials so connect can copy them */
486         sk->sk_peercred.pid     = current->tgid;
487         sk->sk_peercred.uid     = current->euid;
488         sk->sk_peercred.gid     = current->egid;
489         err = 0;
490
491 out_unlock:
492         unix_state_unlock(sk);
493 out:
494         return err;
495 }
496
497 static int unix_release(struct socket *);
498 static int unix_bind(struct socket *, struct sockaddr *, int);
499 static int unix_stream_connect(struct socket *, struct sockaddr *,
500                                int addr_len, int flags);
501 static int unix_socketpair(struct socket *, struct socket *);
502 static int unix_accept(struct socket *, struct socket *, int);
503 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
504 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
505 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
506 static int unix_shutdown(struct socket *, int);
507 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
508                                struct msghdr *, size_t);
509 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
510                                struct msghdr *, size_t, int);
511 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
512                               struct msghdr *, size_t);
513 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
514                               struct msghdr *, size_t, int);
515 static int unix_dgram_connect(struct socket *, struct sockaddr *,
516                               int, int);
517 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
518                                   struct msghdr *, size_t);
519
520 static const struct proto_ops unix_stream_ops = {
521         .family =       PF_UNIX,
522         .owner =        THIS_MODULE,
523         .release =      unix_release,
524         .bind =         unix_bind,
525         .connect =      unix_stream_connect,
526         .socketpair =   unix_socketpair,
527         .accept =       unix_accept,
528         .getname =      unix_getname,
529         .poll =         unix_poll,
530         .ioctl =        unix_ioctl,
531         .listen =       unix_listen,
532         .shutdown =     unix_shutdown,
533         .setsockopt =   sock_no_setsockopt,
534         .getsockopt =   sock_no_getsockopt,
535         .sendmsg =      unix_stream_sendmsg,
536         .recvmsg =      unix_stream_recvmsg,
537         .mmap =         sock_no_mmap,
538         .sendpage =     sock_no_sendpage,
539 };
540
541 static const struct proto_ops unix_dgram_ops = {
542         .family =       PF_UNIX,
543         .owner =        THIS_MODULE,
544         .release =      unix_release,
545         .bind =         unix_bind,
546         .connect =      unix_dgram_connect,
547         .socketpair =   unix_socketpair,
548         .accept =       sock_no_accept,
549         .getname =      unix_getname,
550         .poll =         datagram_poll,
551         .ioctl =        unix_ioctl,
552         .listen =       sock_no_listen,
553         .shutdown =     unix_shutdown,
554         .setsockopt =   sock_no_setsockopt,
555         .getsockopt =   sock_no_getsockopt,
556         .sendmsg =      unix_dgram_sendmsg,
557         .recvmsg =      unix_dgram_recvmsg,
558         .mmap =         sock_no_mmap,
559         .sendpage =     sock_no_sendpage,
560 };
561
562 static const struct proto_ops unix_seqpacket_ops = {
563         .family =       PF_UNIX,
564         .owner =        THIS_MODULE,
565         .release =      unix_release,
566         .bind =         unix_bind,
567         .connect =      unix_stream_connect,
568         .socketpair =   unix_socketpair,
569         .accept =       unix_accept,
570         .getname =      unix_getname,
571         .poll =         datagram_poll,
572         .ioctl =        unix_ioctl,
573         .listen =       unix_listen,
574         .shutdown =     unix_shutdown,
575         .setsockopt =   sock_no_setsockopt,
576         .getsockopt =   sock_no_getsockopt,
577         .sendmsg =      unix_seqpacket_sendmsg,
578         .recvmsg =      unix_dgram_recvmsg,
579         .mmap =         sock_no_mmap,
580         .sendpage =     sock_no_sendpage,
581 };
582
583 static struct proto unix_proto = {
584         .name     = "UNIX",
585         .owner    = THIS_MODULE,
586         .obj_size = sizeof(struct unix_sock),
587 };
588
589 /*
590  * AF_UNIX sockets do not interact with hardware, hence they
 591  * don't trigger interrupts - so it's safe for them to have
592  * bh-unsafe locking for their sk_receive_queue.lock. Split off
593  * this special lock-class by reinitializing the spinlock key:
594  */
595 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
596
597 static struct sock * unix_create1(struct net *net, struct socket *sock)
598 {
599         struct sock *sk = NULL;
600         struct unix_sock *u;
601
602         if (atomic_read(&unix_nr_socks) >= 2*get_max_files())
603                 goto out;
604
605         sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, 1);
606         if (!sk)
607                 goto out;
608
609         atomic_inc(&unix_nr_socks);
610
611         sock_init_data(sock,sk);
612         lockdep_set_class(&sk->sk_receive_queue.lock,
613                                 &af_unix_sk_receive_queue_lock_key);
614
615         sk->sk_write_space      = unix_write_space;
616         sk->sk_max_ack_backlog  = sysctl_unix_max_dgram_qlen;
617         sk->sk_destruct         = unix_sock_destructor;
618         u         = unix_sk(sk);
619         u->dentry = NULL;
620         u->mnt    = NULL;
621         spin_lock_init(&u->lock);
622         atomic_set(&u->inflight, 0);
623         INIT_LIST_HEAD(&u->link);
624         mutex_init(&u->readlock); /* single task reading lock */
625         init_waitqueue_head(&u->peer_wait);
626         unix_insert_socket(unix_sockets_unbound, sk);
627 out:
628         return sk;
629 }
630
631 static int unix_create(struct net *net, struct socket *sock, int protocol)
632 {
633         if (net != &init_net)
634                 return -EAFNOSUPPORT;
635
636         if (protocol && protocol != PF_UNIX)
637                 return -EPROTONOSUPPORT;
638
639         sock->state = SS_UNCONNECTED;
640
641         switch (sock->type) {
642         case SOCK_STREAM:
643                 sock->ops = &unix_stream_ops;
644                 break;
645                 /*
646                  *      Believe it or not BSD has AF_UNIX, SOCK_RAW though
647                  *      nothing uses it.
648                  */
649         case SOCK_RAW:
650                 sock->type=SOCK_DGRAM;
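                /* fall through: SOCK_RAW is handled as SOCK_DGRAM */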
651         case SOCK_DGRAM:
652                 sock->ops = &unix_dgram_ops;
653                 break;
654         case SOCK_SEQPACKET:
655                 sock->ops = &unix_seqpacket_ops;
656                 break;
657         default:
658                 return -ESOCKTNOSUPPORT;
659         }
660
661         return unix_create1(net, sock) ? 0 : -ENOMEM;
662 }
663
664 static int unix_release(struct socket *sock)
665 {
666         struct sock *sk = sock->sk;
667
668         if (!sk)
669                 return 0;
670
671         sock->sk = NULL;
672
673         return unix_release_sock (sk, 0);
674 }
675
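/*
 * Autobinding assigns an abstract address of the form "\0xxxxx" (a zero byte
 * followed by five lowercase hex digits taken from a global counter),
 * retrying until an unused name is found.
 */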
676 static int unix_autobind(struct socket *sock)
677 {
678         struct sock *sk = sock->sk;
679         struct unix_sock *u = unix_sk(sk);
680         static u32 ordernum = 1;
681         struct unix_address * addr;
682         int err;
683
684         mutex_lock(&u->readlock);
685
686         err = 0;
687         if (u->addr)
688                 goto out;
689
690         err = -ENOMEM;
691         addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
692         if (!addr)
693                 goto out;
694
695         addr->name->sun_family = AF_UNIX;
696         atomic_set(&addr->refcnt, 1);
697
698 retry:
699         addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
700         addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));
701
702         spin_lock(&unix_table_lock);
703         ordernum = (ordernum+1)&0xFFFFF;
704
705         if (__unix_find_socket_byname(addr->name, addr->len, sock->type,
706                                       addr->hash)) {
707                 spin_unlock(&unix_table_lock);
 708                 /* Sanity yield. It is an unusual case, but still... */
709                 if (!(ordernum&0xFF))
710                         yield();
711                 goto retry;
712         }
713         addr->hash ^= sk->sk_type;
714
715         __unix_remove_socket(sk);
716         u->addr = addr;
717         __unix_insert_socket(&unix_socket_table[addr->hash], sk);
718         spin_unlock(&unix_table_lock);
719         err = 0;
720
721 out:    mutex_unlock(&u->readlock);
722         return err;
723 }
724
725 static struct sock *unix_find_other(struct sockaddr_un *sunname, int len,
726                                     int type, unsigned hash, int *error)
727 {
728         struct sock *u;
729         struct nameidata nd;
730         int err = 0;
731
732         if (sunname->sun_path[0]) {
733                 err = path_lookup(sunname->sun_path, LOOKUP_FOLLOW, &nd);
734                 if (err)
735                         goto fail;
736                 err = vfs_permission(&nd, MAY_WRITE);
737                 if (err)
738                         goto put_fail;
739
740                 err = -ECONNREFUSED;
741                 if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
742                         goto put_fail;
743                 u=unix_find_socket_byinode(nd.dentry->d_inode);
744                 if (!u)
745                         goto put_fail;
746
747                 if (u->sk_type == type)
748                         touch_atime(nd.mnt, nd.dentry);
749
750                 path_release(&nd);
751
752                 err=-EPROTOTYPE;
753                 if (u->sk_type != type) {
754                         sock_put(u);
755                         goto fail;
756                 }
757         } else {
758                 err = -ECONNREFUSED;
759                 u=unix_find_socket_byname(sunname, len, type, hash);
760                 if (u) {
761                         struct dentry *dentry;
762                         dentry = unix_sk(u)->dentry;
763                         if (dentry)
764                                 touch_atime(unix_sk(u)->mnt, dentry);
765                 } else
766                         goto fail;
767         }
768         return u;
769
770 put_fail:
771         path_release(&nd);
772 fail:
773         *error=err;
774         return NULL;
775 }
776
777
778 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
779 {
780         struct sock *sk = sock->sk;
781         struct unix_sock *u = unix_sk(sk);
782         struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
783         struct dentry * dentry = NULL;
784         struct nameidata nd;
785         int err;
786         unsigned hash;
787         struct unix_address *addr;
788         struct hlist_head *list;
789
790         err = -EINVAL;
791         if (sunaddr->sun_family != AF_UNIX)
792                 goto out;
793
794         if (addr_len==sizeof(short)) {
795                 err = unix_autobind(sock);
796                 goto out;
797         }
798
799         err = unix_mkname(sunaddr, addr_len, &hash);
800         if (err < 0)
801                 goto out;
802         addr_len = err;
803
804         mutex_lock(&u->readlock);
805
806         err = -EINVAL;
807         if (u->addr)
808                 goto out_up;
809
810         err = -ENOMEM;
811         addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
812         if (!addr)
813                 goto out_up;
814
815         memcpy(addr->name, sunaddr, addr_len);
816         addr->len = addr_len;
817         addr->hash = hash ^ sk->sk_type;
818         atomic_set(&addr->refcnt, 1);
819
820         if (sunaddr->sun_path[0]) {
821                 unsigned int mode;
822                 err = 0;
823                 /*
824                  * Get the parent directory, calculate the hash for last
825                  * component.
826                  */
827                 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
828                 if (err)
829                         goto out_mknod_parent;
830
831                 dentry = lookup_create(&nd, 0);
832                 err = PTR_ERR(dentry);
833                 if (IS_ERR(dentry))
834                         goto out_mknod_unlock;
835
836                 /*
837                  * All right, let's create it.
838                  */
839                 mode = S_IFSOCK |
840                        (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
841                 err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
842                 if (err)
843                         goto out_mknod_dput;
844                 mutex_unlock(&nd.dentry->d_inode->i_mutex);
845                 dput(nd.dentry);
846                 nd.dentry = dentry;
847
848                 addr->hash = UNIX_HASH_SIZE;
849         }
850
851         spin_lock(&unix_table_lock);
852
853         if (!sunaddr->sun_path[0]) {
854                 err = -EADDRINUSE;
855                 if (__unix_find_socket_byname(sunaddr, addr_len,
856                                               sk->sk_type, hash)) {
857                         unix_release_addr(addr);
858                         goto out_unlock;
859                 }
860
861                 list = &unix_socket_table[addr->hash];
862         } else {
863                 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
864                 u->dentry = nd.dentry;
865                 u->mnt    = nd.mnt;
866         }
867
868         err = 0;
869         __unix_remove_socket(sk);
870         u->addr = addr;
871         __unix_insert_socket(list, sk);
872
873 out_unlock:
874         spin_unlock(&unix_table_lock);
875 out_up:
876         mutex_unlock(&u->readlock);
877 out:
878         return err;
879
880 out_mknod_dput:
881         dput(dentry);
882 out_mknod_unlock:
883         mutex_unlock(&nd.dentry->d_inode->i_mutex);
884         path_release(&nd);
885 out_mknod_parent:
886         if (err==-EEXIST)
887                 err=-EADDRINUSE;
888         unix_release_addr(addr);
889         goto out_up;
890 }
891
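/*
 * Lock two socket states in a fixed order (by ascending address) so that two
 * tasks connecting a pair of datagram sockets to each other concurrently
 * cannot deadlock on each other's state locks.  A NULL or identical second
 * socket degenerates to locking just sk1.
 */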
892 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
893 {
894         if (unlikely(sk1 == sk2) || !sk2) {
895                 unix_state_lock(sk1);
896                 return;
897         }
898         if (sk1 < sk2) {
899                 unix_state_lock(sk1);
900                 unix_state_lock_nested(sk2);
901         } else {
902                 unix_state_lock(sk2);
903                 unix_state_lock_nested(sk1);
904         }
905 }
906
907 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
908 {
909         if (unlikely(sk1 == sk2) || !sk2) {
910                 unix_state_unlock(sk1);
911                 return;
912         }
913         unix_state_unlock(sk1);
914         unix_state_unlock(sk2);
915 }
916
917 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
918                               int alen, int flags)
919 {
920         struct sock *sk = sock->sk;
921         struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr;
922         struct sock *other;
923         unsigned hash;
924         int err;
925
926         if (addr->sa_family != AF_UNSPEC) {
927                 err = unix_mkname(sunaddr, alen, &hash);
928                 if (err < 0)
929                         goto out;
930                 alen = err;
931
932                 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
933                     !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
934                         goto out;
935
936 restart:
937                 other=unix_find_other(sunaddr, alen, sock->type, hash, &err);
938                 if (!other)
939                         goto out;
940
941                 unix_state_double_lock(sk, other);
942
943                 /* Apparently VFS overslept socket death. Retry. */
944                 if (sock_flag(other, SOCK_DEAD)) {
945                         unix_state_double_unlock(sk, other);
946                         sock_put(other);
947                         goto restart;
948                 }
949
950                 err = -EPERM;
951                 if (!unix_may_send(sk, other))
952                         goto out_unlock;
953
954                 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
955                 if (err)
956                         goto out_unlock;
957
958         } else {
959                 /*
960                  *      1003.1g breaking connected state with AF_UNSPEC
961                  */
962                 other = NULL;
963                 unix_state_double_lock(sk, other);
964         }
965
966         /*
967          * If it was connected, reconnect.
968          */
969         if (unix_peer(sk)) {
970                 struct sock *old_peer = unix_peer(sk);
971                 unix_peer(sk)=other;
972                 unix_state_double_unlock(sk, other);
973
974                 if (other != old_peer)
975                         unix_dgram_disconnected(sk, old_peer);
976                 sock_put(old_peer);
977         } else {
978                 unix_peer(sk)=other;
979                 unix_state_double_unlock(sk, other);
980         }
981         return 0;
982
983 out_unlock:
984         unix_state_double_unlock(sk, other);
985         sock_put(other);
986 out:
987         return err;
988 }
989
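/*
 * Put the caller to sleep (exclusively) on the peer's peer_wait queue until
 * it is woken by the peer or the timeout expires.  The caller must hold
 * other's state lock; it is dropped before sleeping.  Both stream connect
 * and dgram sendmsg use this when the peer's receive queue is over its
 * backlog, and retry afterwards.
 */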
990 static long unix_wait_for_peer(struct sock *other, long timeo)
991 {
992         struct unix_sock *u = unix_sk(other);
993         int sched;
994         DEFINE_WAIT(wait);
995
996         prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
997
998         sched = !sock_flag(other, SOCK_DEAD) &&
999                 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1000                 (skb_queue_len(&other->sk_receive_queue) >
1001                  other->sk_max_ack_backlog);
1002
1003         unix_state_unlock(other);
1004
1005         if (sched)
1006                 timeo = schedule_timeout(timeo);
1007
1008         finish_wait(&u->peer_wait, &wait);
1009         return timeo;
1010 }
1011
1012 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1013                                int addr_len, int flags)
1014 {
1015         struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1016         struct sock *sk = sock->sk;
1017         struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1018         struct sock *newsk = NULL;
1019         struct sock *other = NULL;
1020         struct sk_buff *skb = NULL;
1021         unsigned hash;
1022         int st;
1023         int err;
1024         long timeo;
1025
1026         err = unix_mkname(sunaddr, addr_len, &hash);
1027         if (err < 0)
1028                 goto out;
1029         addr_len = err;
1030
1031         if (test_bit(SOCK_PASSCRED, &sock->flags)
1032                 && !u->addr && (err = unix_autobind(sock)) != 0)
1033                 goto out;
1034
1035         timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1036
1037         /* First of all allocate resources.
 1038            If we did it after the state was locked,
 1039            we would have to recheck everything again in any case.
1040          */
1041
1042         err = -ENOMEM;
1043
1044         /* create new sock for complete connection */
1045         newsk = unix_create1(sk->sk_net, NULL);
1046         if (newsk == NULL)
1047                 goto out;
1048
1049         /* Allocate skb for sending to listening sock */
1050         skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1051         if (skb == NULL)
1052                 goto out;
1053
1054 restart:
1055         /*  Find listening sock. */
1056         other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err);
1057         if (!other)
1058                 goto out;
1059
1060         /* Latch state of peer */
1061         unix_state_lock(other);
1062
1063         /* Apparently VFS overslept socket death. Retry. */
1064         if (sock_flag(other, SOCK_DEAD)) {
1065                 unix_state_unlock(other);
1066                 sock_put(other);
1067                 goto restart;
1068         }
1069
1070         err = -ECONNREFUSED;
1071         if (other->sk_state != TCP_LISTEN)
1072                 goto out_unlock;
1073
1074         if (skb_queue_len(&other->sk_receive_queue) >
1075             other->sk_max_ack_backlog) {
1076                 err = -EAGAIN;
1077                 if (!timeo)
1078                         goto out_unlock;
1079
1080                 timeo = unix_wait_for_peer(other, timeo);
1081
1082                 err = sock_intr_errno(timeo);
1083                 if (signal_pending(current))
1084                         goto out;
1085                 sock_put(other);
1086                 goto restart;
1087         }
1088
1089         /* Latch our state.
1090
 1091            This is a tricky place. We need to grab the write lock and cannot
 1092            drop the lock on the peer. It is dangerous because deadlock is
 1093            possible. The connect-to-self case and simultaneous
 1094            attempts to connect are eliminated by checking the socket
 1095            state. other is TCP_LISTEN; if sk is TCP_LISTEN we
 1096            check this before attempting to grab the lock.
 1097
 1098            Well, and we have to recheck the state after the socket is locked.
1099          */
1100         st = sk->sk_state;
1101
1102         switch (st) {
1103         case TCP_CLOSE:
1104                 /* This is ok... continue with connect */
1105                 break;
1106         case TCP_ESTABLISHED:
1107                 /* Socket is already connected */
1108                 err = -EISCONN;
1109                 goto out_unlock;
1110         default:
1111                 err = -EINVAL;
1112                 goto out_unlock;
1113         }
1114
1115         unix_state_lock_nested(sk);
1116
1117         if (sk->sk_state != st) {
1118                 unix_state_unlock(sk);
1119                 unix_state_unlock(other);
1120                 sock_put(other);
1121                 goto restart;
1122         }
1123
1124         err = security_unix_stream_connect(sock, other->sk_socket, newsk);
1125         if (err) {
1126                 unix_state_unlock(sk);
1127                 goto out_unlock;
1128         }
1129
 1130         /* The way is open! Quickly set all the necessary fields... */
1131
1132         sock_hold(sk);
1133         unix_peer(newsk)        = sk;
1134         newsk->sk_state         = TCP_ESTABLISHED;
1135         newsk->sk_type          = sk->sk_type;
1136         newsk->sk_peercred.pid  = current->tgid;
1137         newsk->sk_peercred.uid  = current->euid;
1138         newsk->sk_peercred.gid  = current->egid;
1139         newu = unix_sk(newsk);
1140         newsk->sk_sleep         = &newu->peer_wait;
1141         otheru = unix_sk(other);
1142
1143         /* copy address information from listening to new sock*/
1144         if (otheru->addr) {
1145                 atomic_inc(&otheru->addr->refcnt);
1146                 newu->addr = otheru->addr;
1147         }
1148         if (otheru->dentry) {
1149                 newu->dentry    = dget(otheru->dentry);
1150                 newu->mnt       = mntget(otheru->mnt);
1151         }
1152
1153         /* Set credentials */
1154         sk->sk_peercred = other->sk_peercred;
1155
1156         sock->state     = SS_CONNECTED;
1157         sk->sk_state    = TCP_ESTABLISHED;
1158         sock_hold(newsk);
1159
1160         smp_mb__after_atomic_inc();     /* sock_hold() does an atomic_inc() */
1161         unix_peer(sk)   = newsk;
1162
1163         unix_state_unlock(sk);
1164
 1165         /* take it and send info to the listening sock */
1166         spin_lock(&other->sk_receive_queue.lock);
1167         __skb_queue_tail(&other->sk_receive_queue, skb);
1168         spin_unlock(&other->sk_receive_queue.lock);
1169         unix_state_unlock(other);
1170         other->sk_data_ready(other, 0);
1171         sock_put(other);
1172         return 0;
1173
1174 out_unlock:
1175         if (other)
1176                 unix_state_unlock(other);
1177
1178 out:
1179         if (skb)
1180                 kfree_skb(skb);
1181         if (newsk)
1182                 unix_release_sock(newsk, 0);
1183         if (other)
1184                 sock_put(other);
1185         return err;
1186 }
1187
1188 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1189 {
1190         struct sock *ska=socka->sk, *skb = sockb->sk;
1191
1192         /* Join our sockets back to back */
1193         sock_hold(ska);
1194         sock_hold(skb);
1195         unix_peer(ska)=skb;
1196         unix_peer(skb)=ska;
1197         ska->sk_peercred.pid = skb->sk_peercred.pid = current->tgid;
1198         ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
1199         ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;
1200
1201         if (ska->sk_type != SOCK_DGRAM) {
1202                 ska->sk_state = TCP_ESTABLISHED;
1203                 skb->sk_state = TCP_ESTABLISHED;
1204                 socka->state  = SS_CONNECTED;
1205                 sockb->state  = SS_CONNECTED;
1206         }
1207         return 0;
1208 }
1209
1210 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1211 {
1212         struct sock *sk = sock->sk;
1213         struct sock *tsk;
1214         struct sk_buff *skb;
1215         int err;
1216
1217         err = -EOPNOTSUPP;
1218         if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
1219                 goto out;
1220
1221         err = -EINVAL;
1222         if (sk->sk_state != TCP_LISTEN)
1223                 goto out;
1224
1225         /* If socket state is TCP_LISTEN it cannot change (for now...),
 1226          * so no locks are necessary.
1227          */
1228
1229         skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1230         if (!skb) {
1231                 /* This means receive shutdown. */
1232                 if (err == 0)
1233                         err = -EINVAL;
1234                 goto out;
1235         }
1236
1237         tsk = skb->sk;
1238         skb_free_datagram(sk, skb);
1239         wake_up_interruptible(&unix_sk(sk)->peer_wait);
1240
1241         /* attach accepted sock to socket */
1242         unix_state_lock(tsk);
1243         newsock->state = SS_CONNECTED;
1244         sock_graft(tsk, newsock);
1245         unix_state_unlock(tsk);
1246         return 0;
1247
1248 out:
1249         return err;
1250 }
1251
1252
1253 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1254 {
1255         struct sock *sk = sock->sk;
1256         struct unix_sock *u;
1257         struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1258         int err = 0;
1259
1260         if (peer) {
1261                 sk = unix_peer_get(sk);
1262
1263                 err = -ENOTCONN;
1264                 if (!sk)
1265                         goto out;
1266                 err = 0;
1267         } else {
1268                 sock_hold(sk);
1269         }
1270
1271         u = unix_sk(sk);
1272         unix_state_lock(sk);
1273         if (!u->addr) {
1274                 sunaddr->sun_family = AF_UNIX;
1275                 sunaddr->sun_path[0] = 0;
1276                 *uaddr_len = sizeof(short);
1277         } else {
1278                 struct unix_address *addr = u->addr;
1279
1280                 *uaddr_len = addr->len;
1281                 memcpy(sunaddr, addr->name, *uaddr_len);
1282         }
1283         unix_state_unlock(sk);
1284         sock_put(sk);
1285 out:
1286         return err;
1287 }
1288
1289 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1290 {
1291         int i;
1292
1293         scm->fp = UNIXCB(skb).fp;
1294         skb->destructor = sock_wfree;
1295         UNIXCB(skb).fp = NULL;
1296
1297         for (i=scm->fp->count-1; i>=0; i--)
1298                 unix_notinflight(scm->fp->fp[i]);
1299 }
1300
1301 static void unix_destruct_fds(struct sk_buff *skb)
1302 {
1303         struct scm_cookie scm;
1304         memset(&scm, 0, sizeof(scm));
1305         unix_detach_fds(&scm, skb);
1306
1307         /* Alas, it calls VFS */
 1308         /* So fscking what? fput() has been SMP-safe since last summer */
1309         scm_destroy(&scm);
1310         sock_wfree(skb);
1311 }
1312
1313 static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1314 {
1315         int i;
1316         for (i=scm->fp->count-1; i>=0; i--)
1317                 unix_inflight(scm->fp->fp[i]);
1318         UNIXCB(skb).fp = scm->fp;
1319         skb->destructor = unix_destruct_fds;
1320         scm->fp = NULL;
1321 }
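
/*
 * unix_attach_fds()/unix_detach_fds() implement SCM_RIGHTS descriptor
 * passing: attaching marks each file as in flight for the garbage collector
 * and stores the fd array in the skb's control block, while detaching (on
 * receive) or unix_destruct_fds() (on an skb freed undelivered) undoes that
 * accounting.
 */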
1322
1323 /*
1324  *      Send AF_UNIX data.
1325  */
1326
1327 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1328                               struct msghdr *msg, size_t len)
1329 {
1330         struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1331         struct sock *sk = sock->sk;
1332         struct unix_sock *u = unix_sk(sk);
1333         struct sockaddr_un *sunaddr=msg->msg_name;
1334         struct sock *other = NULL;
1335         int namelen = 0; /* fake GCC */
1336         int err;
1337         unsigned hash;
1338         struct sk_buff *skb;
1339         long timeo;
1340         struct scm_cookie tmp_scm;
1341
1342         if (NULL == siocb->scm)
1343                 siocb->scm = &tmp_scm;
1344         err = scm_send(sock, msg, siocb->scm);
1345         if (err < 0)
1346                 return err;
1347
1348         err = -EOPNOTSUPP;
1349         if (msg->msg_flags&MSG_OOB)
1350                 goto out;
1351
1352         if (msg->msg_namelen) {
1353                 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1354                 if (err < 0)
1355                         goto out;
1356                 namelen = err;
1357         } else {
1358                 sunaddr = NULL;
1359                 err = -ENOTCONN;
1360                 other = unix_peer_get(sk);
1361                 if (!other)
1362                         goto out;
1363         }
1364
1365         if (test_bit(SOCK_PASSCRED, &sock->flags)
1366                 && !u->addr && (err = unix_autobind(sock)) != 0)
1367                 goto out;
1368
1369         err = -EMSGSIZE;
1370         if (len > sk->sk_sndbuf - 32)
1371                 goto out;
1372
1373         skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1374         if (skb==NULL)
1375                 goto out;
1376
1377         memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1378         if (siocb->scm->fp)
1379                 unix_attach_fds(siocb->scm, skb);
1380         unix_get_secdata(siocb->scm, skb);
1381
1382         skb_reset_transport_header(skb);
1383         err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
1384         if (err)
1385                 goto out_free;
1386
1387         timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1388
1389 restart:
1390         if (!other) {
1391                 err = -ECONNRESET;
1392                 if (sunaddr == NULL)
1393                         goto out_free;
1394
1395                 other = unix_find_other(sunaddr, namelen, sk->sk_type,
1396                                         hash, &err);
1397                 if (other==NULL)
1398                         goto out_free;
1399         }
1400
1401         unix_state_lock(other);
1402         err = -EPERM;
1403         if (!unix_may_send(sk, other))
1404                 goto out_unlock;
1405
1406         if (sock_flag(other, SOCK_DEAD)) {
1407                 /*
 1408                  *      Check with 1003.1g - what should the
 1409                  *      datagram error be?
1410                  */
1411                 unix_state_unlock(other);
1412                 sock_put(other);
1413
1414                 err = 0;
1415                 unix_state_lock(sk);
1416                 if (unix_peer(sk) == other) {
1417                         unix_peer(sk)=NULL;
1418                         unix_state_unlock(sk);
1419
1420                         unix_dgram_disconnected(sk, other);
1421                         sock_put(other);
1422                         err = -ECONNREFUSED;
1423                 } else {
1424                         unix_state_unlock(sk);
1425                 }
1426
1427                 other = NULL;
1428                 if (err)
1429                         goto out_free;
1430                 goto restart;
1431         }
1432
1433         err = -EPIPE;
1434         if (other->sk_shutdown & RCV_SHUTDOWN)
1435                 goto out_unlock;
1436
1437         if (sk->sk_type != SOCK_SEQPACKET) {
1438                 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1439                 if (err)
1440                         goto out_unlock;
1441         }
1442
1443         if (unix_peer(other) != sk &&
1444             (skb_queue_len(&other->sk_receive_queue) >
1445              other->sk_max_ack_backlog)) {
1446                 if (!timeo) {
1447                         err = -EAGAIN;
1448                         goto out_unlock;
1449                 }
1450
1451                 timeo = unix_wait_for_peer(other, timeo);
1452
1453                 err = sock_intr_errno(timeo);
1454                 if (signal_pending(current))
1455                         goto out_free;
1456
1457                 goto restart;
1458         }
1459
1460         skb_queue_tail(&other->sk_receive_queue, skb);
1461         unix_state_unlock(other);
1462         other->sk_data_ready(other, len);
1463         sock_put(other);
1464         scm_destroy(siocb->scm);
1465         return len;
1466
1467 out_unlock:
1468         unix_state_unlock(other);
1469 out_free:
1470         kfree_skb(skb);
1471 out:
1472         if (other)
1473                 sock_put(other);
1474         scm_destroy(siocb->scm);
1475         return err;
1476 }
1477
1478
1479 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1480                                struct msghdr *msg, size_t len)
1481 {
1482         struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1483         struct sock *sk = sock->sk;
1484         struct sock *other = NULL;
1485         struct sockaddr_un *sunaddr=msg->msg_name;
1486         int err,size;
1487         struct sk_buff *skb;
1488         int sent=0;
1489         struct scm_cookie tmp_scm;
1490
1491         if (NULL == siocb->scm)
1492                 siocb->scm = &tmp_scm;
1493         err = scm_send(sock, msg, siocb->scm);
1494         if (err < 0)
1495                 return err;
1496
1497         err = -EOPNOTSUPP;
1498         if (msg->msg_flags&MSG_OOB)
1499                 goto out_err;
1500
1501         if (msg->msg_namelen) {
1502                 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1503                 goto out_err;
1504         } else {
1505                 sunaddr = NULL;
1506                 err = -ENOTCONN;
1507                 other = unix_peer(sk);
1508                 if (!other)
1509                         goto out_err;
1510         }
1511
1512         if (sk->sk_shutdown & SEND_SHUTDOWN)
1513                 goto pipe_err;
1514
1515         while(sent < len)
1516         {
1517                 /*
1518                  *      Optimisation for the fact that under 0.01% of X
1519                  *      messages typically need breaking up.
1520                  */
1521
1522                 size = len-sent;
1523
1524                 /* Keep two messages in the pipe so it schedules better */
1525                 if (size > ((sk->sk_sndbuf >> 1) - 64))
1526                         size = (sk->sk_sndbuf >> 1) - 64;
1527
1528                 if (size > SKB_MAX_ALLOC)
1529                         size = SKB_MAX_ALLOC;
1530
1531                 /*
1532                  *      Grab a buffer
1533                  */
1534
1535                 skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
1536
1537                 if (skb==NULL)
1538                         goto out_err;
1539
1540                 /*
1541                  *      If you pass two values to the sock_alloc_send_skb
1542                  *      it tries to grab the large buffer with GFP_NOFS
 1543                  *      (which can fail easily), and if that fails it grabs the
 1544                  *      fallback-size buffer, which is under a page and will
 1545                  *      succeed. [Alan]
1546                  */
1547                 size = min_t(int, size, skb_tailroom(skb));
1548
1549                 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1550                 if (siocb->scm->fp)
1551                         unix_attach_fds(siocb->scm, skb);
1552
1553                 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
1554                         kfree_skb(skb);
1555                         goto out_err;
1556                 }
1557
1558                 unix_state_lock(other);
1559
1560                 if (sock_flag(other, SOCK_DEAD) ||
1561                     (other->sk_shutdown & RCV_SHUTDOWN))
1562                         goto pipe_err_free;
1563
1564                 skb_queue_tail(&other->sk_receive_queue, skb);
1565                 unix_state_unlock(other);
1566                 other->sk_data_ready(other, size);
1567                 sent+=size;
1568         }
1569
1570         scm_destroy(siocb->scm);
1571         siocb->scm = NULL;
1572
1573         return sent;
1574
1575 pipe_err_free:
1576         unix_state_unlock(other);
1577         kfree_skb(skb);
1578 pipe_err:
1579         if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
1580                 send_sig(SIGPIPE,current,0);
1581         err = -EPIPE;
1582 out_err:
1583         scm_destroy(siocb->scm);
1584         siocb->scm = NULL;
1585         return sent ? : err;
1586 }
1587
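/*
 *	SOCK_SEQPACKET send: the socket must already be connected, so any
 *	supplied address is ignored and the message is handed on to
 *	unix_dgram_sendmsg(), which preserves record boundaries.
 */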
1588 static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1589                                   struct msghdr *msg, size_t len)
1590 {
1591         int err;
1592         struct sock *sk = sock->sk;
1593
1594         err = sock_error(sk);
1595         if (err)
1596                 return err;
1597
1598         if (sk->sk_state != TCP_ESTABLISHED)
1599                 return -ENOTCONN;
1600
1601         if (msg->msg_namelen)
1602                 msg->msg_namelen = 0;
1603
1604         return unix_dgram_sendmsg(kiocb, sock, msg, len);
1605 }
1606
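/*
 *	Copy the sending socket's bound address (if it has one) into
 *	msg_name; otherwise msg_namelen is left at zero.
 */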
1607 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1608 {
1609         struct unix_sock *u = unix_sk(sk);
1610
1611         msg->msg_namelen = 0;
1612         if (u->addr) {
1613                 msg->msg_namelen = u->addr->len;
1614                 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1615         }
1616 }
1617
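/*
 *	Receive a single datagram.  The sender's address, credentials and
 *	any attached file descriptors are returned through the control
 *	message interface; with MSG_PEEK the descriptors are duplicated
 *	rather than removed from the skb (see the comment below).  A
 *	datagram larger than the supplied buffer is truncated and
 *	MSG_TRUNC is set.
 */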
1618 static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1619                               struct msghdr *msg, size_t size,
1620                               int flags)
1621 {
1622         struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1623         struct scm_cookie tmp_scm;
1624         struct sock *sk = sock->sk;
1625         struct unix_sock *u = unix_sk(sk);
1626         int noblock = flags & MSG_DONTWAIT;
1627         struct sk_buff *skb;
1628         int err;
1629
1630         err = -EOPNOTSUPP;
1631         if (flags&MSG_OOB)
1632                 goto out;
1633
1634         msg->msg_namelen = 0;
1635
1636         mutex_lock(&u->readlock);
1637
1638         skb = skb_recv_datagram(sk, flags, noblock, &err);
1639         if (!skb)
1640                 goto out_unlock;
1641
1642         wake_up_interruptible(&u->peer_wait);
1643
1644         if (msg->msg_name)
1645                 unix_copy_addr(msg, skb->sk);
1646
1647         if (size > skb->len)
1648                 size = skb->len;
1649         else if (size < skb->len)
1650                 msg->msg_flags |= MSG_TRUNC;
1651
1652         err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1653         if (err)
1654                 goto out_free;
1655
1656         if (!siocb->scm) {
1657                 siocb->scm = &tmp_scm;
1658                 memset(&tmp_scm, 0, sizeof(tmp_scm));
1659         }
1660         siocb->scm->creds = *UNIXCREDS(skb);
1661         unix_set_secdata(siocb->scm, skb);
1662
1663         if (!(flags & MSG_PEEK))
1664         {
1665                 if (UNIXCB(skb).fp)
1666                         unix_detach_fds(siocb->scm, skb);
1667         }
1668         else
1669         {
1670                 /* It is questionable what to do on PEEK; we could:
1671                    - not return fds - good, but too simple 8)
1672                    - return fds, and not return them again on a later
1673                      read (old strategy, apparently wrong)
1674                    - clone fds (chosen for now, as the most universal
1675                      solution)
1676
1677                    POSIX 1003.1g does not actually define this clearly
1678                    at all.  POSIX 1003.1g doesn't define a lot of things
1679                    clearly, however!
1680
1681                 */
1682                 if (UNIXCB(skb).fp)
1683                         siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1684         }
1685         err = size;
1686
1687         scm_recv(sock, msg, siocb->scm, flags);
1688
1689 out_free:
1690         skb_free_datagram(sk,skb);
1691 out_unlock:
1692         mutex_unlock(&u->readlock);
1693 out:
1694         return err;
1695 }
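/*
 *	Illustrative userspace sketch (an assumption-laden example, not part
 *	of this file): because MSG_PEEK duplicates queued SCM_RIGHTS
 *	descriptors instead of consuming them, peeking and then reading the
 *	same datagram installs two independent copies of each passed fd and
 *	the receiver must close both.  Roughly, with 's' an AF_UNIX datagram
 *	socket and error handling omitted:
 *
 *		char data[64], ctl[CMSG_SPACE(sizeof(int))];
 *		struct iovec iov = { data, sizeof(data) };
 *		struct msghdr m = { 0 };
 *		struct cmsghdr *c;
 *		int fd1 = -1, fd2 = -1;
 *
 *		m.msg_iov = &iov;  m.msg_iovlen = 1;
 *		m.msg_control = ctl;  m.msg_controllen = sizeof(ctl);
 *		recvmsg(s, &m, MSG_PEEK);
 *		c = CMSG_FIRSTHDR(&m);
 *		if (c && c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_RIGHTS)
 *			memcpy(&fd1, CMSG_DATA(c), sizeof(fd1));
 *
 *		m.msg_controllen = sizeof(ctl);
 *		recvmsg(s, &m, 0);
 *		c = CMSG_FIRSTHDR(&m);
 *		if (c && c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_RIGHTS)
 *			memcpy(&fd2, CMSG_DATA(c), sizeof(fd2));
 *
 *		close(fd1);
 *		close(fd2);
 */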
1696
1697 /*
1698  *      Sleep until data has arrived, but check for races.
1699  */
1700
1701 static long unix_stream_data_wait(struct sock * sk, long timeo)
1702 {
1703         DEFINE_WAIT(wait);
1704
1705         unix_state_lock(sk);
1706
1707         for (;;) {
1708                 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1709
1710                 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1711                     sk->sk_err ||
1712                     (sk->sk_shutdown & RCV_SHUTDOWN) ||
1713                     signal_pending(current) ||
1714                     !timeo)
1715                         break;
1716
1717                 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1718                 unix_state_unlock(sk);
1719                 timeo = schedule_timeout(timeo);
1720                 unix_state_lock(sk);
1721                 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1722         }
1723
1724         finish_wait(sk->sk_sleep, &wait);
1725         unix_state_unlock(sk);
1726         return timeo;
1727 }
1728
1729
1730
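/*
 *	Receive on a connected SOCK_STREAM socket.  Queued skbs are
 *	consumed in order until 'size' bytes have been copied or the
 *	rcvlowat target has been met; the loop also stops at a boundary
 *	between writers with different credentials, or once a message
 *	carrying file descriptors has been consumed, and it blocks in
 *	unix_stream_data_wait() when the queue runs dry before the target
 *	is reached.
 */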
1731 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1732                                struct msghdr *msg, size_t size,
1733                                int flags)
1734 {
1735         struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1736         struct scm_cookie tmp_scm;
1737         struct sock *sk = sock->sk;
1738         struct unix_sock *u = unix_sk(sk);
1739         struct sockaddr_un *sunaddr=msg->msg_name;
1740         int copied = 0;
1741         int check_creds = 0;
1742         int target;
1743         int err = 0;
1744         long timeo;
1745
1746         err = -EINVAL;
1747         if (sk->sk_state != TCP_ESTABLISHED)
1748                 goto out;
1749
1750         err = -EOPNOTSUPP;
1751         if (flags&MSG_OOB)
1752                 goto out;
1753
1754         target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1755         timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1756
1757         msg->msg_namelen = 0;
1758
1759         /* Lock the socket to prevent queue disordering
1760          * while we sleep in memcpy_toiovec
1761          */
1762
1763         if (!siocb->scm) {
1764                 siocb->scm = &tmp_scm;
1765                 memset(&tmp_scm, 0, sizeof(tmp_scm));
1766         }
1767
1768         mutex_lock(&u->readlock);
1769
1770         do
1771         {
1772                 int chunk;
1773                 struct sk_buff *skb;
1774
1775                 unix_state_lock(sk);
1776                 skb = skb_dequeue(&sk->sk_receive_queue);
1777                 if (skb==NULL)
1778                 {
1779                         if (copied >= target)
1780                                 goto unlock;
1781
1782                         /*
1783                          *      POSIX 1003.1g mandates this order.
1784                          */
1785
1786                         if ((err = sock_error(sk)) != 0)
1787                                 goto unlock;
1788                         if (sk->sk_shutdown & RCV_SHUTDOWN)
1789                                 goto unlock;
1790
1791                         unix_state_unlock(sk);
1792                         err = -EAGAIN;
1793                         if (!timeo)
1794                                 break;
1795                         mutex_unlock(&u->readlock);
1796
1797                         timeo = unix_stream_data_wait(sk, timeo);
1798
1799                         if (signal_pending(current)) {
1800                                 err = sock_intr_errno(timeo);
1801                                 goto out;
1802                         }
1803                         mutex_lock(&u->readlock);
1804                         continue;
1805  unlock:
1806                         unix_state_unlock(sk);
1807                         break;
1808                 }
1809                 unix_state_unlock(sk);
1810
1811                 if (check_creds) {
1812                         /* Never glue messages from different writers */
1813                         if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
1814                                 skb_queue_head(&sk->sk_receive_queue, skb);
1815                                 break;
1816                         }
1817                 } else {
1818                         /* Copy credentials */
1819                         siocb->scm->creds = *UNIXCREDS(skb);
1820                         check_creds = 1;
1821                 }
1822
1823                 /* Copy address just once */
1824                 if (sunaddr)
1825                 {
1826                         unix_copy_addr(msg, skb->sk);
1827                         sunaddr = NULL;
1828                 }
1829
1830                 chunk = min_t(unsigned int, skb->len, size);
1831                 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1832                         skb_queue_head(&sk->sk_receive_queue, skb);
1833                         if (copied == 0)
1834                                 copied = -EFAULT;
1835                         break;
1836                 }
1837                 copied += chunk;
1838                 size -= chunk;
1839
1840                 /* Mark read part of skb as used */
1841                 if (!(flags & MSG_PEEK))
1842                 {
1843                         skb_pull(skb, chunk);
1844
1845                         if (UNIXCB(skb).fp)
1846                                 unix_detach_fds(siocb->scm, skb);
1847
1848                         /* put the skb back if we didn't use it up.. */
1849                         if (skb->len)
1850                         {
1851                                 skb_queue_head(&sk->sk_receive_queue, skb);
1852                                 break;
1853                         }
1854
1855                         kfree_skb(skb);
1856
1857                         if (siocb->scm->fp)
1858                                 break;
1859                 }
1860                 else
1861                 {
1862                         /* It is questionable, see note in unix_dgram_recvmsg.
1863                          */
1864                         if (UNIXCB(skb).fp)
1865                                 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1866
1867                         /* put message back and return */
1868                         skb_queue_head(&sk->sk_receive_queue, skb);
1869                         break;
1870                 }
1871         } while (size);
1872
1873         mutex_unlock(&u->readlock);
1874         scm_recv(sock, msg, siocb->scm, flags);
1875 out:
1876         return copied ? : err;
1877 }
1878
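/*
 *	shutdown(2) on an AF_UNIX socket.  The userspace 'how' argument is
 *	mapped onto the internal shutdown bits by (mode+1): SHUT_RD (0)
 *	becomes RCV_SHUTDOWN, SHUT_WR (1) becomes SEND_SHUTDOWN and
 *	SHUT_RDWR (2) becomes both.  For stream and seqpacket sockets the
 *	mirrored bits are also set on the peer, which is then woken up.
 */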
1879 static int unix_shutdown(struct socket *sock, int mode)
1880 {
1881         struct sock *sk = sock->sk;
1882         struct sock *other;
1883
1884         mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
1885
1886         if (mode) {
1887                 unix_state_lock(sk);
1888                 sk->sk_shutdown |= mode;
1889                 other=unix_peer(sk);
1890                 if (other)
1891                         sock_hold(other);
1892                 unix_state_unlock(sk);
1893                 sk->sk_state_change(sk);
1894
1895                 if (other &&
1896                         (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
1897
1898                         int peer_mode = 0;
1899
1900                         if (mode&RCV_SHUTDOWN)
1901                                 peer_mode |= SEND_SHUTDOWN;
1902                         if (mode&SEND_SHUTDOWN)
1903                                 peer_mode |= RCV_SHUTDOWN;
1904                         unix_state_lock(other);
1905                         other->sk_shutdown |= peer_mode;
1906                         unix_state_unlock(other);
1907                         other->sk_state_change(other);
1908                         read_lock(&other->sk_callback_lock);
1909                         if (peer_mode == SHUTDOWN_MASK)
1910                                 sk_wake_async(other,1,POLL_HUP);
1911                         else if (peer_mode & RCV_SHUTDOWN)
1912                                 sk_wake_async(other,1,POLL_IN);
1913                         read_unlock(&other->sk_callback_lock);
1914                 }
1915                 if (other)
1916                         sock_put(other);
1917         }
1918         return 0;
1919 }
1920
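/*
 *	ioctl handling: SIOCOUTQ reports the bytes still allocated to the
 *	send side (sk_wmem_alloc) and SIOCINQ the bytes queued for reading -
 *	the whole receive queue for stream and seqpacket sockets, only the
 *	first datagram for datagram sockets.  Everything else falls through
 *	to -ENOIOCTLCMD.
 */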
1921 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1922 {
1923         struct sock *sk = sock->sk;
1924         long amount=0;
1925         int err;
1926
1927         switch(cmd)
1928         {
1929                 case SIOCOUTQ:
1930                         amount = atomic_read(&sk->sk_wmem_alloc);
1931                         err = put_user(amount, (int __user *)arg);
1932                         break;
1933                 case SIOCINQ:
1934                 {
1935                         struct sk_buff *skb;
1936
1937                         if (sk->sk_state == TCP_LISTEN) {
1938                                 err = -EINVAL;
1939                                 break;
1940                         }
1941
1942                         spin_lock(&sk->sk_receive_queue.lock);
1943                         if (sk->sk_type == SOCK_STREAM ||
1944                             sk->sk_type == SOCK_SEQPACKET) {
1945                                 skb_queue_walk(&sk->sk_receive_queue, skb)
1946                                         amount += skb->len;
1947                         } else {
1948                                 skb = skb_peek(&sk->sk_receive_queue);
1949                                 if (skb)
1950                                         amount=skb->len;
1951                         }
1952                         spin_unlock(&sk->sk_receive_queue.lock);
1953                         err = put_user(amount, (int __user *)arg);
1954                         break;
1955                 }
1956
1957                 default:
1958                         err = -ENOIOCTLCMD;
1959                         break;
1960         }
1961         return err;
1962 }
1963
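/*
 *	poll(2)/select(2) support.  Readability is reported when the
 *	receive queue is non-empty or the receive side has been shut down;
 *	writability follows unix_writable() and, as noted below, is still
 *	reported after the peer has shut down so writers fail instead of
 *	blocking forever.
 */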
1964 static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait)
1965 {
1966         struct sock *sk = sock->sk;
1967         unsigned int mask;
1968
1969         poll_wait(file, sk->sk_sleep, wait);
1970         mask = 0;
1971
1972         /* exceptional events? */
1973         if (sk->sk_err)
1974                 mask |= POLLERR;
1975         if (sk->sk_shutdown == SHUTDOWN_MASK)
1976                 mask |= POLLHUP;
1977         if (sk->sk_shutdown & RCV_SHUTDOWN)
1978                 mask |= POLLRDHUP;
1979
1980         /* readable? */
1981         if (!skb_queue_empty(&sk->sk_receive_queue) ||
1982             (sk->sk_shutdown & RCV_SHUTDOWN))
1983                 mask |= POLLIN | POLLRDNORM;
1984
1985         /* Connection-based sockets need to check for termination and startup */
1986         if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE)
1987                 mask |= POLLHUP;
1988
1989         /*
1990          * We report the socket as writable even when the other side
1991          * has shut down the connection.  This prevents stuck sockets.
1992          */
1993         if (unix_writable(sk))
1994                 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1995
1996         return mask;
1997 }
1998
1999
2000 #ifdef CONFIG_PROC_FS
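/*
 *	/proc/net/unix: walk the global socket table under unix_table_lock
 *	and emit one line per socket (address, refcount, flags, type, state
 *	and inode, followed by the bound path if any; abstract names are
 *	shown with a leading '@').
 */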
2001 static struct sock *unix_seq_idx(int *iter, loff_t pos)
2002 {
2003         loff_t off = 0;
2004         struct sock *s;
2005
2006         for (s = first_unix_socket(iter); s; s = next_unix_socket(iter, s)) {
2007                 if (off == pos)
2008                         return s;
2009                 ++off;
2010         }
2011         return NULL;
2012 }
2013
2014
2015 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2016 {
2017         spin_lock(&unix_table_lock);
2018         return *pos ? unix_seq_idx(seq->private, *pos - 1) : ((void *) 1);
2019 }
2020
2021 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2022 {
2023         ++*pos;
2024
2025         if (v == (void *)1)
2026                 return first_unix_socket(seq->private);
2027         return next_unix_socket(seq->private, v);
2028 }
2029
2030 static void unix_seq_stop(struct seq_file *seq, void *v)
2031 {
2032         spin_unlock(&unix_table_lock);
2033 }
2034
2035 static int unix_seq_show(struct seq_file *seq, void *v)
2036 {
2037
2038         if (v == (void *)1)
2039                 seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
2040                          "Inode Path\n");
2041         else {
2042                 struct sock *s = v;
2043                 struct unix_sock *u = unix_sk(s);
2044                 unix_state_lock(s);
2045
2046                 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
2047                         s,
2048                         atomic_read(&s->sk_refcnt),
2049                         0,
2050                         s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2051                         s->sk_type,
2052                         s->sk_socket ?
2053                         (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2054                         (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2055                         sock_i_ino(s));
2056
2057                 if (u->addr) {
2058                         int i, len;
2059                         seq_putc(seq, ' ');
2060
2061                         i = 0;
2062                         len = u->addr->len - sizeof(short);
2063                         if (!UNIX_ABSTRACT(s))
2064                                 len--;
2065                         else {
2066                                 seq_putc(seq, '@');
2067                                 i++;
2068                         }
2069                         for ( ; i < len; i++)
2070                                 seq_putc(seq, u->addr->name->sun_path[i]);
2071                 }
2072                 unix_state_unlock(s);
2073                 seq_putc(seq, '\n');
2074         }
2075
2076         return 0;
2077 }
2078
2079 static const struct seq_operations unix_seq_ops = {
2080         .start  = unix_seq_start,
2081         .next   = unix_seq_next,
2082         .stop   = unix_seq_stop,
2083         .show   = unix_seq_show,
2084 };
2085
2086
2087 static int unix_seq_open(struct inode *inode, struct file *file)
2088 {
2089         return seq_open_private(file, &unix_seq_ops, sizeof(int));
2090 }
2091
2092 static const struct file_operations unix_seq_fops = {
2093         .owner          = THIS_MODULE,
2094         .open           = unix_seq_open,
2095         .read           = seq_read,
2096         .llseek         = seq_lseek,
2097         .release        = seq_release_private,
2098 };
2099
2100 #endif
2101
2102 static struct net_proto_family unix_family_ops = {
2103         .family = PF_UNIX,
2104         .create = unix_create,
2105         .owner  = THIS_MODULE,
2106 };
2107
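/*
 *	Module init/exit: register the unix_proto slab cache, the PF_UNIX
 *	socket family, the /proc/net/unix file and the sysctl entries;
 *	tear them all down again on unload.
 */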
2108 static int __init af_unix_init(void)
2109 {
2110         int rc = -1;
2111         struct sk_buff *dummy_skb;
2112
2113         BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
2114
2115         rc = proto_register(&unix_proto, 1);
2116         if (rc != 0) {
2117                 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2118                        __FUNCTION__);
2119                 goto out;
2120         }
2121
2122         sock_register(&unix_family_ops);
2123 #ifdef CONFIG_PROC_FS
2124         proc_net_fops_create(&init_net, "unix", 0, &unix_seq_fops);
2125 #endif
2126         unix_sysctl_register();
2127 out:
2128         return rc;
2129 }
2130
2131 static void __exit af_unix_exit(void)
2132 {
2133         sock_unregister(PF_UNIX);
2134         unix_sysctl_unregister();
2135         proc_net_remove(&init_net, "unix");
2136         proto_unregister(&unix_proto);
2137 }
2138
2139 module_init(af_unix_init);
2140 module_exit(af_unix_exit);
2141
2142 MODULE_LICENSE("GPL");
2143 MODULE_ALIAS_NETPROTO(PF_UNIX);