/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Version:	$Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly,
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					tidy up.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To fix:
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif
/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
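/*
 * Worked example (illustrative only, not from the original source): on a
 * hypothetical 32-bit build where sizeof(struct sk_buff) is 168 bytes,
 * _SK_MEM_OVERHEAD is 168 + 256 = 424 bytes, so SK_WMEM_MAX and
 * SK_RMEM_MAX both come out to 424 * 256 = 108544 bytes (~106 KiB).
 * The exact figure differs per architecture because the skb layout does,
 * which is exactly why the limits are derived from sizeof(struct sk_buff)
 * instead of being hard-coded.
 */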
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max = SK_WMEM_MAX;
__u32 sysctl_rmem_max = SK_RMEM_MAX;
__u32 sysctl_wmem_default = SK_WMEM_MAX;
__u32 sysctl_rmem_default = SK_RMEM_MAX;
/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max = sizeof(unsigned long)*(2*UIO_MAXIOV + 512);
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;

	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
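/*
 * Worked example of the jiffies conversion above (illustrative, not part
 * of the original source): with HZ == 100, a timeval of
 * { .tv_sec = 1, .tv_usec = 500000 } yields
 *	1*100 + (500000 + 9999)/10000 = 100 + 50 = 150 jiffies,
 * i.e. the sub-second part is rounded *up* to the next tick, so a
 * requested timeout is never silently shortened.
 */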
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}
static void sock_disable_timestamp(struct sock *sk)
{
	if (sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_reset_flag(sk, SOCK_TIMESTAMP);
		net_disable_timestamp();
	}
}
/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */
int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct sk_filter *filter;
	int val, valbool;
	struct linger ling;
	int ret = 0;

	/* Options without arguments */
#ifdef SO_DONTLINGER		/* Compatibility item... */
	if (optname == SO_DONTLINGER) {
		lock_sock(sk);
		sock_reset_flag(sk, SOCK_LINGER);
		release_sock(sk);
		return 0;
	}
#endif
	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else if (valbool)
			sock_set_flag(sk, SOCK_DBG);
		else
			sock_reset_flag(sk, SOCK_DBG);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_DONTROUTE:
		if (valbool)
			sock_set_flag(sk, SOCK_LOCALROUTE);
		else
			sock_reset_flag(sk, SOCK_LOCALROUTE);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this BSD doesn't and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */
		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;
	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;
	case SO_RCVBUF:
		/* Don't error on this BSD doesn't and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */
		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/* FIXME: is this lower bound the right one? */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;
	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;
	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;
	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;
	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;
	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;
	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;
	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;
	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;
	case SO_TIMESTAMP:
		if (valbool) {
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk);
		} else
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
		break;
	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;
	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;
	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;
#ifdef CONFIG_NETDEVICES
	case SO_BINDTODEVICE:
	{
		char devname[IFNAMSIZ];

		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			break;
		}

		/* Bind this socket to a particular device like "eth0",
		 * as specified in the passed interface name. If the
		 * name is "" or the option length is zero the socket
		 * is not bound.
		 */
		if (!valbool) {
			sk->sk_bound_dev_if = 0;
		} else {
			if (optlen > IFNAMSIZ)
				optlen = IFNAMSIZ;
			if (copy_from_user(devname, optval, optlen)) {
				ret = -EFAULT;
				break;
			}

			/* Remove any cached route for this socket. */
			sk_dst_reset(sk);

			if (devname[0] == '\0') {
				sk->sk_bound_dev_if = 0;
			} else {
				struct net_device *dev = dev_get_by_name(devname);
				if (!dev) {
					ret = -ENODEV;
					break;
				}
				sk->sk_bound_dev_if = dev->ifindex;
				dev_put(dev);
			}
		}
		break;
	}
#endif
	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;
	case SO_DETACH_FILTER:
		spin_lock_bh(&sk->sk_lock.slock);
		filter = sk->sk_filter;
		if (filter) {
			sk->sk_filter = NULL;
			spin_unlock_bh(&sk->sk_lock.slock);
			sk_filter_release(sk, filter);
			break;
		}
		spin_unlock_bh(&sk->sk_lock.slock);
		ret = -ENONET;
		break;
	/* We implement the SO_SNDLOWAT etc to
	   not be settable (1003.1g 5.3) */
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
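/*
 * Illustrative (not part of the original file): how the SO_LINGER and
 * SO_RCVTIMEO branches above are typically exercised from user space:
 *
 *	struct linger ling = { .l_onoff = 1, .l_linger = 5 };
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling));
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * The first call lands in the SO_LINGER case and stores 5*HZ in
 * sk->sk_lingertime; the second is converted by sock_set_timeout()
 * into a 2*HZ jiffy timeout for sk->sk_rcvtimeo.
 */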
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	unsigned int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;
	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;
	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;
	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;
	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;
	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;
	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;
	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;
	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;
	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;
	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;
	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;
	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;
	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP);
		break;
	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;
	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;
	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;
	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;
	case SO_PEERCRED:
		if (len > sizeof(sk->sk_peercred))
			len = sizeof(sk->sk_peercred);
		if (copy_to_user(optval, &sk->sk_peercred, len))
			return -EFAULT;
		goto lenout;
	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}
	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;
	case SO_PEERSEC:
		return security_socket_getpeersec(sock, optval, optlen, len);
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
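/*
 * Illustrative (not in the original): the usual consumer of the SO_ERROR
 * branch above is a non-blocking connect(); once poll() reports POLLOUT,
 * user space retrieves the deferred error:
 *
 *	int err = 0;
 *	socklen_t elen = sizeof(err);
 *	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &elen);
 *
 * A zero err means the connect completed; otherwise err holds the
 * positive errno that was stored in sk->sk_err.
 */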
/**
 *	sk_alloc - All socket objects are allocated here
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 *	@zero_it: if we should zero the newly allocated sock
 */
struct sock *sk_alloc(int family, unsigned int __nocast priority,
		      struct proto *prot, int zero_it)
{
	struct sock *sk = NULL;
	kmem_cache_t *slab = prot->slab;

	if (slab != NULL)
		sk = kmem_cache_alloc(slab, priority);
	else
		sk = kmalloc(prot->obj_size, priority);

	if (sk) {
		if (zero_it) {
			memset(sk, 0, prot->obj_size);
			sk->sk_family = family;
			/*
			 * See comment in struct sock definition to understand
			 * why we need sk_prot_creator -acme
			 */
			sk->sk_prot = sk->sk_prot_creator = prot;
			sock_lock_init(sk);
		}

		if (security_sk_alloc(sk, family, priority)) {
			if (slab != NULL)
				kmem_cache_free(slab, sk);
			else
				kfree(sk);
			sk = NULL;
		} else
			__module_get(prot->owner);
	}
	return sk;
}
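/*
 * Minimal usage sketch (hypothetical protocol, not from this file): a
 * protocol's create routine typically pairs sk_alloc() with
 * sock_init_data() and leaves freeing to sk_free() once the last
 * reference goes away:
 *
 *	static int myproto_create(struct socket *sock, int protocol)
 *	{
 *		struct sock *sk = sk_alloc(PF_INET, GFP_KERNEL,
 *					   &myproto_prot, 1);
 *		if (!sk)
 *			return -ENOBUFS;
 *		sock_init_data(sock, sk);
 *		return 0;
 *	}
 *
 * myproto_prot is an assumed struct proto registered beforehand with
 * proto_register(); its obj_size tells sk_alloc() how much to allocate
 * when no slab cache exists.
 */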
void sk_free(struct sock *sk)
{
	struct sk_filter *filter;
	struct module *owner = sk->sk_prot_creator->owner;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = sk->sk_filter;
	if (filter) {
		sk_filter_release(sk, filter);
		sk->sk_filter = NULL;
	}

	sock_disable_timestamp(sk);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __FUNCTION__, atomic_read(&sk->sk_omem_alloc));

	security_sk_free(sk);
	if (sk->sk_prot_creator->slab != NULL)
		kmem_cache_free(sk->sk_prot_creator->slab, sk);
	else
		kfree(sk);
	module_put(owner);
}
struct sock *sk_clone(const struct sock *sk, const unsigned int __nocast priority)
{
	struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);

	if (newsk != NULL) {
		struct sk_filter *filter;

		memcpy(newsk, sk, sk->sk_prot->obj_size);

		/* SANITY */
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);

		atomic_set(&newsk->sk_rmem_alloc, 0);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);

		rwlock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		newsk->sk_socket = NULL;
		newsk->sk_sleep	 = NULL;

		if (newsk->sk_prot->sockets_allocated)
			atomic_inc(newsk->sk_prot->sockets_allocated);
	}
out:
	return newsk;
}

EXPORT_SYMBOL_GPL(sk_clone);
void __init sk_init(void)
{
	if (num_physpages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (num_physpages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}
/*
 * Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* In case it might be waiting for more memory. */
	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
		sk->sk_write_space(sk);
	sock_put(sk);
}
/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
	return uid;
}

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}
/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     unsigned int __nocast priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     unsigned int __nocast priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}
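/*
 * Illustrative sketch (assumptions noted): a datagram protocol might use
 * sock_wmalloc() so the skb is charged against sk_sndbuf, relying on the
 * sock_wfree() destructor installed by skb_set_owner_w() to uncharge it
 * when the skb is freed:
 *
 *	skb = sock_wmalloc(sk, len + headroom, 0, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOBUFS;	// over sk_sndbuf, try again later
 *	// ... build and transmit skb; the eventual kfree_skb() calls
 *	// sock_wfree(), which subtracts skb->truesize from
 *	// sk_wmem_alloc and wakes any blocked writers.
 *
 * `headroom` here is a hypothetical protocol-specific constant.
 */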
/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
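/*
 * Illustrative pairing (not from the original): option memory must be
 * released with the same size it was charged with, since the byte count
 * is tracked only in sk_omem_alloc:
 *
 *	struct my_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	// ...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 *
 * struct my_opt is a hypothetical per-socket option structure.
 */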
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think, these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk->sk_sleep, &wait);
	return timeo;
}
/*
 * Generic send/receive buffer handlers
 */

static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
					    unsigned long header_len,
					    unsigned long data_len,
					    int noblock, int *errcode)
{
	struct sk_buff *skb;
	unsigned int gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, sk->sk_allocation);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE : data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
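/*
 * Illustrative call site (hypothetical protocol, not in this file): a
 * sendmsg() implementation usually lets sock_alloc_send_skb() handle
 * buffer accounting, blocking and signal handling in one go:
 *
 *	int err;
 *	struct sk_buff *skb = sock_alloc_send_skb(sk, len,
 *				msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;	// -EAGAIN, -EPIPE, -ERESTARTSYS, ...
 *
 * On success the skb is already charged to sk_wmem_alloc via
 * skb_set_owner_w().
 */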
static void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk->sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);
}
/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk->sk_sleep, &wait);
	return rc;
}

EXPORT_SYMBOL(sk_wait_data);
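/*
 * Illustrative recvmsg loop (hypothetical, for context): callers hold the
 * socket lock and loop until data shows up or the timeout/signal path
 * fires:
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 *
 * sk_wait_data() drops the lock inside sk_wait_event() while asleep and
 * retakes it before returning, so the queue check stays valid.
 */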
/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
/*
 * Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_error_report(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, 0, POLL_ERR);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_readable(struct sock *sk, int len)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, 1, POLL_IN);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, 2, POLL_OUT);
	}

	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_destruct(struct sock *sk)
{
	if (sk->sk_protinfo)
		kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, 3, POLL_PRI);
}
void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}

EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}

EXPORT_SYMBOL(sk_stop_timer);
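/*
 * Illustrative sketch (hypothetical names): a protocol retransmit timer
 * holds a socket reference for as long as it is pending, which is exactly
 * what the helpers above manage:
 *
 *	sk_reset_timer(sk, &myproto_sk(sk)->retrans_timer,
 *		       jiffies + MYPROTO_RTO);	// takes a ref if newly armed
 *	// ...
 *	sk_stop_timer(sk, &myproto_sk(sk)->retrans_timer); // drops it again
 *
 * mod_timer() returns 0 when the timer was not already pending, so a
 * reference is taken only once per armed timer.
 */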
void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);

	sk->sk_send_head	=	NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation	=	GFP_KERNEL;
	sk->sk_rcvbuf		=	sysctl_rmem_default;
	sk->sk_sndbuf		=	sysctl_wmem_default;
	sk->sk_state		=	TCP_CLOSE;
	sk->sk_socket		=	sock;

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type	=	sock->type;
		sk->sk_sleep	=	&sock->wait;
		sock->sk	=	sk;
	} else
		sk->sk_sleep	=	NULL;

	rwlock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);

	sk->sk_state_change	=	sock_def_wakeup;
	sk->sk_data_ready	=	sock_def_readable;
	sk->sk_write_space	=	sock_def_write_space;
	sk->sk_error_report	=	sock_def_error_report;
	sk->sk_destruct		=	sock_def_destruct;

	sk->sk_sndmsg_page	=	NULL;
	sk->sk_sndmsg_off	=	0;

	sk->sk_peercred.pid	=	0;
	sk->sk_peercred.uid	=	-1;
	sk->sk_peercred.gid	=	-1;
	sk->sk_write_pending	=	0;
	sk->sk_rcvlowat		=	1;
	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp.tv_sec	= -1L;
	sk->sk_stamp.tv_usec	= -1L;

	atomic_set(&sk->sk_refcnt, 1);
}
void fastcall lock_sock(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&(sk->sk_lock.slock));
	if (sk->sk_lock.owner)
		__lock_sock(sk);
	sk->sk_lock.owner = (void *)1;
	spin_unlock_bh(&(sk->sk_lock.slock));
}

EXPORT_SYMBOL(lock_sock);

void fastcall release_sock(struct sock *sk)
{
	spin_lock_bh(&(sk->sk_lock.slock));
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owner = NULL;
	if (waitqueue_active(&(sk->sk_lock.wq)))
		wake_up(&(sk->sk_lock.wq));
	spin_unlock_bh(&(sk->sk_lock.slock));
}

EXPORT_SYMBOL(release_sock);
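/*
 * Typical pattern (for context, not from the original): process context
 * brackets socket state changes with this pair, while softirq receive
 * paths use bh_lock_sock() and queue skbs to the backlog when the owner
 * field is set:
 *
 *	lock_sock(sk);
 *	// ... mutate sk state; packets arriving now are backlogged ...
 *	release_sock(sk);	// runs sk_backlog_rcv() on queued skbs
 */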
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk);
	if (sk->sk_stamp.tv_sec == -1)
		return -ENOENT;
	if (sk->sk_stamp.tv_sec == 0)
		do_gettimeofday(&sk->sk_stamp);
	return copy_to_user(userstamp, &sk->sk_stamp, sizeof(struct timeval)) ?
		-EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);
void sock_enable_timestamp(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_set_flag(sk, SOCK_TIMESTAMP);
		net_enable_timestamp();
	}
}
EXPORT_SYMBOL(sock_enable_timestamp);
/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(sock_common_getsockopt);

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}

EXPORT_SYMBOL(sock_common_recvmsg);
/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(sock_common_setsockopt);
void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the net still has.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and will be purged by
	 * the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and,
	 * probably, our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}

EXPORT_SYMBOL(sk_common_release);
static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

int proto_register(struct proto *prot, int alloc_slab)
{
	char *request_sock_slab_name = NULL;
	char *timewait_sock_slab_name;
	int rc = -ENOBUFS;

	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					       SLAB_HWCACHE_ALIGN, NULL, NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			static const char mask[] = "request_sock_%s";

			request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
			if (request_sock_slab_name == NULL)
				goto out_free_sock_slab;

			sprintf(request_sock_slab_name, mask, prot->name);
			prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_obj_size) {
			static const char mask[] = "tw_sock_%s";

			timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);

			if (timewait_sock_slab_name == NULL)
				goto out_free_request_sock_slab;

			sprintf(timewait_sock_slab_name, mask, prot->name);
			prot->twsk_slab = kmem_cache_create(timewait_sock_slab_name,
							    prot->twsk_obj_size,
							    0, SLAB_HWCACHE_ALIGN,
							    NULL, NULL);
			if (prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	write_unlock(&proto_list_lock);
	rc = 0;
out:
	return rc;
out_free_timewait_sock_slab_name:
	kfree(timewait_sock_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	kfree(request_sock_slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
	goto out;
}

EXPORT_SYMBOL(proto_register);
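/*
 * Illustrative registration (hypothetical protocol, not from this file):
 * obj_size drives the slab object size later used by sk_alloc().
 *
 *	static struct proto myproto_prot = {
 *		.name	  = "MYPROTO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct myproto_sock),
 *	};
 *
 *	static int __init myproto_init(void)
 *	{
 *		return proto_register(&myproto_prot, 1);
 *	}
 *
 * With alloc_slab == 1 a "MYPROTO" kmem cache is created; passing 0 makes
 * sk_alloc() fall back to kmalloc(obj_size).
 */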
void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		const char *name = kmem_cache_name(prot->rsk_prot->slab);

		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_slab != NULL) {
		const char *name = kmem_cache_name(prot->twsk_slab);

		kmem_cache_destroy(prot->twsk_slab);
		kfree(name);
		prot->twsk_slab = NULL;
	}
}

EXPORT_SYMBOL(proto_unregister);
#ifdef CONFIG_PROC_FS
static inline struct proto *__proto_head(void)
{
	return list_entry(proto_list.next, struct proto, node);
}

static inline struct proto *proto_head(void)
{
	return list_empty(&proto_list) ? NULL : __proto_head();
}

static inline struct proto *proto_next(struct proto *proto)
{
	return proto->node.next == &proto_list ? NULL :
		list_entry(proto->node.next, struct proto, node);
}

static inline struct proto *proto_get_idx(loff_t pos)
{
	struct proto *proto;
	loff_t i = 0;

	list_for_each_entry(proto, &proto_list, node)
		if (i++ == pos)
			goto out;

	proto = NULL;
out:
	return proto;
}
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&proto_list_lock);
	return *pos ? proto_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return v == SEQ_START_TOKEN ? proto_head() : proto_next(v);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}
static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   proto->sockets_allocated != NULL ? atomic_read(proto->sockets_allocated) : -1,
		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}
static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, v);
	return 0;
}
static struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &proto_seq_ops);
}

static struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static int __init proto_init(void)
{
	/* register /proc/net/protocols */
	return proc_net_fops_create("protocols", S_IRUGO, &proto_seq_fops) == NULL ? -ENOBUFS : 0;
}

subsys_initcall(proto_init);

#endif /* PROC_FS */
EXPORT_SYMBOL(sk_alloc);
EXPORT_SYMBOL(sk_free);
EXPORT_SYMBOL(sk_send_sigurg);
EXPORT_SYMBOL(sock_alloc_send_skb);
EXPORT_SYMBOL(sock_init_data);
EXPORT_SYMBOL(sock_kfree_s);
EXPORT_SYMBOL(sock_kmalloc);
EXPORT_SYMBOL(sock_no_accept);
EXPORT_SYMBOL(sock_no_bind);
EXPORT_SYMBOL(sock_no_connect);
EXPORT_SYMBOL(sock_no_getname);
EXPORT_SYMBOL(sock_no_getsockopt);
EXPORT_SYMBOL(sock_no_ioctl);
EXPORT_SYMBOL(sock_no_listen);
EXPORT_SYMBOL(sock_no_mmap);
EXPORT_SYMBOL(sock_no_poll);
EXPORT_SYMBOL(sock_no_recvmsg);
EXPORT_SYMBOL(sock_no_sendmsg);
EXPORT_SYMBOL(sock_no_sendpage);
EXPORT_SYMBOL(sock_no_setsockopt);
EXPORT_SYMBOL(sock_no_shutdown);
EXPORT_SYMBOL(sock_no_socketpair);
EXPORT_SYMBOL(sock_rfree);
EXPORT_SYMBOL(sock_setsockopt);
EXPORT_SYMBOL(sock_wfree);
EXPORT_SYMBOL(sock_wmalloc);
EXPORT_SYMBOL(sock_i_uid);
EXPORT_SYMBOL(sock_i_ino);
EXPORT_SYMBOL(sysctl_optmem_max);
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_rmem_max);
EXPORT_SYMBOL(sysctl_wmem_max);
#endif