2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Generic socket support routines. Memory allocators, socket lock/release
7 * handler for protocols to use and generic option handler.
10 * Version: $Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
13 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Alan Cox, <A.Cox@swansea.ac.uk>
18 * Alan Cox : Numerous verify_area() problems
19 * Alan Cox : Connecting on a connecting socket
20 * now returns an error for tcp.
 * Alan Cox : sock->protocol is set correctly,
 * and is not sometimes left as 0.
23 * Alan Cox : connect handles icmp errors on a
24 * connect properly. Unfortunately there
25 * is a restart syscall nasty there. I
26 * can't match BSD without hacking the C
27 * library. Ideas urgently sought!
28 * Alan Cox : Disallow bind() to addresses that are
29 * not ours - especially broadcast ones!!
30 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
31 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
32 * instead they leave that for the DESTROY timer.
33 * Alan Cox : Clean up error flag in accept
34 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
35 * was buggy. Put a remove_sock() in the handler
36 * for memory when we hit 0. Also altered the timer
 * code. The ACK stuff can wait and needs major
 * surgery.
39 * Alan Cox : Fixed TCP ack bug, removed remove sock
40 * and fixed timer/inet_bh race.
41 * Alan Cox : Added zapped flag for TCP
42 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
43 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
44 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
45 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
46 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
47 * Rick Sladkey : Relaxed UDP rules for matching packets.
48 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
49 * Pauline Middelink : identd support
50 * Alan Cox : Fixed connect() taking signals I think.
51 * Alan Cox : SO_LINGER supported
52 * Alan Cox : Error reporting fixes
53 * Anonymous : inet_create tidied up (sk->reuse setting)
54 * Alan Cox : inet sockets don't set sk->type!
55 * Alan Cox : Split socket option code
56 * Alan Cox : Callbacks
57 * Alan Cox : Nagle flag for Charles & Johannes stuff
58 * Alex : Removed restriction on inet fioctl
59 * Alan Cox : Splitting INET from NET core
60 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
61 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
62 * Alan Cox : Split IP from generic code
63 * Alan Cox : New kfree_skbmem()
64 * Alan Cox : Make SO_DEBUG superuser only.
65 * Alan Cox : Allow anyone to clear SO_DEBUG
67 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
68 * Alan Cox : Allocator for a socket is settable.
69 * Alan Cox : SO_ERROR includes soft errors.
70 * Alan Cox : Allow NULL arguments on some SO_ opts
71 * Alan Cox : Generic socket allocation to make hooks
72 * easier (suggested by Craig Metz).
73 * Michael Pall : SO_ERROR returns positive errno again
74 * Steve Whitehouse: Added default destructor to free
75 * protocol private data.
76 * Steve Whitehouse: Added various other default routines
77 * common to several socket families.
78 * Chris Evans : Call suser() check last on F_SETOWN
79 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
80 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
81 * Andi Kleen : Fix write_space callback
82 * Chris Evans : Security fixes - signedness again
83 * Arnaldo C. Melo : cleanups, use skb_queue_purge
88 * This program is free software; you can redistribute it and/or
89 * modify it under the terms of the GNU General Public License
90 * as published by the Free Software Foundation; either version
91 * 2 of the License, or (at your option) any later version.
94 #include <linux/capability.h>
95 #include <linux/errno.h>
96 #include <linux/types.h>
97 #include <linux/socket.h>
99 #include <linux/kernel.h>
100 #include <linux/module.h>
101 #include <linux/proc_fs.h>
102 #include <linux/seq_file.h>
103 #include <linux/sched.h>
104 #include <linux/timer.h>
105 #include <linux/string.h>
106 #include <linux/sockios.h>
107 #include <linux/net.h>
108 #include <linux/mm.h>
109 #include <linux/slab.h>
110 #include <linux/interrupt.h>
111 #include <linux/poll.h>
112 #include <linux/tcp.h>
113 #include <linux/init.h>
114 #include <linux/highmem.h>
116 #include <asm/uaccess.h>
117 #include <asm/system.h>
119 #include <linux/netdevice.h>
120 #include <net/protocol.h>
121 #include <linux/skbuff.h>
122 #include <net/net_namespace.h>
123 #include <net/request_sock.h>
124 #include <net/sock.h>
125 #include <net/xfrm.h>
126 #include <linux/ipsec.h>
128 #include <linux/filter.h>
135 * Each address family might have different locking rules, so we have
136 * one slock key per address family:
138 static struct lock_class_key af_family_keys[AF_MAX];
139 static struct lock_class_key af_family_slock_keys[AF_MAX];
141 #ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
147 static const char *af_family_key_strings[AF_MAX+1] = {
148 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
149 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
150 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
151 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
152 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
153 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
154 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
155 "sk_lock-21" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
156 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
157 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"   ,
159 "sk_lock-AF_RXRPC" , "sk_lock-AF_MAX"
161 static const char *af_family_slock_key_strings[AF_MAX+1] = {
162 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
163 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
164 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
165 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
166 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
167 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
168 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
169 "slock-21" , "slock-AF_SNA" , "slock-AF_IRDA" ,
170 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
171 "slock-27" , "slock-28" , "slock-AF_CAN" ,
172 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
173 "slock-AF_RXRPC" , "slock-AF_MAX"
175 static const char *af_family_clock_key_strings[AF_MAX+1] = {
176 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
177 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
178 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
179 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
180 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
181 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
182 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
183 "clock-21" , "clock-AF_SNA" , "clock-AF_IRDA" ,
184 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
"clock-27"      , "clock-28"          , "clock-AF_CAN"      ,
186 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
187 "clock-AF_RXRPC" , "clock-AF_MAX"
192 * sk_callback_lock locking rules are per-address-family,
193 * so split the lock classes by using a per-AF key:
195 static struct lock_class_key af_callback_keys[AF_MAX];
197 /* Take into consideration the size of the struct sk_buff overhead in the
198 * determination of these values, since that is non-constant across
199 * platforms. This makes socket queueing behavior and performance
200 * not depend upon such differences.
202 #define _SK_MEM_PACKETS 256
203 #define _SK_MEM_OVERHEAD (sizeof(struct sk_buff) + 256)
204 #define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
205 #define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
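/*
 * Worked example (illustrative; the exact figure depends on the arch):
 * if sizeof(struct sk_buff) were, say, 240 bytes, then
 *
 *	_SK_MEM_OVERHEAD = 240 + 256         = 496
 *	SK_WMEM_MAX      = 496 * 256 packets = 126976 bytes
 *
 * i.e. the defaults are sized so that roughly 256 small packets,
 * including their sk_buff bookkeeping, fit in a default buffer.
 */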
207 /* Run time adjustable parameters. */
208 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
209 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
210 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
211 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
/* Maximal space eaten by iovec or ancillary data plus some space */
214 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
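/*
 * Worked example (illustrative): with UIO_MAXIOV = 1024 this evaluates to
 *
 *	8 * (2 * 1024 + 512) = 20480 bytes on a 64-bit machine,
 *	4 * (2 * 1024 + 512) = 10240 bytes on a 32-bit machine.
 */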
216 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
220 if (optlen < sizeof(tv))
222 if (copy_from_user(&tv, optval, sizeof(tv)))
224 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
228 static int warned __read_mostly;
231 if (warned < 10 && net_ratelimit())
233 printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
234 "tries to set negative timeout\n",
235 current->comm, task_pid_nr(current));
238 *timeo_p = MAX_SCHEDULE_TIMEOUT;
239 if (tv.tv_sec == 0 && tv.tv_usec == 0)
241 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
242 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
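/*
 * Conversion sketch (illustrative): with HZ == 1000, a user timeout of
 * { .tv_sec = 1, .tv_usec = 500000 } becomes
 *
 *	*timeo_p = 1 * 1000 + (500000 + 999) / 1000 = 1500 jiffies,
 *
 * while { 0, 0 } is left as MAX_SCHEDULE_TIMEOUT, i.e. "wait forever".
 */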
246 static void sock_warn_obsolete_bsdism(const char *name)
249 static char warncomm[TASK_COMM_LEN];
250 if (strcmp(warncomm, current->comm) && warned < 5) {
251 strcpy(warncomm, current->comm);
252 printk(KERN_WARNING "process `%s' is using obsolete "
253 "%s SO_BSDCOMPAT\n", warncomm, name);
258 static void sock_disable_timestamp(struct sock *sk)
260 if (sock_flag(sk, SOCK_TIMESTAMP)) {
261 sock_reset_flag(sk, SOCK_TIMESTAMP);
262 net_disable_timestamp();
267 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
 * the number of warnings when compiling with -W --ANK
 */
275 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
276 (unsigned)sk->sk_rcvbuf) {
281 err = sk_filter(sk, skb);
285 if (!sk_rmem_schedule(sk, skb->truesize)) {
291 skb_set_owner_r(skb, sk);
/* Cache the SKB length before we tack it onto the receive
 * queue. Once it is added it no longer belongs to us and
 * may be freed by other threads of control pulling packets
 * from the queue.
 */
300 skb_queue_tail(&sk->sk_receive_queue, skb);
302 if (!sock_flag(sk, SOCK_DEAD))
303 sk->sk_data_ready(sk, skb_len);
307 EXPORT_SYMBOL(sock_queue_rcv_skb);
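/*
 * Usage sketch (illustrative; "myproto" is hypothetical): a protocol's
 * delivery path typically hands a fully built skb to the socket and
 * frees it itself on failure:
 *
 *	static int myproto_deliver(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */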
309 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
311 int rc = NET_RX_SUCCESS;
313 if (sk_filter(sk, skb))
314 goto discard_and_relse;
319 bh_lock_sock_nested(sk);
322 if (!sock_owned_by_user(sk)) {
324 * trylock + unlock semantics:
326 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
328 rc = sk->sk_backlog_rcv(sk, skb);
330 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
332 sk_add_backlog(sk, skb);
341 EXPORT_SYMBOL(sk_receive_skb);
343 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
345 struct dst_entry *dst = sk->sk_dst_cache;
347 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
348 sk->sk_dst_cache = NULL;
355 EXPORT_SYMBOL(__sk_dst_check);
357 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
359 struct dst_entry *dst = sk_dst_get(sk);
361 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
369 EXPORT_SYMBOL(sk_dst_check);
371 static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
373 int ret = -ENOPROTOOPT;
374 #ifdef CONFIG_NETDEVICES
375 struct net *net = sk->sk_net;
376 char devname[IFNAMSIZ];
381 if (!capable(CAP_NET_RAW))
/* Bind this socket to a particular device like "eth0",
 * as specified in the passed interface name. If the
 * name is "" or the option length is zero the socket
 * is not bound.
 */
393 if (optlen > IFNAMSIZ - 1)
394 optlen = IFNAMSIZ - 1;
395 memset(devname, 0, sizeof(devname));
398 if (copy_from_user(devname, optval, optlen))
401 if (devname[0] == '\0') {
404 struct net_device *dev = dev_get_by_name(net, devname);
410 index = dev->ifindex;
415 sk->sk_bound_dev_if = index;
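/*
 * User-space sketch (illustrative): binding and then unbinding a socket;
 * as checked above, CAP_NET_RAW is required:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", 5);
 *	...
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "", 0);	(unbind)
 */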
427 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
430 sock_set_flag(sk, bit);
432 sock_reset_flag(sk, bit);
436 * This is meant for all protocols to use and covers goings on
437 * at the socket level. Everything here is generic.
440 int sock_setsockopt(struct socket *sock, int level, int optname,
441 char __user *optval, int optlen)
struct sock *sk = sock->sk;
450 * Options without arguments
453 #ifdef SO_DONTLINGER /* Compatibility item... */
454 if (optname == SO_DONTLINGER) {
456 sock_reset_flag(sk, SOCK_LINGER);
462 if (optname == SO_BINDTODEVICE)
463 return sock_bindtodevice(sk, optval, optlen);
465 if (optlen < sizeof(int))
468 if (get_user(val, (int __user *)optval))
477 if (val && !capable(CAP_NET_ADMIN)) {
480 sock_valbool_flag(sk, SOCK_DBG, valbool);
483 sk->sk_reuse = valbool;
490 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
493 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
/* Don't error on this; BSD doesn't, and if you think
 * about it this is right. Otherwise apps have to
 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 * are treated in BSD as hints.
 */
501 if (val > sysctl_wmem_max)
502 val = sysctl_wmem_max;
504 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
505 if ((val * 2) < SOCK_MIN_SNDBUF)
506 sk->sk_sndbuf = SOCK_MIN_SNDBUF;
508 sk->sk_sndbuf = val * 2;
/*
 * Wake up sending tasks if we
 * upped the value.
 */
514 sk->sk_write_space(sk);
518 if (!capable(CAP_NET_ADMIN)) {
/* Don't error on this; BSD doesn't, and if you think
 * about it this is right. Otherwise apps have to
 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 * are treated in BSD as hints.
 */
530 if (val > sysctl_rmem_max)
531 val = sysctl_rmem_max;
533 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
/* We double it on the way in to account for
 * "struct sk_buff" etc. overhead. Applications
 * assume that the SO_RCVBUF setting they make will
 * allow that much actual data to be received on that
 * socket.
 *
 * Applications are unaware that "struct sk_buff" and
 * other overheads allocate from the receive buffer
 * during socket buffer allocation.
 *
 * And after considering the possible alternatives,
 * returning the value we actually used in getsockopt
 * is the most desirable behavior.
 */
549 if ((val * 2) < SOCK_MIN_RCVBUF)
550 sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
552 sk->sk_rcvbuf = val * 2;
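/*
 * Example of the doubling (illustrative): a user asking for 64 KiB gets
 * a 128 KiB accounting limit, and getsockopt() later reports the
 * doubled value:
 *
 *	val = 65536;
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);
 *	now val == 131072 (assuming sysctl_rmem_max allowed it)
 */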
556 if (!capable(CAP_NET_ADMIN)) {
564 if (sk->sk_protocol == IPPROTO_TCP)
565 tcp_set_keepalive(sk, valbool);
567 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
571 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
575 sk->sk_no_check = valbool;
579 if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
580 sk->sk_priority = val;
586 if (optlen < sizeof(ling)) {
587 ret = -EINVAL; /* 1003.1g */
590 if (copy_from_user(&ling,optval,sizeof(ling))) {
595 sock_reset_flag(sk, SOCK_LINGER);
597 #if (BITS_PER_LONG == 32)
598 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
599 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
602 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
603 sock_set_flag(sk, SOCK_LINGER);
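/*
 * User-space sketch (illustrative): make close() linger for up to
 * 5 seconds while queued data drains:
 *
 *	struct linger ling = { .l_onoff = 1, .l_linger = 5 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling));
 */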
608 sock_warn_obsolete_bsdism("setsockopt");
613 set_bit(SOCK_PASSCRED, &sock->flags);
615 clear_bit(SOCK_PASSCRED, &sock->flags);
621 if (optname == SO_TIMESTAMP)
622 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
624 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
625 sock_set_flag(sk, SOCK_RCVTSTAMP);
626 sock_enable_timestamp(sk);
628 sock_reset_flag(sk, SOCK_RCVTSTAMP);
629 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
636 sk->sk_rcvlowat = val ? : 1;
640 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
644 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
647 case SO_ATTACH_FILTER:
649 if (optlen == sizeof(struct sock_fprog)) {
650 struct sock_fprog fprog;
653 if (copy_from_user(&fprog, optval, sizeof(fprog)))
656 ret = sk_attach_filter(&fprog, sk);
660 case SO_DETACH_FILTER:
661 ret = sk_detach_filter(sk);
666 set_bit(SOCK_PASSSEC, &sock->flags);
668 clear_bit(SOCK_PASSSEC, &sock->flags);
/* We implement SO_SNDLOWAT etc. as not settable
 * (1003.1g 5.3) */
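/*
 * User-space sketch for the SO_ATTACH_FILTER case above (illustrative):
 * a trivial one-instruction BPF program that accepts every packet up
 * to 64 KiB:
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *	};
 *	struct sock_fprog fprog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 */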
682 int sock_getsockopt(struct socket *sock, int level, int optname,
683 char __user *optval, int __user *optlen)
685 struct sock *sk = sock->sk;
693 unsigned int lv = sizeof(int);
696 if (get_user(len, optlen))
703 v.val = sock_flag(sk, SOCK_DBG);
707 v.val = sock_flag(sk, SOCK_LOCALROUTE);
711 v.val = !!sock_flag(sk, SOCK_BROADCAST);
715 v.val = sk->sk_sndbuf;
719 v.val = sk->sk_rcvbuf;
723 v.val = sk->sk_reuse;
727 v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
735 v.val = -sock_error(sk);
737 v.val = xchg(&sk->sk_err_soft, 0);
741 v.val = !!sock_flag(sk, SOCK_URGINLINE);
745 v.val = sk->sk_no_check;
749 v.val = sk->sk_priority;
754 v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
755 v.ling.l_linger = sk->sk_lingertime / HZ;
759 sock_warn_obsolete_bsdism("getsockopt");
763 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
764 !sock_flag(sk, SOCK_RCVTSTAMPNS);
768 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
lv = sizeof(struct timeval);
773 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
777 v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
778 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
lv = sizeof(struct timeval);
784 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
788 v.tm.tv_sec = sk->sk_sndtimeo / HZ;
789 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
794 v.val = sk->sk_rcvlowat;
802 v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
806 if (len > sizeof(sk->sk_peercred))
807 len = sizeof(sk->sk_peercred);
808 if (copy_to_user(optval, &sk->sk_peercred, len))
816 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
820 if (copy_to_user(optval, address, len))
825 /* Dubious BSD thing... Probably nobody even uses it, but
826 * the UNIX standard wants it for whatever reason... -DaveM
829 v.val = sk->sk_state == TCP_LISTEN;
833 v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
837 return security_socket_getpeersec_stream(sock, optval, optlen, len);
845 if (copy_to_user(optval, &v, len))
848 if (put_user(len, optlen))
854 * Initialize an sk_lock.
856 * (We also register the sk_lock with the lock validator.)
858 static inline void sock_lock_init(struct sock *sk)
860 sock_lock_init_class_and_name(sk,
861 af_family_slock_key_strings[sk->sk_family],
862 af_family_slock_keys + sk->sk_family,
863 af_family_key_strings[sk->sk_family],
864 af_family_keys + sk->sk_family);
867 static void sock_copy(struct sock *nsk, const struct sock *osk)
869 #ifdef CONFIG_SECURITY_NETWORK
870 void *sptr = nsk->sk_security;
873 memcpy(nsk, osk, osk->sk_prot->obj_size);
874 #ifdef CONFIG_SECURITY_NETWORK
875 nsk->sk_security = sptr;
876 security_sk_clone(osk, nsk);
880 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
884 struct kmem_cache *slab;
888 sk = kmem_cache_alloc(slab, priority);
890 sk = kmalloc(prot->obj_size, priority);
893 if (security_sk_alloc(sk, family, priority))
896 if (!try_module_get(prot->owner))
903 security_sk_free(sk);
906 kmem_cache_free(slab, sk);
912 static void sk_prot_free(struct proto *prot, struct sock *sk)
914 struct kmem_cache *slab;
915 struct module *owner;
920 security_sk_free(sk);
922 kmem_cache_free(slab, sk);
929 * sk_alloc - All socket objects are allocated here
930 * @net: the applicable net namespace
931 * @family: protocol family
932 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
933 * @prot: struct proto associated with this new sock instance
936 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
941 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
943 sk->sk_family = family;
945 * See comment in struct sock definition to understand
946 * why we need sk_prot_creator -acme
948 sk->sk_prot = sk->sk_prot_creator = prot;
950 sk->sk_net = get_net(net);
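/*
 * Usage sketch (illustrative; "my_proto" is a hypothetical struct proto):
 * a protocol family's create routine typically pairs sk_alloc() with
 * sock_init_data():
 *
 *	struct sock *sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto);
 *	if (sk == NULL)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 */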
956 void sk_free(struct sock *sk)
958 struct sk_filter *filter;
963 filter = rcu_dereference(sk->sk_filter);
965 sk_filter_uncharge(sk, filter);
966 rcu_assign_pointer(sk->sk_filter, NULL);
969 sock_disable_timestamp(sk);
971 if (atomic_read(&sk->sk_omem_alloc))
972 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
973 __FUNCTION__, atomic_read(&sk->sk_omem_alloc));
976 sk_prot_free(sk->sk_prot_creator, sk);
979 struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
983 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
985 struct sk_filter *filter;
987 sock_copy(newsk, sk);
990 get_net(newsk->sk_net);
991 sk_node_init(&newsk->sk_node);
992 sock_lock_init(newsk);
994 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
996 atomic_set(&newsk->sk_rmem_alloc, 0);
997 atomic_set(&newsk->sk_wmem_alloc, 0);
998 atomic_set(&newsk->sk_omem_alloc, 0);
999 skb_queue_head_init(&newsk->sk_receive_queue);
1000 skb_queue_head_init(&newsk->sk_write_queue);
1001 #ifdef CONFIG_NET_DMA
1002 skb_queue_head_init(&newsk->sk_async_wait_queue);
1005 rwlock_init(&newsk->sk_dst_lock);
1006 rwlock_init(&newsk->sk_callback_lock);
1007 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1008 af_callback_keys + newsk->sk_family,
1009 af_family_clock_key_strings[newsk->sk_family]);
1011 newsk->sk_dst_cache = NULL;
1012 newsk->sk_wmem_queued = 0;
1013 newsk->sk_forward_alloc = 0;
1014 newsk->sk_send_head = NULL;
1015 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1017 sock_reset_flag(newsk, SOCK_DONE);
1018 skb_queue_head_init(&newsk->sk_error_queue);
1020 filter = newsk->sk_filter;
1022 sk_filter_charge(newsk, filter);
1024 if (unlikely(xfrm_sk_clone_policy(newsk))) {
/* It is still a raw copy of the parent, so invalidate
 * the destructor and do a plain sk_free() */
1027 newsk->sk_destruct = NULL;
1034 newsk->sk_priority = 0;
1035 atomic_set(&newsk->sk_refcnt, 2);
/*
 * Increment the counter in the same struct proto as the master
 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
 * is the same as sk->sk_prot->socks, as this field was copied
 * with memcpy).
 *
 * This _changes_ the previous behaviour, where
 * tcp_create_openreq_child was always incrementing the
 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
 * to be taken into account in all callers. -acme
 */
1048 sk_refcnt_debug_inc(newsk);
1049 newsk->sk_socket = NULL;
1050 newsk->sk_sleep = NULL;
1052 if (newsk->sk_prot->sockets_allocated)
1053 atomic_inc(newsk->sk_prot->sockets_allocated);
1059 EXPORT_SYMBOL_GPL(sk_clone);
1061 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1063 __sk_dst_set(sk, dst);
1064 sk->sk_route_caps = dst->dev->features;
1065 if (sk->sk_route_caps & NETIF_F_GSO)
1066 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1067 if (sk_can_gso(sk)) {
1068 if (dst->header_len)
1069 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1071 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1074 EXPORT_SYMBOL_GPL(sk_setup_caps);
1076 void __init sk_init(void)
1078 if (num_physpages <= 4096) {
1079 sysctl_wmem_max = 32767;
1080 sysctl_rmem_max = 32767;
1081 sysctl_wmem_default = 32767;
1082 sysctl_rmem_default = 32767;
1083 } else if (num_physpages >= 131072) {
1084 sysctl_wmem_max = 131071;
1085 sysctl_rmem_max = 131071;
1090 * Simple resource managers for sockets.
1095 * Write buffer destructor automatically called from kfree_skb.
1097 void sock_wfree(struct sk_buff *skb)
1099 struct sock *sk = skb->sk;
1101 /* In case it might be waiting for more memory. */
1102 atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
1103 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
1104 sk->sk_write_space(sk);
1109 * Read buffer destructor automatically called from kfree_skb.
1111 void sock_rfree(struct sk_buff *skb)
1113 struct sock *sk = skb->sk;
1115 skb_truesize_check(skb);
1116 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
1117 sk_mem_uncharge(skb->sk, skb->truesize);
1121 int sock_i_uid(struct sock *sk)
1125 read_lock(&sk->sk_callback_lock);
1126 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
1127 read_unlock(&sk->sk_callback_lock);
1131 unsigned long sock_i_ino(struct sock *sk)
1135 read_lock(&sk->sk_callback_lock);
1136 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1137 read_unlock(&sk->sk_callback_lock);
1142 * Allocate a skb from the socket's send buffer.
1144 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1147 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1148 struct sk_buff * skb = alloc_skb(size, priority);
1150 skb_set_owner_w(skb, sk);
1158 * Allocate a skb from the socket's receive buffer.
1160 struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1163 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1164 struct sk_buff *skb = alloc_skb(size, priority);
1166 skb_set_owner_r(skb, sk);
1174 * Allocate a memory block from the socket's option memory buffer.
1176 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1178 if ((unsigned)size <= sysctl_optmem_max &&
1179 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
/* First do the add, to avoid the race if kmalloc
 * might sleep.
 */
1184 atomic_add(size, &sk->sk_omem_alloc);
1185 mem = kmalloc(size, priority);
1188 atomic_sub(size, &sk->sk_omem_alloc);
1194 * Free an option memory block.
1196 void sock_kfree_s(struct sock *sk, void *mem, int size)
1199 atomic_sub(size, &sk->sk_omem_alloc);
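/*
 * Usage sketch (illustrative): sock_kmalloc()/sock_kfree_s() must be
 * paired with the same size so sk_omem_alloc balances:
 *
 *	void *opt = sock_kmalloc(sk, len, GFP_KERNEL);
 *	if (opt == NULL)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, len);
 */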
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 * I think these locks should be removed for datagram sockets.
 */
1205 static long sock_wait_for_wmem(struct sock * sk, long timeo)
1209 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1213 if (signal_pending(current))
1215 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1216 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1217 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1219 if (sk->sk_shutdown & SEND_SHUTDOWN)
1223 timeo = schedule_timeout(timeo);
1225 finish_wait(sk->sk_sleep, &wait);
1231 * Generic send/receive buffer handlers
1234 static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
1235 unsigned long header_len,
1236 unsigned long data_len,
1237 int noblock, int *errcode)
1239 struct sk_buff *skb;
1244 gfp_mask = sk->sk_allocation;
1245 if (gfp_mask & __GFP_WAIT)
1246 gfp_mask |= __GFP_REPEAT;
1248 timeo = sock_sndtimeo(sk, noblock);
1250 err = sock_error(sk);
1255 if (sk->sk_shutdown & SEND_SHUTDOWN)
1258 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1259 skb = alloc_skb(header_len, gfp_mask);
1264 /* No pages, we're done... */
1268 npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1269 skb->truesize += data_len;
1270 skb_shinfo(skb)->nr_frags = npages;
1271 for (i = 0; i < npages; i++) {
1275 page = alloc_pages(sk->sk_allocation, 0);
1278 skb_shinfo(skb)->nr_frags = i;
1283 frag = &skb_shinfo(skb)->frags[i];
1285 frag->page_offset = 0;
1286 frag->size = (data_len >= PAGE_SIZE ?
1289 data_len -= PAGE_SIZE;
1292 /* Full success... */
1298 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1299 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1303 if (signal_pending(current))
1305 timeo = sock_wait_for_wmem(sk, timeo);
1308 skb_set_owner_w(skb, sk);
1312 err = sock_intr_errno(timeo);
1318 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1319 int noblock, int *errcode)
1321 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
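/*
 * Usage sketch (illustrative): a datagram sendmsg() path would block
 * here, within the send timeout, until write memory is available:
 *
 *	skb = sock_alloc_send_skb(sk, len + hlen,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (skb == NULL)
 *		goto out;	(err already holds the reason)
 */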
1324 static void __lock_sock(struct sock *sk)
1329 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1330 TASK_UNINTERRUPTIBLE);
1331 spin_unlock_bh(&sk->sk_lock.slock);
1333 spin_lock_bh(&sk->sk_lock.slock);
1334 if (!sock_owned_by_user(sk))
1337 finish_wait(&sk->sk_lock.wq, &wait);
1340 static void __release_sock(struct sock *sk)
1342 struct sk_buff *skb = sk->sk_backlog.head;
1345 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1349 struct sk_buff *next = skb->next;
1352 sk->sk_backlog_rcv(sk, skb);
/*
 * We are in process context here with softirqs
 * disabled, use cond_resched_softirq() to preempt.
 * This is safe to do because we've taken the backlog
 * queue private.
 */
1360 cond_resched_softirq();
1363 } while (skb != NULL);
1366 } while ((skb = sk->sk_backlog.head) != NULL);
1370 * sk_wait_data - wait for data to arrive at sk_receive_queue
1371 * @sk: sock to wait on
1372 * @timeo: for how long
 * Now socket state including sk->sk_err is changed only under the lock,
 * hence we may omit checks after joining the wait queue.
 * We check the receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
 */
1379 int sk_wait_data(struct sock *sk, long *timeo)
1384 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1385 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1386 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1387 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1388 finish_wait(sk->sk_sleep, &wait);
1392 EXPORT_SYMBOL(sk_wait_data);
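/*
 * Usage sketch (illustrative): a recvmsg() implementation loops on
 * sk_wait_data() under lock_sock() until data arrives or the timeout
 * (derived from sk_rcvtimeo) expires:
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo);
 *	}
 */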
1395 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1397 * @size: memory size to allocate
1398 * @kind: allocation type
1400 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1401 * rmem allocation. This function assumes that protocols which have
1402 * memory_pressure use sk_wmem_queued as write buffer accounting.
1404 int __sk_mem_schedule(struct sock *sk, int size, int kind)
1406 struct proto *prot = sk->sk_prot;
1407 int amt = sk_mem_pages(size);
1410 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1411 allocated = atomic_add_return(amt, prot->memory_allocated);
1414 if (allocated <= prot->sysctl_mem[0]) {
1415 if (prot->memory_pressure && *prot->memory_pressure)
1416 *prot->memory_pressure = 0;
1420 /* Under pressure. */
1421 if (allocated > prot->sysctl_mem[1])
1422 if (prot->enter_memory_pressure)
1423 prot->enter_memory_pressure();
1425 /* Over hard limit. */
1426 if (allocated > prot->sysctl_mem[2])
1427 goto suppress_allocation;
1429 /* guarantee minimum buffer size under pressure */
1430 if (kind == SK_MEM_RECV) {
1431 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1433 } else { /* SK_MEM_SEND */
1434 if (sk->sk_type == SOCK_STREAM) {
1435 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1437 } else if (atomic_read(&sk->sk_wmem_alloc) <
1438 prot->sysctl_wmem[0])
1442 if (prot->memory_pressure) {
1443 if (!*prot->memory_pressure ||
1444 prot->sysctl_mem[2] > atomic_read(prot->sockets_allocated) *
1445 sk_mem_pages(sk->sk_wmem_queued +
1446 atomic_read(&sk->sk_rmem_alloc) +
1447 sk->sk_forward_alloc))
1451 suppress_allocation:
1453 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1454 sk_stream_moderate_sndbuf(sk);
/* Fail only if socket is _under_ its sndbuf.
 * In this case we cannot block, so we have to fail.
 */
1459 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1463 /* Alas. Undo changes. */
1464 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1465 atomic_sub(amt, prot->memory_allocated);
1469 EXPORT_SYMBOL(__sk_mem_schedule);
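/*
 * Worked example (illustrative): with 4 KiB pages, SK_MEM_QUANTUM is
 * 4096, so charging a 3000-byte skb rounds up to one quantum:
 *
 *	amt = sk_mem_pages(3000) = 1
 *	sk->sk_forward_alloc += 4096
 *	memory_allocated     += 1
 *
 * and the unused 1096 bytes stay in sk_forward_alloc for later charges.
 */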
 * __sk_mem_reclaim - reclaim memory_allocated
1475 void __sk_mem_reclaim(struct sock *sk)
1477 struct proto *prot = sk->sk_prot;
1479 atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
1480 prot->memory_allocated);
1481 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1483 if (prot->memory_pressure && *prot->memory_pressure &&
1484 (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
1485 *prot->memory_pressure = 0;
1488 EXPORT_SYMBOL(__sk_mem_reclaim);
1492 * Set of default routines for initialising struct proto_ops when
1493 * the protocol does not support a particular function. In certain
1494 * cases where it makes no sense for a protocol to have a "do nothing"
1495 * function, some default processing is provided.
1498 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1503 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
1509 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1514 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1519 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
1525 unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt)
1530 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1535 int sock_no_listen(struct socket *sock, int backlog)
1540 int sock_no_shutdown(struct socket *sock, int how)
1545 int sock_no_setsockopt(struct socket *sock, int level, int optname,
1546 char __user *optval, int optlen)
1551 int sock_no_getsockopt(struct socket *sock, int level, int optname,
1552 char __user *optval, int __user *optlen)
1557 int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1563 int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1564 size_t len, int flags)
1569 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1571 /* Mirror missing mmap method error code */
1575 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1578 struct msghdr msg = {.msg_flags = flags};
1580 char *kaddr = kmap(page);
1581 iov.iov_base = kaddr + offset;
1583 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
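/*
 * Usage sketch (illustrative; "my_dgram_ops" is hypothetical): families
 * plug these stubs into their struct proto_ops for unsupported calls:
 *
 *	static const struct proto_ops my_dgram_ops = {
 *		.family		= PF_PACKET,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.mmap		= sock_no_mmap,
 *		...
 *	};
 */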
1589 * Default Socket Callbacks
1592 static void sock_def_wakeup(struct sock *sk)
1594 read_lock(&sk->sk_callback_lock);
1595 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1596 wake_up_interruptible_all(sk->sk_sleep);
1597 read_unlock(&sk->sk_callback_lock);
1600 static void sock_def_error_report(struct sock *sk)
1602 read_lock(&sk->sk_callback_lock);
1603 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1604 wake_up_interruptible(sk->sk_sleep);
1605 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
1606 read_unlock(&sk->sk_callback_lock);
1609 static void sock_def_readable(struct sock *sk, int len)
1611 read_lock(&sk->sk_callback_lock);
1612 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1613 wake_up_interruptible(sk->sk_sleep);
1614 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
1615 read_unlock(&sk->sk_callback_lock);
1618 static void sock_def_write_space(struct sock *sk)
1620 read_lock(&sk->sk_callback_lock);
/* Do not wake up a writer until he can make "significant"
 * progress.  --DaveM
 */
1625 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
1626 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1627 wake_up_interruptible(sk->sk_sleep);
1629 /* Should agree with poll, otherwise some programs break */
1630 if (sock_writeable(sk))
1631 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
1634 read_unlock(&sk->sk_callback_lock);
1637 static void sock_def_destruct(struct sock *sk)
1639 kfree(sk->sk_protinfo);
1642 void sk_send_sigurg(struct sock *sk)
1644 if (sk->sk_socket && sk->sk_socket->file)
1645 if (send_sigurg(&sk->sk_socket->file->f_owner))
1646 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
1649 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
1650 unsigned long expires)
1652 if (!mod_timer(timer, expires))
1656 EXPORT_SYMBOL(sk_reset_timer);
1658 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
1660 if (timer_pending(timer) && del_timer(timer))
1664 EXPORT_SYMBOL(sk_stop_timer);
1666 void sock_init_data(struct socket *sock, struct sock *sk)
1668 skb_queue_head_init(&sk->sk_receive_queue);
1669 skb_queue_head_init(&sk->sk_write_queue);
1670 skb_queue_head_init(&sk->sk_error_queue);
1671 #ifdef CONFIG_NET_DMA
1672 skb_queue_head_init(&sk->sk_async_wait_queue);
1675 sk->sk_send_head = NULL;
1677 init_timer(&sk->sk_timer);
1679 sk->sk_allocation = GFP_KERNEL;
1680 sk->sk_rcvbuf = sysctl_rmem_default;
1681 sk->sk_sndbuf = sysctl_wmem_default;
1682 sk->sk_state = TCP_CLOSE;
1683 sk->sk_socket = sock;
1685 sock_set_flag(sk, SOCK_ZAPPED);
1688 sk->sk_type = sock->type;
1689 sk->sk_sleep = &sock->wait;
1692 sk->sk_sleep = NULL;
1694 rwlock_init(&sk->sk_dst_lock);
1695 rwlock_init(&sk->sk_callback_lock);
1696 lockdep_set_class_and_name(&sk->sk_callback_lock,
1697 af_callback_keys + sk->sk_family,
1698 af_family_clock_key_strings[sk->sk_family]);
1700 sk->sk_state_change = sock_def_wakeup;
1701 sk->sk_data_ready = sock_def_readable;
1702 sk->sk_write_space = sock_def_write_space;
1703 sk->sk_error_report = sock_def_error_report;
1704 sk->sk_destruct = sock_def_destruct;
1706 sk->sk_sndmsg_page = NULL;
1707 sk->sk_sndmsg_off = 0;
1709 sk->sk_peercred.pid = 0;
1710 sk->sk_peercred.uid = -1;
1711 sk->sk_peercred.gid = -1;
1712 sk->sk_write_pending = 0;
1713 sk->sk_rcvlowat = 1;
1714 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
1715 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
1717 sk->sk_stamp = ktime_set(-1L, -1L);
1719 atomic_set(&sk->sk_refcnt, 1);
1720 atomic_set(&sk->sk_drops, 0);
1723 void fastcall lock_sock_nested(struct sock *sk, int subclass)
1726 spin_lock_bh(&sk->sk_lock.slock);
1727 if (sk->sk_lock.owned)
1729 sk->sk_lock.owned = 1;
1730 spin_unlock(&sk->sk_lock.slock);
1732 * The sk_lock has mutex_lock() semantics here:
1734 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
1738 EXPORT_SYMBOL(lock_sock_nested);
1740 void fastcall release_sock(struct sock *sk)
1743 * The sk_lock has mutex_unlock() semantics:
1745 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
1747 spin_lock_bh(&sk->sk_lock.slock);
1748 if (sk->sk_backlog.tail)
1750 sk->sk_lock.owned = 0;
1751 if (waitqueue_active(&sk->sk_lock.wq))
1752 wake_up(&sk->sk_lock.wq);
1753 spin_unlock_bh(&sk->sk_lock.slock);
1755 EXPORT_SYMBOL(release_sock);
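/*
 * Usage sketch (illustrative): the canonical process-context pattern.
 * While the lock is owned, softirq input is parked on sk_backlog and
 * is replayed by __release_sock() on release:
 *
 *	lock_sock(sk);
 *	... modify socket state ...
 *	release_sock(sk);
 */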
1757 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
1760 if (!sock_flag(sk, SOCK_TIMESTAMP))
1761 sock_enable_timestamp(sk);
1762 tv = ktime_to_timeval(sk->sk_stamp);
1763 if (tv.tv_sec == -1)
1765 if (tv.tv_sec == 0) {
1766 sk->sk_stamp = ktime_get_real();
1767 tv = ktime_to_timeval(sk->sk_stamp);
1769 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
1771 EXPORT_SYMBOL(sock_get_timestamp);
1773 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
1776 if (!sock_flag(sk, SOCK_TIMESTAMP))
1777 sock_enable_timestamp(sk);
1778 ts = ktime_to_timespec(sk->sk_stamp);
1779 if (ts.tv_sec == -1)
1781 if (ts.tv_sec == 0) {
1782 sk->sk_stamp = ktime_get_real();
1783 ts = ktime_to_timespec(sk->sk_stamp);
1785 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
1787 EXPORT_SYMBOL(sock_get_timestampns);
1789 void sock_enable_timestamp(struct sock *sk)
1791 if (!sock_flag(sk, SOCK_TIMESTAMP)) {
1792 sock_set_flag(sk, SOCK_TIMESTAMP);
1793 net_enable_timestamp();
/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it).
 */
1804 int sock_common_getsockopt(struct socket *sock, int level, int optname,
1805 char __user *optval, int __user *optlen)
1807 struct sock *sk = sock->sk;
1809 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
1812 EXPORT_SYMBOL(sock_common_getsockopt);
1814 #ifdef CONFIG_COMPAT
1815 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
1816 char __user *optval, int __user *optlen)
1818 struct sock *sk = sock->sk;
1820 if (sk->sk_prot->compat_getsockopt != NULL)
1821 return sk->sk_prot->compat_getsockopt(sk, level, optname,
1823 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
1825 EXPORT_SYMBOL(compat_sock_common_getsockopt);
1828 int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
1829 struct msghdr *msg, size_t size, int flags)
1831 struct sock *sk = sock->sk;
1835 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
1836 flags & ~MSG_DONTWAIT, &addr_len);
1838 msg->msg_namelen = addr_len;
1842 EXPORT_SYMBOL(sock_common_recvmsg);
1845 * Set socket options on an inet socket.
1847 int sock_common_setsockopt(struct socket *sock, int level, int optname,
1848 char __user *optval, int optlen)
1850 struct sock *sk = sock->sk;
1852 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
1855 EXPORT_SYMBOL(sock_common_setsockopt);
1857 #ifdef CONFIG_COMPAT
1858 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
1859 char __user *optval, int optlen)
1861 struct sock *sk = sock->sk;
1863 if (sk->sk_prot->compat_setsockopt != NULL)
1864 return sk->sk_prot->compat_setsockopt(sk, level, optname,
1866 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
1868 EXPORT_SYMBOL(compat_sock_common_setsockopt);
1871 void sk_common_release(struct sock *sk)
1873 if (sk->sk_prot->destroy)
1874 sk->sk_prot->destroy(sk);
/*
 * Observation: when sk_common_release is called, processes have
 * no access to the socket, but the network still does.
 *
 * Step one, detach it from networking:
 *
 * A. Remove from hash tables.
 */
1884 sk->sk_prot->unhash(sk);
/*
 * At this point the socket cannot receive new packets, but it is
 * possible that some packets are in flight because some CPU runs
 * the receiver and did a hash table lookup before we unhashed the
 * socket. They will reach the receive queue and be purged by the
 * socket destructor.
 *
 * Also we still have packets pending on the receive queue and
 * probably our own packets waiting in device queues. sock_destroy
 * will drain the receive queue, but transmitted packets will delay
 * socket destruction until the last reference is released.
 */
1900 xfrm_sk_free_policy(sk);
1902 sk_refcnt_debug_release(sk);
1906 EXPORT_SYMBOL(sk_common_release);
1908 static DEFINE_RWLOCK(proto_list_lock);
1909 static LIST_HEAD(proto_list);
1911 int proto_register(struct proto *prot, int alloc_slab)
1913 char *request_sock_slab_name = NULL;
1914 char *timewait_sock_slab_name;
1916 if (sock_prot_inuse_init(prot) != 0) {
1917 printk(KERN_CRIT "%s: Can't alloc inuse counters!\n", prot->name);
1922 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
1923 SLAB_HWCACHE_ALIGN, NULL);
1925 if (prot->slab == NULL) {
1926 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
1928 goto out_free_inuse;
1931 if (prot->rsk_prot != NULL) {
1932 static const char mask[] = "request_sock_%s";
1934 request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
1935 if (request_sock_slab_name == NULL)
1936 goto out_free_sock_slab;
1938 sprintf(request_sock_slab_name, mask, prot->name);
1939 prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
1940 prot->rsk_prot->obj_size, 0,
1941 SLAB_HWCACHE_ALIGN, NULL);
1943 if (prot->rsk_prot->slab == NULL) {
1944 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
1946 goto out_free_request_sock_slab_name;
1950 if (prot->twsk_prot != NULL) {
1951 static const char mask[] = "tw_sock_%s";
1953 timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
1955 if (timewait_sock_slab_name == NULL)
1956 goto out_free_request_sock_slab;
1958 sprintf(timewait_sock_slab_name, mask, prot->name);
1959 prot->twsk_prot->twsk_slab =
1960 kmem_cache_create(timewait_sock_slab_name,
1961 prot->twsk_prot->twsk_obj_size,
1962 0, SLAB_HWCACHE_ALIGN,
1964 if (prot->twsk_prot->twsk_slab == NULL)
1965 goto out_free_timewait_sock_slab_name;
1969 write_lock(&proto_list_lock);
1970 list_add(&prot->node, &proto_list);
1971 write_unlock(&proto_list_lock);
1974 out_free_timewait_sock_slab_name:
1975 kfree(timewait_sock_slab_name);
1976 out_free_request_sock_slab:
1977 if (prot->rsk_prot && prot->rsk_prot->slab) {
1978 kmem_cache_destroy(prot->rsk_prot->slab);
1979 prot->rsk_prot->slab = NULL;
1981 out_free_request_sock_slab_name:
1982 kfree(request_sock_slab_name);
1984 kmem_cache_destroy(prot->slab);
1987 sock_prot_inuse_free(prot);
1992 EXPORT_SYMBOL(proto_register);
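/*
 * Usage sketch (illustrative; "my_proto" is hypothetical): module init
 * and exit pair registration and unregistration; alloc_slab = 1 asks
 * for a kmem cache of prot->obj_size objects:
 *
 *	err = proto_register(&my_proto, 1);
 *	if (err)
 *		return err;
 *	...
 *	proto_unregister(&my_proto);
 */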
1994 void proto_unregister(struct proto *prot)
1996 write_lock(&proto_list_lock);
1997 list_del(&prot->node);
1998 write_unlock(&proto_list_lock);
2000 sock_prot_inuse_free(prot);
2002 if (prot->slab != NULL) {
2003 kmem_cache_destroy(prot->slab);
2007 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2008 const char *name = kmem_cache_name(prot->rsk_prot->slab);
2010 kmem_cache_destroy(prot->rsk_prot->slab);
2012 prot->rsk_prot->slab = NULL;
2015 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2016 const char *name = kmem_cache_name(prot->twsk_prot->twsk_slab);
2018 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2020 prot->twsk_prot->twsk_slab = NULL;
2024 EXPORT_SYMBOL(proto_unregister);
2026 #ifdef CONFIG_PROC_FS
2027 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2028 __acquires(proto_list_lock)
2030 read_lock(&proto_list_lock);
2031 return seq_list_start_head(&proto_list, *pos);
2034 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2036 return seq_list_next(v, &proto_list, pos);
2039 static void proto_seq_stop(struct seq_file *seq, void *v)
2040 __releases(proto_list_lock)
2042 read_unlock(&proto_list_lock);
2045 static char proto_method_implemented(const void *method)
2047 return method == NULL ? 'n' : 'y';
2050 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2052 seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
2053 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2056 proto->sockets_allocated != NULL ? atomic_read(proto->sockets_allocated) : -1,
2057 proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
2058 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
2060 proto->slab == NULL ? "no" : "yes",
2061 module_name(proto->owner),
2062 proto_method_implemented(proto->close),
2063 proto_method_implemented(proto->connect),
2064 proto_method_implemented(proto->disconnect),
2065 proto_method_implemented(proto->accept),
2066 proto_method_implemented(proto->ioctl),
2067 proto_method_implemented(proto->init),
2068 proto_method_implemented(proto->destroy),
2069 proto_method_implemented(proto->shutdown),
2070 proto_method_implemented(proto->setsockopt),
2071 proto_method_implemented(proto->getsockopt),
2072 proto_method_implemented(proto->sendmsg),
2073 proto_method_implemented(proto->recvmsg),
2074 proto_method_implemented(proto->sendpage),
2075 proto_method_implemented(proto->bind),
2076 proto_method_implemented(proto->backlog_rcv),
2077 proto_method_implemented(proto->hash),
2078 proto_method_implemented(proto->unhash),
2079 proto_method_implemented(proto->get_port),
2080 proto_method_implemented(proto->enter_memory_pressure));
2083 static int proto_seq_show(struct seq_file *seq, void *v)
2085 if (v == &proto_list)
2086 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2095 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2097 proto_seq_printf(seq, list_entry(v, struct proto, node));
2101 static const struct seq_operations proto_seq_ops = {
2102 .start = proto_seq_start,
2103 .next = proto_seq_next,
2104 .stop = proto_seq_stop,
2105 .show = proto_seq_show,
2108 static int proto_seq_open(struct inode *inode, struct file *file)
2110 return seq_open(file, &proto_seq_ops);
2113 static const struct file_operations proto_seq_fops = {
2114 .owner = THIS_MODULE,
2115 .open = proto_seq_open,
2117 .llseek = seq_lseek,
2118 .release = seq_release,
2121 static int __init proto_init(void)
2123 /* register /proc/net/protocols */
2124 return proc_net_fops_create(&init_net, "protocols", S_IRUGO, &proto_seq_fops) == NULL ? -ENOBUFS : 0;
2127 subsys_initcall(proto_init);
#endif /* CONFIG_PROC_FS */
2131 EXPORT_SYMBOL(sk_alloc);
2132 EXPORT_SYMBOL(sk_free);
2133 EXPORT_SYMBOL(sk_send_sigurg);
2134 EXPORT_SYMBOL(sock_alloc_send_skb);
2135 EXPORT_SYMBOL(sock_init_data);
2136 EXPORT_SYMBOL(sock_kfree_s);
2137 EXPORT_SYMBOL(sock_kmalloc);
2138 EXPORT_SYMBOL(sock_no_accept);
2139 EXPORT_SYMBOL(sock_no_bind);
2140 EXPORT_SYMBOL(sock_no_connect);
2141 EXPORT_SYMBOL(sock_no_getname);
2142 EXPORT_SYMBOL(sock_no_getsockopt);
2143 EXPORT_SYMBOL(sock_no_ioctl);
2144 EXPORT_SYMBOL(sock_no_listen);
2145 EXPORT_SYMBOL(sock_no_mmap);
2146 EXPORT_SYMBOL(sock_no_poll);
2147 EXPORT_SYMBOL(sock_no_recvmsg);
2148 EXPORT_SYMBOL(sock_no_sendmsg);
2149 EXPORT_SYMBOL(sock_no_sendpage);
2150 EXPORT_SYMBOL(sock_no_setsockopt);
2151 EXPORT_SYMBOL(sock_no_shutdown);
2152 EXPORT_SYMBOL(sock_no_socketpair);
2153 EXPORT_SYMBOL(sock_rfree);
2154 EXPORT_SYMBOL(sock_setsockopt);
2155 EXPORT_SYMBOL(sock_wfree);
2156 EXPORT_SYMBOL(sock_wmalloc);
2157 EXPORT_SYMBOL(sock_i_uid);
2158 EXPORT_SYMBOL(sock_i_ino);
2159 EXPORT_SYMBOL(sysctl_optmem_max);