2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
23 * Alan Cox : Numerous verify_area() calls
24 * Alan Cox : Set the ACK bit on a reset
25 * Alan Cox : Stopped it crashing if it closed while
26 * sk->inuse=1 and was trying to connect
28 * Alan Cox : All icmp error handling was broken
29 * pointers passed were wrong and the
30 * socket was looked up backwards. Nobody
31 * tested any icmp error code obviously.
32 * Alan Cox : tcp_err() now handled properly. It
33 * wakes people on errors. poll
34 * behaves and the icmp error race
35 * has gone by moving it into sock.c
36 * Alan Cox : tcp_send_reset() fixed to work for
37 * everything not just packets for
39 * Alan Cox : tcp option processing.
40 * Alan Cox : Reset tweaked (still not 100%) [Had
42 * Herp Rosmanith : More reset fixes
43 * Alan Cox : No longer acks invalid rst frames.
44 * Acking any kind of RST is right out.
45 * Alan Cox : Sets an ignore me flag on an rst
46 * receive otherwise odd bits of prattle
48 * Alan Cox : Fixed another acking RST frame bug.
49 * Should stop LAN workplace lockups.
50 * Alan Cox : Some tidyups using the new skb list
52 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Pulls options out correctly on accepts
54 * Alan Cox : Fixed assorted sk->rqueue->next errors
55 * Alan Cox : PSH doesn't end a TCP read. Switched a
57 * Alan Cox : Tidied tcp_data to avoid a potential
59 * Alan Cox : Added some better commenting, as the
60 * tcp code is hard to follow
61 * Alan Cox : Removed incorrect check for 20 * psh
62 * Michael O'Reilly : ack < copied bug fix.
63 * Johannes Stille : Misc tcp fixes (not all in yet).
64 * Alan Cox : FIN with no memory -> CRASH
65 * Alan Cox : Added socket option proto entries.
66 * Also added awareness of them to accept.
67 * Alan Cox : Added TCP options (SOL_TCP)
68 * Alan Cox : Switched wakeup calls to callbacks,
69 * so the kernel can layer network
71 * Alan Cox : Use ip_tos/ip_ttl settings.
72 * Alan Cox : Handle FIN (more) properly (we hope).
73 * Alan Cox : RST frames sent on unsynchronised
75 * Alan Cox : Put in missing check for SYN bit.
76 * Alan Cox : Added tcp_select_window() aka NET2E
77 * window non shrink trick.
78 * Alan Cox : Added a couple of small NET2E timer
80 * Charles Hedrick : TCP fixes
81 * Toomas Tamm : TCP window fixes
82 * Alan Cox : Small URG fix to rlogin ^C ack fight
83 * Charles Hedrick : Rewrote most of it to actually work
84 * Linus : Rewrote tcp_read() and URG handling
86 * Gerhard Koerting: Fixed some missing timer handling
87 * Matthew Dillon : Reworked TCP machine states as per RFC
88 * Gerhard Koerting: PC/TCP workarounds
89 * Adam Caldwell : Assorted timer/timing errors
90 * Matthew Dillon : Fixed another RST bug
91 * Alan Cox : Move to kernel side addressing changes.
92 * Alan Cox : Beginning work on TCP fastpathing
94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
95 * Alan Cox : TCP fast path debugging
96 * Alan Cox : Window clamping
97 * Michael Riepe : Bug in tcp_check()
98 * Matt Dillon : More TCP improvements and RST bug fixes
99 * Matt Dillon : Yet more small nasties removed from the
100 * TCP code (Be very nice to this man if
101 * tcp finally works 100%) 8)
102 * Alan Cox : BSD accept semantics.
103 * Alan Cox : Reset on closedown bug.
104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
105 * Michael Pall : Handle poll() after URG properly in
107 * Michael Pall : Undo the last fix in tcp_read_urg()
108 * (multi URG PUSH broke rlogin).
109 * Michael Pall : Fix the multi URG PUSH problem in
110 * tcp_readable(), poll() after URG
112 * Michael Pall : recv(...,MSG_OOB) never blocks in the
114 * Alan Cox : Changed the semantics of sk->socket to
115 * fix a race and a signal problem with
116 * accept() and async I/O.
117 * Alan Cox : Relaxed the rules on tcp_sendto().
118 * Yury Shevchuk : Really fixed accept() blocking problem.
119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
120 * clients/servers which listen in on
122 * Alan Cox : Cleaned the above up and shrank it to
123 * a sensible code size.
124 * Alan Cox : Self connect lockup fix.
125 * Alan Cox : No connect to multicast.
126 * Ross Biro : Close unaccepted children on master
128 * Alan Cox : Reset tracing code.
129 * Alan Cox : Spurious resets on shutdown.
130 * Alan Cox : Giant 15 minute/60 second timer error
131 * Alan Cox : Small whoops in polling before an
133 * Alan Cox : Kept the state trace facility since
134 * it's handy for debugging.
135 * Alan Cox : More reset handler fixes.
136 * Alan Cox : Started rewriting the code based on
137 * the RFC's for other useful protocol
138 * references see: Comer, KA9Q NOS, and
139 * for a reference on the difference
140 * between specifications and how BSD
141 * works see the 4.4lite source.
142 * A.N.Kuznetsov : Don't time wait on completion of tidy
144 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
145 * Linus Torvalds : Fixed BSD port reuse to work first syn
146 * Alan Cox : Reimplemented timers as per the RFC
147 * and using multiple timers for sanity.
148 * Alan Cox : Small bug fixes, and a lot of new
150 * Alan Cox : Fixed dual reader crash by locking
151 * the buffers (much like datagram.c)
152 * Alan Cox : Fixed stuck sockets in probe. A probe
153 * now gets fed up with retrying without
154 * (even a no-space) answer.
155 * Alan Cox : Extracted closing code better
156 * Alan Cox : Fixed the closing state machine to
158 * Alan Cox : More 'per spec' fixes.
159 * Jorge Cwik : Even faster checksumming.
160 * Alan Cox : tcp_data() doesn't ack illegal PSH
161 * only frames. At least one pc tcp stack
163 * Alan Cox : Cache last socket.
164 * Alan Cox : Per route irtt.
165 * Matt Day : poll()->select() match BSD precisely on error
166 * Alan Cox : New buffers
167 * Marc Tamsky : Various sk->prot->retransmits and
168 * sk->retransmits misupdating fixed.
169 * Fixed tcp_write_timeout: stuck close,
170 * and TCP syn retries gets used now.
171 * Mark Yarvis : In tcp_read_wakeup(), don't send an
172 * ack if state is TCP_CLOSED.
173 * Alan Cox : Look up device on a retransmit - routes may
174 * change. Doesn't yet cope with MSS shrink right
176 * Marc Tamsky : Closing in closing fixes.
177 * Mike Shaver : RFC1122 verifications.
178 * Alan Cox : rcv_saddr errors.
179 * Alan Cox : Block double connect().
180 * Alan Cox : Small hooks for enSKIP.
181 * Alexey Kuznetsov: Path MTU discovery.
182 * Alan Cox : Support soft errors.
183 * Alan Cox : Fix MTU discovery pathological case
184 * when the remote claims no mtu!
185 * Marc Tamsky : TCP_CLOSE fix.
186 * Colin (G3TNE) : Send a reset on syn ack replies in
187 * window but wrong (fixes NT lpd problems)
188 * Pedro Roque : Better TCP window handling, delayed ack.
189 * Joerg Reuter : No modification of locked buffers in
190 * tcp_do_retransmit()
191 * Eric Schenk : Changed receiver side silly window
192 * avoidance algorithm to BSD style
193 * algorithm. This doubles throughput
194 * against machines running Solaris,
195 * and seems to result in general
197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
198 * Willy Konynenberg : Transparent proxying support.
199 * Mike McLagan : Routing by source
200 * Keith Owens : Do proper merging with partial SKB's in
201 * tcp_do_sendmsg to avoid burstiness.
202 * Eric Schenk : Fix fast close down bug with
203 * shutdown() followed by close().
204 * Andi Kleen : Make poll agree with SIGIO
205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
206 * lingertime == 0 (RFC 793 ABORT Call)
207 * Hirokazu Takahashi : Use copy_from_user() instead of
208 * csum_and_copy_from_user() if possible.
210 * This program is free software; you can redistribute it and/or
211 * modify it under the terms of the GNU General Public License
212 * as published by the Free Software Foundation; either version
213 * 2 of the License, or(at your option) any later version.
215 * Description of States:
217 * TCP_SYN_SENT sent a connection request (SYN), waiting for ack
219 * TCP_SYN_RECV received a connection request, sent SYN+ACK,
220 * waiting for the final ACK in the three-way handshake.
222 * TCP_ESTABLISHED connection established
224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
225 * transmission of remaining buffered data
227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
230 * TCP_CLOSING both sides have shutdown but we still have
231 * data we have to finish sending
233 * TCP_TIME_WAIT timeout to catch resent junk before entering
234 * closed, can only be entered from FIN_WAIT2
235 * or CLOSING. Required because the other end
236 * may not have gotten our last ACK causing it
237 * to retransmit the data packet (which we ignore)
239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
240 * us to finish writing our data and to shutdown
241 * (we have to close() to move on to LAST_ACK)
243 * TCP_LAST_ACK our side has shutdown after the remote has
244 * shutdown. There may still be data in our
245 * buffer that we have to finish sending
247 * TCP_CLOSE socket is finished
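 *
 *	As an illustration (standard RFC 793 behaviour, summarised from the
 *	states above): an active close normally walks
 *	ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE,
 *	the passive side walks
 *	ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE,
 *	and a simultaneous close goes through CLOSING (and then TIME_WAIT)
 *	instead of FIN_WAIT2.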
250 #include <linux/kernel.h>
251 #include <linux/module.h>
252 #include <linux/types.h>
253 #include <linux/fcntl.h>
254 #include <linux/poll.h>
255 #include <linux/init.h>
256 #include <linux/fs.h>
257 #include <linux/skbuff.h>
258 #include <linux/splice.h>
259 #include <linux/net.h>
260 #include <linux/socket.h>
261 #include <linux/random.h>
262 #include <linux/bootmem.h>
263 #include <linux/highmem.h>
264 #include <linux/swap.h>
265 #include <linux/cache.h>
266 #include <linux/err.h>
267 #include <linux/crypto.h>
269 #include <net/icmp.h>
271 #include <net/xfrm.h>
273 #include <net/netdma.h>
274 #include <net/sock.h>
276 #include <asm/uaccess.h>
277 #include <asm/ioctls.h>
279 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
281 DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
283 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
285 EXPORT_SYMBOL_GPL(tcp_orphan_count);
287 int sysctl_tcp_mem[3] __read_mostly;
288 int sysctl_tcp_wmem[3] __read_mostly;
289 int sysctl_tcp_rmem[3] __read_mostly;
291 EXPORT_SYMBOL(sysctl_tcp_mem);
292 EXPORT_SYMBOL(sysctl_tcp_rmem);
293 EXPORT_SYMBOL(sysctl_tcp_wmem);
295 atomic_t tcp_memory_allocated; /* Current allocated memory. */
296 atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
298 EXPORT_SYMBOL(tcp_memory_allocated);
299 EXPORT_SYMBOL(tcp_sockets_allocated);
304 struct tcp_splice_state {
305 struct pipe_inode_info *pipe;
311 * Pressure flag: try to collapse.
312 * Technical note: it is used by multiple contexts non atomically.
313 * All the __sk_mem_schedule() is of this nature: accounting
314 * is strict, actions are advisory and have some latency.
316 int tcp_memory_pressure __read_mostly;
318 EXPORT_SYMBOL(tcp_memory_pressure);
320 void tcp_enter_memory_pressure(void)
322 if (!tcp_memory_pressure) {
323 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
324 tcp_memory_pressure = 1;
328 EXPORT_SYMBOL(tcp_enter_memory_pressure);
331 * Wait for a TCP event.
333 * Note that we don't need to lock the socket, as the upper poll layers
334 * take care of normal races (between the test and the event) and we don't
335 * go look at any of the socket buffers directly.
337 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
340 struct sock *sk = sock->sk;
341 struct tcp_sock *tp = tcp_sk(sk);
343 poll_wait(file, sk->sk_sleep, wait);
344 if (sk->sk_state == TCP_LISTEN)
345 return inet_csk_listen_poll(sk);
347 /* Socket is not locked. We are protected from async events
348 by the poll logic, and correct handling of state changes
349 made by other threads is impossible in any case.
357 * POLLHUP is certainly not done right. But poll() doesn't
358 * have a notion of HUP in just one direction, and for a
359 * socket the read side is more interesting.
361 * Some poll() documentation says that POLLHUP is incompatible
362 * with the POLLOUT/POLLWR flags, so somebody should check this
363 * all. But careful, it tends to be safer to return too many
364 * bits than too few, and you can easily break real applications
365 * if you don't tell them that something has hung up!
369 * Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
370 * our fs/select.c). It means that after we have received EOF,
371 * poll always returns immediately, making it impossible to poll() for write()
372 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
373 * if and only if shutdown has been made in both directions.
374 * Actually, it is interesting to look at how Solaris and DUX
375 * solve this dilemma. I would prefer, if POLLHUP were maskable,
376 * then we could set it on SND_SHUTDOWN. BTW examples given
377 * in Stevens' books assume exactly this behaviour, which explains
378 * why POLLHUP is incompatible with POLLOUT. --ANK
380 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
381 * blocking on a fresh not-connected or disconnected socket. --ANK
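 *
 * For illustration only (userspace, not part of the kernel): a minimal
 * sketch of how an application sees these bits.  'sock' is assumed to be
 * a connected TCP socket, and the three handler names are hypothetical
 * placeholders.  POLLRDHUP needs _GNU_SOURCE defined before <poll.h>.
 *
 *	#define _GNU_SOURCE
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = sock, .events = POLLIN | POLLRDHUP };
 *	int n = poll(&pfd, 1, 5000);
 *
 *	if (n > 0 && (pfd.revents & POLLRDHUP))
 *		peer_closed_write_side();	(remote sent FIN)
 *	if (n > 0 && (pfd.revents & POLLHUP))
 *		connection_fully_shut_down();	(both directions shut down or socket closed)
 *	if (n > 0 && (pfd.revents & POLLIN))
 *		read_pending_data_or_eof();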
383 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
385 if (sk->sk_shutdown & RCV_SHUTDOWN)
386 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
389 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
390 /* Potential race condition. If the read of tp below is
391 * reordered above the read of sk->sk_state, we can be illegally awakened
392 * in SYN_* states. */
393 if ((tp->rcv_nxt != tp->copied_seq) &&
394 (tp->urg_seq != tp->copied_seq ||
395 tp->rcv_nxt != tp->copied_seq + 1 ||
396 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
397 mask |= POLLIN | POLLRDNORM;
399 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
400 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
401 mask |= POLLOUT | POLLWRNORM;
402 } else { /* send SIGIO later */
403 set_bit(SOCK_ASYNC_NOSPACE,
404 &sk->sk_socket->flags);
405 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
407 /* Race breaker. If space is freed after
408 * the wspace test but before the flags are set,
409 * the IO signal will be lost.
411 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
412 mask |= POLLOUT | POLLWRNORM;
416 if (tp->urg_data & TCP_URG_VALID)
422 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
424 struct tcp_sock *tp = tcp_sk(sk);
429 if (sk->sk_state == TCP_LISTEN)
433 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
435 else if (sock_flag(sk, SOCK_URGINLINE) ||
437 before(tp->urg_seq, tp->copied_seq) ||
438 !before(tp->urg_seq, tp->rcv_nxt)) {
439 answ = tp->rcv_nxt - tp->copied_seq;
441 /* Subtract 1, if FIN is in queue. */
442 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
444 tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
446 answ = tp->urg_seq - tp->copied_seq;
450 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
453 if (sk->sk_state == TCP_LISTEN)
456 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
459 answ = tp->write_seq - tp->snd_una;
465 return put_user(answ, (int __user *)arg);
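/*
 * For illustration only (userspace, not part of the kernel): the queries
 * answered above correspond to the SIOCINQ, SIOCATMARK and SIOCOUTQ
 * ioctls.  'sock' and the three result variables are hypothetical.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int pending, at_mark, outstanding;
 *
 *	ioctl(sock, SIOCINQ, &pending);
 *	ioctl(sock, SIOCATMARK, &at_mark);
 *	ioctl(sock, SIOCOUTQ, &outstanding);
 *
 * SIOCINQ returns the bytes queued for reading, SIOCATMARK whether the
 * next byte to be read is at the urgent mark, and SIOCOUTQ the bytes
 * written but not yet acknowledged by the peer.
 */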
468 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
470 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
471 tp->pushed_seq = tp->write_seq;
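/*
 * forced_push() is true once more than half of the peer's largest
 * advertised window has been queued since the last byte marked with PSH,
 * so callers push out pending data instead of waiting for more.
 */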
474 static inline int forced_push(struct tcp_sock *tp)
476 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
479 static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
481 struct tcp_sock *tp = tcp_sk(sk);
482 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
485 tcb->seq = tcb->end_seq = tp->write_seq;
486 tcb->flags = TCPCB_FLAG_ACK;
488 skb_header_release(skb);
489 tcp_add_write_queue_tail(sk, skb);
490 sk->sk_wmem_queued += skb->truesize;
491 sk_mem_charge(sk, skb->truesize);
492 if (tp->nonagle & TCP_NAGLE_PUSH)
493 tp->nonagle &= ~TCP_NAGLE_PUSH;
496 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
499 if (flags & MSG_OOB) {
501 tp->snd_up = tp->write_seq;
505 static inline void tcp_push(struct sock *sk, int flags, int mss_now,
508 struct tcp_sock *tp = tcp_sk(sk);
510 if (tcp_send_head(sk)) {
511 struct sk_buff *skb = tcp_write_queue_tail(sk);
512 if (!(flags & MSG_MORE) || forced_push(tp))
513 tcp_mark_push(tp, skb);
514 tcp_mark_urg(tp, flags, skb);
515 __tcp_push_pending_frames(sk, mss_now,
516 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
520 static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
521 unsigned int offset, size_t len)
523 struct tcp_splice_state *tss = rd_desc->arg.data;
525 return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
528 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
530 /* Store TCP splice context information in read_descriptor_t. */
531 read_descriptor_t rd_desc = {
535 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
539 * tcp_splice_read - splice data from TCP socket to a pipe
540 * @sock: socket to splice from
541 * @ppos: position (not valid)
542 * @pipe: pipe to splice to
543 * @len: number of bytes to splice
544 * @flags: splice modifier flags
547 * Will read pages from given socket and fill them into a pipe.
550 ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
551 struct pipe_inode_info *pipe, size_t len,
554 struct sock *sk = sock->sk;
555 struct tcp_splice_state tss = {
565 * We can't seek on a socket input
574 timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
576 ret = __tcp_splice_read(sk, &tss);
582 if (flags & SPLICE_F_NONBLOCK) {
586 if (sock_flag(sk, SOCK_DONE))
589 ret = sock_error(sk);
592 if (sk->sk_shutdown & RCV_SHUTDOWN)
594 if (sk->sk_state == TCP_CLOSE) {
596 * This occurs when the user tries to read
597 * from a never-connected socket.
599 if (!sock_flag(sk, SOCK_DONE))
607 sk_wait_data(sk, &timeo);
608 if (signal_pending(current)) {
609 ret = sock_intr_errno(timeo);
620 if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
621 (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
622 signal_pending(current))
634 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
638 /* The TCP header must be at least 32-bit aligned. */
639 size = ALIGN(size, 4);
641 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
643 if (sk_wmem_schedule(sk, skb->truesize)) {
645 * Make sure that we have exactly size bytes
646 * available to the caller, no more, no less.
648 skb_reserve(skb, skb_tailroom(skb) - size);
653 sk->sk_prot->enter_memory_pressure();
654 sk_stream_moderate_sndbuf(sk);
659 static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
660 size_t psize, int flags)
662 struct tcp_sock *tp = tcp_sk(sk);
663 int mss_now, size_goal;
666 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
668 /* Wait for a connection to finish. */
669 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
670 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
673 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
675 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
676 size_goal = tp->xmit_size_goal;
680 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
684 struct sk_buff *skb = tcp_write_queue_tail(sk);
685 struct page *page = pages[poffset / PAGE_SIZE];
686 int copy, i, can_coalesce;
687 int offset = poffset % PAGE_SIZE;
688 int size = min_t(size_t, psize, PAGE_SIZE - offset);
690 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
692 if (!sk_stream_memory_free(sk))
693 goto wait_for_sndbuf;
695 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
697 goto wait_for_memory;
706 i = skb_shinfo(skb)->nr_frags;
707 can_coalesce = skb_can_coalesce(skb, i, page, offset);
708 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
709 tcp_mark_push(tp, skb);
712 if (!sk_wmem_schedule(sk, copy))
713 goto wait_for_memory;
716 skb_shinfo(skb)->frags[i - 1].size += copy;
719 skb_fill_page_desc(skb, i, page, offset, copy);
723 skb->data_len += copy;
724 skb->truesize += copy;
725 sk->sk_wmem_queued += copy;
726 sk_mem_charge(sk, copy);
727 skb->ip_summed = CHECKSUM_PARTIAL;
728 tp->write_seq += copy;
729 TCP_SKB_CB(skb)->end_seq += copy;
730 skb_shinfo(skb)->gso_segs = 0;
733 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
737 if (!(psize -= copy))
740 if (skb->len < size_goal || (flags & MSG_OOB))
743 if (forced_push(tp)) {
744 tcp_mark_push(tp, skb);
745 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
746 } else if (skb == tcp_send_head(sk))
747 tcp_push_one(sk, mss_now);
751 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
754 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
756 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
759 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
760 size_goal = tp->xmit_size_goal;
765 tcp_push(sk, flags, mss_now, tp->nonagle);
772 return sk_stream_error(sk, flags, err);
775 ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
776 size_t size, int flags)
779 struct sock *sk = sock->sk;
781 if (!(sk->sk_route_caps & NETIF_F_SG) ||
782 !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
783 return sock_no_sendpage(sock, page, offset, size, flags);
787 res = do_tcp_sendpages(sk, &page, offset, size, flags);
793 #define TCP_PAGE(sk) (sk->sk_sndmsg_page)
794 #define TCP_OFF(sk) (sk->sk_sndmsg_off)
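/*
 * TCP_PAGE/TCP_OFF cache a partially filled page, and the offset of its
 * first free byte, per socket, so consecutive small writes can keep
 * appending to the same page.  select_size() below chooses how much
 * linear (head) room to reserve in a newly allocated skb: roughly, the
 * whole MSS when the device cannot do scatter/gather, otherwise only
 * what fits in the skb head, with the rest carried in page fragments.
 */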
796 static inline int select_size(struct sock *sk)
798 struct tcp_sock *tp = tcp_sk(sk);
799 int tmp = tp->mss_cache;
801 if (sk->sk_route_caps & NETIF_F_SG) {
805 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
807 if (tmp >= pgbreak &&
808 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
816 int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
819 struct sock *sk = sock->sk;
821 struct tcp_sock *tp = tcp_sk(sk);
824 int mss_now, size_goal;
831 flags = msg->msg_flags;
832 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
834 /* Wait for a connection to finish. */
835 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
836 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
839 /* This should be in poll */
840 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
842 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
843 size_goal = tp->xmit_size_goal;
845 /* Ok commence sending. */
846 iovlen = msg->msg_iovlen;
851 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
854 while (--iovlen >= 0) {
855 int seglen = iov->iov_len;
856 unsigned char __user *from = iov->iov_base;
863 skb = tcp_write_queue_tail(sk);
865 if (!tcp_send_head(sk) ||
866 (copy = size_goal - skb->len) <= 0) {
869 /* Allocate a new segment. If the interface is SG,
870 * allocate an skb that fits in a single page.
872 if (!sk_stream_memory_free(sk))
873 goto wait_for_sndbuf;
875 skb = sk_stream_alloc_skb(sk, select_size(sk),
878 goto wait_for_memory;
881 * Check whether we can use HW checksum.
883 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
884 skb->ip_summed = CHECKSUM_PARTIAL;
890 /* Try to append data to the end of skb. */
894 /* Where to copy to? */
895 if (skb_tailroom(skb) > 0) {
896 /* We have some space in skb head. Superb! */
897 if (copy > skb_tailroom(skb))
898 copy = skb_tailroom(skb);
899 if ((err = skb_add_data(skb, from, copy)) != 0)
903 int i = skb_shinfo(skb)->nr_frags;
904 struct page *page = TCP_PAGE(sk);
905 int off = TCP_OFF(sk);
907 if (skb_can_coalesce(skb, i, page, off) &&
909 /* We can extend the last page
912 } else if (i == MAX_SKB_FRAGS ||
914 !(sk->sk_route_caps & NETIF_F_SG))) {
915 /* Need to add new fragment and cannot
916 * do this because interface is non-SG,
917 * or because all the page slots are
919 tcp_mark_push(tp, skb);
922 if (off == PAGE_SIZE) {
924 TCP_PAGE(sk) = page = NULL;
930 if (copy > PAGE_SIZE - off)
931 copy = PAGE_SIZE - off;
933 if (!sk_wmem_schedule(sk, copy))
934 goto wait_for_memory;
937 /* Allocate new cache page. */
938 if (!(page = sk_stream_alloc_page(sk)))
939 goto wait_for_memory;
942 /* Time to copy data. We are close to
944 err = skb_copy_to_page(sk, from, skb, page,
947 /* If this page was new, give it to the
948 * socket so it does not get leaked.
957 /* Update the skb. */
959 skb_shinfo(skb)->frags[i - 1].size +=
962 skb_fill_page_desc(skb, i, page, off, copy);
965 } else if (off + copy < PAGE_SIZE) {
971 TCP_OFF(sk) = off + copy;
975 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
977 tp->write_seq += copy;
978 TCP_SKB_CB(skb)->end_seq += copy;
979 skb_shinfo(skb)->gso_segs = 0;
983 if ((seglen -= copy) == 0 && iovlen == 0)
986 if (skb->len < size_goal || (flags & MSG_OOB))
989 if (forced_push(tp)) {
990 tcp_mark_push(tp, skb);
991 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
992 } else if (skb == tcp_send_head(sk))
993 tcp_push_one(sk, mss_now);
997 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1000 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
1002 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
1005 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
1006 size_goal = tp->xmit_size_goal;
1012 tcp_push(sk, flags, mss_now, tp->nonagle);
1013 TCP_CHECK_TIMER(sk);
1019 tcp_unlink_write_queue(skb, sk);
1020 /* It is the one place in all of TCP, except connection
1021 * reset, where we can be unlinking the send_head.
1023 tcp_check_send_head(sk, skb);
1024 sk_wmem_free_skb(sk, skb);
1031 err = sk_stream_error(sk, flags, err);
1032 TCP_CHECK_TIMER(sk);
1038 * Handle reading urgent data. BSD has very simple semantics for
1039 * this, no blocking and very strange errors 8)
1042 static int tcp_recv_urg(struct sock *sk, long timeo,
1043 struct msghdr *msg, int len, int flags,
1046 struct tcp_sock *tp = tcp_sk(sk);
1048 /* No URG data to read. */
1049 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1050 tp->urg_data == TCP_URG_READ)
1051 return -EINVAL; /* Yes this is right ! */
1053 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1056 if (tp->urg_data & TCP_URG_VALID) {
1058 char c = tp->urg_data;
1060 if (!(flags & MSG_PEEK))
1061 tp->urg_data = TCP_URG_READ;
1063 /* Read urgent data. */
1064 msg->msg_flags |= MSG_OOB;
1067 if (!(flags & MSG_TRUNC))
1068 err = memcpy_toiovec(msg->msg_iov, &c, 1);
1071 msg->msg_flags |= MSG_TRUNC;
1073 return err ? -EFAULT : len;
1076 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1079 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
1080 * the available implementations agree in this case:
1081 * this call should never block, independent of the
1082 * blocking state of the socket.
1083 * Mike <pall@rz.uni-karlsruhe.de>
1088 /* Clean up the receive buffer for full frames taken by the user,
1089 * then send an ACK if necessary. COPIED is the number of bytes
1090 * tcp_recvmsg has given to the user so far; it speeds up the
1091 * calculation of whether or not we must ACK for the sake of
1094 void tcp_cleanup_rbuf(struct sock *sk, int copied)
1096 struct tcp_sock *tp = tcp_sk(sk);
1097 int time_to_ack = 0;
1100 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1102 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
1105 if (inet_csk_ack_scheduled(sk)) {
1106 const struct inet_connection_sock *icsk = inet_csk(sk);
1107 /* Delayed ACKs frequently hit locked sockets during bulk
1109 if (icsk->icsk_ack.blocked ||
1110 /* Once-per-two-segments ACK was not sent by tcp_input.c */
1111 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1113 * If this read emptied read buffer, we send ACK, if
1114 * connection is not bidirectional, user drained
1115 * receive buffer and there was a small segment
1119 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1120 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1121 !icsk->icsk_ack.pingpong)) &&
1122 !atomic_read(&sk->sk_rmem_alloc)))
1126 /* We send an ACK if we can now advertise a non-zero window
1127 * which has been raised "significantly".
1129 * Even if the window is raised up to infinity, do not send a window-open ACK
1130 * in states where we will not receive more data. It is useless.
1132 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1133 __u32 rcv_window_now = tcp_receive_window(tp);
1135 /* Optimize, __tcp_select_window() is not cheap. */
1136 if (2*rcv_window_now <= tp->window_clamp) {
1137 __u32 new_window = __tcp_select_window(sk);
1139 /* Send an ACK now if this read freed lots of space
1140 * in our buffer. new_window is, of course, the new window;
1141 * we can advertise it now if it is not less than the current one.
1142 * "Lots" means "at least twice" here.
1144 if (new_window && new_window >= 2 * rcv_window_now)
1152 static void tcp_prequeue_process(struct sock *sk)
1154 struct sk_buff *skb;
1155 struct tcp_sock *tp = tcp_sk(sk);
1157 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
1159 /* RX process wants to run with disabled BHs, though it is not
1162 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1163 sk->sk_backlog_rcv(sk, skb);
1166 /* Clear memory counter. */
1167 tp->ucopy.memory = 0;
1170 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1172 struct sk_buff *skb;
1175 skb_queue_walk(&sk->sk_receive_queue, skb) {
1176 offset = seq - TCP_SKB_CB(skb)->seq;
1177 if (tcp_hdr(skb)->syn)
1179 if (offset < skb->len || tcp_hdr(skb)->fin) {
1188 * This routine provides an alternative to tcp_recvmsg() for routines
1189 * that would like to handle copying from skbuffs directly in 'sendfile'
1192 * - It is assumed that the socket was locked by the caller.
1193 * - The routine does not block.
1194 * - At present, there is no support for reading OOB data
1195 * or for 'peeking' the socket using this routine
1196 * (although both would be easy to implement).
1198 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1199 sk_read_actor_t recv_actor)
1201 struct sk_buff *skb;
1202 struct tcp_sock *tp = tcp_sk(sk);
1203 u32 seq = tp->copied_seq;
1207 if (sk->sk_state == TCP_LISTEN)
1209 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1210 if (offset < skb->len) {
1213 len = skb->len - offset;
1214 /* Stop reading if we hit a patch of urgent data */
1216 u32 urg_offset = tp->urg_seq - seq;
1217 if (urg_offset < len)
1222 used = recv_actor(desc, skb, offset, len);
1227 } else if (used <= len) {
1233 * If recv_actor drops the lock (e.g. TCP splice
1234 * receive) the skb pointer might be invalid when
1235 * getting here: tcp_collapse might have deleted it
1236 * while aggregating skbs from the socket queue.
1238 skb = tcp_recv_skb(sk, seq-1, &offset);
1239 if (!skb || (offset+1 != skb->len))
1242 if (tcp_hdr(skb)->fin) {
1243 sk_eat_skb(sk, skb, 0);
1247 sk_eat_skb(sk, skb, 0);
1251 tp->copied_seq = seq;
1253 tcp_rcv_space_adjust(sk);
1255 /* Clean up data we have read: This will do ACK frames. */
1257 tcp_cleanup_rbuf(sk, copied);
1262 * This routine copies from a sock struct into the user buffer.
1264 * Technical note: in 2.3 we work on a _locked_ socket, so that
1265 * tricks with *seq access order and skb->users are not required.
1266 * Probably, the code can easily be improved even more.
1269 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1270 size_t len, int nonblock, int flags, int *addr_len)
1272 struct tcp_sock *tp = tcp_sk(sk);
1278 int target; /* Read at least this many bytes */
1280 struct task_struct *user_recv = NULL;
1281 int copied_early = 0;
1282 struct sk_buff *skb;
1286 TCP_CHECK_TIMER(sk);
1289 if (sk->sk_state == TCP_LISTEN)
1292 timeo = sock_rcvtimeo(sk, nonblock);
1294 /* Urgent data needs to be handled specially. */
1295 if (flags & MSG_OOB)
1298 seq = &tp->copied_seq;
1299 if (flags & MSG_PEEK) {
1300 peek_seq = tp->copied_seq;
1304 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1306 #ifdef CONFIG_NET_DMA
1307 tp->ucopy.dma_chan = NULL;
1309 skb = skb_peek_tail(&sk->sk_receive_queue);
1314 available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1315 if ((available < target) &&
1316 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1317 !sysctl_tcp_low_latency &&
1318 __get_cpu_var(softnet_data).net_dma) {
1319 preempt_enable_no_resched();
1320 tp->ucopy.pinned_list =
1321 dma_pin_iovec_pages(msg->msg_iov, len);
1323 preempt_enable_no_resched();
1331 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1332 if (tp->urg_data && tp->urg_seq == *seq) {
1335 if (signal_pending(current)) {
1336 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1341 /* Next get a buffer. */
1343 skb = skb_peek(&sk->sk_receive_queue);
1348 /* Now that we have two receive queues this
1351 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1352 printk(KERN_INFO "recvmsg bug: copied %X "
1353 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1356 offset = *seq - TCP_SKB_CB(skb)->seq;
1357 if (tcp_hdr(skb)->syn)
1359 if (offset < skb->len)
1361 if (tcp_hdr(skb)->fin)
1363 BUG_TRAP(flags & MSG_PEEK);
1365 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1367 /* Well, if we have backlog, try to process it now. */
1369 if (copied >= target && !sk->sk_backlog.tail)
1374 sk->sk_state == TCP_CLOSE ||
1375 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1377 signal_pending(current) ||
1381 if (sock_flag(sk, SOCK_DONE))
1385 copied = sock_error(sk);
1389 if (sk->sk_shutdown & RCV_SHUTDOWN)
1392 if (sk->sk_state == TCP_CLOSE) {
1393 if (!sock_flag(sk, SOCK_DONE)) {
1394 /* This occurs when the user tries to read
1395 * from a never-connected socket.
1408 if (signal_pending(current)) {
1409 copied = sock_intr_errno(timeo);
1414 tcp_cleanup_rbuf(sk, copied);
1416 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1417 /* Install new reader */
1418 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1419 user_recv = current;
1420 tp->ucopy.task = user_recv;
1421 tp->ucopy.iov = msg->msg_iov;
1424 tp->ucopy.len = len;
1426 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1427 (flags & (MSG_PEEK | MSG_TRUNC)));
1429 /* Ugly... If the prequeue is not empty, we have to
1430 * process it before releasing the socket, otherwise
1431 * the order will be broken at the second iteration.
1432 * A more elegant solution is required!!!
1434 * Look: we have the following (pseudo)queues:
1436 * 1. packets in flight
1441 * Each queue can be processed only if the next ones
1442 * are empty. At this point we have empty receive_queue.
1443 * But the prequeue _can_ be non-empty after the 2nd iteration,
1444 * when we jumped to the start of the loop because backlog
1445 * processing added something to receive_queue.
1446 * We cannot release_sock(), because the backlog contains
1447 * packets that arrived _after_ the prequeued ones.
1449 * In short, the algorithm is clear --- process all
1450 * the queues in order. We could do it more directly,
1451 * requeueing packets from backlog to prequeue, if it
1452 * is not empty. That is more elegant, but eats cycles,
1455 if (!skb_queue_empty(&tp->ucopy.prequeue))
1458 /* __ Set realtime policy in scheduler __ */
1461 if (copied >= target) {
1462 /* Do not sleep, just process backlog. */
1466 sk_wait_data(sk, &timeo);
1468 #ifdef CONFIG_NET_DMA
1469 tp->ucopy.wakeup = 0;
1475 /* __ Restore normal policy in scheduler __ */
1477 if ((chunk = len - tp->ucopy.len) != 0) {
1478 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1483 if (tp->rcv_nxt == tp->copied_seq &&
1484 !skb_queue_empty(&tp->ucopy.prequeue)) {
1486 tcp_prequeue_process(sk);
1488 if ((chunk = len - tp->ucopy.len) != 0) {
1489 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1495 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1496 if (net_ratelimit())
1497 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1498 current->comm, task_pid_nr(current));
1499 peek_seq = tp->copied_seq;
1504 /* Ok so how much can we use? */
1505 used = skb->len - offset;
1509 /* Do we have urgent data here? */
1511 u32 urg_offset = tp->urg_seq - *seq;
1512 if (urg_offset < used) {
1514 if (!sock_flag(sk, SOCK_URGINLINE)) {
1526 if (!(flags & MSG_TRUNC)) {
1527 #ifdef CONFIG_NET_DMA
1528 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1529 tp->ucopy.dma_chan = get_softnet_dma();
1531 if (tp->ucopy.dma_chan) {
1532 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1533 tp->ucopy.dma_chan, skb, offset,
1535 tp->ucopy.pinned_list);
1537 if (tp->ucopy.dma_cookie < 0) {
1539 printk(KERN_ALERT "dma_cookie < 0\n");
1541 /* Exception. Bailout! */
1546 if ((offset + used) == skb->len)
1552 err = skb_copy_datagram_iovec(skb, offset,
1553 msg->msg_iov, used);
1555 /* Exception. Bailout! */
1567 tcp_rcv_space_adjust(sk);
1570 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1572 tcp_fast_path_check(sk);
1574 if (used + offset < skb->len)
1577 if (tcp_hdr(skb)->fin)
1579 if (!(flags & MSG_PEEK)) {
1580 sk_eat_skb(sk, skb, copied_early);
1586 /* Process the FIN. */
1588 if (!(flags & MSG_PEEK)) {
1589 sk_eat_skb(sk, skb, copied_early);
1596 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1599 tp->ucopy.len = copied > 0 ? len : 0;
1601 tcp_prequeue_process(sk);
1603 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1604 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1610 tp->ucopy.task = NULL;
1614 #ifdef CONFIG_NET_DMA
1615 if (tp->ucopy.dma_chan) {
1616 dma_cookie_t done, used;
1618 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1620 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1621 tp->ucopy.dma_cookie, &done,
1622 &used) == DMA_IN_PROGRESS) {
1623 /* do partial cleanup of sk_async_wait_queue */
1624 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1625 (dma_async_is_complete(skb->dma_cookie, done,
1626 used) == DMA_SUCCESS)) {
1627 __skb_dequeue(&sk->sk_async_wait_queue);
1632 /* Safe to free early-copied skbs now */
1633 __skb_queue_purge(&sk->sk_async_wait_queue);
1634 dma_chan_put(tp->ucopy.dma_chan);
1635 tp->ucopy.dma_chan = NULL;
1637 if (tp->ucopy.pinned_list) {
1638 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1639 tp->ucopy.pinned_list = NULL;
1643 /* According to UNIX98, msg_name/msg_namelen are ignored
1644 * on a connected socket. I was just happy when I found this 8) --ANK
1647 /* Clean up data we have read: This will do ACK frames. */
1648 tcp_cleanup_rbuf(sk, copied);
1650 TCP_CHECK_TIMER(sk);
1655 TCP_CHECK_TIMER(sk);
1660 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1664 void tcp_set_state(struct sock *sk, int state)
1666 int oldstate = sk->sk_state;
1669 case TCP_ESTABLISHED:
1670 if (oldstate != TCP_ESTABLISHED)
1671 TCP_INC_STATS(TCP_MIB_CURRESTAB);
1675 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1676 TCP_INC_STATS(TCP_MIB_ESTABRESETS);
1678 sk->sk_prot->unhash(sk);
1679 if (inet_csk(sk)->icsk_bind_hash &&
1680 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1684 if (oldstate == TCP_ESTABLISHED)
1685 TCP_DEC_STATS(TCP_MIB_CURRESTAB);
1688 /* Change state AFTER socket is unhashed to avoid closed
1689 * socket sitting in hash tables.
1691 sk->sk_state = state;
1694 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
1697 EXPORT_SYMBOL_GPL(tcp_set_state);
1700 * State processing on a close. This implements the state shift for
1701 * sending our FIN frame. Note that we only send a FIN for some
1702 * states. A shutdown() may have already sent the FIN, or we may be
1706 static const unsigned char new_state[16] = {
1707 /* current state: new state: action: */
1708 /* (Invalid) */ TCP_CLOSE,
1709 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1710 /* TCP_SYN_SENT */ TCP_CLOSE,
1711 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1712 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1713 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1714 /* TCP_TIME_WAIT */ TCP_CLOSE,
1715 /* TCP_CLOSE */ TCP_CLOSE,
1716 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1717 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1718 /* TCP_LISTEN */ TCP_CLOSE,
1719 /* TCP_CLOSING */ TCP_CLOSING,
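/*
 * tcp_close_state() below indexes this table by the current socket state:
 * the low TCP_STATE_MASK bits give the state to move to, and the
 * TCP_ACTION_FIN bit tells the caller that a FIN still has to be sent.
 */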
1722 static int tcp_close_state(struct sock *sk)
1724 int next = (int)new_state[sk->sk_state];
1725 int ns = next & TCP_STATE_MASK;
1727 tcp_set_state(sk, ns);
1729 return next & TCP_ACTION_FIN;
1733 * Shutdown the sending side of a connection. Much like close except
1734 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
1737 void tcp_shutdown(struct sock *sk, int how)
1739 /* We need to grab some memory, and put together a FIN,
1740 * and then put it into the queue to be sent.
1741 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1743 if (!(how & SEND_SHUTDOWN))
1746 /* If we've already sent a FIN, or it's a closed state, skip this. */
1747 if ((1 << sk->sk_state) &
1748 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1749 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1750 /* Clear out any half completed packets. FIN if needed. */
1751 if (tcp_close_state(sk))
1756 void tcp_close(struct sock *sk, long timeout)
1758 struct sk_buff *skb;
1759 int data_was_unread = 0;
1763 sk->sk_shutdown = SHUTDOWN_MASK;
1765 if (sk->sk_state == TCP_LISTEN) {
1766 tcp_set_state(sk, TCP_CLOSE);
1769 inet_csk_listen_stop(sk);
1771 goto adjudge_to_death;
1774 /* We need to flush the recv. buffs. We do this only on the
1775 * descriptor close, not protocol-sourced closes, because the
1776 * reader process may not have drained the data yet!
1778 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1779 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1781 data_was_unread += len;
1787 /* As outlined in RFC 2525, section 2.17, we send a RST here because
1788 * data was lost. To witness the awful effects of the old behavior of
1789 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
1790 * GET in an FTP client, suspend the process, wait for the client to
1791 * advertise a zero window, then kill -9 the FTP client, wheee...
1792 * Note: timeout is always zero in such a case.
1794 if (data_was_unread) {
1795 /* Unread data was tossed, zap the connection. */
1796 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1797 tcp_set_state(sk, TCP_CLOSE);
1798 tcp_send_active_reset(sk, GFP_KERNEL);
1799 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1800 /* Check zero linger _after_ checking for unread data. */
1801 sk->sk_prot->disconnect(sk, 0);
1802 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1803 } else if (tcp_close_state(sk)) {
1804 /* We FIN if the application ate all the data before
1805 * zapping the connection.
1808 /* RED-PEN. Formally speaking, we have broken TCP state
1809 * machine. State transitions:
1811 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1812 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1813 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1815 * are legal only when FIN has been sent (i.e. in window),
1816 * rather than queued out of window. Purists blame.
1818 * F.e. "RFC state" is ESTABLISHED,
1819 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1821 * The visible deviations are that we sometimes
1822 * enter the time-wait state when it is not really required
1823 * (harmless), and do not send active resets when they are
1824 * required by the specs (TCP_ESTABLISHED and TCP_CLOSE_WAIT, when
1825 * they look like CLOSING or LAST_ACK to Linux).
1826 * Probably, I missed some more small holes.
1832 sk_stream_wait_close(sk, timeout);
1835 state = sk->sk_state;
1838 atomic_inc(sk->sk_prot->orphan_count);
1840 /* It is the last release_sock in its life. It will remove backlog. */
1844 /* Now socket is owned by the kernel and we acquire the BH lock
1845 to finish close. No need to check for user refs.
1849 BUG_TRAP(!sock_owned_by_user(sk));
1851 /* Have we already been destroyed by a softirq or backlog? */
1852 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1855 /* This is a (useful) BSD violation of the RFC. There is a
1856 * problem with TCP as specified in that the other end could
1857 * keep a socket open forever with no application left this end.
1858 * We use a 3 minute timeout (about the same as BSD) then kill
1859 * our end. If they send after that then tough - BUT: long enough
1860 * that we won't make the old 4*rto = almost no time - whoops
1863 * Nope, it was not a mistake. It is really desired behaviour,
1864 * e.g. on HTTP servers, when such sockets are useless but
1865 * consume significant resources. Let's do it with a special
1866 * linger2 option. --ANK
1869 if (sk->sk_state == TCP_FIN_WAIT2) {
1870 struct tcp_sock *tp = tcp_sk(sk);
1871 if (tp->linger2 < 0) {
1872 tcp_set_state(sk, TCP_CLOSE);
1873 tcp_send_active_reset(sk, GFP_ATOMIC);
1874 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1876 const int tmo = tcp_fin_time(sk);
1878 if (tmo > TCP_TIMEWAIT_LEN) {
1879 inet_csk_reset_keepalive_timer(sk,
1880 tmo - TCP_TIMEWAIT_LEN);
1882 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1887 if (sk->sk_state != TCP_CLOSE) {
1889 if (tcp_too_many_orphans(sk,
1890 atomic_read(sk->sk_prot->orphan_count))) {
1891 if (net_ratelimit())
1892 printk(KERN_INFO "TCP: too many of orphaned "
1894 tcp_set_state(sk, TCP_CLOSE);
1895 tcp_send_active_reset(sk, GFP_ATOMIC);
1896 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1900 if (sk->sk_state == TCP_CLOSE)
1901 inet_csk_destroy_sock(sk);
1902 /* Otherwise, socket is reprieved until protocol close. */
1910 /* These states need RST on ABORT according to RFC793 */
1912 static inline int tcp_need_reset(int state)
1914 return (1 << state) &
1915 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1916 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1919 int tcp_disconnect(struct sock *sk, int flags)
1921 struct inet_sock *inet = inet_sk(sk);
1922 struct inet_connection_sock *icsk = inet_csk(sk);
1923 struct tcp_sock *tp = tcp_sk(sk);
1925 int old_state = sk->sk_state;
1927 if (old_state != TCP_CLOSE)
1928 tcp_set_state(sk, TCP_CLOSE);
1930 /* ABORT function of RFC793 */
1931 if (old_state == TCP_LISTEN) {
1932 inet_csk_listen_stop(sk);
1933 } else if (tcp_need_reset(old_state) ||
1934 (tp->snd_nxt != tp->write_seq &&
1935 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1936 /* The last check adjusts for discrepancy of Linux wrt. RFC
1939 tcp_send_active_reset(sk, gfp_any());
1940 sk->sk_err = ECONNRESET;
1941 } else if (old_state == TCP_SYN_SENT)
1942 sk->sk_err = ECONNRESET;
1944 tcp_clear_xmit_timers(sk);
1945 __skb_queue_purge(&sk->sk_receive_queue);
1946 tcp_write_queue_purge(sk);
1947 __skb_queue_purge(&tp->out_of_order_queue);
1948 #ifdef CONFIG_NET_DMA
1949 __skb_queue_purge(&sk->sk_async_wait_queue);
1954 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1955 inet_reset_saddr(sk);
1957 sk->sk_shutdown = 0;
1958 sock_reset_flag(sk, SOCK_DONE);
1960 if ((tp->write_seq += tp->max_window + 2) == 0)
1962 icsk->icsk_backoff = 0;
1964 icsk->icsk_probes_out = 0;
1965 tp->packets_out = 0;
1966 tp->snd_ssthresh = 0x7fffffff;
1967 tp->snd_cwnd_cnt = 0;
1968 tp->bytes_acked = 0;
1969 tcp_set_ca_state(sk, TCP_CA_Open);
1970 tcp_clear_retrans(tp);
1971 inet_csk_delack_init(sk);
1972 tcp_init_send_head(sk);
1973 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
1976 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
1978 sk->sk_error_report(sk);
1983 * Socket option code for TCP.
1985 static int do_tcp_setsockopt(struct sock *sk, int level,
1986 int optname, char __user *optval, int optlen)
1988 struct tcp_sock *tp = tcp_sk(sk);
1989 struct inet_connection_sock *icsk = inet_csk(sk);
1993 /* This is a string value; all the others are ints */
1994 if (optname == TCP_CONGESTION) {
1995 char name[TCP_CA_NAME_MAX];
2000 val = strncpy_from_user(name, optval,
2001 min(TCP_CA_NAME_MAX-1, optlen));
2007 err = tcp_set_congestion_control(sk, name);
2012 if (optlen < sizeof(int))
2015 if (get_user(val, (int __user *)optval))
2022 /* Values greater than the interface MTU won't take effect. However,
2023 * at the point when this call is made we typically don't yet
2024 * know which interface is going to be used */
2025 if (val < 8 || val > MAX_TCP_WINDOW) {
2029 tp->rx_opt.user_mss = val;
2034 /* TCP_NODELAY is weaker than TCP_CORK, so
2035 * this option on a corked socket is remembered, but
2036 * it is not activated until the cork is cleared.
2038 * However, when TCP_NODELAY is set we make
2039 * an explicit push, which overrides even TCP_CORK
2040 * for currently queued segments.
2042 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2043 tcp_push_pending_frames(sk);
2045 tp->nonagle &= ~TCP_NAGLE_OFF;
2050 /* When set, indicates that non-full frames should always be queued.
2051 * Later the user clears this option and we transmit
2052 * any pending partial frames in the queue. This is
2053 * meant to be used alongside sendfile() to get properly
2054 * filled frames when the user (for example) must write
2055 * out headers with a write() call first and then use
2056 * sendfile to send out the data parts.
2058 * TCP_CORK can be set together with TCP_NODELAY and it is
2059 * stronger than TCP_NODELAY.
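 *
 * For illustration only (userspace, not part of the kernel; names other
 * than the socket options themselves are hypothetical): the classic
 * cork/uncork pattern around write() plus sendfile().
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <sys/sendfile.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	static void send_reply(int sock, int filefd, const char *hdr,
 *			       size_t hdrlen, off_t filelen)
 *	{
 *		int on = 1, off = 0;
 *
 *		setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *		write(sock, hdr, hdrlen);
 *		sendfile(sock, filefd, NULL, filelen);
 *		setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *	}
 *
 * Clearing TCP_CORK at the end pushes out whatever partial frame is still
 * queued, so the headers and the start of the file leave in full-sized
 * segments instead of a small header-only packet.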
2062 tp->nonagle |= TCP_NAGLE_CORK;
2064 tp->nonagle &= ~TCP_NAGLE_CORK;
2065 if (tp->nonagle&TCP_NAGLE_OFF)
2066 tp->nonagle |= TCP_NAGLE_PUSH;
2067 tcp_push_pending_frames(sk);
2072 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2075 tp->keepalive_time = val * HZ;
2076 if (sock_flag(sk, SOCK_KEEPOPEN) &&
2077 !((1 << sk->sk_state) &
2078 (TCPF_CLOSE | TCPF_LISTEN))) {
2079 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
2080 if (tp->keepalive_time > elapsed)
2081 elapsed = tp->keepalive_time - elapsed;
2084 inet_csk_reset_keepalive_timer(sk, elapsed);
2089 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2092 tp->keepalive_intvl = val * HZ;
2095 if (val < 1 || val > MAX_TCP_KEEPCNT)
2098 tp->keepalive_probes = val;
2101 if (val < 1 || val > MAX_TCP_SYNCNT)
2104 icsk->icsk_syn_retries = val;
2110 else if (val > sysctl_tcp_fin_timeout / HZ)
2113 tp->linger2 = val * HZ;
2116 case TCP_DEFER_ACCEPT:
2117 icsk->icsk_accept_queue.rskq_defer_accept = 0;
2119 /* Translate value in seconds to number of
2121 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
2122 val > ((TCP_TIMEOUT_INIT / HZ) <<
2123 icsk->icsk_accept_queue.rskq_defer_accept))
2124 icsk->icsk_accept_queue.rskq_defer_accept++;
2125 icsk->icsk_accept_queue.rskq_defer_accept++;
2129 case TCP_WINDOW_CLAMP:
2131 if (sk->sk_state != TCP_CLOSE) {
2135 tp->window_clamp = 0;
2137 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2138 SOCK_MIN_RCVBUF / 2 : val;
2143 icsk->icsk_ack.pingpong = 1;
2145 icsk->icsk_ack.pingpong = 0;
2146 if ((1 << sk->sk_state) &
2147 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2148 inet_csk_ack_scheduled(sk)) {
2149 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2150 tcp_cleanup_rbuf(sk, 1);
2152 icsk->icsk_ack.pingpong = 1;
2157 #ifdef CONFIG_TCP_MD5SIG
2159 /* Read the IP->Key mappings from userspace */
2160 err = tp->af_specific->md5_parse(sk, optval, optlen);
2173 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2176 struct inet_connection_sock *icsk = inet_csk(sk);
2178 if (level != SOL_TCP)
2179 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2181 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2184 #ifdef CONFIG_COMPAT
2185 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2186 char __user *optval, int optlen)
2188 if (level != SOL_TCP)
2189 return inet_csk_compat_setsockopt(sk, level, optname,
2191 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2194 EXPORT_SYMBOL(compat_tcp_setsockopt);
2197 /* Return information about the state of a TCP endpoint in API format. */
2198 void tcp_get_info(struct sock *sk, struct tcp_info *info)
2200 struct tcp_sock *tp = tcp_sk(sk);
2201 const struct inet_connection_sock *icsk = inet_csk(sk);
2202 u32 now = tcp_time_stamp;
2204 memset(info, 0, sizeof(*info));
2206 info->tcpi_state = sk->sk_state;
2207 info->tcpi_ca_state = icsk->icsk_ca_state;
2208 info->tcpi_retransmits = icsk->icsk_retransmits;
2209 info->tcpi_probes = icsk->icsk_probes_out;
2210 info->tcpi_backoff = icsk->icsk_backoff;
2212 if (tp->rx_opt.tstamp_ok)
2213 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2214 if (tcp_is_sack(tp))
2215 info->tcpi_options |= TCPI_OPT_SACK;
2216 if (tp->rx_opt.wscale_ok) {
2217 info->tcpi_options |= TCPI_OPT_WSCALE;
2218 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2219 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2222 if (tp->ecn_flags&TCP_ECN_OK)
2223 info->tcpi_options |= TCPI_OPT_ECN;
2225 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2226 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2227 info->tcpi_snd_mss = tp->mss_cache;
2228 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2230 if (sk->sk_state == TCP_LISTEN) {
2231 info->tcpi_unacked = sk->sk_ack_backlog;
2232 info->tcpi_sacked = sk->sk_max_ack_backlog;
2234 info->tcpi_unacked = tp->packets_out;
2235 info->tcpi_sacked = tp->sacked_out;
2237 info->tcpi_lost = tp->lost_out;
2238 info->tcpi_retrans = tp->retrans_out;
2239 info->tcpi_fackets = tp->fackets_out;
2241 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2242 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2243 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2245 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2246 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2247 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2248 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2249 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2250 info->tcpi_snd_cwnd = tp->snd_cwnd;
2251 info->tcpi_advmss = tp->advmss;
2252 info->tcpi_reordering = tp->reordering;
2254 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2255 info->tcpi_rcv_space = tp->rcvq_space.space;
2257 info->tcpi_total_retrans = tp->total_retrans;
2260 EXPORT_SYMBOL_GPL(tcp_get_info);
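/*
 * For illustration only (userspace, not part of the kernel): the struct
 * filled in above is what an application retrieves with the TCP_INFO
 * socket option.  'sock' is assumed to be a connected TCP socket.
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(sock, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %u us, cwnd %u, total retransmits %u\n",
 *		       ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
 */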
2262 static int do_tcp_getsockopt(struct sock *sk, int level,
2263 int optname, char __user *optval, int __user *optlen)
2265 struct inet_connection_sock *icsk = inet_csk(sk);
2266 struct tcp_sock *tp = tcp_sk(sk);
2269 if (get_user(len, optlen))
2272 len = min_t(unsigned int, len, sizeof(int));
2279 val = tp->mss_cache;
2280 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2281 val = tp->rx_opt.user_mss;
2284 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2287 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2290 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2293 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2296 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2299 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2304 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2306 case TCP_DEFER_ACCEPT:
2307 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2308 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
2310 case TCP_WINDOW_CLAMP:
2311 val = tp->window_clamp;
2314 struct tcp_info info;
2316 if (get_user(len, optlen))
2319 tcp_get_info(sk, &info);
2321 len = min_t(unsigned int, len, sizeof(info));
2322 if (put_user(len, optlen))
2324 if (copy_to_user(optval, &info, len))
2329 val = !icsk->icsk_ack.pingpong;
2332 case TCP_CONGESTION:
2333 if (get_user(len, optlen))
2335 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2336 if (put_user(len, optlen))
2338 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2342 return -ENOPROTOOPT;
2345 if (put_user(len, optlen))
2347 if (copy_to_user(optval, &val, len))
2352 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2355 struct inet_connection_sock *icsk = inet_csk(sk);
2357 if (level != SOL_TCP)
2358 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2360 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2363 #ifdef CONFIG_COMPAT
2364 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2365 char __user *optval, int __user *optlen)
2367 if (level != SOL_TCP)
2368 return inet_csk_compat_getsockopt(sk, level, optname,
2370 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2373 EXPORT_SYMBOL(compat_tcp_getsockopt);
2376 struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
2378 struct sk_buff *segs = ERR_PTR(-EINVAL);
2383 unsigned int oldlen;
2386 if (!pskb_may_pull(skb, sizeof(*th)))
2390 thlen = th->doff * 4;
2391 if (thlen < sizeof(*th))
2394 if (!pskb_may_pull(skb, thlen))
2397 oldlen = (u16)~skb->len;
2398 __skb_pull(skb, thlen);
2400 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2401 /* Packet is from an untrusted source, reset gso_segs. */
2402 int type = skb_shinfo(skb)->gso_type;
2411 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2414 mss = skb_shinfo(skb)->gso_size;
2415 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
2421 segs = skb_segment(skb, features);
2425 len = skb_shinfo(skb)->gso_size;
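/*
 * oldlen is the ones' complement of the original TCP length (header plus
 * payload); adding oldlen plus the new per-segment length (thlen + len)
 * into th->check swaps the old length for the new one in the pseudo-header
 * checksum without recomputing it from scratch.
 */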
2426 delta = htonl(oldlen + (thlen + len));
2430 seq = ntohl(th->seq);
2433 th->fin = th->psh = 0;
2435 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2436 (__force u32)delta));
2437 if (skb->ip_summed != CHECKSUM_PARTIAL)
2439 csum_fold(csum_partial(skb_transport_header(skb),
2446 th->seq = htonl(seq);
2448 } while (skb->next);
2450 delta = htonl(oldlen + (skb->tail - skb->transport_header) +
2452 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2453 (__force u32)delta));
2454 if (skb->ip_summed != CHECKSUM_PARTIAL)
2455 th->check = csum_fold(csum_partial(skb_transport_header(skb),
2461 EXPORT_SYMBOL(tcp_tso_segment);
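/* TCP-MD5 (RFC 2385) signing keeps one crypto "md5" transform per CPU.
 * The per-CPU pool is allocated on first use and reference counted via
 * tcp_md5sig_users under tcp_md5sig_pool_lock.
 */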
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool **tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);

static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);

		if (p) {
			if (p->md5_desc.tfm)
				crypto_free_hash(p->md5_desc.tfm);
			kfree(p);
		}
	}
	free_percpu(pool);
}

void tcp_free_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool = NULL;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	if (--tcp_md5sig_users == 0) {
		pool = tcp_md5sig_pool;
		tcp_md5sig_pool = NULL;
	}
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	if (pool)
		__tcp_free_md5sig_pool(pool);
}

EXPORT_SYMBOL(tcp_free_md5sig_pool);
static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
{
	int cpu;
	struct tcp_md5sig_pool **pool;

	pool = alloc_percpu(struct tcp_md5sig_pool *);
	if (!pool)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p;
		struct crypto_hash *hash;

		p = kzalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			goto out_free;
		*per_cpu_ptr(pool, cpu) = p;

		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
		if (!hash || IS_ERR(hash))
			goto out_free;

		p->md5_desc.tfm = hash;
	}
	return pool;
out_free:
	__tcp_free_md5sig_pool(pool);
	return NULL;
}

struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool;
	int alloc = 0;

retry:
	spin_lock_bh(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (tcp_md5sig_users++ == 0) {
		alloc = 1;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
	} else if (!pool) {
		tcp_md5sig_users--;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
		cpu_relax();
		goto retry;
	} else
		spin_unlock_bh(&tcp_md5sig_pool_lock);

	if (alloc) {
		/* we cannot hold spinlock here because this may sleep. */
		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
		spin_lock_bh(&tcp_md5sig_pool_lock);
		if (!p) {
			tcp_md5sig_users--;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			return NULL;
		}
		pool = tcp_md5sig_pool;
		if (pool) {
			/* oops, it has already been assigned. */
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			__tcp_free_md5sig_pool(p);
		} else {
			tcp_md5sig_pool = pool = p;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
		}
	}
	return pool;
}

EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
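/* Users grab a per-CPU pool entry with __tcp_get_md5sig_pool(), which
 * takes an extra reference, and release it with __tcp_put_md5sig_pool().
 */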
struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
	struct tcp_md5sig_pool **p;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	p = tcp_md5sig_pool;
	if (p)
		tcp_md5sig_users++;
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	return (p ? *per_cpu_ptr(p, cpu) : NULL);
}

EXPORT_SYMBOL(__tcp_get_md5sig_pool);

void __tcp_put_md5sig_pool(void)
{
	tcp_free_md5sig_pool();
}

EXPORT_SYMBOL(__tcp_put_md5sig_pool);
#endif /* CONFIG_TCP_MD5SIG */
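/* Final teardown of a TCP socket: account failed connection attempts,
 * move the socket to TCP_CLOSE, stop its timers and either wake the
 * owner or, if the socket is already dead, destroy it.
 */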
void tcp_done(struct sock *sk)
{
	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);
extern struct tcp_congestion_ops tcp_reno;
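/* "thash_entries=N" on the kernel command line overrides the size of the
 * established-connection hash table chosen in tcp_init(); for example
 * (illustrative only) booting with thash_entries=131072.
 */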
static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);
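/* Boot-time initialisation: create the bind-bucket slab cache, size and
 * allocate the established and bind hash tables, derive the tcp_mem,
 * tcp_rmem and tcp_wmem defaults from available memory and register the
 * default (Reno) congestion control.
 */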
void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long nr_pages, limit;
	int order, i, max_share;

	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));

	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.ehash_size,
					NULL,
					thash_entries ? 0 : 512 * 1024);
	tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
	for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
	}
	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_size,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
			order++)
		;
	if (order >= 4) {
		tcp_death_row.sysctl_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}

	/* Set the pressure threshold to be a fraction of global memory that
	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
	 * memory, with a floor of 128 pages.
	 */
	nr_pages = totalram_pages - totalhigh_pages;
	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;
	sysctl_tcp_mem[1] = limit;
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;

	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);

	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_wmem[1] = 16*1024;
	sysctl_tcp_wmem[2] = max(64*1024, max_share);

	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_rmem[1] = 87380;
	sysctl_tcp_rmem[2] = max(87380, max_share);

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}
EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_splice_read);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);