2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
23 * Alan Cox : Numerous verify_area() calls
24 * Alan Cox : Set the ACK bit on a reset
25 * Alan Cox : Stopped it crashing if it closed while
26 * sk->inuse=1 and was trying to connect
28 * Alan Cox : All icmp error handling was broken
29 * pointers passed where wrong and the
30 * socket was looked up backwards. Nobody
31 * tested any icmp error code obviously.
32 * Alan Cox : tcp_err() now handled properly. It
33 * wakes people on errors. poll
34 * behaves and the icmp error race
35 * has gone by moving it into sock.c
36 * Alan Cox : tcp_send_reset() fixed to work for
37 * everything not just packets for
39 * Alan Cox : tcp option processing.
40 * Alan Cox : Reset tweaked (still not 100%) [Had
42 * Herp Rosmanith : More reset fixes
43 * Alan Cox : No longer acks invalid rst frames.
44 * Acking any kind of RST is right out.
45 * Alan Cox : Sets an ignore me flag on an rst
46 * receive otherwise odd bits of prattle
48 * Alan Cox : Fixed another acking RST frame bug.
49 * Should stop LAN workplace lockups.
50 * Alan Cox : Some tidyups using the new skb list
52 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Pulls options out correctly on accepts
54 * Alan Cox : Fixed assorted sk->rqueue->next errors
55 * Alan Cox : PSH doesn't end a TCP read. Switched a
57 * Alan Cox : Tidied tcp_data to avoid a potential
59 * Alan Cox : Added some better commenting, as the
60 * TCP code is hard to follow
61 * Alan Cox : Removed incorrect check for 20 * psh
62 * Michael O'Reilly : ack < copied bug fix.
63 * Johannes Stille : Misc tcp fixes (not all in yet).
64 * Alan Cox : FIN with no memory -> CRASH
65 * Alan Cox : Added socket option proto entries.
66 * Also added awareness of them to accept.
67 * Alan Cox : Added TCP options (SOL_TCP)
68 * Alan Cox : Switched wakeup calls to callbacks,
69 * so the kernel can layer network
71 * Alan Cox : Use ip_tos/ip_ttl settings.
72 * Alan Cox : Handle FIN (more) properly (we hope).
73 * Alan Cox : RST frames sent on unsynchronised
75 * Alan Cox : Put in missing check for SYN bit.
76 * Alan Cox : Added tcp_select_window() aka NET2E
77 * window non shrink trick.
78 * Alan Cox : Added a couple of small NET2E timer
80 * Charles Hedrick : TCP fixes
81 * Toomas Tamm : TCP window fixes
82 * Alan Cox : Small URG fix to rlogin ^C ack fight
83 * Charles Hedrick : Rewrote most of it to actually work
84 * Linus : Rewrote tcp_read() and URG handling
86 * Gerhard Koerting: Fixed some missing timer handling
87 * Matthew Dillon : Reworked TCP machine states as per RFC
88 * Gerhard Koerting: PC/TCP workarounds
89 * Adam Caldwell : Assorted timer/timing errors
90 * Matthew Dillon : Fixed another RST bug
91 * Alan Cox : Move to kernel side addressing changes.
92 * Alan Cox : Beginning work on TCP fastpathing
94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
95 * Alan Cox : TCP fast path debugging
96 * Alan Cox : Window clamping
97 * Michael Riepe : Bug in tcp_check()
98 * Matt Dillon : More TCP improvements and RST bug fixes
99 * Matt Dillon : Yet more small nasties removed from the
100 * TCP code (Be very nice to this man if
101 * tcp finally works 100%) 8)
102 * Alan Cox : BSD accept semantics.
103 * Alan Cox : Reset on closedown bug.
104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
105 * Michael Pall : Handle poll() after URG properly in
107 * Michael Pall : Undo the last fix in tcp_read_urg()
108 * (multi URG PUSH broke rlogin).
109 * Michael Pall : Fix the multi URG PUSH problem in
110 * tcp_readable(), poll() after URG
112 * Michael Pall : recv(...,MSG_OOB) never blocks in the
114 * Alan Cox : Changed the semantics of sk->socket to
115 * fix a race and a signal problem with
116 * accept() and async I/O.
117 * Alan Cox : Relaxed the rules on tcp_sendto().
118 * Yury Shevchuk : Really fixed accept() blocking problem.
119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
120 * clients/servers which listen in on
122 * Alan Cox : Cleaned the above up and shrank it to
123 * a sensible code size.
124 * Alan Cox : Self connect lockup fix.
125 * Alan Cox : No connect to multicast.
126 * Ross Biro : Close unaccepted children on master
128 * Alan Cox : Reset tracing code.
129 * Alan Cox : Spurious resets on shutdown.
130 * Alan Cox : Giant 15 minute/60 second timer error
131 * Alan Cox : Small whoops in polling before an
133 * Alan Cox : Kept the state trace facility since
134 * it's handy for debugging.
135 * Alan Cox : More reset handler fixes.
136 * Alan Cox : Started rewriting the code based on
137 * the RFC's for other useful protocol
138 * references see: Comer, KA9Q NOS, and
139 * for a reference on the difference
140 * between specifications and how BSD
141 * works see the 4.4lite source.
142 * A.N.Kuznetsov : Don't time wait on completion of tidy
144 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
145 * Linus Torvalds : Fixed BSD port reuse to work first syn
146 * Alan Cox : Reimplemented timers as per the RFC
147 * and using multiple timers for sanity.
148 * Alan Cox : Small bug fixes, and a lot of new
150 * Alan Cox : Fixed dual reader crash by locking
151 * the buffers (much like datagram.c)
152 * Alan Cox : Fixed stuck sockets in probe. A probe
153 * now gets fed up of retrying without
154 * (even a no space) answer.
155 * Alan Cox : Extracted closing code better
156 * Alan Cox : Fixed the closing state machine to
158 * Alan Cox : More 'per spec' fixes.
159 * Jorge Cwik : Even faster checksumming.
160 * Alan Cox : tcp_data() doesn't ack illegal PSH
161 * only frames. At least one pc tcp stack
163 * Alan Cox : Cache last socket.
164 * Alan Cox : Per route irtt.
165 * Matt Day : poll()->select() match BSD precisely on error
166 * Alan Cox : New buffers
167 * Marc Tamsky : Various sk->prot->retransmits and
168 * sk->retransmits misupdating fixed.
169 * Fixed tcp_write_timeout: stuck close,
170 * and TCP syn retries gets used now.
171 * Mark Yarvis : In tcp_read_wakeup(), don't send an
172 * ack if state is TCP_CLOSED.
173 * Alan Cox : Look up device on a retransmit - routes may
174 * change. Doesn't yet cope with MSS shrink right
176 * Marc Tamsky : Closing in closing fixes.
177 * Mike Shaver : RFC1122 verifications.
178 * Alan Cox : rcv_saddr errors.
179 * Alan Cox : Block double connect().
180 * Alan Cox : Small hooks for enSKIP.
181 * Alexey Kuznetsov: Path MTU discovery.
182 * Alan Cox : Support soft errors.
183 * Alan Cox : Fix MTU discovery pathological case
184 * when the remote claims no mtu!
185 * Marc Tamsky : TCP_CLOSE fix.
186 * Colin (G3TNE) : Send a reset on syn ack replies in
187 * window but wrong (fixes NT lpd problems)
188 * Pedro Roque : Better TCP window handling, delayed ack.
189 * Joerg Reuter : No modification of locked buffers in
190 * tcp_do_retransmit()
191 * Eric Schenk : Changed receiver side silly window
192 * avoidance algorithm to BSD style
193 * algorithm. This doubles throughput
194 * against machines running Solaris,
195 * and seems to result in general
197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
198 * Willy Konynenberg : Transparent proxying support.
199 * Mike McLagan : Routing by source
200 * Keith Owens : Do proper merging with partial SKB's in
201 * tcp_do_sendmsg to avoid burstiness.
202 * Eric Schenk : Fix fast close down bug with
203 * shutdown() followed by close().
204 * Andi Kleen : Make poll agree with SIGIO
205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
206 * lingertime == 0 (RFC 793 ABORT Call)
207 * Hirokazu Takahashi : Use copy_from_user() instead of
208 * csum_and_copy_from_user() if possible.
210 * This program is free software; you can redistribute it and/or
211 * modify it under the terms of the GNU General Public License
212 * as published by the Free Software Foundation; either version
213 * 2 of the License, or(at your option) any later version.
215 * Description of States:
217 * TCP_SYN_SENT sent a connection request, waiting for ack
219 * TCP_SYN_RECV received a connection request, sent ack,
220 * waiting for final ack in three-way handshake.
222 * TCP_ESTABLISHED connection established
224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
225 * transmission of remaining buffered data
227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
230 * TCP_CLOSING both sides have shutdown but we still have
231 * data we have to finish sending
233 * TCP_TIME_WAIT timeout to catch resent junk before entering
234 * closed, can only be entered from FIN_WAIT2
235 * or CLOSING. Required because the other end
236 * may not have gotten our last ACK causing it
237 * to retransmit the data packet (which we ignore)
239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
240 * us to finish writing our data and to shutdown
241 * (we have to close() to move on to LAST_ACK)
243 * TCP_LAST_ACK our side has shutdown after remote has
244 * shutdown. There may still be data in our
245 * buffer that we have to finish sending
247 * TCP_CLOSE socket is finished
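 *
 *	As an illustration, the two common close sequences walk the states
 *	above as follows:
 *
 *	  active close:  ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE
 *	  passive close: ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE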
250 #include <linux/config.h>
251 #include <linux/module.h>
252 #include <linux/types.h>
253 #include <linux/fcntl.h>
254 #include <linux/poll.h>
255 #include <linux/init.h>
256 #include <linux/smp_lock.h>
257 #include <linux/fs.h>
258 #include <linux/random.h>
259 #include <linux/bootmem.h>
261 #include <net/icmp.h>
263 #include <net/xfrm.h>
267 #include <asm/uaccess.h>
268 #include <asm/ioctls.h>
270 int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
272 DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
274 kmem_cache_t *tcp_openreq_cachep;
275 kmem_cache_t *tcp_bucket_cachep;
276 kmem_cache_t *tcp_timewait_cachep;
278 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
280 int sysctl_tcp_mem[3];
281 int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
282 int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
284 EXPORT_SYMBOL(sysctl_tcp_mem);
285 EXPORT_SYMBOL(sysctl_tcp_rmem);
286 EXPORT_SYMBOL(sysctl_tcp_wmem);
288 atomic_t tcp_memory_allocated; /* Current allocated memory. */
289 atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
291 EXPORT_SYMBOL(tcp_memory_allocated);
292 EXPORT_SYMBOL(tcp_sockets_allocated);
295 * Pressure flag: try to collapse.
296 * Technical note: it is used by multiple contexts non-atomically.
297 * All of sk_stream_mem_schedule() is of this nature: accounting
298 * is strict, actions are advisory and have some latency.
300 int tcp_memory_pressure;
302 EXPORT_SYMBOL(tcp_memory_pressure);
304 void tcp_enter_memory_pressure(void)
306 if (!tcp_memory_pressure) {
307 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
308 tcp_memory_pressure = 1;
312 EXPORT_SYMBOL(tcp_enter_memory_pressure);
315 * LISTEN is a special case for poll..
317 static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
320 return tcp_sk(sk)->accept_queue ? (POLLIN | POLLRDNORM) : 0;
324 * Wait for a TCP event.
326 * Note that we don't need to lock the socket, as the upper poll layers
327 * take care of normal races (between the test and the event) and we don't
328 * go look at any of the socket buffers directly.
330 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
333 struct sock *sk = sock->sk;
334 struct tcp_sock *tp = tcp_sk(sk);
336 poll_wait(file, sk->sk_sleep, wait);
337 if (sk->sk_state == TCP_LISTEN)
338 return tcp_listen_poll(sk, wait);
340 /* Socket is not locked. We are protected from async events
341 by poll logic and correct handling of state changes
342 made by other threads is impossible in any case.
350 * POLLHUP is certainly not done right. But poll() doesn't
351 * have a notion of HUP in just one direction, and for a
352 * socket the read side is more interesting.
354 * Some poll() documentation says that POLLHUP is incompatible
355 * with the POLLOUT/POLLWR flags, so somebody should check this
356 * all. But careful, it tends to be safer to return too many
357 * bits than too few, and you can easily break real applications
358 * if you don't tell them that something has hung up!
362 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
363 * our fs/select.c). It means that after we received EOF,
364 * poll always returns immediately, making poll() on write() impossible
365 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
366 * if and only if shutdown has been made in both directions.
367 * Actually, it is interesting to look how Solaris and DUX
368 * solve this dilemma. I would prefer, if POLLHUP were maskable,
369 * then we could set it on SND_SHUTDOWN. BTW examples given
370 * in Stevens' books assume exactly this behaviour, it explains
371 * why POLLHUP is incompatible with POLLOUT. --ANK
373 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
374 * blocking on fresh not-connected or disconnected socket. --ANK
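 *
 * A user-space sketch (not kernel code; assumes fd is a connected TCP
 * socket) of a caller that copes with the semantics described above:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & (POLLIN | POLLRDNORM))
 *			n = read(fd, buf, sizeof(buf));	  (n == 0 means EOF)
 *		if (pfd.revents & POLLHUP)
 *			... both directions are shut down; close the fd ...
 *	}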
376 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
378 if (sk->sk_shutdown & RCV_SHUTDOWN)
379 mask |= POLLIN | POLLRDNORM;
382 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
383 /* Potential race condition. If the read of tp below
384 * escapes above the read of sk->sk_state, we can be spuriously woken
385 * in SYN_* states. */
386 if ((tp->rcv_nxt != tp->copied_seq) &&
387 (tp->urg_seq != tp->copied_seq ||
388 tp->rcv_nxt != tp->copied_seq + 1 ||
389 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
390 mask |= POLLIN | POLLRDNORM;
392 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
393 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
394 mask |= POLLOUT | POLLWRNORM;
395 } else { /* send SIGIO later */
396 set_bit(SOCK_ASYNC_NOSPACE,
397 &sk->sk_socket->flags);
398 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
400 /* Race breaker. If space is freed after
401 * wspace test but before the flags are set,
402 * IO signal will be lost.
404 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
405 mask |= POLLOUT | POLLWRNORM;
409 if (tp->urg_data & TCP_URG_VALID)
415 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
417 struct tcp_sock *tp = tcp_sk(sk);
422 if (sk->sk_state == TCP_LISTEN)
426 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
428 else if (sock_flag(sk, SOCK_URGINLINE) ||
430 before(tp->urg_seq, tp->copied_seq) ||
431 !before(tp->urg_seq, tp->rcv_nxt)) {
432 answ = tp->rcv_nxt - tp->copied_seq;
434 /* Subtract 1, if FIN is in queue. */
435 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
437 ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
439 answ = tp->urg_seq - tp->copied_seq;
443 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
446 if (sk->sk_state == TCP_LISTEN)
449 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
452 answ = tp->write_seq - tp->snd_una;
458 return put_user(answ, (int __user *)arg);
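/*
 * Illustrative user-space use of the ioctls handled above (a sketch, not
 * kernel code; fd is assumed to be a connected TCP socket):
 *
 *	int pending, at_mark, unsent;
 *	ioctl(fd, SIOCINQ, &pending);	  bytes readable, aka FIONREAD
 *	ioctl(fd, SIOCATMARK, &at_mark);  non-zero if next byte is the urgent mark
 *	ioctl(fd, SIOCOUTQ, &unsent);	  bytes written but not yet acknowledged
 */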
462 int tcp_listen_start(struct sock *sk)
464 struct inet_sock *inet = inet_sk(sk);
465 struct tcp_sock *tp = tcp_sk(sk);
466 struct tcp_listen_opt *lopt;
468 sk->sk_max_ack_backlog = 0;
469 sk->sk_ack_backlog = 0;
470 tp->accept_queue = tp->accept_queue_tail = NULL;
471 rwlock_init(&tp->syn_wait_lock);
474 lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL);
478 memset(lopt, 0, sizeof(struct tcp_listen_opt));
479 for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++)
480 if ((1 << lopt->max_qlen_log) >= sysctl_max_syn_backlog)
482 get_random_bytes(&lopt->hash_rnd, 4);
484 write_lock_bh(&tp->syn_wait_lock);
485 tp->listen_opt = lopt;
486 write_unlock_bh(&tp->syn_wait_lock);
488 /* There is a race window here: we announce ourselves listening,
489 * but this transition is still not validated by get_port().
490 * It is OK, because this socket enters the hash table only
491 * after validation is complete.
493 sk->sk_state = TCP_LISTEN;
494 if (!sk->sk_prot->get_port(sk, inet->num)) {
495 inet->sport = htons(inet->num);
498 sk->sk_prot->hash(sk);
503 sk->sk_state = TCP_CLOSE;
504 write_lock_bh(&tp->syn_wait_lock);
505 tp->listen_opt = NULL;
506 write_unlock_bh(&tp->syn_wait_lock);
512 * This routine closes sockets which have been at least partially
513 * opened, but not yet accepted.
516 static void tcp_listen_stop (struct sock *sk)
518 struct tcp_sock *tp = tcp_sk(sk);
519 struct tcp_listen_opt *lopt = tp->listen_opt;
520 struct open_request *acc_req = tp->accept_queue;
521 struct open_request *req;
524 tcp_delete_keepalive_timer(sk);
526 /* make all the listen_opt local to us */
527 write_lock_bh(&tp->syn_wait_lock);
528 tp->listen_opt = NULL;
529 write_unlock_bh(&tp->syn_wait_lock);
530 tp->accept_queue = tp->accept_queue_tail = NULL;
533 for (i = 0; i < TCP_SYNQ_HSIZE; i++) {
534 while ((req = lopt->syn_table[i]) != NULL) {
535 lopt->syn_table[i] = req->dl_next;
537 tcp_openreq_free(req);
539 /* Following specs, it would be better either to send FIN
540 * (and enter FIN-WAIT-1, it is normal close)
541 * or to send active reset (abort).
542 * Certainly, it is pretty dangerous under synflood, but that is a
543 * bad justification for our negligence 8)
544 * To be honest, we are not able to do either
545 * of the variants now. --ANK
550 BUG_TRAP(!lopt->qlen);
554 while ((req = acc_req) != NULL) {
555 struct sock *child = req->sk;
557 acc_req = req->dl_next;
561 BUG_TRAP(!sock_owned_by_user(child));
564 tcp_disconnect(child, O_NONBLOCK);
568 atomic_inc(&tcp_orphan_count);
570 tcp_destroy_sock(child);
572 bh_unlock_sock(child);
576 sk_acceptq_removed(sk);
577 tcp_openreq_fastfree(req);
579 BUG_TRAP(!sk->sk_ack_backlog);
582 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
584 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
585 tp->pushed_seq = tp->write_seq;
588 static inline int forced_push(struct tcp_sock *tp)
590 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
593 static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
597 TCP_SKB_CB(skb)->seq = tp->write_seq;
598 TCP_SKB_CB(skb)->end_seq = tp->write_seq;
599 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
600 TCP_SKB_CB(skb)->sacked = 0;
601 skb_header_release(skb);
602 __skb_queue_tail(&sk->sk_write_queue, skb);
603 sk_charge_skb(sk, skb);
604 if (!sk->sk_send_head)
605 sk->sk_send_head = skb;
606 else if (tp->nonagle&TCP_NAGLE_PUSH)
607 tp->nonagle &= ~TCP_NAGLE_PUSH;
610 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
613 if (flags & MSG_OOB) {
615 tp->snd_up = tp->write_seq;
616 TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
620 static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
621 int mss_now, int nonagle)
623 if (sk->sk_send_head) {
624 struct sk_buff *skb = sk->sk_write_queue.prev;
625 if (!(flags & MSG_MORE) || forced_push(tp))
626 tcp_mark_push(tp, skb);
627 tcp_mark_urg(tp, flags, skb);
628 __tcp_push_pending_frames(sk, tp, mss_now,
629 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
633 static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
634 size_t psize, int flags)
636 struct tcp_sock *tp = tcp_sk(sk);
640 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
642 /* Wait for a connection to finish. */
643 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
644 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
647 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
649 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
653 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
657 struct sk_buff *skb = sk->sk_write_queue.prev;
658 struct page *page = pages[poffset / PAGE_SIZE];
659 int copy, i, can_coalesce;
660 int offset = poffset % PAGE_SIZE;
661 int size = min_t(size_t, psize, PAGE_SIZE - offset);
663 if (!sk->sk_send_head || (copy = mss_now - skb->len) <= 0) {
665 if (!sk_stream_memory_free(sk))
666 goto wait_for_sndbuf;
668 skb = sk_stream_alloc_pskb(sk, 0, 0,
671 goto wait_for_memory;
673 skb_entail(sk, tp, skb);
680 i = skb_shinfo(skb)->nr_frags;
681 can_coalesce = skb_can_coalesce(skb, i, page, offset);
682 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
683 tcp_mark_push(tp, skb);
686 if (sk->sk_forward_alloc < copy &&
687 !sk_stream_mem_schedule(sk, copy, 0))
688 goto wait_for_memory;
691 skb_shinfo(skb)->frags[i - 1].size += copy;
694 skb_fill_page_desc(skb, i, page, offset, copy);
698 skb->data_len += copy;
699 skb->truesize += copy;
700 sk->sk_wmem_queued += copy;
701 sk->sk_forward_alloc -= copy;
702 skb->ip_summed = CHECKSUM_HW;
703 tp->write_seq += copy;
704 TCP_SKB_CB(skb)->end_seq += copy;
705 skb_shinfo(skb)->tso_segs = 0;
708 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
712 if (!(psize -= copy))
715 if (skb->len != mss_now || (flags & MSG_OOB))
718 if (forced_push(tp)) {
719 tcp_mark_push(tp, skb);
720 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
721 } else if (skb == sk->sk_send_head)
722 tcp_push_one(sk, mss_now);
726 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
729 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
731 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
734 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
739 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
746 return sk_stream_error(sk, flags, err);
749 ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
750 size_t size, int flags)
753 struct sock *sk = sock->sk;
755 #define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
757 if (!(sk->sk_route_caps & NETIF_F_SG) ||
758 !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
759 return sock_no_sendpage(sock, page, offset, size, flags);
761 #undef TCP_ZC_CSUM_FLAGS
765 res = do_tcp_sendpages(sk, &page, offset, size, flags);
771 #define TCP_PAGE(sk) (sk->sk_sndmsg_page)
772 #define TCP_OFF(sk) (sk->sk_sndmsg_off)
774 static inline int select_size(struct sock *sk, struct tcp_sock *tp)
776 int tmp = tp->mss_cache_std;
778 if (sk->sk_route_caps & NETIF_F_SG) {
779 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
781 if (tmp >= pgbreak &&
782 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
788 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
792 struct tcp_sock *tp = tcp_sk(sk);
802 flags = msg->msg_flags;
803 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
805 /* Wait for a connection to finish. */
806 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
807 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
810 /* This should be in poll */
811 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
813 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
815 /* Ok commence sending. */
816 iovlen = msg->msg_iovlen;
821 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
824 while (--iovlen >= 0) {
825 int seglen = iov->iov_len;
826 unsigned char __user *from = iov->iov_base;
833 skb = sk->sk_write_queue.prev;
835 if (!sk->sk_send_head ||
836 (copy = mss_now - skb->len) <= 0) {
839 /* Allocate new segment. If the interface is SG,
840 * allocate skb fitting to single page.
842 if (!sk_stream_memory_free(sk))
843 goto wait_for_sndbuf;
845 skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
846 0, sk->sk_allocation);
848 goto wait_for_memory;
851 * Check whether we can use HW checksum.
853 if (sk->sk_route_caps &
854 (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
856 skb->ip_summed = CHECKSUM_HW;
858 skb_entail(sk, tp, skb);
862 /* Try to append data to the end of skb. */
866 /* Where to copy to? */
867 if (skb_tailroom(skb) > 0) {
868 /* We have some space in skb head. Superb! */
869 if (copy > skb_tailroom(skb))
870 copy = skb_tailroom(skb);
871 if ((err = skb_add_data(skb, from, copy)) != 0)
875 int i = skb_shinfo(skb)->nr_frags;
876 struct page *page = TCP_PAGE(sk);
877 int off = TCP_OFF(sk);
879 if (skb_can_coalesce(skb, i, page, off) &&
881 /* We can extend the last page
884 } else if (i == MAX_SKB_FRAGS ||
886 !(sk->sk_route_caps & NETIF_F_SG))) {
887 /* Need to add new fragment and cannot
888 * do this because interface is non-SG,
889 * or because all the page slots are
891 tcp_mark_push(tp, skb);
894 /* If page is cached, align
895 * offset to L1 cache boundary
897 off = (off + L1_CACHE_BYTES - 1) &
898 ~(L1_CACHE_BYTES - 1);
899 if (off == PAGE_SIZE) {
901 TCP_PAGE(sk) = page = NULL;
906 /* Allocate new cache page. */
907 if (!(page = sk_stream_alloc_page(sk)))
908 goto wait_for_memory;
912 if (copy > PAGE_SIZE - off)
913 copy = PAGE_SIZE - off;
915 /* Time to copy data. We are close to
917 err = skb_copy_to_page(sk, from, skb, page,
920 /* If this page was new, give it to the
921 * socket so it does not get leaked.
930 /* Update the skb. */
932 skb_shinfo(skb)->frags[i - 1].size +=
935 skb_fill_page_desc(skb, i, page, off, copy);
938 } else if (off + copy < PAGE_SIZE) {
944 TCP_OFF(sk) = off + copy;
948 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
950 tp->write_seq += copy;
951 TCP_SKB_CB(skb)->end_seq += copy;
952 skb_shinfo(skb)->tso_segs = 0;
956 if ((seglen -= copy) == 0 && iovlen == 0)
959 if (skb->len != mss_now || (flags & MSG_OOB))
962 if (forced_push(tp)) {
963 tcp_mark_push(tp, skb);
964 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
965 } else if (skb == sk->sk_send_head)
966 tcp_push_one(sk, mss_now);
970 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
973 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
975 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
978 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
984 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
991 if (sk->sk_send_head == skb)
992 sk->sk_send_head = NULL;
993 __skb_unlink(skb, skb->list);
994 sk_stream_free_skb(sk, skb);
1001 err = sk_stream_error(sk, flags, err);
1002 TCP_CHECK_TIMER(sk);
1008 * Handle reading urgent data. BSD has very simple semantics for
1009 * this, no blocking and very strange errors 8)
1012 static int tcp_recv_urg(struct sock *sk, long timeo,
1013 struct msghdr *msg, int len, int flags,
1016 struct tcp_sock *tp = tcp_sk(sk);
1018 /* No URG data to read. */
1019 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1020 tp->urg_data == TCP_URG_READ)
1021 return -EINVAL; /* Yes this is right ! */
1023 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1026 if (tp->urg_data & TCP_URG_VALID) {
1028 char c = tp->urg_data;
1030 if (!(flags & MSG_PEEK))
1031 tp->urg_data = TCP_URG_READ;
1033 /* Read urgent data. */
1034 msg->msg_flags |= MSG_OOB;
1037 if (!(flags & MSG_TRUNC))
1038 err = memcpy_toiovec(msg->msg_iov, &c, 1);
1041 msg->msg_flags |= MSG_TRUNC;
1043 return err ? -EFAULT : len;
1046 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1049 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
1050 * the available implementations agree in this case:
1051 * this call should never block, independent of the
1052 * blocking state of the socket.
1053 * Mike <pall@rz.uni-karlsruhe.de>
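 *
 * A user-space sketch of the semantics (not kernel code): with
 * SO_OOBINLINE clear, the urgent byte is fetched out of band and the
 * call returns immediately whether or not O_NONBLOCK is set:
 *
 *	char c;
 *	if (recv(fd, &c, 1, MSG_OOB) < 0 && errno == EINVAL)
 *		... no urgent data is pending ...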
1058 /* Clean up the receive buffer for full frames taken by the user,
1059 * then send an ACK if necessary. COPIED is the number of bytes
1060 * tcp_recvmsg has given to the user so far, it speeds up the
1061 * calculation of whether or not we must ACK for the sake of
1064 static void cleanup_rbuf(struct sock *sk, int copied)
1066 struct tcp_sock *tp = tcp_sk(sk);
1067 int time_to_ack = 0;
1070 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1072 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
1075 if (tcp_ack_scheduled(tp)) {
1076 /* Delayed ACKs frequently hit locked sockets during bulk
1078 if (tp->ack.blocked ||
1079 /* Once-per-two-segments ACK was not sent by tcp_input.c */
1080 tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss ||
1082 * If this read emptied read buffer, we send ACK, if
1083 * connection is not bidirectional, user drained
1084 * receive buffer and there was a small segment
1087 (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) &&
1088 !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
1092 /* We send an ACK if we can now advertise a non-zero window
1093 * which has been raised "significantly".
1095 * Even if window raised up to infinity, do not send window open ACK
1096 * in states, where we will not receive more. It is useless.
1098 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1099 __u32 rcv_window_now = tcp_receive_window(tp);
1101 /* Optimize, __tcp_select_window() is not cheap. */
1102 if (2*rcv_window_now <= tp->window_clamp) {
1103 __u32 new_window = __tcp_select_window(sk);
1105 /* Send ACK now, if this read freed lots of space
1106 * in our buffer. Here new_window is the newly computed window;
1107 * we can advertise it now if it is not less than the current one.
1108 * "Lots" means "at least twice" here.
1110 if (new_window && new_window >= 2 * rcv_window_now)
1118 static void tcp_prequeue_process(struct sock *sk)
1120 struct sk_buff *skb;
1121 struct tcp_sock *tp = tcp_sk(sk);
1123 NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
1125 /* RX process wants to run with disabled BHs, though it is not
1128 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1129 sk->sk_backlog_rcv(sk, skb);
1132 /* Clear memory counter. */
1133 tp->ucopy.memory = 0;
1136 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1138 struct sk_buff *skb;
1141 skb_queue_walk(&sk->sk_receive_queue, skb) {
1142 offset = seq - TCP_SKB_CB(skb)->seq;
1145 if (offset < skb->len || skb->h.th->fin) {
1154 * This routine provides an alternative to tcp_recvmsg() for routines
1155 * that would like to handle copying from skbuffs directly in 'sendfile'
1158 * - It is assumed that the socket was locked by the caller.
1159 * - The routine does not block.
1160 * - At present, there is no support for reading OOB data
1161 * or for 'peeking' the socket using this routine
1162 * (although both would be easy to implement).
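 *
 * As a sketch (hypothetical names, not an in-tree user), a caller provides
 * an actor that consumes data straight out of the queued skbs:
 *
 *	static int my_recv_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *				 unsigned int offset, size_t len)
 *	{
 *		size_t used = min_t(size_t, len, desc->count);
 *		... copy or parse 'used' bytes starting at 'offset' in skb ...
 *		desc->count -= used;
 *		return used;
 *	}
 *
 *	tcp_read_sock(sk, &desc, my_recv_actor);	with the socket locked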
1164 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1165 sk_read_actor_t recv_actor)
1167 struct sk_buff *skb;
1168 struct tcp_sock *tp = tcp_sk(sk);
1169 u32 seq = tp->copied_seq;
1173 if (sk->sk_state == TCP_LISTEN)
1175 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1176 if (offset < skb->len) {
1179 len = skb->len - offset;
1180 /* Stop reading if we hit a patch of urgent data */
1182 u32 urg_offset = tp->urg_seq - seq;
1183 if (urg_offset < len)
1188 used = recv_actor(desc, skb, offset, len);
1194 if (offset != skb->len)
1197 if (skb->h.th->fin) {
1198 sk_eat_skb(sk, skb);
1202 sk_eat_skb(sk, skb);
1206 tp->copied_seq = seq;
1208 tcp_rcv_space_adjust(sk);
1210 /* Clean up data we have read: This will do ACK frames. */
1212 cleanup_rbuf(sk, copied);
1217 * This routine copies from a sock struct into the user buffer.
1219 * Technical note: in 2.3 we work on _locked_ socket, so that
1220 * tricks with *seq access order and skb->users are not required.
1221 * Probably, code can be easily improved even more.
1224 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1225 size_t len, int nonblock, int flags, int *addr_len)
1227 struct tcp_sock *tp = tcp_sk(sk);
1233 int target; /* Read at least this many bytes */
1235 struct task_struct *user_recv = NULL;
1239 TCP_CHECK_TIMER(sk);
1242 if (sk->sk_state == TCP_LISTEN)
1245 timeo = sock_rcvtimeo(sk, nonblock);
1247 /* Urgent data needs to be handled specially. */
1248 if (flags & MSG_OOB)
1251 seq = &tp->copied_seq;
1252 if (flags & MSG_PEEK) {
1253 peek_seq = tp->copied_seq;
1257 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1260 struct sk_buff *skb;
1263 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1264 if (tp->urg_data && tp->urg_seq == *seq) {
1267 if (signal_pending(current)) {
1268 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1273 /* Next get a buffer. */
1275 skb = skb_peek(&sk->sk_receive_queue);
1280 /* Now that we have two receive queues this
1283 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1284 printk(KERN_INFO "recvmsg bug: copied %X "
1285 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1288 offset = *seq - TCP_SKB_CB(skb)->seq;
1291 if (offset < skb->len)
1295 BUG_TRAP(flags & MSG_PEEK);
1297 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1299 /* Well, if we have backlog, try to process it now. */
1301 if (copied >= target && !sk->sk_backlog.tail)
1306 sk->sk_state == TCP_CLOSE ||
1307 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1309 signal_pending(current) ||
1313 if (sock_flag(sk, SOCK_DONE))
1317 copied = sock_error(sk);
1321 if (sk->sk_shutdown & RCV_SHUTDOWN)
1324 if (sk->sk_state == TCP_CLOSE) {
1325 if (!sock_flag(sk, SOCK_DONE)) {
1326 /* This occurs when the user tries to read
1327 * from a never-connected socket.
1340 if (signal_pending(current)) {
1341 copied = sock_intr_errno(timeo);
1346 cleanup_rbuf(sk, copied);
1348 if (tp->ucopy.task == user_recv) {
1349 /* Install new reader */
1350 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1351 user_recv = current;
1352 tp->ucopy.task = user_recv;
1353 tp->ucopy.iov = msg->msg_iov;
1356 tp->ucopy.len = len;
1358 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1359 (flags & (MSG_PEEK | MSG_TRUNC)));
1361 /* Ugly... If prequeue is not empty, we have to
1362 * process it before releasing the socket, otherwise
1363 * the order will be broken at the second iteration.
1364 * A more elegant solution is required!!!
1366 * Look: we have the following (pseudo)queues:
1368 * 1. packets in flight
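 * 2. backlog
 * 3. prequeue
 * 4. receive_queue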
1373 * Each queue can be processed only if the next ones
1374 * are empty. At this point we have empty receive_queue.
1375 * But prequeue _can_ be non-empty after the 2nd iteration,
1376 * when we jumped to the start of the loop because backlog
1377 * processing added something to receive_queue.
1378 * We cannot release_sock(), because the backlog contains
1379 * packets that arrived _after_ the prequeued ones.
1381 * In short, the algorithm is clear --- process all
1382 * the queues in order. We could do it more directly,
1383 * requeueing packets from backlog to prequeue if it
1384 * is not empty. It is more elegant, but eats cycles,
1387 if (skb_queue_len(&tp->ucopy.prequeue))
1390 /* __ Set realtime policy in scheduler __ */
1393 if (copied >= target) {
1394 /* Do not sleep, just process backlog. */
1398 sk_wait_data(sk, &timeo);
1403 /* __ Restore normal policy in scheduler __ */
1405 if ((chunk = len - tp->ucopy.len) != 0) {
1406 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1411 if (tp->rcv_nxt == tp->copied_seq &&
1412 skb_queue_len(&tp->ucopy.prequeue)) {
1414 tcp_prequeue_process(sk);
1416 if ((chunk = len - tp->ucopy.len) != 0) {
1417 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1423 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1424 if (net_ratelimit())
1425 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1426 current->comm, current->pid);
1427 peek_seq = tp->copied_seq;
1432 /* Ok so how much can we use? */
1433 used = skb->len - offset;
1437 /* Do we have urgent data here? */
1439 u32 urg_offset = tp->urg_seq - *seq;
1440 if (urg_offset < used) {
1442 if (!sock_flag(sk, SOCK_URGINLINE)) {
1454 if (!(flags & MSG_TRUNC)) {
1455 err = skb_copy_datagram_iovec(skb, offset,
1456 msg->msg_iov, used);
1458 /* Exception. Bailout! */
1469 tcp_rcv_space_adjust(sk);
1472 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1474 tcp_fast_path_check(sk, tp);
1476 if (used + offset < skb->len)
1481 if (!(flags & MSG_PEEK))
1482 sk_eat_skb(sk, skb);
1486 /* Process the FIN. */
1488 if (!(flags & MSG_PEEK))
1489 sk_eat_skb(sk, skb);
1494 if (skb_queue_len(&tp->ucopy.prequeue)) {
1497 tp->ucopy.len = copied > 0 ? len : 0;
1499 tcp_prequeue_process(sk);
1501 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1502 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1508 tp->ucopy.task = NULL;
1512 /* According to UNIX98, msg_name/msg_namelen are ignored
1513 * on a connected socket. I was just happy when I found this 8) --ANK
1516 /* Clean up data we have read: This will do ACK frames. */
1517 cleanup_rbuf(sk, copied);
1519 TCP_CHECK_TIMER(sk);
1524 TCP_CHECK_TIMER(sk);
1529 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1534 * State processing on a close. This implements the state shift for
1535 * sending our FIN frame. Note that we only send a FIN for some
1536 * states. A shutdown() may have already sent the FIN, or we may be
1540 static unsigned char new_state[16] = {
1541 /* current state: new state: action: */
1542 /* (Invalid) */ TCP_CLOSE,
1543 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1544 /* TCP_SYN_SENT */ TCP_CLOSE,
1545 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1546 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1547 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1548 /* TCP_TIME_WAIT */ TCP_CLOSE,
1549 /* TCP_CLOSE */ TCP_CLOSE,
1550 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1551 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1552 /* TCP_LISTEN */ TCP_CLOSE,
1553 /* TCP_CLOSING */ TCP_CLOSING,
1556 static int tcp_close_state(struct sock *sk)
1558 int next = (int)new_state[sk->sk_state];
1559 int ns = next & TCP_STATE_MASK;
1561 tcp_set_state(sk, ns);
1563 return next & TCP_ACTION_FIN;
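/*
 * For example, a socket in TCP_ESTABLISHED maps to
 * TCP_FIN_WAIT1 | TCP_ACTION_FIN in new_state[] above: the state becomes
 * FIN_WAIT1 and the non-zero return tells the caller to send a FIN.
 * TCP_SYN_SENT simply maps to TCP_CLOSE, with no FIN to send.
 */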
1567 * Shutdown the sending side of a connection. Much like close except
1568 * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
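 *
 * A user-space sketch of the half-close this implements (not kernel code):
 *
 *	shutdown(fd, SHUT_WR);			  send our FIN, keep receiving
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		... consume whatever the peer still has to send ...
 *	close(fd);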
1571 void tcp_shutdown(struct sock *sk, int how)
1573 /* We need to grab some memory, and put together a FIN,
1574 * and then put it into the queue to be sent.
1575 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1577 if (!(how & SEND_SHUTDOWN))
1580 /* If we've already sent a FIN, or it's a closed state, skip this. */
1581 if ((1 << sk->sk_state) &
1582 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1583 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1584 /* Clear out any half completed packets. FIN if needed. */
1585 if (tcp_close_state(sk))
1591 * At this point, there should be no process reference to this
1592 * socket, and thus no user references at all. Therefore we
1593 * can assume the socket waitqueue is inactive and nobody will
1594 * try to jump onto it.
1596 void tcp_destroy_sock(struct sock *sk)
1598 BUG_TRAP(sk->sk_state == TCP_CLOSE);
1599 BUG_TRAP(sock_flag(sk, SOCK_DEAD));
1601 /* It cannot be in hash table! */
1602 BUG_TRAP(sk_unhashed(sk));
1604 /* If it has not 0 inet_sk(sk)->num, it must be bound */
1605 BUG_TRAP(!inet_sk(sk)->num || tcp_sk(sk)->bind_hash);
1607 sk->sk_prot->destroy(sk);
1609 sk_stream_kill_queues(sk);
1611 xfrm_sk_free_policy(sk);
1613 #ifdef INET_REFCNT_DEBUG
1614 if (atomic_read(&sk->sk_refcnt) != 1) {
1615 printk(KERN_DEBUG "Destruction TCP %p delayed, c=%d\n",
1616 sk, atomic_read(&sk->sk_refcnt));
1620 atomic_dec(&tcp_orphan_count);
1624 void tcp_close(struct sock *sk, long timeout)
1626 struct sk_buff *skb;
1627 int data_was_unread = 0;
1630 sk->sk_shutdown = SHUTDOWN_MASK;
1632 if (sk->sk_state == TCP_LISTEN) {
1633 tcp_set_state(sk, TCP_CLOSE);
1636 tcp_listen_stop(sk);
1638 goto adjudge_to_death;
1641 /* We need to flush the recv. buffs. We do this only on the
1642 * descriptor close, not protocol-sourced closes, because the
1643 * reader process may not have drained the data yet!
1645 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1646 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1648 data_was_unread += len;
1652 sk_stream_mem_reclaim(sk);
1654 /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
1655 * 3.10, we send a RST here because data was lost. To
1656 * witness the awful effects of the old behavior of always
1657 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
1658 * a bulk GET in an FTP client, suspend the process, wait
1659 * for the client to advertise a zero window, then kill -9
1660 * the FTP client, wheee... Note: timeout is always zero
1663 if (data_was_unread) {
1664 /* Unread data was tossed, zap the connection. */
1665 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1666 tcp_set_state(sk, TCP_CLOSE);
1667 tcp_send_active_reset(sk, GFP_KERNEL);
1668 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1669 /* Check zero linger _after_ checking for unread data. */
1670 sk->sk_prot->disconnect(sk, 0);
1671 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1672 } else if (tcp_close_state(sk)) {
1673 /* We FIN if the application ate all the data before
1674 * zapping the connection.
1677 /* RED-PEN. Formally speaking, we have broken TCP state
1678 * machine. State transitions:
1680 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1681 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1682 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1684 * are legal only when FIN has been sent (i.e. in window),
1685 * rather than queued out of window. Purists blame.
1687 * F.e. "RFC state" is ESTABLISHED,
1688 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1690 * The visible deviations are that sometimes
1691 * we enter the time-wait state when it is not really required
1692 * (harmless), and do not send active resets when they are
1693 * required by the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1694 * they look like CLOSING or LAST_ACK to Linux).
1695 * Probably, I missed some more holelets.
1701 sk_stream_wait_close(sk, timeout);
1704 /* It is the last release_sock in its life. It will remove backlog. */
1708 /* Now socket is owned by kernel and we acquire BH lock
1709 to finish close. No need to check for user refs.
1713 BUG_TRAP(!sock_owned_by_user(sk));
1718 /* This is a (useful) BSD violation of the RFC. There is a
1719 * problem with TCP as specified in that the other end could
1720 * keep a socket open forever with no application left at this end.
1721 * We use a 3 minute timeout (about the same as BSD) then kill
1722 * our end. If they send after that then tough - BUT: long enough
1723 * that we won't make the old 4*rto = almost no time - whoops
1726 * Nope, it was not a mistake. It is really desired behaviour,
1727 * e.g. on HTTP servers, where such sockets are useless but
1728 * consume significant resources. Let's do it with the special
1729 * linger2 option. --ANK
1732 if (sk->sk_state == TCP_FIN_WAIT2) {
1733 struct tcp_sock *tp = tcp_sk(sk);
1734 if (tp->linger2 < 0) {
1735 tcp_set_state(sk, TCP_CLOSE);
1736 tcp_send_active_reset(sk, GFP_ATOMIC);
1737 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1739 int tmo = tcp_fin_time(tp);
1741 if (tmo > TCP_TIMEWAIT_LEN) {
1742 tcp_reset_keepalive_timer(sk, tcp_fin_time(tp));
1744 atomic_inc(&tcp_orphan_count);
1745 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1750 if (sk->sk_state != TCP_CLOSE) {
1751 sk_stream_mem_reclaim(sk);
1752 if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
1753 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1754 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1755 if (net_ratelimit())
1756 printk(KERN_INFO "TCP: too many orphaned "
1758 tcp_set_state(sk, TCP_CLOSE);
1759 tcp_send_active_reset(sk, GFP_ATOMIC);
1760 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1763 atomic_inc(&tcp_orphan_count);
1765 if (sk->sk_state == TCP_CLOSE)
1766 tcp_destroy_sock(sk);
1767 /* Otherwise, socket is reprieved until protocol close. */
1775 /* These states need RST on ABORT according to RFC793 */
1777 static inline int tcp_need_reset(int state)
1779 return (1 << state) &
1780 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1781 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1784 int tcp_disconnect(struct sock *sk, int flags)
1786 struct inet_sock *inet = inet_sk(sk);
1787 struct tcp_sock *tp = tcp_sk(sk);
1789 int old_state = sk->sk_state;
1791 if (old_state != TCP_CLOSE)
1792 tcp_set_state(sk, TCP_CLOSE);
1794 /* ABORT function of RFC793 */
1795 if (old_state == TCP_LISTEN) {
1796 tcp_listen_stop(sk);
1797 } else if (tcp_need_reset(old_state) ||
1798 (tp->snd_nxt != tp->write_seq &&
1799 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1800 /* The last check adjusts for the discrepancy of Linux wrt. the RFC
1803 tcp_send_active_reset(sk, gfp_any());
1804 sk->sk_err = ECONNRESET;
1805 } else if (old_state == TCP_SYN_SENT)
1806 sk->sk_err = ECONNRESET;
1808 tcp_clear_xmit_timers(sk);
1809 __skb_queue_purge(&sk->sk_receive_queue);
1810 sk_stream_writequeue_purge(sk);
1811 __skb_queue_purge(&tp->out_of_order_queue);
1815 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1816 inet_reset_saddr(sk);
1818 sk->sk_shutdown = 0;
1819 sock_reset_flag(sk, SOCK_DONE);
1821 if ((tp->write_seq += tp->max_window + 2) == 0)
1826 tp->packets_out = 0;
1827 tp->snd_ssthresh = 0x7fffffff;
1828 tp->snd_cwnd_cnt = 0;
1829 tcp_set_ca_state(tp, TCP_CA_Open);
1830 tcp_clear_retrans(tp);
1831 tcp_delack_init(tp);
1832 sk->sk_send_head = NULL;
1833 tp->rx_opt.saw_tstamp = 0;
1834 tcp_sack_reset(&tp->rx_opt);
1837 BUG_TRAP(!inet->num || tp->bind_hash);
1839 sk->sk_error_report(sk);
1844 * Wait for an incoming connection, avoid race
1845 * conditions. This must be called with the socket locked.
1847 static int wait_for_connect(struct sock *sk, long timeo)
1849 struct tcp_sock *tp = tcp_sk(sk);
1854 * True wake-one mechanism for incoming connections: only
1855 * one process gets woken up, not the 'whole herd'.
1856 * Since we do not 'race & poll' for established sockets
1857 * anymore, the common case will execute the loop only once.
1859 * Subtle issue: "add_wait_queue_exclusive()" will be added
1860 * after any current non-exclusive waiters, and we know that
1861 * it will always _stay_ after any new non-exclusive waiters
1862 * because all non-exclusive waiters are added at the
1863 * beginning of the wait-queue. As such, it's ok to "drop"
1864 * our exclusiveness temporarily when we get woken up without
1865 * having to remove and re-insert us on the wait queue.
1868 prepare_to_wait_exclusive(sk->sk_sleep, &wait,
1869 TASK_INTERRUPTIBLE);
1871 if (!tp->accept_queue)
1872 timeo = schedule_timeout(timeo);
1875 if (tp->accept_queue)
1878 if (sk->sk_state != TCP_LISTEN)
1880 err = sock_intr_errno(timeo);
1881 if (signal_pending(current))
1887 finish_wait(sk->sk_sleep, &wait);
1892 * This will accept the next outstanding connection.
1895 struct sock *tcp_accept(struct sock *sk, int flags, int *err)
1897 struct tcp_sock *tp = tcp_sk(sk);
1898 struct open_request *req;
1904 /* We need to make sure that this socket is listening,
1905 * and that it has something pending.
1908 if (sk->sk_state != TCP_LISTEN)
1911 /* Find already established connection */
1912 if (!tp->accept_queue) {
1913 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1915 /* If this is a non-blocking socket, don't sleep */
1920 error = wait_for_connect(sk, timeo);
1925 req = tp->accept_queue;
1926 if ((tp->accept_queue = req->dl_next) == NULL)
1927 tp->accept_queue_tail = NULL;
1930 sk_acceptq_removed(sk);
1931 tcp_openreq_fastfree(req);
1932 BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
1943 * Socket option code for TCP.
1945 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1948 struct tcp_sock *tp = tcp_sk(sk);
1952 if (level != SOL_TCP)
1953 return tp->af_specific->setsockopt(sk, level, optname,
1956 if (optlen < sizeof(int))
1959 if (get_user(val, (int __user *)optval))
1966 /* Values greater than the interface MTU won't take effect. However,
1967 * at the point when this call is done we typically don't yet
1968 * know which interface is going to be used */
1969 if (val < 8 || val > MAX_TCP_WINDOW) {
1973 tp->rx_opt.user_mss = val;
1978 /* TCP_NODELAY is weaker than TCP_CORK, so that
1979 * this option on corked socket is remembered, but
1980 * it is not activated until cork is cleared.
1982 * However, when TCP_NODELAY is set we make
1983 * an explicit push, which overrides even TCP_CORK
1984 * for currently queued segments.
1986 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
1987 tcp_push_pending_frames(sk, tp);
1989 tp->nonagle &= ~TCP_NAGLE_OFF;
1994 * When set, indicates that non-full frames should always be queued.
1995 * Later the user clears this option and we transmit
1996 * any pending partial frames in the queue. This is
1997 * meant to be used alongside sendfile() to get properly
1998 * filled frames when the user (for example) must write
1999 * out headers with a write() call first and then use
2000 * sendfile to send out the data parts.
2002 * TCP_CORK can be set together with TCP_NODELAY and it is
2003 * stronger than TCP_NODELAY.
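 *
 * A user-space sketch of the intended pattern (not kernel code; hdr,
 * file_fd and the lengths are hypothetical):
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, SOL_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);			  queued, frame may stay partial
 *	sendfile(fd, file_fd, NULL, body_len);		  appended to the same frames
 *	setsockopt(fd, SOL_TCP, TCP_CORK, &off, sizeof(off));  uncork: flush partial frame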
2006 tp->nonagle |= TCP_NAGLE_CORK;
2008 tp->nonagle &= ~TCP_NAGLE_CORK;
2009 if (tp->nonagle&TCP_NAGLE_OFF)
2010 tp->nonagle |= TCP_NAGLE_PUSH;
2011 tcp_push_pending_frames(sk, tp);
2016 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2019 tp->keepalive_time = val * HZ;
2020 if (sock_flag(sk, SOCK_KEEPOPEN) &&
2021 !((1 << sk->sk_state) &
2022 (TCPF_CLOSE | TCPF_LISTEN))) {
2023 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
2024 if (tp->keepalive_time > elapsed)
2025 elapsed = tp->keepalive_time - elapsed;
2028 tcp_reset_keepalive_timer(sk, elapsed);
2033 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2036 tp->keepalive_intvl = val * HZ;
2039 if (val < 1 || val > MAX_TCP_KEEPCNT)
2042 tp->keepalive_probes = val;
2045 if (val < 1 || val > MAX_TCP_SYNCNT)
2048 tp->syn_retries = val;
2054 else if (val > sysctl_tcp_fin_timeout / HZ)
2057 tp->linger2 = val * HZ;
2060 case TCP_DEFER_ACCEPT:
2061 tp->defer_accept = 0;
2063 /* Translate value in seconds to number of
2065 while (tp->defer_accept < 32 &&
2066 val > ((TCP_TIMEOUT_INIT / HZ) <<
2073 case TCP_WINDOW_CLAMP:
2075 if (sk->sk_state != TCP_CLOSE) {
2079 tp->window_clamp = 0;
2081 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2082 SOCK_MIN_RCVBUF / 2 : val;
2087 tp->ack.pingpong = 1;
2089 tp->ack.pingpong = 0;
2090 if ((1 << sk->sk_state) &
2091 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2092 tcp_ack_scheduled(tp)) {
2093 tp->ack.pending |= TCP_ACK_PUSHED;
2094 cleanup_rbuf(sk, 1);
2096 tp->ack.pingpong = 1;
2109 /* Return information about the state of a TCP endpoint in API format. */
2110 void tcp_get_info(struct sock *sk, struct tcp_info *info)
2112 struct tcp_sock *tp = tcp_sk(sk);
2113 u32 now = tcp_time_stamp;
2115 memset(info, 0, sizeof(*info));
2117 info->tcpi_state = sk->sk_state;
2118 info->tcpi_ca_state = tp->ca_state;
2119 info->tcpi_retransmits = tp->retransmits;
2120 info->tcpi_probes = tp->probes_out;
2121 info->tcpi_backoff = tp->backoff;
2123 if (tp->rx_opt.tstamp_ok)
2124 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2125 if (tp->rx_opt.sack_ok)
2126 info->tcpi_options |= TCPI_OPT_SACK;
2127 if (tp->rx_opt.wscale_ok) {
2128 info->tcpi_options |= TCPI_OPT_WSCALE;
2129 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2130 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2133 if (tp->ecn_flags&TCP_ECN_OK)
2134 info->tcpi_options |= TCPI_OPT_ECN;
2136 info->tcpi_rto = jiffies_to_usecs(tp->rto);
2137 info->tcpi_ato = jiffies_to_usecs(tp->ack.ato);
2138 info->tcpi_snd_mss = tp->mss_cache_std;
2139 info->tcpi_rcv_mss = tp->ack.rcv_mss;
2141 info->tcpi_unacked = tp->packets_out;
2142 info->tcpi_sacked = tp->sacked_out;
2143 info->tcpi_lost = tp->lost_out;
2144 info->tcpi_retrans = tp->retrans_out;
2145 info->tcpi_fackets = tp->fackets_out;
2147 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2148 info->tcpi_last_data_recv = jiffies_to_msecs(now - tp->ack.lrcvtime);
2149 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2151 info->tcpi_pmtu = tp->pmtu_cookie;
2152 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2153 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2154 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2155 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2156 info->tcpi_snd_cwnd = tp->snd_cwnd;
2157 info->tcpi_advmss = tp->advmss;
2158 info->tcpi_reordering = tp->reordering;
2160 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2161 info->tcpi_rcv_space = tp->rcvq_space.space;
2163 info->tcpi_total_retrans = tp->total_retrans;
2166 EXPORT_SYMBOL_GPL(tcp_get_info);
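/*
 * Illustrative user-space consumer of the above (a sketch, not kernel
 * code): tcp_get_info() is reached through getsockopt(TCP_INFO).
 *
 *	struct tcp_info info;
 *	socklen_t len = sizeof(info);
 *	if (getsockopt(fd, SOL_TCP, TCP_INFO, &info, &len) == 0)
 *		printf("rtt %u us cwnd %u\n", info.tcpi_rtt, info.tcpi_snd_cwnd);
 */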
2168 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2171 struct tcp_sock *tp = tcp_sk(sk);
2174 if (level != SOL_TCP)
2175 return tp->af_specific->getsockopt(sk, level, optname,
2178 if (get_user(len, optlen))
2181 len = min_t(unsigned int, len, sizeof(int));
2188 val = tp->mss_cache_std;
2189 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2190 val = tp->rx_opt.user_mss;
2193 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2196 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2199 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2202 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2205 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2208 val = tp->syn_retries ? : sysctl_tcp_syn_retries;
2213 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2215 case TCP_DEFER_ACCEPT:
2216 val = !tp->defer_accept ? 0 : ((TCP_TIMEOUT_INIT / HZ) <<
2217 (tp->defer_accept - 1));
2219 case TCP_WINDOW_CLAMP:
2220 val = tp->window_clamp;
2223 struct tcp_info info;
2225 if (get_user(len, optlen))
2228 tcp_get_info(sk, &info);
2230 len = min_t(unsigned int, len, sizeof(info));
2231 if (put_user(len, optlen))
2233 if (copy_to_user(optval, &info, len))
2238 val = !tp->ack.pingpong;
2241 return -ENOPROTOOPT;
2244 if (put_user(len, optlen))
2246 if (copy_to_user(optval, &val, len))
2252 extern void __skb_cb_too_small_for_tcp(int, int);
2253 extern void tcpdiag_init(void);
2255 static __initdata unsigned long thash_entries;
2256 static int __init set_thash_entries(char *str)
2260 thash_entries = simple_strtoul(str, &str, 0);
2263 __setup("thash_entries=", set_thash_entries);
2265 void __init tcp_init(void)
2267 struct sk_buff *skb = NULL;
2270 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2271 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2274 tcp_openreq_cachep = kmem_cache_create("tcp_open_request",
2275 sizeof(struct open_request),
2276 0, SLAB_HWCACHE_ALIGN,
2278 if (!tcp_openreq_cachep)
2279 panic("tcp_init: Cannot alloc open_request cache.");
2281 tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
2282 sizeof(struct tcp_bind_bucket),
2283 0, SLAB_HWCACHE_ALIGN,
2285 if (!tcp_bucket_cachep)
2286 panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
2288 tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
2289 sizeof(struct tcp_tw_bucket),
2290 0, SLAB_HWCACHE_ALIGN,
2292 if (!tcp_timewait_cachep)
2293 panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");
2295 /* Size and allocate the main established and bind bucket
2298 * The methodology is similar to that of the buffer cache.
2300 tcp_ehash = (struct tcp_ehash_bucket *)
2301 alloc_large_system_hash("TCP established",
2302 sizeof(struct tcp_ehash_bucket),
2304 (num_physpages >= 128 * 1024) ?
2311 tcp_ehash_size = (1 << tcp_ehash_size) >> 1;
2312 for (i = 0; i < (tcp_ehash_size << 1); i++) {
2313 rwlock_init(&tcp_ehash[i].lock);
2314 INIT_HLIST_HEAD(&tcp_ehash[i].chain);
2317 tcp_bhash = (struct tcp_bind_hashbucket *)
2318 alloc_large_system_hash("TCP bind",
2319 sizeof(struct tcp_bind_hashbucket),
2321 (num_physpages >= 128 * 1024) ?
2328 tcp_bhash_size = 1 << tcp_bhash_size;
2329 for (i = 0; i < tcp_bhash_size; i++) {
2330 spin_lock_init(&tcp_bhash[i].lock);
2331 INIT_HLIST_HEAD(&tcp_bhash[i].chain);
2334 /* Try to be a bit smarter and adjust defaults depending
2335 * on available memory.
2337 for (order = 0; ((1 << order) << PAGE_SHIFT) <
2338 (tcp_bhash_size * sizeof(struct tcp_bind_hashbucket));
2342 sysctl_local_port_range[0] = 32768;
2343 sysctl_local_port_range[1] = 61000;
2344 sysctl_tcp_max_tw_buckets = 180000;
2345 sysctl_tcp_max_orphans = 4096 << (order - 4);
2346 sysctl_max_syn_backlog = 1024;
2347 } else if (order < 3) {
2348 sysctl_local_port_range[0] = 1024 * (3 - order);
2349 sysctl_tcp_max_tw_buckets >>= (3 - order);
2350 sysctl_tcp_max_orphans >>= (3 - order);
2351 sysctl_max_syn_backlog = 128;
2353 tcp_port_rover = sysctl_local_port_range[0] - 1;
2355 sysctl_tcp_mem[0] = 768 << order;
2356 sysctl_tcp_mem[1] = 1024 << order;
2357 sysctl_tcp_mem[2] = 1536 << order;
2360 sysctl_tcp_wmem[2] = 64 * 1024;
2361 sysctl_tcp_rmem[0] = PAGE_SIZE;
2362 sysctl_tcp_rmem[1] = 43689;
2363 sysctl_tcp_rmem[2] = 2 * 43689;
2366 printk(KERN_INFO "TCP: Hash tables configured "
2367 "(established %d bind %d)\n",
2368 tcp_ehash_size << 1, tcp_bhash_size);
2371 EXPORT_SYMBOL(tcp_accept);
2372 EXPORT_SYMBOL(tcp_close);
2373 EXPORT_SYMBOL(tcp_destroy_sock);
2374 EXPORT_SYMBOL(tcp_disconnect);
2375 EXPORT_SYMBOL(tcp_getsockopt);
2376 EXPORT_SYMBOL(tcp_ioctl);
2377 EXPORT_SYMBOL(tcp_openreq_cachep);
2378 EXPORT_SYMBOL(tcp_poll);
2379 EXPORT_SYMBOL(tcp_read_sock);
2380 EXPORT_SYMBOL(tcp_recvmsg);
2381 EXPORT_SYMBOL(tcp_sendmsg);
2382 EXPORT_SYMBOL(tcp_sendpage);
2383 EXPORT_SYMBOL(tcp_setsockopt);
2384 EXPORT_SYMBOL(tcp_shutdown);
2385 EXPORT_SYMBOL(tcp_statistics);
2386 EXPORT_SYMBOL(tcp_timewait_cachep);