linux-2.6: net/ipv4/tcp.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *              Alan Cox        :       Numerous verify_area() calls
 *              Alan Cox        :       Set the ACK bit on a reset
 *              Alan Cox        :       Stopped it crashing if it closed while
 *                                      sk->inuse=1 and was trying to connect
 *                                      (tcp_err()).
 *              Alan Cox        :       All icmp error handling was broken;
 *                                      pointers passed were wrong and the
 *                                      socket was looked up backwards. Nobody
 *                                      tested any icmp error code, obviously.
 *              Alan Cox        :       tcp_err() now handled properly. It
 *                                      wakes people on errors. poll
 *                                      behaves and the icmp error race
 *                                      has gone by moving it into sock.c
 *              Alan Cox        :       tcp_send_reset() fixed to work for
 *                                      everything not just packets for
 *                                      unknown sockets.
 *              Alan Cox        :       tcp option processing.
 *              Alan Cox        :       Reset tweaked (still not 100%) [Had
 *                                      syn rule wrong]
 *              Herp Rosmanith  :       More reset fixes
 *              Alan Cox        :       No longer acks invalid rst frames.
 *                                      Acking any kind of RST is right out.
 *              Alan Cox        :       Sets an ignore me flag on an rst
 *                                      receive otherwise odd bits of prattle
 *                                      escape still
 *              Alan Cox        :       Fixed another acking RST frame bug.
 *                                      Should stop LAN workplace lockups.
 *              Alan Cox        :       Some tidyups using the new skb list
 *                                      facilities
 *              Alan Cox        :       sk->keepopen now seems to work
 *              Alan Cox        :       Pulls options out correctly on accepts
 *              Alan Cox        :       Fixed assorted sk->rqueue->next errors
 *              Alan Cox        :       PSH doesn't end a TCP read. Switched a
 *                                      bit to skb ops.
 *              Alan Cox        :       Tidied tcp_data to avoid a potential
 *                                      nasty.
 *              Alan Cox        :       Added some better commenting, as the
 *                                      tcp is hard to follow
 *              Alan Cox        :       Removed incorrect check for 20 * psh
 *      Michael O'Reilly        :       ack < copied bug fix.
 *      Johannes Stille         :       Misc tcp fixes (not all in yet).
 *              Alan Cox        :       FIN with no memory -> CRASH
 *              Alan Cox        :       Added socket option proto entries.
 *                                      Also added awareness of them to accept.
 *              Alan Cox        :       Added TCP options (SOL_TCP)
 *              Alan Cox        :       Switched wakeup calls to callbacks,
 *                                      so the kernel can layer network
 *                                      sockets.
 *              Alan Cox        :       Use ip_tos/ip_ttl settings.
 *              Alan Cox        :       Handle FIN (more) properly (we hope).
 *              Alan Cox        :       RST frames sent on unsynchronised
 *                                      state ack error.
 *              Alan Cox        :       Put in missing check for SYN bit.
 *              Alan Cox        :       Added tcp_select_window() aka NET2E
 *                                      window non shrink trick.
 *              Alan Cox        :       Added a couple of small NET2E timer
 *                                      fixes
 *              Charles Hedrick :       TCP fixes
 *              Toomas Tamm     :       TCP window fixes
 *              Alan Cox        :       Small URG fix to rlogin ^C ack fight
 *              Charles Hedrick :       Rewrote most of it to actually work
 *              Linus           :       Rewrote tcp_read() and URG handling
 *                                      completely
 *              Gerhard Koerting:       Fixed some missing timer handling
 *              Matthew Dillon  :       Reworked TCP machine states as per RFC
 *              Gerhard Koerting:       PC/TCP workarounds
 *              Adam Caldwell   :       Assorted timer/timing errors
 *              Matthew Dillon  :       Fixed another RST bug
 *              Alan Cox        :       Move to kernel side addressing changes.
 *              Alan Cox        :       Beginning work on TCP fastpathing
 *                                      (not yet usable)
 *              Arnt Gulbrandsen:       Turbocharged tcp_check() routine.
 *              Alan Cox        :       TCP fast path debugging
 *              Alan Cox        :       Window clamping
 *              Michael Riepe   :       Bug in tcp_check()
 *              Matt Dillon     :       More TCP improvements and RST bug fixes
 *              Matt Dillon     :       Yet more small nasties removed from the
 *                                      TCP code (Be very nice to this man if
 *                                      tcp finally works 100%) 8)
 *              Alan Cox        :       BSD accept semantics.
 *              Alan Cox        :       Reset on closedown bug.
 *      Peter De Schrijver      :       ENOTCONN check missing in tcp_sendto().
 *              Michael Pall    :       Handle poll() after URG properly in
 *                                      all cases.
 *              Michael Pall    :       Undo the last fix in tcp_read_urg()
 *                                      (multi URG PUSH broke rlogin).
 *              Michael Pall    :       Fix the multi URG PUSH problem in
 *                                      tcp_readable(), poll() after URG
 *                                      works now.
 *              Michael Pall    :       recv(...,MSG_OOB) never blocks in the
 *                                      BSD api.
 *              Alan Cox        :       Changed the semantics of sk->socket to
 *                                      fix a race and a signal problem with
 *                                      accept() and async I/O.
 *              Alan Cox        :       Relaxed the rules on tcp_sendto().
 *              Yury Shevchuk   :       Really fixed accept() blocking problem.
 *              Craig I. Hagan  :       Allow for BSD compatible TIME_WAIT for
 *                                      clients/servers which listen in on
 *                                      fixed ports.
 *              Alan Cox        :       Cleaned the above up and shrank it to
 *                                      a sensible code size.
 *              Alan Cox        :       Self connect lockup fix.
 *              Alan Cox        :       No connect to multicast.
 *              Ross Biro       :       Close unaccepted children on master
 *                                      socket close.
 *              Alan Cox        :       Reset tracing code.
 *              Alan Cox        :       Spurious resets on shutdown.
 *              Alan Cox        :       Giant 15 minute/60 second timer error
 *              Alan Cox        :       Small whoops in polling before an
 *                                      accept.
 *              Alan Cox        :       Kept the state trace facility since
 *                                      it's handy for debugging.
 *              Alan Cox        :       More reset handler fixes.
 *              Alan Cox        :       Started rewriting the code based on
 *                                      the RFC's; for other useful protocol
 *                                      references see: Comer, KA9Q NOS, and
 *                                      for a reference on the difference
 *                                      between specifications and how BSD
 *                                      works see the 4.4lite source.
 *              A.N.Kuznetsov   :       Don't time wait on completion of tidy
 *                                      close.
 *              Linus Torvalds  :       Fin/Shutdown & copied_seq changes.
 *              Linus Torvalds  :       Fixed BSD port reuse to work first syn
 *              Alan Cox        :       Reimplemented timers as per the RFC
 *                                      and using multiple timers for sanity.
 *              Alan Cox        :       Small bug fixes, and a lot of new
 *                                      comments.
 *              Alan Cox        :       Fixed dual reader crash by locking
 *                                      the buffers (much like datagram.c)
 *              Alan Cox        :       Fixed stuck sockets in probe. A probe
 *                                      now gives up retrying when it gets no
 *                                      answer at all (not even a zero-window
 *                                      one).
 *              Alan Cox        :       Extracted closing code better
 *              Alan Cox        :       Fixed the closing state machine to
 *                                      resemble the RFC.
 *              Alan Cox        :       More 'per spec' fixes.
 *              Jorge Cwik      :       Even faster checksumming.
 *              Alan Cox        :       tcp_data() doesn't ack illegal PSH
 *                                      only frames. At least one pc tcp stack
 *                                      generates them.
 *              Alan Cox        :       Cache last socket.
 *              Alan Cox        :       Per route irtt.
 *              Matt Day        :       poll()->select() match BSD precisely on error
 *              Alan Cox        :       New buffers
 *              Marc Tamsky     :       Various sk->prot->retransmits and
 *                                      sk->retransmits misupdating fixed.
 *                                      Fixed tcp_write_timeout: stuck close,
 *                                      and TCP syn retries gets used now.
 *              Mark Yarvis     :       In tcp_read_wakeup(), don't send an
 *                                      ack if state is TCP_CLOSED.
 *              Alan Cox        :       Look up device on a retransmit - routes may
 *                                      change. Doesn't yet cope with MSS shrink right
 *                                      but it's a start!
 *              Marc Tamsky     :       Closing in closing fixes.
 *              Mike Shaver     :       RFC1122 verifications.
 *              Alan Cox        :       rcv_saddr errors.
 *              Alan Cox        :       Block double connect().
 *              Alan Cox        :       Small hooks for enSKIP.
 *              Alexey Kuznetsov:       Path MTU discovery.
 *              Alan Cox        :       Support soft errors.
 *              Alan Cox        :       Fix MTU discovery pathological case
 *                                      when the remote claims no mtu!
 *              Marc Tamsky     :       TCP_CLOSE fix.
 *              Colin (G3TNE)   :       Send a reset on syn ack replies in
 *                                      window but wrong (fixes NT lpd problems)
 *              Pedro Roque     :       Better TCP window handling, delayed ack.
 *              Joerg Reuter    :       No modification of locked buffers in
 *                                      tcp_do_retransmit()
 *              Eric Schenk     :       Changed receiver side silly window
 *                                      avoidance algorithm to BSD style
 *                                      algorithm. This doubles throughput
 *                                      against machines running Solaris,
 *                                      and seems to result in general
 *                                      improvement.
 *      Stefan Magdalinski      :       adjusted tcp_readable() to fix FIONREAD
 *      Willy Konynenberg       :       Transparent proxying support.
 *      Mike McLagan            :       Routing by source
 *              Keith Owens     :       Do proper merging with partial SKB's in
 *                                      tcp_do_sendmsg to avoid burstiness.
 *              Eric Schenk     :       Fix fast close down bug with
 *                                      shutdown() followed by close().
 *              Andi Kleen      :       Make poll agree with SIGIO
 *      Salvatore Sanfilippo    :       Support SO_LINGER with linger == 1 and
 *                                      lingertime == 0 (RFC 793 ABORT Call)
 *      Hirokazu Takahashi      :       Use copy_from_user() instead of
 *                                      csum_and_copy_from_user() if possible.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *      TCP_SYN_SENT            sent a connection request, waiting for ack
 *
 *      TCP_SYN_RECV            received a connection request, sent ack,
 *                              waiting for final ack in three-way handshake.
 *
 *      TCP_ESTABLISHED         connection established
 *
 *      TCP_FIN_WAIT1           our side has shutdown, waiting to complete
 *                              transmission of remaining buffered data
 *
 *      TCP_FIN_WAIT2           all buffered data sent, waiting for remote
 *                              to shutdown
 *
 *      TCP_CLOSING             both sides have shutdown but we still have
 *                              data we have to finish sending
 *
 *      TCP_TIME_WAIT           timeout to catch resent junk before entering
 *                              closed, can only be entered from FIN_WAIT2
 *                              or CLOSING.  Required because the other end
 *                              may not have gotten our last ACK causing it
 *                              to retransmit the data packet (which we ignore)
 *
 *      TCP_CLOSE_WAIT          remote side has shutdown and is waiting for
 *                              us to finish writing our data and to shutdown
 *                              (we have to close() to move on to LAST_ACK)
 *
 *      TCP_LAST_ACK            our side has shutdown after remote has
 *                              shutdown.  There may still be data in our
 *                              buffer that we have to finish sending
 *
 *      TCP_CLOSE               socket is finished
 */
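
/*
 * A typical active close walks ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 ->
 * TIME_WAIT -> CLOSE, while the passive closer walks ESTABLISHED ->
 * CLOSE_WAIT -> LAST_ACK -> CLOSE; a simultaneous close passes through
 * CLOSING instead of FIN_WAIT2.
 */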

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;  /* Current allocated memory. */
atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
        struct pipe_inode_info *pipe;
        size_t len;
        unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
        if (!tcp_memory_pressure) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
                tcp_memory_pressure = 1;
        }
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/*
 *      Wait for a TCP event.
 *
 *      Note that we don't need to lock the socket, as the upper poll layers
 *      take care of normal races (between the test and the event) and we don't
 *      go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;
        struct tcp_sock *tp = tcp_sk(sk);

        poll_wait(file, sk->sk_sleep, wait);
        if (sk->sk_state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);

        /* Socket is not locked. We are protected from async events
         * by poll logic and correct handling of state changes
         * made by other threads is impossible in any case.
         */

        mask = 0;
        if (sk->sk_err)
                mask = POLLERR;

        /*
         * POLLHUP is certainly not done right. But poll() doesn't
         * have a notion of HUP in just one direction, and for a
         * socket the read side is more interesting.
         *
         * Some poll() documentation says that POLLHUP is incompatible
         * with the POLLOUT/POLLWR flags, so somebody should check this
         * all. But careful, it tends to be safer to return too many
         * bits than too few, and you can easily break real applications
         * if you don't tell them that something has hung up!
         *
         * Check-me.
         *
         * Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
         * our fs/select.c). It means that after we received EOF,
         * poll always returns immediately, making it impossible to poll()
         * for write() in state CLOSE_WAIT. One solution is evident --- set
         * POLLHUP if and only if shutdown has been made in both directions.
         * Actually, it is interesting to look at how Solaris and DUX
         * solve this dilemma. I would prefer it if POLLHUP were maskable;
         * then we could set it on SND_SHUTDOWN. BTW the examples given
         * in Stevens' books assume exactly this behaviour, which explains
         * why POLLHUP is incompatible with POLLOUT.    --ANK
         *
         * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
         * blocking on fresh not-connected or disconnected socket. --ANK
         */
        if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLIN | POLLRDNORM | POLLRDHUP;

        /* Connected? */
        if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                /* Potential race condition. If the read of tp below is
                 * reordered above the read of sk->sk_state, we can be
                 * illegally awakened in SYN_* states. */
                if ((tp->rcv_nxt != tp->copied_seq) &&
                    (tp->urg_seq != tp->copied_seq ||
                     tp->rcv_nxt != tp->copied_seq + 1 ||
                     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
                        mask |= POLLIN | POLLRDNORM;

                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else {  /* send SIGIO later */
                                set_bit(SOCK_ASYNC_NOSPACE,
                                        &sk->sk_socket->flags);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                                /* Race breaker. If space is freed after
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost.
                                 */
                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
                }

                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
        }
        return mask;
}

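/*
 *      ioctl(2) handler for TCP sockets: SIOCINQ reports the number of
 *      bytes readable ahead of any urgent mark, SIOCATMARK reports
 *      whether the read pointer sits at the urgent mark, and SIOCOUTQ
 *      reports the number of bytes written but not yet acknowledged.
 */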
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int answ;

        switch (cmd) {
        case SIOCINQ:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                lock_sock(sk);
                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else if (sock_flag(sk, SOCK_URGINLINE) ||
                         !tp->urg_data ||
                         before(tp->urg_seq, tp->copied_seq) ||
                         !before(tp->urg_seq, tp->rcv_nxt)) {
                        answ = tp->rcv_nxt - tp->copied_seq;

                        /* Subtract 1, if FIN is in queue. */
                        if (answ && !skb_queue_empty(&sk->sk_receive_queue))
                                answ -= tcp_hdr((struct sk_buff *)
                                                sk->sk_receive_queue.prev)->fin;
                } else
                        answ = tp->urg_seq - tp->copied_seq;
                release_sock(sk);
                break;
        case SIOCATMARK:
                answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
                break;
        case SIOCOUTQ:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
                        answ = tp->write_seq - tp->snd_una;
                break;
        default:
                return -ENOIOCTLCMD;
        }

        return put_user(answ, (int __user *)arg);
}
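
/*
 * Userspace sketch (illustrative only, not part of this file): SIOCINQ
 * lets a reader poll the unread byte count before issuing a read:
 *
 *      int pending;
 *
 *      if (ioctl(fd, SIOCINQ, &pending) == 0 && pending > 0)
 *              nread = read(fd, buf, sizeof(buf));
 */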

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
        TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
        tp->pushed_seq = tp->write_seq;
}

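/* Push pending data once more than half of the largest window the peer
 * has ever advertised sits beyond the last pushed byte; this bounds how
 * long data can linger unpushed in the write queue.
 */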
static inline int forced_push(struct tcp_sock *tp)
{
        return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

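/* Append a freshly allocated skb to the write queue and charge its
 * truesize to the socket's send-buffer accounting. The skb starts out
 * empty (seq == end_seq == write_seq); copying data in later advances
 * end_seq.
 */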
static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

        skb->csum    = 0;
        tcb->seq     = tcb->end_seq = tp->write_seq;
        tcb->flags   = TCPCB_FLAG_ACK;
        tcb->sacked  = 0;
        skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
        sk->sk_wmem_queued += skb->truesize;
        sk_mem_charge(sk, skb->truesize);
        if (tp->nonagle & TCP_NAGLE_PUSH)
                tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
                                struct sk_buff *skb)
{
        if (flags & MSG_OOB) {
                tp->urg_mode = 1;
                tp->snd_up = tp->write_seq;
        }
}

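/* Flush anything queued beyond the send head: mark the tail segment
 * PSH unless the caller has promised more data (MSG_MORE), record any
 * pending urgent data, and hand the queue to the output engine with
 * the appropriate Nagle setting.
 */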
static inline void tcp_push(struct sock *sk, int flags, int mss_now,
                            int nonagle)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tcp_send_head(sk)) {
                struct sk_buff *skb = tcp_write_queue_tail(sk);

                if (!(flags & MSG_MORE) || forced_push(tp))
                        tcp_mark_push(tp, skb);
                tcp_mark_urg(tp, flags, skb);
                __tcp_push_pending_frames(sk, mss_now,
                                          (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
        }
}

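/* read_descriptor_t actor for tcp_splice_read(): feeds one skb's worth
 * of data into the destination pipe via skb_splice_bits().
 */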
static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
                                unsigned int offset, size_t len)
{
        struct tcp_splice_state *tss = rd_desc->arg.data;

        return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
        /* Store TCP splice context information in read_descriptor_t. */
        read_descriptor_t rd_desc = {
                .arg.data = tss,
        };

        return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 * tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:       socket to splice from
 * @ppos:       position (not valid)
 * @pipe:       pipe to splice to
 * @len:        number of bytes to splice
 * @flags:      splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 */
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                        struct pipe_inode_info *pipe, size_t len,
                        unsigned int flags)
{
        struct sock *sk = sock->sk;
        struct tcp_splice_state tss = {
                .pipe = pipe,
                .len = len,
                .flags = flags,
        };
        long timeo;
        ssize_t spliced;
        int ret;

        /*
         * We can't seek on a socket input
         */
        if (unlikely(*ppos))
                return -ESPIPE;

        ret = spliced = 0;

        lock_sock(sk);

        timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
        while (tss.len) {
                ret = __tcp_splice_read(sk, &tss);
                if (ret < 0)
                        break;
                else if (!ret) {
                        if (spliced)
                                break;
                        if (flags & SPLICE_F_NONBLOCK) {
                                ret = -EAGAIN;
                                break;
                        }
                        if (sock_flag(sk, SOCK_DONE))
                                break;
                        if (sk->sk_err) {
                                ret = sock_error(sk);
                                break;
                        }
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;
                        if (sk->sk_state == TCP_CLOSE) {
                                /*
                                 * This occurs when the user tries to read
                                 * from a socket that was never connected.
                                 */
                                if (!sock_flag(sk, SOCK_DONE))
                                        ret = -ENOTCONN;
                                break;
                        }
                        if (!timeo) {
                                ret = -EAGAIN;
                                break;
                        }
                        sk_wait_data(sk, &timeo);
                        if (signal_pending(current)) {
                                ret = sock_intr_errno(timeo);
                                break;
                        }
                        continue;
                }
                tss.len -= ret;
                spliced += ret;

                release_sock(sk);
                lock_sock(sk);

                if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
                    (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
                    signal_pending(current))
                        break;
        }

        release_sock(sk);

        if (spliced)
                return spliced;

        return ret;
}
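
/*
 * Userspace sketch (illustrative only, not part of this file): moving
 * data from a connected TCP socket into a pipe without a round trip
 * through user memory:
 *
 *      ssize_t n = splice(sock_fd, NULL, pipefd[1], NULL,
 *                         16384, SPLICE_F_MOVE);
 */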
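/* Allocate a send skb with room for the largest possible protocol
 * header, leaving exactly @size bytes of tailroom for the caller.
 * Returns NULL when memory is tight; the allocation-failure path also
 * signals memory pressure and moderates the send buffer.
 */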
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
        struct sk_buff *skb;

        /* The TCP header must be at least 32-bit aligned.  */
        size = ALIGN(size, 4);

        skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
        if (skb) {
                if (sk_wmem_schedule(sk, skb->truesize)) {
                        /*
                         * Make sure that we have exactly size bytes
                         * available to the caller, no more, no less.
                         */
                        skb_reserve(skb, skb_tailroom(skb) - size);
                        return skb;
                }
                __kfree_skb(skb);
        } else {
                sk->sk_prot->enter_memory_pressure(sk);
                sk_stream_moderate_sndbuf(sk);
        }
        return NULL;
}

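/* Core of tcp_sendpage(): attach the caller's pages to the tail of the
 * write queue as paged fragments, coalescing with the previous fragment
 * where possible, so the send path copies no data.
 */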
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
                                size_t psize, int flags)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int mss_now, size_goal;
        int err;
        ssize_t copied;
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
                if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_err;

        clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
        size_goal = tp->xmit_size_goal;
        copied = 0;

        err = -EPIPE;
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto do_error;

        while (psize > 0) {
                struct sk_buff *skb = tcp_write_queue_tail(sk);
                struct page *page = pages[poffset / PAGE_SIZE];
                int copy, i, can_coalesce;
                int offset = poffset % PAGE_SIZE;
                int size = min_t(size_t, psize, PAGE_SIZE - offset);

                if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
                        if (!sk_stream_memory_free(sk))
                                goto wait_for_sndbuf;

                        skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
                        if (!skb)
                                goto wait_for_memory;

                        skb_entail(sk, skb);
                        copy = size_goal;
                }

                if (copy > size)
                        copy = size;

                i = skb_shinfo(skb)->nr_frags;
                can_coalesce = skb_can_coalesce(skb, i, page, offset);
                if (!can_coalesce && i >= MAX_SKB_FRAGS) {
                        tcp_mark_push(tp, skb);
                        goto new_segment;
                }
                if (!sk_wmem_schedule(sk, copy))
                        goto wait_for_memory;

                if (can_coalesce) {
                        skb_shinfo(skb)->frags[i - 1].size += copy;
                } else {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, copy);
                }

                skb->len += copy;
                skb->data_len += copy;
                skb->truesize += copy;
                sk->sk_wmem_queued += copy;
                sk_mem_charge(sk, copy);
                skb->ip_summed = CHECKSUM_PARTIAL;
                tp->write_seq += copy;
                TCP_SKB_CB(skb)->end_seq += copy;
                skb_shinfo(skb)->gso_segs = 0;

                if (!copied)
                        TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

                copied += copy;
                poffset += copy;
                if (!(psize -= copy))
                        goto out;

                if (skb->len < size_goal || (flags & MSG_OOB))
                        continue;

                if (forced_push(tp)) {
                        tcp_mark_push(tp, skb);
                        __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
                } else if (skb == tcp_send_head(sk))
                        tcp_push_one(sk, mss_now);
                continue;

wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                if (copied)
                        tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

                if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
                        goto do_error;

                mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
                size_goal = tp->xmit_size_goal;
        }

out:
        if (copied)
                tcp_push(sk, flags, mss_now, tp->nonagle);
        return copied;

do_error:
        if (copied)
                goto out;
out_err:
        return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
                     size_t size, int flags)
{
        ssize_t res;
        struct sock *sk = sock->sk;

        if (!(sk->sk_route_caps & NETIF_F_SG) ||
            !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
                return sock_no_sendpage(sock, page, offset, size, flags);

        lock_sock(sk);
        TCP_CHECK_TIMER(sk);
        res = do_tcp_sendpages(sk, &page, offset, size, flags);
        TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return res;
}

#define TCP_PAGE(sk)    (sk->sk_sndmsg_page)
#define TCP_OFF(sk)     (sk->sk_sndmsg_off)

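/* Choose how much linear (non-paged) room to ask for in a new send skb:
 * none when GSO is available (data goes straight to page fragments),
 * otherwise up to one cached MSS, clipped to the largest head that
 * still fits a single page when the MSS would spill slightly past it.
 */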
static inline int select_size(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int tmp = tp->mss_cache;

        if (sk->sk_route_caps & NETIF_F_SG) {
                if (sk_can_gso(sk))
                        tmp = 0;
                else {
                        int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

                        if (tmp >= pgbreak &&
                            tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
                                tmp = pgbreak;
                }
        }

        return tmp;
}

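/* Copy data from the user's iovecs into the write queue, filling the
 * skb linear area first and page fragments after it, pushing segments
 * out as they reach size_goal (one MSS, or larger when TSO is in use).
 */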
int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                size_t size)
{
        struct sock *sk = sock->sk;
        struct iovec *iov;
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        int iovlen, flags;
        int mss_now, size_goal;
        int err, copied;
        long timeo;

        lock_sock(sk);
        TCP_CHECK_TIMER(sk);

        flags = msg->msg_flags;
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
                if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_err;

        /* This should be in poll */
        clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
        size_goal = tp->xmit_size_goal;

        /* Ok commence sending. */
        iovlen = msg->msg_iovlen;
        iov = msg->msg_iov;
        copied = 0;

        err = -EPIPE;
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto do_error;

        while (--iovlen >= 0) {
                int seglen = iov->iov_len;
                unsigned char __user *from = iov->iov_base;

                iov++;

                while (seglen > 0) {
                        int copy;

                        skb = tcp_write_queue_tail(sk);

                        if (!tcp_send_head(sk) ||
                            (copy = size_goal - skb->len) <= 0) {

new_segment:
                                /* Allocate new segment. If the interface is SG,
                                 * allocate skb fitting to single page.
                                 */
                                if (!sk_stream_memory_free(sk))
                                        goto wait_for_sndbuf;

                                skb = sk_stream_alloc_skb(sk, select_size(sk),
                                                sk->sk_allocation);
                                if (!skb)
                                        goto wait_for_memory;

                                /*
                                 * Check whether we can use HW checksum.
                                 */
                                if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
                                        skb->ip_summed = CHECKSUM_PARTIAL;

                                skb_entail(sk, skb);
                                copy = size_goal;
                        }

                        /* Try to append data to the end of skb. */
                        if (copy > seglen)
                                copy = seglen;

                        /* Where to copy to? */
                        if (skb_tailroom(skb) > 0) {
                                /* We have some space in skb head. Superb! */
                                if (copy > skb_tailroom(skb))
                                        copy = skb_tailroom(skb);
                                if ((err = skb_add_data(skb, from, copy)) != 0)
                                        goto do_fault;
                        } else {
                                int merge = 0;
                                int i = skb_shinfo(skb)->nr_frags;
                                struct page *page = TCP_PAGE(sk);
                                int off = TCP_OFF(sk);

                                if (skb_can_coalesce(skb, i, page, off) &&
                                    off != PAGE_SIZE) {
                                        /* We can extend the last page
                                         * fragment. */
                                        merge = 1;
                                } else if (i == MAX_SKB_FRAGS ||
                                           (!i &&
                                           !(sk->sk_route_caps & NETIF_F_SG))) {
                                        /* Need to add new fragment and cannot
                                         * do this because interface is non-SG,
                                         * or because all the page slots are
                                         * busy. */
                                        tcp_mark_push(tp, skb);
                                        goto new_segment;
                                } else if (page) {
                                        if (off == PAGE_SIZE) {
                                                put_page(page);
                                                TCP_PAGE(sk) = page = NULL;
                                                off = 0;
                                        }
                                } else
                                        off = 0;

                                if (copy > PAGE_SIZE - off)
                                        copy = PAGE_SIZE - off;

                                if (!sk_wmem_schedule(sk, copy))
                                        goto wait_for_memory;

                                if (!page) {
                                        /* Allocate new cache page. */
                                        if (!(page = sk_stream_alloc_page(sk)))
                                                goto wait_for_memory;
                                }

                                /* Time to copy data. We are close to
                                 * the end! */
                                err = skb_copy_to_page(sk, from, skb, page,
                                                       off, copy);
                                if (err) {
                                        /* If this page was new, give it to the
                                         * socket so it does not get leaked.
                                         */
                                        if (!TCP_PAGE(sk)) {
                                                TCP_PAGE(sk) = page;
                                                TCP_OFF(sk) = 0;
                                        }
                                        goto do_error;
                                }

                                /* Update the skb. */
                                if (merge) {
                                        skb_shinfo(skb)->frags[i - 1].size +=
                                                                        copy;
                                } else {
                                        skb_fill_page_desc(skb, i, page, off, copy);
                                        if (TCP_PAGE(sk)) {
                                                get_page(page);
                                        } else if (off + copy < PAGE_SIZE) {
                                                get_page(page);
                                                TCP_PAGE(sk) = page;
                                        }
                                }

                                TCP_OFF(sk) = off + copy;
                        }

                        if (!copied)
                                TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

                        tp->write_seq += copy;
                        TCP_SKB_CB(skb)->end_seq += copy;
                        skb_shinfo(skb)->gso_segs = 0;

                        from += copy;
                        copied += copy;
                        if ((seglen -= copy) == 0 && iovlen == 0)
                                goto out;

                        if (skb->len < size_goal || (flags & MSG_OOB))
                                continue;

                        if (forced_push(tp)) {
                                tcp_mark_push(tp, skb);
                                __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
                        } else if (skb == tcp_send_head(sk))
                                tcp_push_one(sk, mss_now);
                        continue;

wait_for_sndbuf:
                        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                        if (copied)
                                tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

                        if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
                                goto do_error;

                        mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
                        size_goal = tp->xmit_size_goal;
                }
        }

out:
        if (copied)
                tcp_push(sk, flags, mss_now, tp->nonagle);
        TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return copied;

do_fault:
        if (!skb->len) {
                tcp_unlink_write_queue(skb, sk);
                /* It is the one place in all of TCP, except connection
                 * reset, where we can be unlinking the send_head.
                 */
                tcp_check_send_head(sk, skb);
                sk_wmem_free_skb(sk, skb);
        }

do_error:
        if (copied)
                goto out;
out_err:
        err = sk_stream_error(sk, flags, err);
        TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return err;
}

/*
 *      Handle reading urgent data. BSD has very simple semantics for
 *      this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
                        struct msghdr *msg, int len, int flags,
                        int *addr_len)
{
        struct tcp_sock *tp = tcp_sk(sk);

        /* No URG data to read. */
        if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
            tp->urg_data == TCP_URG_READ)
                return -EINVAL; /* Yes this is right! */

        if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
                return -ENOTCONN;

        if (tp->urg_data & TCP_URG_VALID) {
                int err = 0;
                char c = tp->urg_data;

                if (!(flags & MSG_PEEK))
                        tp->urg_data = TCP_URG_READ;

                /* Read urgent data. */
                msg->msg_flags |= MSG_OOB;

                if (len > 0) {
                        if (!(flags & MSG_TRUNC))
                                err = memcpy_toiovec(msg->msg_iov, &c, 1);
                        len = 1;
                } else
                        msg->msg_flags |= MSG_TRUNC;

                return err ? -EFAULT : len;
        }

        if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
                return 0;

        /* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
         * the available implementations agree in this case:
         * this call should never block, independent of the
         * blocking state of the socket.
         * Mike <pall@rz.uni-karlsruhe.de>
         */
        return -EAGAIN;
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far; it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int time_to_ack = 0;

#if TCP_DEBUG
        struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

        BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

        if (inet_csk_ack_scheduled(sk)) {
                const struct inet_connection_sock *icsk = inet_csk(sk);

                /* Delayed ACKs frequently hit locked sockets during bulk
                 * receive. */
                if (icsk->icsk_ack.blocked ||
                    /* Once-per-two-segments ACK was not sent by tcp_input.c */
                    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
                    /*
                     * If this read emptied the read buffer, we send an ACK
                     * when the connection is not bidirectional: the user
                     * drained the receive buffer and there was a small
                     * segment in the queue.
                     */
                    (copied > 0 &&
                     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
                      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
                       !icsk->icsk_ack.pingpong)) &&
                      !atomic_read(&sk->sk_rmem_alloc)))
                        time_to_ack = 1;
        }

        /* We send an ACK if we can now advertise a non-zero window
         * which has been raised "significantly".
         *
         * Even if window raised up to infinity, do not send window open ACK
         * in states, where we will not receive more. It is useless.
         */
        if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
                __u32 rcv_window_now = tcp_receive_window(tp);

                /* Optimize, __tcp_select_window() is not cheap. */
                if (2 * rcv_window_now <= tp->window_clamp) {
                        __u32 new_window = __tcp_select_window(sk);

                        /* Send an ACK now if this read freed lots of space
                         * in our buffer. new_window is the window we would
                         * advertise now; we may do so as long as it is not
                         * smaller than the current one. "Lots" means "at
                         * least twice" here.
                         */
                        if (new_window && new_window >= 2 * rcv_window_now)
                                time_to_ack = 1;
                }
        }
        if (time_to_ack)
                tcp_send_ack(sk);
}

static void tcp_prequeue_process(struct sock *sk)
{
        struct sk_buff *skb;
        struct tcp_sock *tp = tcp_sk(sk);

        NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);

        /* RX process wants to run with disabled BHs, though it is not
         * necessary */
        local_bh_disable();
        while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                sk->sk_backlog_rcv(sk, skb);
        local_bh_enable();

        /* Clear memory counter. */
        tp->ucopy.memory = 0;
}

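/* Find the skb in the receive queue containing sequence number @seq
 * and return via @off the offset of @seq within it. A SYN occupies a
 * sequence number but carries no data, hence the offset adjustment.
 */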
1167 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1168 {
1169         struct sk_buff *skb;
1170         u32 offset;
1171
1172         skb_queue_walk(&sk->sk_receive_queue, skb) {
1173                 offset = seq - TCP_SKB_CB(skb)->seq;
1174                 if (tcp_hdr(skb)->syn)
1175                         offset--;
1176                 if (offset < skb->len || tcp_hdr(skb)->fin) {
1177                         *off = offset;
1178                         return skb;
1179                 }
1180         }
1181         return NULL;
1182 }
1183
1184 /*
1185  * This routine provides an alternative to tcp_recvmsg() for routines
1186  * that would like to handle copying from skbuffs directly in 'sendfile'
1187  * fashion.
1188  * Note:
1189  *      - It is assumed that the socket was locked by the caller.
1190  *      - The routine does not block.
1191  *      - At present, there is no support for reading OOB data
1192  *        or for 'peeking' the socket using this routine
1193  *        (although both would be easy to implement).
1194  */
1195 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1196                   sk_read_actor_t recv_actor)
1197 {
1198         struct sk_buff *skb;
1199         struct tcp_sock *tp = tcp_sk(sk);
1200         u32 seq = tp->copied_seq;
1201         u32 offset;
1202         int copied = 0;
1203
1204         if (sk->sk_state == TCP_LISTEN)
1205                 return -ENOTCONN;
1206         while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1207                 if (offset < skb->len) {
1208                         int used;
1209                         size_t len;
1210
1211                         len = skb->len - offset;
1212                         /* Stop reading if we hit a patch of urgent data */
1213                         if (tp->urg_data) {
1214                                 u32 urg_offset = tp->urg_seq - seq;
1215                                 if (urg_offset < len)
1216                                         len = urg_offset;
1217                                 if (!len)
1218                                         break;
1219                         }
1220                         used = recv_actor(desc, skb, offset, len);
1221                         if (used < 0) {
1222                                 if (!copied)
1223                                         copied = used;
1224                                 break;
1225                         } else if (used <= len) {
1226                                 seq += used;
1227                                 copied += used;
1228                                 offset += used;
1229                         }
1230                         /*
1231                          * If recv_actor drops the lock (e.g. TCP splice
1232                          * receive) the skb pointer might be invalid when
1233                          * getting here: tcp_collapse might have deleted it
1234                          * while aggregating skbs from the socket queue.
1235                          */
1236                         skb = tcp_recv_skb(sk, seq - 1, &offset);
1237                         if (!skb || (offset + 1 != skb->len))
1238                                 break;
1239                 }
1240                 if (tcp_hdr(skb)->fin) {
1241                         sk_eat_skb(sk, skb, 0);
1242                         ++seq;
1243                         break;
1244                 }
1245                 sk_eat_skb(sk, skb, 0);
1246                 if (!desc->count)
1247                         break;
1248         }
1249         tp->copied_seq = seq;
1250
1251         tcp_rcv_space_adjust(sk);
1252
1253         /* Clean up data we have read: This will do ACK frames. */
1254         if (copied > 0)
1255                 tcp_cleanup_rbuf(sk, copied);
1256         return copied;
1257 }
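
/*
 * For illustration only (not part of the kernel): a minimal recv_actor
 * for tcp_read_sock(), showing the contract it expects.  The name
 * count_actor is hypothetical:
 *
 *	static int count_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *			       unsigned int offset, size_t len)
 *	{
 *		size_t want = min_t(size_t, len, desc->count);
 *
 *		desc->count -= want;	(tcp_read_sock stops when it hits 0)
 *		return want;		(number of bytes consumed this call)
 *	}
 *
 * With the socket already locked, a caller would then do e.g.
 *	read_descriptor_t desc = { .count = budget };
 *	copied = tcp_read_sock(sk, &desc, count_actor);
 */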
1258
1259 /*
1260  *      This routine copies from a sock struct into the user buffer.
1261  *
1262  *      Technical note: in 2.3 we work on _locked_ socket, so that
1263  *      tricks with *seq access order and skb->users are not required.
1264  *      Probably, the code can easily be improved even more.
1265  */
1266
1267 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1268                 size_t len, int nonblock, int flags, int *addr_len)
1269 {
1270         struct tcp_sock *tp = tcp_sk(sk);
1271         int copied = 0;
1272         u32 peek_seq;
1273         u32 *seq;
1274         unsigned long used;
1275         int err;
1276         int target;             /* Read at least this many bytes */
1277         long timeo;
1278         struct task_struct *user_recv = NULL;
1279         int copied_early = 0;
1280         struct sk_buff *skb;
1281
1282         lock_sock(sk);
1283
1284         TCP_CHECK_TIMER(sk);
1285
1286         err = -ENOTCONN;
1287         if (sk->sk_state == TCP_LISTEN)
1288                 goto out;
1289
1290         timeo = sock_rcvtimeo(sk, nonblock);
1291
1292         /* Urgent data needs to be handled specially. */
1293         if (flags & MSG_OOB)
1294                 goto recv_urg;
1295
1296         seq = &tp->copied_seq;
1297         if (flags & MSG_PEEK) {
1298                 peek_seq = tp->copied_seq;
1299                 seq = &peek_seq;
1300         }
1301
1302         target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1303
1304 #ifdef CONFIG_NET_DMA
1305         tp->ucopy.dma_chan = NULL;
1306         preempt_disable();
1307         skb = skb_peek_tail(&sk->sk_receive_queue);
1308         {
1309                 int available = 0;
1310
1311                 if (skb)
1312                         available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1313                 if ((available < target) &&
1314                     (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1315                     !sysctl_tcp_low_latency &&
1316                     __get_cpu_var(softnet_data).net_dma) {
1317                         preempt_enable_no_resched();
1318                         tp->ucopy.pinned_list =
1319                                         dma_pin_iovec_pages(msg->msg_iov, len);
1320                 } else {
1321                         preempt_enable_no_resched();
1322                 }
1323         }
1324 #endif
1325
1326         do {
1327                 u32 offset;
1328
1329                 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1330                 if (tp->urg_data && tp->urg_seq == *seq) {
1331                         if (copied)
1332                                 break;
1333                         if (signal_pending(current)) {
1334                                 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1335                                 break;
1336                         }
1337                 }
1338
1339                 /* Next get a buffer. */
1340
1341                 skb = skb_peek(&sk->sk_receive_queue);
1342                 do {
1343                         if (!skb)
1344                                 break;
1345
1346                         /* Now that we have two receive queues this
1347                          * shouldn't happen.
1348                          */
1349                         if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1350                                 printk(KERN_INFO "recvmsg bug: copied %X "
1351                                        "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1352                                 break;
1353                         }
1354                         offset = *seq - TCP_SKB_CB(skb)->seq;
1355                         if (tcp_hdr(skb)->syn)
1356                                 offset--;
1357                         if (offset < skb->len)
1358                                 goto found_ok_skb;
1359                         if (tcp_hdr(skb)->fin)
1360                                 goto found_fin_ok;
1361                         BUG_TRAP(flags & MSG_PEEK);
1362                         skb = skb->next;
1363                 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1364
1365                 /* Well, if we have backlog, try to process it now. */
1366
1367                 if (copied >= target && !sk->sk_backlog.tail)
1368                         break;
1369
1370                 if (copied) {
1371                         if (sk->sk_err ||
1372                             sk->sk_state == TCP_CLOSE ||
1373                             (sk->sk_shutdown & RCV_SHUTDOWN) ||
1374                             !timeo ||
1375                             signal_pending(current) ||
1376                             (flags & MSG_PEEK))
1377                                 break;
1378                 } else {
1379                         if (sock_flag(sk, SOCK_DONE))
1380                                 break;
1381
1382                         if (sk->sk_err) {
1383                                 copied = sock_error(sk);
1384                                 break;
1385                         }
1386
1387                         if (sk->sk_shutdown & RCV_SHUTDOWN)
1388                                 break;
1389
1390                         if (sk->sk_state == TCP_CLOSE) {
1391                                 if (!sock_flag(sk, SOCK_DONE)) {
1392                                         /* This occurs when the user tries
1393                                          * to read from a never-connected socket.
1394                                          */
1395                                         copied = -ENOTCONN;
1396                                         break;
1397                                 }
1398                                 break;
1399                         }
1400
1401                         if (!timeo) {
1402                                 copied = -EAGAIN;
1403                                 break;
1404                         }
1405
1406                         if (signal_pending(current)) {
1407                                 copied = sock_intr_errno(timeo);
1408                                 break;
1409                         }
1410                 }
1411
1412                 tcp_cleanup_rbuf(sk, copied);
1413
1414                 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1415                         /* Install new reader */
1416                         if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1417                                 user_recv = current;
1418                                 tp->ucopy.task = user_recv;
1419                                 tp->ucopy.iov = msg->msg_iov;
1420                         }
1421
1422                         tp->ucopy.len = len;
1423
1424                         BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1425                                  (flags & (MSG_PEEK | MSG_TRUNC)));
1426
1427                         /* Ugly... If prequeue is not empty, we have to
1428                          * process it before releasing the socket, otherwise
1429                          * the order will be broken at the second iteration.
1430                          * A more elegant solution is required!!!
1431                          *
1432                          * Look: we have the following (pseudo)queues:
1433                          *
1434                          * 1. packets in flight
1435                          * 2. backlog
1436                          * 3. prequeue
1437                          * 4. receive_queue
1438                          *
1439                          * Each queue can be processed only if the next ones
1440                          * are empty. At this point we have empty receive_queue.
1441                          * But the prequeue _can_ be non-empty after the 2nd
1442                          * iteration, when we jumped to the start of the loop
1443                          * because backlog processing added something to the
1444                          * receive_queue. We cannot release_sock(): the backlog
1445                          * contains packets that arrived _after_ the prequeued ones.
1446                          *
1447                          * In short, the algorithm is clear: process all the
1448                          * queues in order. We could do it more directly,
1449                          * requeueing packets from the backlog to the prequeue
1450                          * if it is not empty. That is more elegant, but it
1451                          * eats cycles, unfortunately.
1452                          */
1453                         if (!skb_queue_empty(&tp->ucopy.prequeue))
1454                                 goto do_prequeue;
1455
1456                         /* __ Set realtime policy in scheduler __ */
1457                 }
1458
1459                 if (copied >= target) {
1460                         /* Do not sleep, just process backlog. */
1461                         release_sock(sk);
1462                         lock_sock(sk);
1463                 } else
1464                         sk_wait_data(sk, &timeo);
1465
1466 #ifdef CONFIG_NET_DMA
1467                 tp->ucopy.wakeup = 0;
1468 #endif
1469
1470                 if (user_recv) {
1471                         int chunk;
1472
1473                         /* __ Restore normal policy in scheduler __ */
1474
1475                         if ((chunk = len - tp->ucopy.len) != 0) {
1476                                 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1477                                 len -= chunk;
1478                                 copied += chunk;
1479                         }
1480
1481                         if (tp->rcv_nxt == tp->copied_seq &&
1482                             !skb_queue_empty(&tp->ucopy.prequeue)) {
1483 do_prequeue:
1484                                 tcp_prequeue_process(sk);
1485
1486                                 if ((chunk = len - tp->ucopy.len) != 0) {
1487                                         NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1488                                         len -= chunk;
1489                                         copied += chunk;
1490                                 }
1491                         }
1492                 }
1493                 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1494                         if (net_ratelimit())
1495                                 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1496                                        current->comm, task_pid_nr(current));
1497                         peek_seq = tp->copied_seq;
1498                 }
1499                 continue;
1500
1501         found_ok_skb:
1502                 /* Ok so how much can we use? */
1503                 used = skb->len - offset;
1504                 if (len < used)
1505                         used = len;
1506
1507                 /* Do we have urgent data here? */
1508                 if (tp->urg_data) {
1509                         u32 urg_offset = tp->urg_seq - *seq;
1510                         if (urg_offset < used) {
1511                                 if (!urg_offset) {
1512                                         if (!sock_flag(sk, SOCK_URGINLINE)) {
1513                                                 ++*seq;
1514                                                 offset++;
1515                                                 used--;
1516                                                 if (!used)
1517                                                         goto skip_copy;
1518                                         }
1519                                 } else
1520                                         used = urg_offset;
1521                         }
1522                 }
1523
1524                 if (!(flags & MSG_TRUNC)) {
1525 #ifdef CONFIG_NET_DMA
1526                         if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1527                                 tp->ucopy.dma_chan = get_softnet_dma();
1528
1529                         if (tp->ucopy.dma_chan) {
1530                                 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1531                                         tp->ucopy.dma_chan, skb, offset,
1532                                         msg->msg_iov, used,
1533                                         tp->ucopy.pinned_list);
1534
1535                                 if (tp->ucopy.dma_cookie < 0) {
1536
1537                                         printk(KERN_ALERT "dma_cookie < 0\n");
1538
1539                                         /* Exception. Bailout! */
1540                                         if (!copied)
1541                                                 copied = -EFAULT;
1542                                         break;
1543                                 }
1544                                 if ((offset + used) == skb->len)
1545                                         copied_early = 1;
1546
1547                         } else
1548 #endif
1549                         {
1550                                 err = skb_copy_datagram_iovec(skb, offset,
1551                                                 msg->msg_iov, used);
1552                                 if (err) {
1553                                         /* Exception. Bailout! */
1554                                         if (!copied)
1555                                                 copied = -EFAULT;
1556                                         break;
1557                                 }
1558                         }
1559                 }
1560
1561                 *seq += used;
1562                 copied += used;
1563                 len -= used;
1564
1565                 tcp_rcv_space_adjust(sk);
1566
1567 skip_copy:
1568                 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1569                         tp->urg_data = 0;
1570                         tcp_fast_path_check(sk);
1571                 }
1572                 if (used + offset < skb->len)
1573                         continue;
1574
1575                 if (tcp_hdr(skb)->fin)
1576                         goto found_fin_ok;
1577                 if (!(flags & MSG_PEEK)) {
1578                         sk_eat_skb(sk, skb, copied_early);
1579                         copied_early = 0;
1580                 }
1581                 continue;
1582
1583         found_fin_ok:
1584                 /* Process the FIN. */
1585                 ++*seq;
1586                 if (!(flags & MSG_PEEK)) {
1587                         sk_eat_skb(sk, skb, copied_early);
1588                         copied_early = 0;
1589                 }
1590                 break;
1591         } while (len > 0);
1592
1593         if (user_recv) {
1594                 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1595                         int chunk;
1596
1597                         tp->ucopy.len = copied > 0 ? len : 0;
1598
1599                         tcp_prequeue_process(sk);
1600
1601                         if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1602                                 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1603                                 len -= chunk;
1604                                 copied += chunk;
1605                         }
1606                 }
1607
1608                 tp->ucopy.task = NULL;
1609                 tp->ucopy.len = 0;
1610         }
1611
1612 #ifdef CONFIG_NET_DMA
1613         if (tp->ucopy.dma_chan) {
1614                 dma_cookie_t done, used;
1615
1616                 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1617
1618                 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1619                                                  tp->ucopy.dma_cookie, &done,
1620                                                  &used) == DMA_IN_PROGRESS) {
1621                         /* do partial cleanup of sk_async_wait_queue */
1622                         while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1623                                (dma_async_is_complete(skb->dma_cookie, done,
1624                                                       used) == DMA_SUCCESS)) {
1625                                 __skb_dequeue(&sk->sk_async_wait_queue);
1626                                 kfree_skb(skb);
1627                         }
1628                 }
1629
1630                 /* Safe to free early-copied skbs now */
1631                 __skb_queue_purge(&sk->sk_async_wait_queue);
1632                 dma_chan_put(tp->ucopy.dma_chan);
1633                 tp->ucopy.dma_chan = NULL;
1634         }
1635         if (tp->ucopy.pinned_list) {
1636                 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1637                 tp->ucopy.pinned_list = NULL;
1638         }
1639 #endif
1640
1641         /* According to UNIX98, msg_name/msg_namelen are ignored
1642          * on a connected socket. I was just happy when I found this 8) --ANK
1643          */
1644
1645         /* Clean up data we have read: This will do ACK frames. */
1646         tcp_cleanup_rbuf(sk, copied);
1647
1648         TCP_CHECK_TIMER(sk);
1649         release_sock(sk);
1650         return copied;
1651
1652 out:
1653         TCP_CHECK_TIMER(sk);
1654         release_sock(sk);
1655         return err;
1656
1657 recv_urg:
1658         err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1659         goto out;
1660 }
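
/*
 * For illustration only: the userspace view of MSG_PEEK as handled
 * above.  A peek copies data but advances the private peek_seq rather
 * than tp->copied_seq, so a following plain recv() normally returns
 * the same bytes:
 *
 *	char buf[128];
 *	ssize_t a = recv(fd, buf, sizeof(buf), MSG_PEEK);
 *	ssize_t b = recv(fd, buf, sizeof(buf), 0);
 *	(assuming no urgent data intervened, both calls saw the same data)
 */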
1661
1662 void tcp_set_state(struct sock *sk, int state)
1663 {
1664         int oldstate = sk->sk_state;
1665
1666         switch (state) {
1667         case TCP_ESTABLISHED:
1668                 if (oldstate != TCP_ESTABLISHED)
1669                         TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1670                 break;
1671
1672         case TCP_CLOSE:
1673                 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1674                         TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
1675
1676                 sk->sk_prot->unhash(sk);
1677                 if (inet_csk(sk)->icsk_bind_hash &&
1678                     !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1679                         inet_put_port(sk);
1680                 /* fall through */
1681         default:
1682                 if (oldstate == TCP_ESTABLISHED)
1683                         TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1684         }
1685
1686         /* Change state AFTER socket is unhashed to avoid closed
1687          * socket sitting in hash tables.
1688          */
1689         sk->sk_state = state;
1690
1691 #ifdef STATE_TRACE
1692         SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
1693 #endif
1694 }
1695 EXPORT_SYMBOL_GPL(tcp_set_state);
1696
1697 /*
1698  *      State processing on a close. This implements the state shift for
1699  *      sending our FIN frame. Note that we only send a FIN for some
1700  *      states. A shutdown() may have already sent the FIN, or we may be
1701  *      closed.
1702  */
1703
1704 static const unsigned char new_state[16] = {
1705   /* current state:        new state:      action:      */
1706   /* (Invalid)          */ TCP_CLOSE,
1707   /* TCP_ESTABLISHED    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1708   /* TCP_SYN_SENT       */ TCP_CLOSE,
1709   /* TCP_SYN_RECV       */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1710   /* TCP_FIN_WAIT1      */ TCP_FIN_WAIT1,
1711   /* TCP_FIN_WAIT2      */ TCP_FIN_WAIT2,
1712   /* TCP_TIME_WAIT      */ TCP_CLOSE,
1713   /* TCP_CLOSE          */ TCP_CLOSE,
1714   /* TCP_CLOSE_WAIT     */ TCP_LAST_ACK  | TCP_ACTION_FIN,
1715   /* TCP_LAST_ACK       */ TCP_LAST_ACK,
1716   /* TCP_LISTEN         */ TCP_CLOSE,
1717   /* TCP_CLOSING        */ TCP_CLOSING,
1718 };
1719
1720 static int tcp_close_state(struct sock *sk)
1721 {
1722         int next = (int)new_state[sk->sk_state];
1723         int ns = next & TCP_STATE_MASK;
1724
1725         tcp_set_state(sk, ns);
1726
1727         return next & TCP_ACTION_FIN;
1728 }
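
/*
 * For example: tcp_close_state() on an ESTABLISHED socket looks up
 * new_state[TCP_ESTABLISHED] == (TCP_FIN_WAIT1 | TCP_ACTION_FIN), so
 * the socket moves to FIN-WAIT-1 and the caller is told to send a FIN,
 * while a SYN_SENT socket simply drops to TCP_CLOSE with no FIN.
 */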
1729
1730 /*
1731  *      Shutdown the sending side of a connection. Much like close except
1732  *      that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD).
1733  */
1734
1735 void tcp_shutdown(struct sock *sk, int how)
1736 {
1737         /*      We need to grab some memory, and put together a FIN,
1738          *      and then put it into the queue to be sent.
1739          *              Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1740          */
1741         if (!(how & SEND_SHUTDOWN))
1742                 return;
1743
1744         /* If we've already sent a FIN, or it's a closed state, skip this. */
1745         if ((1 << sk->sk_state) &
1746             (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1747              TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1748                 /* Clear out any half-completed packets.  FIN if needed. */
1749                 if (tcp_close_state(sk))
1750                         tcp_send_fin(sk);
1751         }
1752 }
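
/*
 * For illustration only: userspace reaches this via
 *
 *	shutdown(fd, SHUT_WR);
 *
 * which inet_shutdown() passes down as tcp_shutdown(sk, SEND_SHUTDOWN);
 * on an established connection this queues a FIN while leaving the
 * receive side open.
 */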
1753
1754 void tcp_close(struct sock *sk, long timeout)
1755 {
1756         struct sk_buff *skb;
1757         int data_was_unread = 0;
1758         int state;
1759
1760         lock_sock(sk);
1761         sk->sk_shutdown = SHUTDOWN_MASK;
1762
1763         if (sk->sk_state == TCP_LISTEN) {
1764                 tcp_set_state(sk, TCP_CLOSE);
1765
1766                 /* Special case. */
1767                 inet_csk_listen_stop(sk);
1768
1769                 goto adjudge_to_death;
1770         }
1771
1772         /*  We need to flush the recv. buffs.  We do this only on the
1773          *  descriptor close, not protocol-sourced closes, because the
1774          *  reader process may not have drained the data yet!
1775          */
1776         while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1777                 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1778                           tcp_hdr(skb)->fin;
1779                 data_was_unread += len;
1780                 __kfree_skb(skb);
1781         }
1782
1783         sk_mem_reclaim(sk);
1784
1785         /* As outlined in RFC 2525, section 2.17, we send a RST here because
1786          * data was lost. To witness the awful effects of the old behavior of
1787          * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
1788          * GET in an FTP client, suspend the process, wait for the client to
1789          * advertise a zero window, then kill -9 the FTP client, wheee...
1790          * Note: timeout is always zero in such a case.
1791          */
1792         if (data_was_unread) {
1793                 /* Unread data was tossed, zap the connection. */
1794                 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
1795                 tcp_set_state(sk, TCP_CLOSE);
1796                 tcp_send_active_reset(sk, GFP_KERNEL);
1797         } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1798                 /* Check zero linger _after_ checking for unread data. */
1799                 sk->sk_prot->disconnect(sk, 0);
1800                 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
1801         } else if (tcp_close_state(sk)) {
1802                 /* We FIN if the application ate all the data before
1803                  * zapping the connection.
1804                  */
1805
1806                 /* RED-PEN. Formally speaking, we have broken TCP state
1807                  * machine. State transitions:
1808                  *
1809                  * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1810                  * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1811                  * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1812                  *
1813                  * are legal only when FIN has been sent (i.e. in window),
1814                  * rather than queued out of window. Purists blame.
1815                  *
1816                  * E.g. the "RFC state" is ESTABLISHED if the Linux state
1817                  * is FIN-WAIT-1 but the FIN has still not been sent.
1818                  *
1819                  * The visible deviations are that we sometimes enter the
1820                  * time-wait state when it is not really required (harmless),
1821                  * and do not send active resets when the specs require them
1822                  * (TCP_ESTABLISHED and TCP_CLOSE_WAIT, which to Linux look
1823                  * like CLOSING or LAST_ACK).
1824                  * Probably, I missed some more holes.
1825                  *                                              --ANK
1826                  */
1827                 tcp_send_fin(sk);
1828         }
1829
1830         sk_stream_wait_close(sk, timeout);
1831
1832 adjudge_to_death:
1833         state = sk->sk_state;
1834         sock_hold(sk);
1835         sock_orphan(sk);
1836         atomic_inc(sk->sk_prot->orphan_count);
1837
1838         /* It is the last release_sock in its life. It will remove backlog. */
1839         release_sock(sk);
1840
1841
1842         /* Now the socket is owned by the kernel and we acquire the BH
1843          * lock to finish the close. No need to check for user refs.
1844          */
1845         local_bh_disable();
1846         bh_lock_sock(sk);
1847         BUG_TRAP(!sock_owned_by_user(sk));
1848
1849         /* Have we already been destroyed by a softirq or backlog? */
1850         if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1851                 goto out;
1852
1853         /*      This is a (useful) BSD violation of the RFC. There is a
1854          *      problem with TCP as specified in that the other end could
1855          *      keep a socket open forever with no application left this end.
1856          *      We use a 3 minute timeout (about the same as BSD) then kill
1857          *      our end. If they send after that then tough - BUT it is long
1858          *      enough that we do not repeat the old "4*rto = almost no
1859          *      time - whoops, reset" mistake.
1860          *
1861          *      Nope, it was not a mistake. It is really the desired
1862          *      behaviour, e.g. on http servers, where such sockets are
1863          *      useless but consume significant resources. Let's do it
1864          *      with the special linger2 option.                --ANK
1865          */
1866
1867         if (sk->sk_state == TCP_FIN_WAIT2) {
1868                 struct tcp_sock *tp = tcp_sk(sk);
1869                 if (tp->linger2 < 0) {
1870                         tcp_set_state(sk, TCP_CLOSE);
1871                         tcp_send_active_reset(sk, GFP_ATOMIC);
1872                         NET_INC_STATS_BH(sock_net(sk),
1873                                         LINUX_MIB_TCPABORTONLINGER);
1874                 } else {
1875                         const int tmo = tcp_fin_time(sk);
1876
1877                         if (tmo > TCP_TIMEWAIT_LEN) {
1878                                 inet_csk_reset_keepalive_timer(sk,
1879                                                 tmo - TCP_TIMEWAIT_LEN);
1880                         } else {
1881                                 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1882                                 goto out;
1883                         }
1884                 }
1885         }
1886         if (sk->sk_state != TCP_CLOSE) {
1887                 sk_mem_reclaim(sk);
1888                 if (tcp_too_many_orphans(sk,
1889                                 atomic_read(sk->sk_prot->orphan_count))) {
1890                         if (net_ratelimit())
1891                                 printk(KERN_INFO "TCP: too many orphaned "
1892                                        "sockets\n");
1893                         tcp_set_state(sk, TCP_CLOSE);
1894                         tcp_send_active_reset(sk, GFP_ATOMIC);
1895                         NET_INC_STATS_BH(sock_net(sk),
1896                                         LINUX_MIB_TCPABORTONMEMORY);
1897                 }
1898         }
1899
1900         if (sk->sk_state == TCP_CLOSE)
1901                 inet_csk_destroy_sock(sk);
1902         /* Otherwise, socket is reprieved until protocol close. */
1903
1904 out:
1905         bh_unlock_sock(sk);
1906         local_bh_enable();
1907         sock_put(sk);
1908 }
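
/*
 * For illustration only: how the paths above look from userspace.  A
 * close() with unread data aborts with a RST (RFC 2525, 2.17); a
 * zero-linger close disconnects and also sends a RST, skipping
 * TIME-WAIT; otherwise the normal FIN handshake starts:
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(fd);
 */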
1909
1910 /* These states need RST on ABORT according to RFC793 */
1911
1912 static inline int tcp_need_reset(int state)
1913 {
1914         return (1 << state) &
1915                (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1916                 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1917 }
1918
1919 int tcp_disconnect(struct sock *sk, int flags)
1920 {
1921         struct inet_sock *inet = inet_sk(sk);
1922         struct inet_connection_sock *icsk = inet_csk(sk);
1923         struct tcp_sock *tp = tcp_sk(sk);
1924         int err = 0;
1925         int old_state = sk->sk_state;
1926
1927         if (old_state != TCP_CLOSE)
1928                 tcp_set_state(sk, TCP_CLOSE);
1929
1930         /* ABORT function of RFC793 */
1931         if (old_state == TCP_LISTEN) {
1932                 inet_csk_listen_stop(sk);
1933         } else if (tcp_need_reset(old_state) ||
1934                    (tp->snd_nxt != tp->write_seq &&
1935                     (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1936                 /* The last check adjusts for the discrepancy between Linux
1937                  * and the RFC states.
1938                  */
1939                 tcp_send_active_reset(sk, gfp_any());
1940                 sk->sk_err = ECONNRESET;
1941         } else if (old_state == TCP_SYN_SENT)
1942                 sk->sk_err = ECONNRESET;
1943
1944         tcp_clear_xmit_timers(sk);
1945         __skb_queue_purge(&sk->sk_receive_queue);
1946         tcp_write_queue_purge(sk);
1947         __skb_queue_purge(&tp->out_of_order_queue);
1948 #ifdef CONFIG_NET_DMA
1949         __skb_queue_purge(&sk->sk_async_wait_queue);
1950 #endif
1951
1952         inet->dport = 0;
1953
1954         if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1955                 inet_reset_saddr(sk);
1956
1957         sk->sk_shutdown = 0;
1958         sock_reset_flag(sk, SOCK_DONE);
1959         tp->srtt = 0;
1960         if ((tp->write_seq += tp->max_window + 2) == 0)
1961                 tp->write_seq = 1;
1962         icsk->icsk_backoff = 0;
1963         tp->snd_cwnd = 2;
1964         icsk->icsk_probes_out = 0;
1965         tp->packets_out = 0;
1966         tp->snd_ssthresh = 0x7fffffff;
1967         tp->snd_cwnd_cnt = 0;
1968         tp->bytes_acked = 0;
1969         tcp_set_ca_state(sk, TCP_CA_Open);
1970         tcp_clear_retrans(tp);
1971         inet_csk_delack_init(sk);
1972         tcp_init_send_head(sk);
1973         memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
1974         __sk_dst_reset(sk);
1975
1976         BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
1977
1978         sk->sk_error_report(sk);
1979         return err;
1980 }
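
/*
 * For illustration only: userspace triggers tcp_disconnect() by
 * "connecting" to AF_UNSPEC, which resets the socket to a clean
 * TCP_CLOSE state so it can be reused:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));
 */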
1981
1982 /*
1983  *      Socket option code for TCP.
1984  */
1985 static int do_tcp_setsockopt(struct sock *sk, int level,
1986                 int optname, char __user *optval, int optlen)
1987 {
1988         struct tcp_sock *tp = tcp_sk(sk);
1989         struct inet_connection_sock *icsk = inet_csk(sk);
1990         int val;
1991         int err = 0;
1992
1993         /* This is a string value; all the others are ints. */
1994         if (optname == TCP_CONGESTION) {
1995                 char name[TCP_CA_NAME_MAX];
1996
1997                 if (optlen < 1)
1998                         return -EINVAL;
1999
2000                 val = strncpy_from_user(name, optval,
2001                                         min(TCP_CA_NAME_MAX-1, optlen));
2002                 if (val < 0)
2003                         return -EFAULT;
2004                 name[val] = 0;
2005
2006                 lock_sock(sk);
2007                 err = tcp_set_congestion_control(sk, name);
2008                 release_sock(sk);
2009                 return err;
2010         }
2011
2012         if (optlen < sizeof(int))
2013                 return -EINVAL;
2014
2015         if (get_user(val, (int __user *)optval))
2016                 return -EFAULT;
2017
2018         lock_sock(sk);
2019
2020         switch (optname) {
2021         case TCP_MAXSEG:
2022                 /* Values greater than the interface MTU won't take effect.
2023                  * However, at the point when this call is made we typically
2024                  * don't yet know which interface is going to be used. */
2025                 if (val < 8 || val > MAX_TCP_WINDOW) {
2026                         err = -EINVAL;
2027                         break;
2028                 }
2029                 tp->rx_opt.user_mss = val;
2030                 break;
2031
2032         case TCP_NODELAY:
2033                 if (val) {
2034                         /* TCP_NODELAY is weaker than TCP_CORK, so that
2035                          * this option on corked socket is remembered, but
2036                          * it is not activated until cork is cleared.
2037                          *
2038                          * However, when TCP_NODELAY is set we make
2039                          * an explicit push, which overrides even TCP_CORK
2040                          * for currently queued segments.
2041                          */
2042                         tp->nonagle |= TCP_NAGLE_OFF | TCP_NAGLE_PUSH;
2043                         tcp_push_pending_frames(sk);
2044                 } else {
2045                         tp->nonagle &= ~TCP_NAGLE_OFF;
2046                 }
2047                 break;
2048
2049         case TCP_CORK:
2050                 /* When set, non-full frames are queued rather than sent.
2051                  * Later the user clears this option and we transmit
2052                  * any pending partial frames in the queue.  This is
2053                  * meant to be used alongside sendfile() to get properly
2054                  * filled frames when the user (for example) must write
2055                  * out headers with a write() call first and then use
2056                  * sendfile to send out the data parts.
2057                  *
2058                  * TCP_CORK can be set together with TCP_NODELAY and it is
2059                  * stronger than TCP_NODELAY.
2060                  */
2061                 if (val) {
2062                         tp->nonagle |= TCP_NAGLE_CORK;
2063                 } else {
2064                         tp->nonagle &= ~TCP_NAGLE_CORK;
2065                         if (tp->nonagle & TCP_NAGLE_OFF)
2066                                 tp->nonagle |= TCP_NAGLE_PUSH;
2067                         tcp_push_pending_frames(sk);
2068                 }
2069                 break;
2070
2071         case TCP_KEEPIDLE:
2072                 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2073                         err = -EINVAL;
2074                 else {
2075                         tp->keepalive_time = val * HZ;
2076                         if (sock_flag(sk, SOCK_KEEPOPEN) &&
2077                             !((1 << sk->sk_state) &
2078                               (TCPF_CLOSE | TCPF_LISTEN))) {
2079                                 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
2080                                 if (tp->keepalive_time > elapsed)
2081                                         elapsed = tp->keepalive_time - elapsed;
2082                                 else
2083                                         elapsed = 0;
2084                                 inet_csk_reset_keepalive_timer(sk, elapsed);
2085                         }
2086                 }
2087                 break;
2088         case TCP_KEEPINTVL:
2089                 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2090                         err = -EINVAL;
2091                 else
2092                         tp->keepalive_intvl = val * HZ;
2093                 break;
2094         case TCP_KEEPCNT:
2095                 if (val < 1 || val > MAX_TCP_KEEPCNT)
2096                         err = -EINVAL;
2097                 else
2098                         tp->keepalive_probes = val;
2099                 break;
2100         case TCP_SYNCNT:
2101                 if (val < 1 || val > MAX_TCP_SYNCNT)
2102                         err = -EINVAL;
2103                 else
2104                         icsk->icsk_syn_retries = val;
2105                 break;
2106
2107         case TCP_LINGER2:
2108                 if (val < 0)
2109                         tp->linger2 = -1;
2110                 else if (val > sysctl_tcp_fin_timeout / HZ)
2111                         tp->linger2 = 0;
2112                 else
2113                         tp->linger2 = val * HZ;
2114                 break;
2115
2116         case TCP_DEFER_ACCEPT:
2117                 icsk->icsk_accept_queue.rskq_defer_accept = 0;
2118                 if (val > 0) {
2119                         /* Translate value in seconds to number of
2120                          * retransmits */
2121                         while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
2122                                val > ((TCP_TIMEOUT_INIT / HZ) <<
2123                                        icsk->icsk_accept_queue.rskq_defer_accept))
2124                                 icsk->icsk_accept_queue.rskq_defer_accept++;
2125                         icsk->icsk_accept_queue.rskq_defer_accept++;
2126                 }
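                /* Worked example: with TCP_TIMEOUT_INIT/HZ == 3, val == 30
                 * walks the boundaries 3, 6, 12, 24 (rskq_defer_accept
                 * 1..4), stops before 48, and the final increment stores 5;
                 * getsockopt maps that back to (3 << 4) == 48 seconds.
                 */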
2127                 break;
2128
2129         case TCP_WINDOW_CLAMP:
2130                 if (!val) {
2131                         if (sk->sk_state != TCP_CLOSE) {
2132                                 err = -EINVAL;
2133                                 break;
2134                         }
2135                         tp->window_clamp = 0;
2136                 } else
2137                         tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2138                                                 SOCK_MIN_RCVBUF / 2 : val;
2139                 break;
2140
2141         case TCP_QUICKACK:
2142                 if (!val) {
2143                         icsk->icsk_ack.pingpong = 1;
2144                 } else {
2145                         icsk->icsk_ack.pingpong = 0;
2146                         if ((1 << sk->sk_state) &
2147                             (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2148                             inet_csk_ack_scheduled(sk)) {
2149                                 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2150                                 tcp_cleanup_rbuf(sk, 1);
2151                                 if (!(val & 1))
2152                                         icsk->icsk_ack.pingpong = 1;
2153                         }
2154                 }
2155                 break;
2156
2157 #ifdef CONFIG_TCP_MD5SIG
2158         case TCP_MD5SIG:
2159                 /* Read the IP->Key mappings from userspace */
2160                 err = tp->af_specific->md5_parse(sk, optval, optlen);
2161                 break;
2162 #endif
2163
2164         default:
2165                 err = -ENOPROTOOPT;
2166                 break;
2167         }
2168
2169         release_sock(sk);
2170         return err;
2171 }
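
/*
 * For illustration only: typical userspace use of the options handled
 * above ("cubic" assumes that congestion control module is available):
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic",
 *		   strlen("cubic"));
 */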
2172
2173 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2174                    int optlen)
2175 {
2176         struct inet_connection_sock *icsk = inet_csk(sk);
2177
2178         if (level != SOL_TCP)
2179                 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2180                                                      optval, optlen);
2181         return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2182 }
2183
2184 #ifdef CONFIG_COMPAT
2185 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2186                           char __user *optval, int optlen)
2187 {
2188         if (level != SOL_TCP)
2189                 return inet_csk_compat_setsockopt(sk, level, optname,
2190                                                   optval, optlen);
2191         return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2192 }
2193
2194 EXPORT_SYMBOL(compat_tcp_setsockopt);
2195 #endif
2196
2197 /* Return information about the state of a tcp endpoint in API format. */
2198 void tcp_get_info(struct sock *sk, struct tcp_info *info)
2199 {
2200         struct tcp_sock *tp = tcp_sk(sk);
2201         const struct inet_connection_sock *icsk = inet_csk(sk);
2202         u32 now = tcp_time_stamp;
2203
2204         memset(info, 0, sizeof(*info));
2205
2206         info->tcpi_state = sk->sk_state;
2207         info->tcpi_ca_state = icsk->icsk_ca_state;
2208         info->tcpi_retransmits = icsk->icsk_retransmits;
2209         info->tcpi_probes = icsk->icsk_probes_out;
2210         info->tcpi_backoff = icsk->icsk_backoff;
2211
2212         if (tp->rx_opt.tstamp_ok)
2213                 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2214         if (tcp_is_sack(tp))
2215                 info->tcpi_options |= TCPI_OPT_SACK;
2216         if (tp->rx_opt.wscale_ok) {
2217                 info->tcpi_options |= TCPI_OPT_WSCALE;
2218                 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2219                 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2220         }
2221
2222         if (tp->ecn_flags & TCP_ECN_OK)
2223                 info->tcpi_options |= TCPI_OPT_ECN;
2224
2225         info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2226         info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2227         info->tcpi_snd_mss = tp->mss_cache;
2228         info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2229
2230         if (sk->sk_state == TCP_LISTEN) {
2231                 info->tcpi_unacked = sk->sk_ack_backlog;
2232                 info->tcpi_sacked = sk->sk_max_ack_backlog;
2233         } else {
2234                 info->tcpi_unacked = tp->packets_out;
2235                 info->tcpi_sacked = tp->sacked_out;
2236         }
2237         info->tcpi_lost = tp->lost_out;
2238         info->tcpi_retrans = tp->retrans_out;
2239         info->tcpi_fackets = tp->fackets_out;
2240
2241         info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2242         info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2243         info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2244
2245         info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2246         info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2247         info->tcpi_rtt = jiffies_to_usecs(tp->srtt) >> 3;
2248         info->tcpi_rttvar = jiffies_to_usecs(tp->mdev) >> 2;
2249         info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2250         info->tcpi_snd_cwnd = tp->snd_cwnd;
2251         info->tcpi_advmss = tp->advmss;
2252         info->tcpi_reordering = tp->reordering;
2253
2254         info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt) >> 3;
2255         info->tcpi_rcv_space = tp->rcvq_space.space;
2256
2257         info->tcpi_total_retrans = tp->total_retrans;
2258 }
2259
2260 EXPORT_SYMBOL_GPL(tcp_get_info);
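
/*
 * For illustration only: userspace retrieves this structure with
 *
 *	struct tcp_info info;
 *	socklen_t len = sizeof(info);
 *	getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len);
 *
 * and can then read e.g. info.tcpi_rtt (smoothed RTT in usec) or
 * info.tcpi_snd_cwnd (congestion window in segments).
 */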
2261
2262 static int do_tcp_getsockopt(struct sock *sk, int level,
2263                 int optname, char __user *optval, int __user *optlen)
2264 {
2265         struct inet_connection_sock *icsk = inet_csk(sk);
2266         struct tcp_sock *tp = tcp_sk(sk);
2267         int val, len;
2268
2269         if (get_user(len, optlen))
2270                 return -EFAULT;
2271
2272         len = min_t(unsigned int, len, sizeof(int));
2273
2274         if (len < 0)
2275                 return -EINVAL;
2276
2277         switch (optname) {
2278         case TCP_MAXSEG:
2279                 val = tp->mss_cache;
2280                 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2281                         val = tp->rx_opt.user_mss;
2282                 break;
2283         case TCP_NODELAY:
2284                 val = !!(tp->nonagle & TCP_NAGLE_OFF);
2285                 break;
2286         case TCP_CORK:
2287                 val = !!(tp->nonagle & TCP_NAGLE_CORK);
2288                 break;
2289         case TCP_KEEPIDLE:
2290                 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2291                 break;
2292         case TCP_KEEPINTVL:
2293                 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2294                 break;
2295         case TCP_KEEPCNT:
2296                 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2297                 break;
2298         case TCP_SYNCNT:
2299                 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2300                 break;
2301         case TCP_LINGER2:
2302                 val = tp->linger2;
2303                 if (val >= 0)
2304                         val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2305                 break;
2306         case TCP_DEFER_ACCEPT:
2307                 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2308                         ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
2309                 break;
2310         case TCP_WINDOW_CLAMP:
2311                 val = tp->window_clamp;
2312                 break;
2313         case TCP_INFO: {
2314                 struct tcp_info info;
2315
2316                 if (get_user(len, optlen))
2317                         return -EFAULT;
2318
2319                 tcp_get_info(sk, &info);
2320
2321                 len = min_t(unsigned int, len, sizeof(info));
2322                 if (put_user(len, optlen))
2323                         return -EFAULT;
2324                 if (copy_to_user(optval, &info, len))
2325                         return -EFAULT;
2326                 return 0;
2327         }
2328         case TCP_QUICKACK:
2329                 val = !icsk->icsk_ack.pingpong;
2330                 break;
2331
2332         case TCP_CONGESTION:
2333                 if (get_user(len, optlen))
2334                         return -EFAULT;
2335                 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2336                 if (put_user(len, optlen))
2337                         return -EFAULT;
2338                 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2339                         return -EFAULT;
2340                 return 0;
2341         default:
2342                 return -ENOPROTOOPT;
2343         }
2344
2345         if (put_user(len, optlen))
2346                 return -EFAULT;
2347         if (copy_to_user(optval, &val, len))
2348                 return -EFAULT;
2349         return 0;
2350 }
2351
2352 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2353                    int __user *optlen)
2354 {
2355         struct inet_connection_sock *icsk = inet_csk(sk);
2356
2357         if (level != SOL_TCP)
2358                 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2359                                                      optval, optlen);
2360         return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2361 }
2362
2363 #ifdef CONFIG_COMPAT
2364 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2365                           char __user *optval, int __user *optlen)
2366 {
2367         if (level != SOL_TCP)
2368                 return inet_csk_compat_getsockopt(sk, level, optname,
2369                                                   optval, optlen);
2370         return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2371 }
2372
2373 EXPORT_SYMBOL(compat_tcp_getsockopt);
2374 #endif
2375
2376 struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
2377 {
2378         struct sk_buff *segs = ERR_PTR(-EINVAL);
2379         struct tcphdr *th;
2380         unsigned thlen;
2381         unsigned int seq;
2382         __be32 delta;
2383         unsigned int oldlen;
2384         unsigned int len;
2385
2386         if (!pskb_may_pull(skb, sizeof(*th)))
2387                 goto out;
2388
2389         th = tcp_hdr(skb);
2390         thlen = th->doff * 4;
2391         if (thlen < sizeof(*th))
2392                 goto out;
2393
2394         if (!pskb_may_pull(skb, thlen))
2395                 goto out;
2396
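        /* Fold the complement of the old total length into 16 bits;
         * adding it together with the new per-segment length below
         * updates each TCP checksum incrementally (RFC 1624 style)
         * instead of recomputing it from scratch.
         */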
2397         oldlen = (u16)~skb->len;
2398         __skb_pull(skb, thlen);
2399
2400         if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2401                 /* Packet is from an untrusted source, reset gso_segs. */
2402                 int type = skb_shinfo(skb)->gso_type;
2403                 int mss;
2404
2405                 if (unlikely(type &
2406                              ~(SKB_GSO_TCPV4 |
2407                                SKB_GSO_DODGY |
2408                                SKB_GSO_TCP_ECN |
2409                                SKB_GSO_TCPV6 |
2410                                0) ||
2411                              !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2412                         goto out;
2413
2414                 mss = skb_shinfo(skb)->gso_size;
2415                 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
2416
2417                 segs = NULL;
2418                 goto out;
2419         }
2420
2421         segs = skb_segment(skb, features);
2422         if (IS_ERR(segs))
2423                 goto out;
2424
2425         len = skb_shinfo(skb)->gso_size;
2426         delta = htonl(oldlen + (thlen + len));
2427
2428         skb = segs;
2429         th = tcp_hdr(skb);
2430         seq = ntohl(th->seq);
2431
2432         do {
2433                 th->fin = th->psh = 0;
2434
2435                 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2436                                        (__force u32)delta));
2437                 if (skb->ip_summed != CHECKSUM_PARTIAL)
2438                         th->check =
2439                              csum_fold(csum_partial(skb_transport_header(skb),
2440                                                     thlen, skb->csum));
2441
2442                 seq += len;
2443                 skb = skb->next;
2444                 th = tcp_hdr(skb);
2445
2446                 th->seq = htonl(seq);
2447                 th->cwr = 0;
2448         } while (skb->next);
2449
2450         delta = htonl(oldlen + (skb->tail - skb->transport_header) +
2451                       skb->data_len);
2452         th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2453                                 (__force u32)delta));
2454         if (skb->ip_summed != CHECKSUM_PARTIAL)
2455                 th->check = csum_fold(csum_partial(skb_transport_header(skb),
2456                                                    thlen, skb->csum));
2457
2458 out:
2459         return segs;
2460 }
2461 EXPORT_SYMBOL(tcp_tso_segment);
2462
2463 #ifdef CONFIG_TCP_MD5SIG
2464 static unsigned long tcp_md5sig_users;
2465 static struct tcp_md5sig_pool **tcp_md5sig_pool;
2466 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2467
2468 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2469 {
2470         int cpu;
2471         for_each_possible_cpu(cpu) {
2472                 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2473                 if (p) {
2474                         if (p->md5_desc.tfm)
2475                                 crypto_free_hash(p->md5_desc.tfm);
2476                         kfree(p);
2477                         p = NULL;
2478                 }
2479         }
2480         free_percpu(pool);
2481 }
2482
2483 void tcp_free_md5sig_pool(void)
2484 {
2485         struct tcp_md5sig_pool **pool = NULL;
2486
2487         spin_lock_bh(&tcp_md5sig_pool_lock);
2488         if (--tcp_md5sig_users == 0) {
2489                 pool = tcp_md5sig_pool;
2490                 tcp_md5sig_pool = NULL;
2491         }
2492         spin_unlock_bh(&tcp_md5sig_pool_lock);
2493         if (pool)
2494                 __tcp_free_md5sig_pool(pool);
2495 }
2496
2497 EXPORT_SYMBOL(tcp_free_md5sig_pool);
2498
2499 static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
2500 {
2501         int cpu;
2502         struct tcp_md5sig_pool **pool;
2503
2504         pool = alloc_percpu(struct tcp_md5sig_pool *);
2505         if (!pool)
2506                 return NULL;
2507
2508         for_each_possible_cpu(cpu) {
2509                 struct tcp_md5sig_pool *p;
2510                 struct crypto_hash *hash;
2511
2512                 p = kzalloc(sizeof(*p), GFP_KERNEL);
2513                 if (!p)
2514                         goto out_free;
2515                 *per_cpu_ptr(pool, cpu) = p;
2516
2517                 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2518                 if (!hash || IS_ERR(hash))
2519                         goto out_free;
2520
2521                 p->md5_desc.tfm = hash;
2522         }
2523         return pool;
2524 out_free:
2525         __tcp_free_md5sig_pool(pool);
2526         return NULL;
2527 }
2528
2529 struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
2530 {
2531         struct tcp_md5sig_pool **pool;
2532         int alloc = 0;
2533
2534 retry:
2535         spin_lock_bh(&tcp_md5sig_pool_lock);
2536         pool = tcp_md5sig_pool;
2537         if (tcp_md5sig_users++ == 0) {
2538                 alloc = 1;
2539                 spin_unlock_bh(&tcp_md5sig_pool_lock);
2540         } else if (!pool) {
2541                 tcp_md5sig_users--;
2542                 spin_unlock_bh(&tcp_md5sig_pool_lock);
2543                 cpu_relax();
2544                 goto retry;
2545         } else
2546                 spin_unlock_bh(&tcp_md5sig_pool_lock);
2547
2548         if (alloc) {
2549                 /* we cannot hold the spinlock here because this may sleep. */
2550                 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
2551                 spin_lock_bh(&tcp_md5sig_pool_lock);
2552                 if (!p) {
2553                         tcp_md5sig_users--;
2554                         spin_unlock_bh(&tcp_md5sig_pool_lock);
2555                         return NULL;
2556                 }
2557                 pool = tcp_md5sig_pool;
2558                 if (pool) {
2559                         /* oops, it has already been assigned. */
2560                         spin_unlock_bh(&tcp_md5sig_pool_lock);
2561                         __tcp_free_md5sig_pool(p);
2562                 } else {
2563                         tcp_md5sig_pool = pool = p;
2564                         spin_unlock_bh(&tcp_md5sig_pool_lock);
2565                 }
2566         }
2567         return pool;
2568 }
2569
2570 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2571
2572 struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2573 {
2574         struct tcp_md5sig_pool **p;
2575         spin_lock_bh(&tcp_md5sig_pool_lock);
2576         p = tcp_md5sig_pool;
2577         if (p)
2578                 tcp_md5sig_users++;
2579         spin_unlock_bh(&tcp_md5sig_pool_lock);
2580         return (p ? *per_cpu_ptr(p, cpu) : NULL);
2581 }
2582
2583 EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2584
2585 void __tcp_put_md5sig_pool(void)
2586 {
2587         tcp_free_md5sig_pool();
2588 }
2589
2590 EXPORT_SYMBOL(__tcp_put_md5sig_pool);
2591
2592 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
2593                         struct tcphdr *th)
2594 {
2595         struct scatterlist sg;
2596         int err;
2597
2598         __sum16 old_checksum = th->check;
2599         th->check = 0;
2600         /* options aren't included in the hash */
2601         sg_init_one(&sg, th, sizeof(struct tcphdr));
2602         err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
2603         th->check = old_checksum;
2604         return err;
2605 }
2606
2607 EXPORT_SYMBOL(tcp_md5_hash_header);
2608
2609 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
2610                           struct sk_buff *skb, unsigned header_len)
2611 {
2612         struct scatterlist sg;
2613         const struct tcphdr *tp = tcp_hdr(skb);
2614         struct hash_desc *desc = &hp->md5_desc;
2615         unsigned int i;
2616         const unsigned int head_data_len = skb_headlen(skb) > header_len ?
2617                                            skb_headlen(skb) - header_len : 0;
2618         const struct skb_shared_info *shi = skb_shinfo(skb);
2619
2620         sg_init_table(&sg, 1);
2621
2622         sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
2623         if (crypto_hash_update(desc, &sg, head_data_len))
2624                 return 1;
2625
2626         for (i = 0; i < shi->nr_frags; ++i) {
2627                 const struct skb_frag_struct *f = &shi->frags[i];
2628                 sg_set_page(&sg, f->page, f->size, f->page_offset);
2629                 if (crypto_hash_update(desc, &sg, f->size))
2630                         return 1;
2631         }
2632
2633         return 0;
2634 }
2635
2636 EXPORT_SYMBOL(tcp_md5_hash_skb_data);
2637
2638 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
2639 {
2640         struct scatterlist sg;
2641
2642         sg_init_one(&sg, key->key, key->keylen);
2643         return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
2644 }
2645
2646 EXPORT_SYMBOL(tcp_md5_hash_key);
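/*
 * The three helpers above are building blocks; a signer chains them
 * around one pool descriptor, roughly as the af-specific callers (e.g.
 * tcp_v4_md5_hash_skb()) do.  Variable names here are illustrative, and
 * the pseudo-header step is per-family code not defined in this file:
 *
 *	hp = tcp_get_md5sig_pool();
 *	if (!hp)
 *		goto err;
 *	if (crypto_hash_init(&hp->md5_desc))
 *		goto clear;
 *	... hash the saddr/daddr/len pseudo-header ...
 *	if (tcp_md5_hash_header(hp, th))
 *		goto clear;
 *	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 *		goto clear;
 *	if (tcp_md5_hash_key(hp, key))
 *		goto clear;
 *	if (crypto_hash_final(&hp->md5_desc, md5_hash))
 *		goto clear;
 *	tcp_put_md5sig_pool();
 */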
2647
2648 #endif
2649
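/*
 * tcp_done - last stop for a socket: count a failed connection attempt
 * if the handshake never completed, move the socket to TCP_CLOSE, stop
 * its timers, then either wake the owner (live socket) or destroy the
 * socket outright (orphan).
 */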
2650 void tcp_done(struct sock *sk)
2651 {
2652         if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
2653                 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
2654
2655         tcp_set_state(sk, TCP_CLOSE);
2656         tcp_clear_xmit_timers(sk);
2657
2658         sk->sk_shutdown = SHUTDOWN_MASK;
2659
2660         if (!sock_flag(sk, SOCK_DEAD))
2661                 sk->sk_state_change(sk);
2662         else
2663                 inet_csk_destroy_sock(sk);
2664 }
2665 EXPORT_SYMBOL_GPL(tcp_done);
2666
2667 extern struct tcp_congestion_ops tcp_reno;
2668
2669 static __initdata unsigned long thash_entries;
2670 static int __init set_thash_entries(char *str)
2671 {
2672         if (!str)
2673                 return 0;
2674         thash_entries = simple_strtoul(str, &str, 0);
2675         return 1;
2676 }
2677 __setup("thash_entries=", set_thash_entries);
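/*
 * E.g. booting with "thash_entries=2048" sizes the established hash at
 * 2048 buckets (alloc_large_system_hash() rounds to a power of two);
 * left at 0, the size is derived from available memory, capped at
 * 512*1024 entries by the last argument passed in tcp_init() below.
 */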
2678
2679 void __init tcp_init(void)
2680 {
2681         struct sk_buff *skb = NULL;
2682         unsigned long nr_pages, limit;
2683         int order, i, max_share;
2684
2685         BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
2686
2687         tcp_hashinfo.bind_bucket_cachep =
2688                 kmem_cache_create("tcp_bind_bucket",
2689                                   sizeof(struct inet_bind_bucket), 0,
2690                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2691
2692         /* Size and allocate the main established and bind bucket
2693          * hash tables.
2694          *
2695          * The methodology is similar to that of the buffer cache.
2696          */
2697         tcp_hashinfo.ehash =
2698                 alloc_large_system_hash("TCP established",
2699                                         sizeof(struct inet_ehash_bucket),
2700                                         thash_entries,
2701                                         (num_physpages >= 128 * 1024) ?
2702                                         13 : 15,
2703                                         0,
2704                                         &tcp_hashinfo.ehash_size,
2705                                         NULL,
2706                                         thash_entries ? 0 : 512 * 1024);
2707         tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
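        /* alloc_large_system_hash() returns the log2 of the table size
         * through its size pointer; the line above converts that shift
         * into an entry count, as the bhash path does further down.
         */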
2708         for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
2709                 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
2710                 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
2711         }
2712         if (inet_ehash_locks_alloc(&tcp_hashinfo))
2713                 panic("TCP: failed to alloc ehash_locks");
2714         tcp_hashinfo.bhash =
2715                 alloc_large_system_hash("TCP bind",
2716                                         sizeof(struct inet_bind_hashbucket),
2717                                         tcp_hashinfo.ehash_size,
2718                                         (num_physpages >= 128 * 1024) ?
2719                                         13 : 15,
2720                                         0,
2721                                         &tcp_hashinfo.bhash_size,
2722                                         NULL,
2723                                         64 * 1024);
2724         tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2725         for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2726                 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2727                 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
2728         }
2729
2730         /* Scale the remaining defaults to the machine, using the page
2731          * order of the bind table (sized from memory) as the yardstick.
2732          */
2733         for (order = 0; ((1 << order) << PAGE_SHIFT) <
2734                         (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
2735                         order++)
2736                 ;
2737         if (order >= 4) {
2738                 tcp_death_row.sysctl_max_tw_buckets = 180000;
2739                 sysctl_tcp_max_orphans = 4096 << (order - 4);
2740                 sysctl_max_syn_backlog = 1024;
2741         } else if (order < 3) {
2742                 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
2743                 sysctl_tcp_max_orphans >>= (3 - order);
2744                 sysctl_max_syn_backlog = 128;
2745         }
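        /* For scale, assuming 4 KB pages and 16-byte buckets (both vary
         * by configuration): the loop above reaches order 4 once the
         * bind table outgrows 8 pages, i.e. at more than 2048 buckets.
         */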
2746
2747         /* Set the pressure threshold to a fraction of global memory that
2748          * scales linearly with the amount of low memory, reaching a cap
2749          * of 1/2 at 256 MB and above, with a floor of 128 pages.
2750          */
2751         nr_pages = totalram_pages - totalhigh_pages;
2752         limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
2753         limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
2754         limit = max(limit, 128UL);
2755         sysctl_tcp_mem[0] = limit / 4 * 3;
2756         sysctl_tcp_mem[1] = limit;
2757         sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
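        /* Worked example, assuming 4 KB pages and 1 GB of low memory:
         * nr_pages = 262144, limit = min(262144, 65536) >> 8 = 256, then
         * limit = (256 * (262144 >> 8)) >> 1 = 131072 pages (512 MB),
         * giving tcp_mem = { 98304, 131072, 196608 } pages.
         */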
2758
2759         /* Set per-socket limits to no more than 1/128 the pressure threshold */
2760         limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
2761         max_share = min(4UL*1024*1024, limit);
2762
2763         sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
2764         sysctl_tcp_wmem[1] = 16*1024;
2765         sysctl_tcp_wmem[2] = max(64*1024, max_share);
2766
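        /* 87380 is the traditional receive default: a 65535-byte
         * advertised window plus a one-third allowance for bookkeeping
         * overhead, 65535 * 4 / 3 = 87380.
         */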
2767         sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
2768         sysctl_tcp_rmem[1] = 87380;
2769         sysctl_tcp_rmem[2] = max(87380, max_share);
2770
2771         printk(KERN_INFO "TCP: Hash tables configured "
2772                "(established %d bind %d)\n",
2773                tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
2774
2775         tcp_register_congestion_control(&tcp_reno);
2776 }
2777
2778 EXPORT_SYMBOL(tcp_close);
2779 EXPORT_SYMBOL(tcp_disconnect);
2780 EXPORT_SYMBOL(tcp_getsockopt);
2781 EXPORT_SYMBOL(tcp_ioctl);
2782 EXPORT_SYMBOL(tcp_poll);
2783 EXPORT_SYMBOL(tcp_read_sock);
2784 EXPORT_SYMBOL(tcp_recvmsg);
2785 EXPORT_SYMBOL(tcp_sendmsg);
2786 EXPORT_SYMBOL(tcp_splice_read);
2787 EXPORT_SYMBOL(tcp_sendpage);
2788 EXPORT_SYMBOL(tcp_setsockopt);
2789 EXPORT_SYMBOL(tcp_shutdown);