2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
24 * Changes: Pedro Roque : Retransmit queue handled by TCP.
25 * : Fragmentation on mtu decrease
26 * : Segment collapse on retransmit
29 * Linus Torvalds : send_delayed_ack
30 * David S. Miller : Charge memory using the right skb
31 * during syn/ack processing.
32 * David S. Miller : Output engine completely rewritten.
33 * Andrea Arcangeli: SYNACK carry ts_recent in tsecr.
34 * Cacophonix Gaul : draft-minshall-nagle-01
35 * J Hadi Salim : ECN support
41 #include <linux/compiler.h>
42 #include <linux/module.h>
43 #include <linux/smp_lock.h>
45 /* People can turn this off for buggy TCP's found in printers etc. */
46 int sysctl_tcp_retrans_collapse __read_mostly = 1;
48 /* People can turn this on to work with those rare, broken TCPs that
49 * interpret the window field as a signed quantity.
51 int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
53 /* This limits the percentage of the congestion window which we
54 * will allow a single TSO frame to consume. Building TSO frames
55 * which are too large can cause TCP streams to be bursty.
57 int sysctl_tcp_tso_win_divisor __read_mostly = 3;
59 int sysctl_tcp_mtu_probing __read_mostly = 0;
60 int sysctl_tcp_base_mss __read_mostly = 512;
62 /* By default, RFC2861 behavior. */
63 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
65 static void update_send_head(struct sock *sk, struct tcp_sock *tp,
68 sk->sk_send_head = skb->next;
69 if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
70 sk->sk_send_head = NULL;
71 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
72 tcp_packets_out_inc(sk, tp, skb);
75 /* SND.NXT, if window was not shrunk.
76 * If the window has been shrunk, what should we do? It is not clear at all.
77 * Using SND.UNA we will fail to open the window, and SND.NXT is out of window. :-(
78 * Anything in between SND.UNA...SND.UNA+SND.WND can already be
79 * invalid. OK, let's settle for this for now:
81 static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
83 if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
86 return tp->snd_una+tp->snd_wnd;
89 /* Calculate mss to advertise in SYN segment.
90 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
92 * 1. It is independent of path mtu.
93 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
94 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
95 * attached devices, because some buggy hosts are confused by
97 * 4. We do not do 3; we advertise an MSS calculated from the first
98 * hop device mtu, but allow it to be raised to ip_rt_min_advmss.
99 * This may be overridden via information stored in routing table.
100 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
101 * probably even Jumbo".
103 static __u16 tcp_advertise_mss(struct sock *sk)
105 struct tcp_sock *tp = tcp_sk(sk);
106 struct dst_entry *dst = __sk_dst_get(sk);
107 int mss = tp->advmss;
109 if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
110 mss = dst_metric(dst, RTAX_ADVMSS);
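/* Illustrative example (numbers are hypothetical, not from this file):
 * over Ethernet with a 1500-byte first-hop MTU, the route's RTAX_ADVMSS
 * metric for IPv4 would typically be 1500 - 20 (IP) - 20 (TCP) = 1460,
 * so a SYN built here advertises MSS 1460 unless tp->advmss is smaller.
 */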
117 /* RFC2861. Reset CWND after an idle period longer than RTO to the "restart window".
118 * This is the first part of the cwnd validation mechanism. */
119 static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
121 struct tcp_sock *tp = tcp_sk(sk);
122 s32 delta = tcp_time_stamp - tp->lsndtime;
123 u32 restart_cwnd = tcp_init_cwnd(tp, dst);
124 u32 cwnd = tp->snd_cwnd;
126 tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
128 tp->snd_ssthresh = tcp_current_ssthresh(sk);
129 restart_cwnd = min(restart_cwnd, cwnd);
131 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
132 cwnd >>= 1;
133 tp->snd_cwnd = max(cwnd, restart_cwnd);
134 tp->snd_cwnd_stamp = tcp_time_stamp;
135 tp->snd_cwnd_used = 0;
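/* Worked example (illustrative values only): with snd_cwnd = 40, a
 * restart window of 4 and an idle time of a little over 3*RTO, the loop
 * above halves cwnd three times (40 -> 20 -> 10 -> 5) and the result is
 * max(5, 4) = 5 segments, per the RFC2861 "restart window" rule.
 */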
138 static void tcp_event_data_sent(struct tcp_sock *tp,
139 struct sk_buff *skb, struct sock *sk)
141 struct inet_connection_sock *icsk = inet_csk(sk);
142 const u32 now = tcp_time_stamp;
144 if (sysctl_tcp_slow_start_after_idle &&
145 (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
146 tcp_cwnd_restart(sk, __sk_dst_get(sk));
150 /* If this is a reply sent within ato of the last received
151 * packet, enter pingpong mode.
153 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
154 icsk->icsk_ack.pingpong = 1;
157 static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
159 tcp_dec_quickack_mode(sk, pkts);
160 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
163 /* Determine a window scaling and initial window to offer.
164 * Based on the assumption that the given amount of space
165 * will be offered. Store the results in the tp structure.
166 * NOTE: for smooth operation initial space offering should
167 * be a multiple of mss if possible. We assume here that mss >= 1.
168 * This MUST be enforced by all callers.
170 void tcp_select_initial_window(int __space, __u32 mss,
171 __u32 *rcv_wnd, __u32 *window_clamp,
172 int wscale_ok, __u8 *rcv_wscale)
174 unsigned int space = (__space < 0 ? 0 : __space);
176 /* If no clamp is set, set the clamp to the max possible scaled window */
177 if (*window_clamp == 0)
178 (*window_clamp) = (65535 << 14);
179 space = min(*window_clamp, space);
181 /* Quantize space offering to a multiple of mss if possible. */
183 space = (space / mss) * mss;
185 /* NOTE: offering an initial window larger than 32767
186 * will break some buggy TCP stacks. If the admin tells us
187 * it is likely we could be speaking with such a buggy stack
188 * we will truncate our initial window offering to 32K-1
189 * unless the remote has sent us a window scaling option,
190 * which we interpret as a sign the remote TCP is not
191 * misinterpreting the window field as a signed quantity.
193 if (sysctl_tcp_workaround_signed_windows)
194 (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
200 /* Set window scaling on max possible window
201 * See RFC1323 for an explanation of the limit to 14
203 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
204 space = min_t(u32, space, *window_clamp);
205 while (space > 65535 && (*rcv_wscale) < 14) {
211 /* Set initial window to value enough for senders,
212 * following RFC2414. Senders, not following this RFC,
213 * will be satisfied with 2.
215 if (mss > (1<<*rcv_wscale)) {
221 if (*rcv_wnd > init_cwnd*mss)
222 *rcv_wnd = init_cwnd*mss;
225 /* Set the clamp no higher than max representable value */
226 (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
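/* Worked example (hypothetical buffer sizes): if the usable receive space
 * is 256 KB, the scaling loop above halves it until it fits in 16 bits:
 * 262144 -> 131072 -> 65536 -> 32768, yielding rcv_wscale = 3. The window
 * field then carries values that the peer multiplies by 2^3 = 8.
 */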
229 /* Choose a new window to advertise, update state in tcp_sock for the
230 * socket, and return result with RFC1323 scaling applied. The return
231 * value can be stuffed directly into th->window for an outgoing
234 static u16 tcp_select_window(struct sock *sk)
236 struct tcp_sock *tp = tcp_sk(sk);
237 u32 cur_win = tcp_receive_window(tp);
238 u32 new_win = __tcp_select_window(sk);
240 /* Never shrink the offered window */
241 if(new_win < cur_win) {
242 /* Danger Will Robinson!
243 * Don't update rcv_wup/rcv_wnd here or else
244 * we will not be able to advertise a zero
245 * window in time. --DaveM
247 * Relax Will Robinson.
251 tp->rcv_wnd = new_win;
252 tp->rcv_wup = tp->rcv_nxt;
254 /* Make sure we do not exceed the maximum possible
257 if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
258 new_win = min(new_win, MAX_TCP_WINDOW);
260 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
262 /* RFC1323 scaling applied */
263 new_win >>= tp->rx_opt.rcv_wscale;
265 /* If we advertise zero window, disable fast path. */
272 static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
273 __u32 tstamp, __u8 **md5_hash)
275 if (tp->rx_opt.tstamp_ok) {
276 *ptr++ = htonl((TCPOPT_NOP << 24) |
278 (TCPOPT_TIMESTAMP << 8) |
280 *ptr++ = htonl(tstamp);
281 *ptr++ = htonl(tp->rx_opt.ts_recent);
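/* For reference: on the wire the three words above encode the standard
 * timestamp option padded with NOPs, i.e. bytes 01 01 08 0a followed by
 * the 4-byte TSval and the 4-byte TSecr (kind 8, length 10, 12 bytes
 * total with alignment).
 */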
283 if (tp->rx_opt.eff_sacks) {
284 struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
287 *ptr++ = htonl((TCPOPT_NOP << 24) |
290 (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
291 TCPOLEN_SACK_PERBLOCK)));
292 for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
293 *ptr++ = htonl(sp[this_sack].start_seq);
294 *ptr++ = htonl(sp[this_sack].end_seq);
296 if (tp->rx_opt.dsack) {
297 tp->rx_opt.dsack = 0;
298 tp->rx_opt.eff_sacks--;
301 #ifdef CONFIG_TCP_MD5SIG
303 *ptr++ = htonl((TCPOPT_NOP << 24) |
305 (TCPOPT_MD5SIG << 8) |
307 *md5_hash = (__u8 *)ptr;
312 /* Construct a tcp options header for a SYN or SYN_ACK packet.
313 * If this is ever changed, make sure to change the definition of
314 * MAX_SYN_SIZE to match the new maximum number of options that you
317 * Note - that with the RFC2385 TCP option, we make room for the
318 * 16 byte MD5 hash. This will be filled in later, so the pointer for the
319 * location to be filled is passed back up.
321 static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
322 int offer_wscale, int wscale, __u32 tstamp,
323 __u32 ts_recent, __u8 **md5_hash)
325 /* We always get an MSS option.
326 * The option bytes which will be seen in normal data
327 * packets should timestamps be used, must be in the MSS
328 * advertised. But we subtract them from tp->mss_cache so
329 * that calculations in tcp_sendmsg are simpler etc.
330 * So account for this fact here if necessary. If we
331 * don't do this correctly, as a receiver we won't
332 * recognize data packets as being full sized when we
333 * should, and thus we won't abide by the delayed ACK
335 * SACKs don't matter, we never delay an ACK when we
336 * have any of those going out.
338 *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
341 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
342 (TCPOLEN_SACK_PERM << 16) |
343 (TCPOPT_TIMESTAMP << 8) |
346 *ptr++ = htonl((TCPOPT_NOP << 24) |
348 (TCPOPT_TIMESTAMP << 8) |
350 *ptr++ = htonl(tstamp); /* TSVAL */
351 *ptr++ = htonl(ts_recent); /* TSECR */
353 *ptr++ = htonl((TCPOPT_NOP << 24) |
355 (TCPOPT_SACK_PERM << 8) |
358 *ptr++ = htonl((TCPOPT_NOP << 24) |
359 (TCPOPT_WINDOW << 16) |
360 (TCPOLEN_WINDOW << 8) |
362 #ifdef CONFIG_TCP_MD5SIG
364 * If MD5 is enabled, then we set the option, and include the size
365 * (always 18). The actual MD5 hash is added just before the
369 *ptr++ = htonl((TCPOPT_NOP << 24) |
371 (TCPOPT_MD5SIG << 8) |
373 *md5_hash = (__u8 *) ptr;
378 /* This routine actually transmits TCP packets queued in by
379 * tcp_do_sendmsg(). This is used by both the initial
380 * transmission and possible later retransmissions.
381 * All SKB's seen here are completely headerless. It is our
382 * job to build the TCP header, and pass the packet down to
383 * IP so it can do the same plus pass the packet off to the
386 * We are working here with either a clone of the original
387 * SKB, or a fresh unique copy made by the retransmit engine.
389 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
391 const struct inet_connection_sock *icsk = inet_csk(sk);
392 struct inet_sock *inet;
394 struct tcp_skb_cb *tcb;
396 #ifdef CONFIG_TCP_MD5SIG
397 struct tcp_md5sig_key *md5;
398 __u8 *md5_hash_location;
404 BUG_ON(!skb || !tcp_skb_pcount(skb));
406 /* If congestion control is doing timestamping, we must
407 * take such a timestamp before we potentially clone/copy.
409 if (icsk->icsk_ca_ops->rtt_sample)
410 __net_timestamp(skb);
412 if (likely(clone_it)) {
413 if (unlikely(skb_cloned(skb)))
414 skb = pskb_copy(skb, gfp_mask);
416 skb = skb_clone(skb, gfp_mask);
423 tcb = TCP_SKB_CB(skb);
424 tcp_header_size = tp->tcp_header_len;
426 #define SYSCTL_FLAG_TSTAMPS 0x1
427 #define SYSCTL_FLAG_WSCALE 0x2
428 #define SYSCTL_FLAG_SACK 0x4
431 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
432 tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
433 if(sysctl_tcp_timestamps) {
434 tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
435 sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
437 if (sysctl_tcp_window_scaling) {
438 tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
439 sysctl_flags |= SYSCTL_FLAG_WSCALE;
441 if (sysctl_tcp_sack) {
442 sysctl_flags |= SYSCTL_FLAG_SACK;
443 if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
444 tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
446 } else if (unlikely(tp->rx_opt.eff_sacks)) {
447 /* A SACK is 2 pad bytes, a 2 byte header, plus
448 * 2 32-bit sequence numbers for each SACK block.
450 tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
451 (tp->rx_opt.eff_sacks *
452 TCPOLEN_SACK_PERBLOCK));
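/* Rough size sketch (assuming the usual option sizes: MSS 4 bytes,
 * aligned timestamps 12, aligned window scale 4): a SYN with timestamps,
 * window scaling and SACK enabled ends up with 20 + 4 + 12 + 4 = 40 bytes
 * of TCP header; SACK-permitted rides in the timestamp option's leading
 * bytes, so it adds nothing extra in that case (see the check above).
 */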
455 if (tcp_packets_in_flight(tp) == 0)
456 tcp_ca_event(sk, CA_EVENT_TX_START);
458 #ifdef CONFIG_TCP_MD5SIG
460 * Are we doing MD5 on this segment? If so - make
463 md5 = tp->af_specific->md5_lookup(sk, sk);
465 tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
468 th = (struct tcphdr *) skb_push(skb, tcp_header_size);
470 skb_set_owner_w(skb, sk);
472 /* Build TCP header and checksum it. */
473 th->source = inet->sport;
474 th->dest = inet->dport;
475 th->seq = htonl(tcb->seq);
476 th->ack_seq = htonl(tp->rcv_nxt);
477 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
480 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
481 /* RFC1323: The window in SYN & SYN/ACK segments
484 th->window = htons(tp->rcv_wnd);
486 th->window = htons(tcp_select_window(sk));
491 if (unlikely(tp->urg_mode &&
492 between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
493 th->urg_ptr = htons(tp->snd_up-tcb->seq);
497 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
498 tcp_syn_build_options((__be32 *)(th + 1),
499 tcp_advertise_mss(sk),
500 (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
501 (sysctl_flags & SYSCTL_FLAG_SACK),
502 (sysctl_flags & SYSCTL_FLAG_WSCALE),
503 tp->rx_opt.rcv_wscale,
505 tp->rx_opt.ts_recent,
507 #ifdef CONFIG_TCP_MD5SIG
508 md5 ? &md5_hash_location :
512 tcp_build_and_update_options((__be32 *)(th + 1),
514 #ifdef CONFIG_TCP_MD5SIG
515 md5 ? &md5_hash_location :
518 TCP_ECN_send(sk, tp, skb, tcp_header_size);
521 #ifdef CONFIG_TCP_MD5SIG
522 /* Calculate the MD5 hash, as we have all we need now */
524 tp->af_specific->calc_md5_hash(md5_hash_location,
533 icsk->icsk_af_ops->send_check(sk, skb->len, skb);
535 if (likely(tcb->flags & TCPCB_FLAG_ACK))
536 tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
538 if (skb->len != tcp_header_size)
539 tcp_event_data_sent(tp, skb, sk);
541 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
542 TCP_INC_STATS(TCP_MIB_OUTSEGS);
544 err = icsk->icsk_af_ops->queue_xmit(skb, 0);
545 if (likely(err <= 0))
550 return net_xmit_eval(err);
552 #undef SYSCTL_FLAG_TSTAMPS
553 #undef SYSCTL_FLAG_WSCALE
554 #undef SYSCTL_FLAG_SACK
558 /* This routine just queues the buffer
560 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
561 * otherwise socket can stall.
563 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
565 struct tcp_sock *tp = tcp_sk(sk);
567 /* Advance write_seq and place onto the write_queue. */
568 tp->write_seq = TCP_SKB_CB(skb)->end_seq;
569 skb_header_release(skb);
570 __skb_queue_tail(&sk->sk_write_queue, skb);
571 sk_charge_skb(sk, skb);
573 /* Queue it, remembering where we must start sending. */
574 if (sk->sk_send_head == NULL)
575 sk->sk_send_head = skb;
578 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
580 if (skb->len <= mss_now || !sk_can_gso(sk)) {
581 /* Avoid the costly divide in the normal
584 skb_shinfo(skb)->gso_segs = 1;
585 skb_shinfo(skb)->gso_size = 0;
586 skb_shinfo(skb)->gso_type = 0;
590 factor = skb->len + (mss_now - 1);
591 factor /= mss_now;
592 skb_shinfo(skb)->gso_segs = factor;
593 skb_shinfo(skb)->gso_size = mss_now;
594 skb_shinfo(skb)->gso_type = sk->sk_gso_type;
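/* Worked example (illustrative lengths): a 4000-byte skb with
 * mss_now = 1460 gives factor = (4000 + 1459) / 1460 = 3, i.e. the
 * GSO/TSO layer will emit three segments of at most 1460 bytes.
 */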
598 /* Function to create two new TCP segments. Shrinks the given segment
599 * to the specified size and appends a new segment with the rest of the
600 * packet to the list. This won't be called frequently, I hope.
601 * Remember, these are still headerless SKBs at this point.
603 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
605 struct tcp_sock *tp = tcp_sk(sk);
606 struct sk_buff *buff;
607 int nsize, old_factor;
611 BUG_ON(len > skb->len);
613 clear_all_retrans_hints(tp);
614 nsize = skb_headlen(skb) - len;
618 if (skb_cloned(skb) &&
619 skb_is_nonlinear(skb) &&
620 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
623 /* Get a new skb... force flag on. */
624 buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
626 return -ENOMEM; /* We'll just try again later. */
628 sk_charge_skb(sk, buff);
629 nlen = skb->len - len - nsize;
630 buff->truesize += nlen;
631 skb->truesize -= nlen;
633 /* Correct the sequence numbers. */
634 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
635 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
636 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
638 /* PSH and FIN should only be set in the second packet. */
639 flags = TCP_SKB_CB(skb)->flags;
640 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
641 TCP_SKB_CB(buff)->flags = flags;
642 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
643 TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;
645 if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
646 /* Copy and checksum data tail into the new buffer. */
647 buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
652 skb->csum = csum_block_sub(skb->csum, buff->csum, len);
654 skb->ip_summed = CHECKSUM_PARTIAL;
655 skb_split(skb, buff, len);
658 buff->ip_summed = skb->ip_summed;
660 /* Looks stupid, but our code really uses the 'when' field of
661 * skbs which it never sent before. --ANK
663 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
664 buff->tstamp = skb->tstamp;
666 old_factor = tcp_skb_pcount(skb);
668 /* Fix up tso_factor for both original and new SKB. */
669 tcp_set_skb_tso_segs(sk, skb, mss_now);
670 tcp_set_skb_tso_segs(sk, buff, mss_now);
672 /* If this packet has been sent out already, we must
673 * adjust the various packet counters.
675 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
676 int diff = old_factor - tcp_skb_pcount(skb) -
677 tcp_skb_pcount(buff);
679 tp->packets_out -= diff;
681 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
682 tp->sacked_out -= diff;
683 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
684 tp->retrans_out -= diff;
686 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
687 tp->lost_out -= diff;
688 tp->left_out -= diff;
692 /* Adjust Reno SACK estimate. */
693 if (!tp->rx_opt.sack_ok) {
694 tp->sacked_out -= diff;
695 if ((int)tp->sacked_out < 0)
697 tcp_sync_left_out(tp);
700 tp->fackets_out -= diff;
701 if ((int)tp->fackets_out < 0)
706 /* Link BUFF into the send queue. */
707 skb_header_release(buff);
708 __skb_append(skb, buff, &sk->sk_write_queue);
713 /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
714 * eventually). The difference is that the pulled data is not copied, but
715 * immediately discarded.
717 static void __pskb_trim_head(struct sk_buff *skb, int len)
723 for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
724 if (skb_shinfo(skb)->frags[i].size <= eat) {
725 put_page(skb_shinfo(skb)->frags[i].page);
726 eat -= skb_shinfo(skb)->frags[i].size;
728 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
730 skb_shinfo(skb)->frags[k].page_offset += eat;
731 skb_shinfo(skb)->frags[k].size -= eat;
737 skb_shinfo(skb)->nr_frags = k;
739 skb->tail = skb->data;
740 skb->data_len -= len;
741 skb->len = skb->data_len;
744 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
746 if (skb_cloned(skb) &&
747 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
750 /* If len == headlen, we avoid __skb_pull to preserve alignment. */
751 if (unlikely(len < skb_headlen(skb)))
752 __skb_pull(skb, len);
753 else
754 __pskb_trim_head(skb, len - skb_headlen(skb));
756 TCP_SKB_CB(skb)->seq += len;
757 skb->ip_summed = CHECKSUM_PARTIAL;
759 skb->truesize -= len;
760 sk->sk_wmem_queued -= len;
761 sk->sk_forward_alloc += len;
762 sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
764 /* Any change of skb->len requires recalculation of tso
767 if (tcp_skb_pcount(skb) > 1)
768 tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
773 /* Not accounting for SACKs here. */
774 int tcp_mtu_to_mss(struct sock *sk, int pmtu)
776 struct tcp_sock *tp = tcp_sk(sk);
777 struct inet_connection_sock *icsk = inet_csk(sk);
780 /* Calculate base mss without TCP options:
781 It is MMS_S - sizeof(tcphdr) of rfc1122
783 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
785 /* Clamp it (mss_clamp does not include tcp options) */
786 if (mss_now > tp->rx_opt.mss_clamp)
787 mss_now = tp->rx_opt.mss_clamp;
789 /* Now subtract optional transport overhead */
790 mss_now -= icsk->icsk_ext_hdr_len;
792 /* Then reserve room for full set of TCP options and 8 bytes of data */
796 /* Now subtract TCP options size, not including SACKs */
797 mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
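/* Worked example (typical IPv4 values, no extension headers): with a
 * pmtu of 1500, mss_now starts at 1500 - 20 - 20 = 1460; if timestamps
 * are in use, tcp_header_len is 20 + 12, so the final value is
 * 1460 - 12 = 1448 bytes of payload per segment.
 */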
802 /* Inverse of above */
803 int tcp_mss_to_mtu(struct sock *sk, int mss)
805 struct tcp_sock *tp = tcp_sk(sk);
806 struct inet_connection_sock *icsk = inet_csk(sk);
811 icsk->icsk_ext_hdr_len +
812 icsk->icsk_af_ops->net_header_len;
817 void tcp_mtup_init(struct sock *sk)
819 struct tcp_sock *tp = tcp_sk(sk);
820 struct inet_connection_sock *icsk = inet_csk(sk);
822 icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
823 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
824 icsk->icsk_af_ops->net_header_len;
825 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
826 icsk->icsk_mtup.probe_size = 0;
829 /* This function synchronizes snd mss to the current pmtu/exthdr set.
831 tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
832 account for TCP options; it assumes only the bare TCP header.
834 tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
835 It is the minimum of user_mss and the mss received with SYN.
836 It also does not include TCP options.
838 inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
840 tp->mss_cache is current effective sending mss, including
841 all tcp options except for SACKs. It is evaluated,
842 taking into account current pmtu, but never exceeds
843 tp->rx_opt.mss_clamp.
845 NOTE1. rfc1122 clearly states that advertised MSS
846 DOES NOT include either tcp or ip options.
848 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
849 are READ ONLY outside this function. --ANK (980731)
852 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
854 struct tcp_sock *tp = tcp_sk(sk);
855 struct inet_connection_sock *icsk = inet_csk(sk);
858 if (icsk->icsk_mtup.search_high > pmtu)
859 icsk->icsk_mtup.search_high = pmtu;
861 mss_now = tcp_mtu_to_mss(sk, pmtu);
863 /* Bound mss with half of window */
864 if (tp->max_window && mss_now > (tp->max_window>>1))
865 mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);
867 /* And store cached results */
868 icsk->icsk_pmtu_cookie = pmtu;
869 if (icsk->icsk_mtup.enabled)
870 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
871 tp->mss_cache = mss_now;
876 /* Compute the current effective MSS, taking SACKs and IP options,
877 * and even PMTU discovery events into account.
879 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
880 * cannot be large. However, taking into account rare use of URG, this
883 unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
885 struct tcp_sock *tp = tcp_sk(sk);
886 struct dst_entry *dst = __sk_dst_get(sk);
891 mss_now = tp->mss_cache;
893 if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
897 u32 mtu = dst_mtu(dst);
898 if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
899 mss_now = tcp_sync_mss(sk, mtu);
902 if (tp->rx_opt.eff_sacks)
903 mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
904 (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
906 #ifdef CONFIG_TCP_MD5SIG
907 if (tp->af_specific->md5_lookup(sk, sk))
908 mss_now -= TCPOLEN_MD5SIG_ALIGNED;
911 xmit_size_goal = mss_now;
914 xmit_size_goal = (65535 -
915 inet_csk(sk)->icsk_af_ops->net_header_len -
916 inet_csk(sk)->icsk_ext_hdr_len -
919 if (tp->max_window &&
920 (xmit_size_goal > (tp->max_window >> 1)))
921 xmit_size_goal = max((tp->max_window >> 1),
922 68U - tp->tcp_header_len);
924 xmit_size_goal -= (xmit_size_goal % mss_now);
926 tp->xmit_size_goal = xmit_size_goal;
931 /* Congestion window validation. (RFC2861) */
933 static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
935 __u32 packets_out = tp->packets_out;
937 if (packets_out >= tp->snd_cwnd) {
938 /* Network is fully fed. */
939 tp->snd_cwnd_used = 0;
940 tp->snd_cwnd_stamp = tcp_time_stamp;
942 /* Network starves. */
943 if (tp->packets_out > tp->snd_cwnd_used)
944 tp->snd_cwnd_used = tp->packets_out;
946 if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
947 tcp_cwnd_application_limited(sk);
951 static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
953 u32 window, cwnd_len;
955 window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
956 cwnd_len = mss_now * cwnd;
957 return min(window, cwnd_len);
960 /* Can at least one segment of SKB be sent right now, according to the
961 * congestion window rules? If so, return how many segments are allowed.
963 static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
967 /* Don't be strict about the congestion window for the final FIN. */
968 if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
969 tcp_skb_pcount(skb) == 1)
972 in_flight = tcp_packets_in_flight(tp);
974 if (in_flight < cwnd)
975 return (cwnd - in_flight);
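/* Example (hypothetical state): with snd_cwnd = 10 and 7 packets already
 * in flight, this returns 3, i.e. up to three more segments may be sent
 * before the congestion window is full.
 */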
980 /* This must be invoked the first time we consider transmitting
983 static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
985 int tso_segs = tcp_skb_pcount(skb);
989 tcp_skb_mss(skb) != mss_now)) {
990 tcp_set_skb_tso_segs(sk, skb, mss_now);
991 tso_segs = tcp_skb_pcount(skb);
996 static inline int tcp_minshall_check(const struct tcp_sock *tp)
998 return after(tp->snd_sml,tp->snd_una) &&
999 !after(tp->snd_sml, tp->snd_nxt);
1002 /* Return 0 if the packet can be sent now without violating Nagle's rules:
1003 * 1. It is full sized.
1004 * 2. Or it contains FIN. (already checked by caller)
1005 * 3. Or TCP_NODELAY was set.
1006 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1007 * With Minshall's modification: all sent small packets are ACKed.
1010 static inline int tcp_nagle_check(const struct tcp_sock *tp,
1011 const struct sk_buff *skb,
1012 unsigned mss_now, int nonagle)
1014 return (skb->len < mss_now &&
1015 ((nonagle&TCP_NAGLE_CORK) ||
1018 tcp_minshall_check(tp))));
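/* Illustrative scenario (hypothetical): an application writes 100 bytes
 * while an earlier sub-MSS segment is still unacknowledged and TCP_NODELAY
 * is off. The segment is smaller than mss_now, TCP_CORK is not set, but
 * tcp_minshall_check() reports an outstanding small packet, so this
 * returns non-zero and the data is held back until an ACK arrives.
 */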
1021 /* Return non-zero if the Nagle test allows this packet to be
1024 static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
1025 unsigned int cur_mss, int nonagle)
1027 /* The Nagle rule does not apply to frames which sit in the middle of the
1028 * write_queue (they have no chance to get new data).
1030 * This is implemented in the callers, where they modify the 'nonagle'
1031 * argument based upon the location of SKB in the send queue.
1033 if (nonagle & TCP_NAGLE_PUSH)
1036 /* Don't use the nagle rule for urgent data (or for the final FIN). */
1038 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
1041 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1047 /* Does at least the first segment of SKB fit into the send window? */
1048 static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
1050 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1052 if (skb->len > cur_mss)
1053 end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1055 return !after(end_seq, tp->snd_una + tp->snd_wnd);
1058 /* This checks if the data bearing packet SKB (usually sk->sk_send_head)
1059 * should be put on the wire right now. If so, it returns the number of
1060 * packets allowed by the congestion window.
1062 static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
1063 unsigned int cur_mss, int nonagle)
1065 struct tcp_sock *tp = tcp_sk(sk);
1066 unsigned int cwnd_quota;
1068 tcp_init_tso_segs(sk, skb, cur_mss);
1070 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1073 cwnd_quota = tcp_cwnd_test(tp, skb);
1075 !tcp_snd_wnd_test(tp, skb, cur_mss))
1081 static inline int tcp_skb_is_last(const struct sock *sk,
1082 const struct sk_buff *skb)
1084 return skb->next == (struct sk_buff *)&sk->sk_write_queue;
1087 int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
1089 struct sk_buff *skb = sk->sk_send_head;
1092 tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
1093 (tcp_skb_is_last(sk, skb) ?
1098 /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1099 * which is put after SKB on the list. It is very much like
1100 * tcp_fragment() except that it may make several kinds of assumptions
1101 * in order to speed up the splitting operation. In particular, we
1102 * know that all the data is in scatter-gather pages, and that the
1103 * packet has never been sent out before (and thus is not cloned).
1105 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
1107 struct sk_buff *buff;
1108 int nlen = skb->len - len;
1111 /* All of a TSO frame must be composed of paged data. */
1112 if (skb->len != skb->data_len)
1113 return tcp_fragment(sk, skb, len, mss_now);
1115 buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
1116 if (unlikely(buff == NULL))
1119 sk_charge_skb(sk, buff);
1120 buff->truesize += nlen;
1121 skb->truesize -= nlen;
1123 /* Correct the sequence numbers. */
1124 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1125 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1126 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1128 /* PSH and FIN should only be set in the second packet. */
1129 flags = TCP_SKB_CB(skb)->flags;
1130 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1131 TCP_SKB_CB(buff)->flags = flags;
1133 /* This packet was never sent out yet, so no SACK bits. */
1134 TCP_SKB_CB(buff)->sacked = 0;
1136 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1137 skb_split(skb, buff, len);
1139 /* Fix up tso_factor for both original and new SKB. */
1140 tcp_set_skb_tso_segs(sk, skb, mss_now);
1141 tcp_set_skb_tso_segs(sk, buff, mss_now);
1143 /* Link BUFF into the send queue. */
1144 skb_header_release(buff);
1145 __skb_append(skb, buff, &sk->sk_write_queue);
1150 /* Try to defer sending, if possible, in order to minimize the amount
1151 * of TSO splitting we do. View it as a kind of TSO Nagle test.
1153 * This algorithm is from John Heffner.
1155 static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
1157 const struct inet_connection_sock *icsk = inet_csk(sk);
1158 u32 send_win, cong_win, limit, in_flight;
1160 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
1163 if (icsk->icsk_ca_state != TCP_CA_Open)
1166 /* Defer for less than two clock ticks. */
1167 if (!tp->tso_deferred && ((jiffies<<1)>>1) - (tp->tso_deferred>>1) > 1)
1170 in_flight = tcp_packets_in_flight(tp);
1172 BUG_ON(tcp_skb_pcount(skb) <= 1 ||
1173 (tp->snd_cwnd <= in_flight));
1175 send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;
1177 /* From in_flight test above, we know that cwnd > in_flight. */
1178 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1180 limit = min(send_win, cong_win);
1182 /* If a full-sized TSO skb can be sent, do it. */
1186 if (sysctl_tcp_tso_win_divisor) {
1187 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1189 /* If at least some fraction of a window is available,
1192 chunk /= sysctl_tcp_tso_win_divisor;
1196 /* Different approach, try not to defer past a single
1197 * ACK. Receiver should ACK every other full sized
1198 * frame, so if we have space for more than 3 frames
1201 if (limit > tcp_max_burst(tp) * tp->mss_cache)
1205 /* Ok, it looks like it is advisable to defer. */
1206 tp->tso_deferred = 1 | (jiffies<<1);
1211 tp->tso_deferred = 0;
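/* Rough numbers (hypothetical): with tcp_tso_win_divisor = 3, snd_cwnd = 20
 * and mss_cache = 1460, chunk = min(snd_wnd, 20 * 1460) / 3, i.e. about
 * 9733 bytes when the send window is not the limit; a TSO frame goes out
 * immediately once at least that much window/cwnd space is available,
 * otherwise transmission is deferred in the hope of building a larger frame.
 */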
1215 /* Create a new MTU probe if we are ready.
1216 * Returns 0 if we should wait to probe (no cwnd available),
1217 * 1 if a probe was sent,
1218 * -1 otherwise */
1219 static int tcp_mtu_probe(struct sock *sk)
1221 struct tcp_sock *tp = tcp_sk(sk);
1222 struct inet_connection_sock *icsk = inet_csk(sk);
1223 struct sk_buff *skb, *nskb, *next;
1230 /* Not currently probing/verifying,
1232 * have enough cwnd, and
1233 * not SACKing (the variable headers throw things off) */
1234 if (!icsk->icsk_mtup.enabled ||
1235 icsk->icsk_mtup.probe_size ||
1236 inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
1237 tp->snd_cwnd < 11 ||
1238 tp->rx_opt.eff_sacks)
1241 /* Very simple search strategy: just double the MSS. */
1242 mss_now = tcp_current_mss(sk, 0);
1243 probe_size = 2*tp->mss_cache;
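/* Example (hypothetical path): with mss_cache = 1448 (1500-byte path,
 * timestamps on), the probe payload is 2 * 1448 = 2896 bytes, which
 * corresponds to probing for a path MTU of roughly 2948 bytes (payload
 * plus 52 bytes of IP/TCP/timestamp headers); the check below gives up
 * if that already exceeds search_high.
 */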
1244 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
1245 /* TODO: set timer for probe_converge_event */
1249 /* Have enough data in the send queue to probe? */
1251 if ((skb = sk->sk_send_head) == NULL)
1253 while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb))
1255 if (len < probe_size)
1258 /* Receive window check. */
1259 if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) {
1260 if (tp->snd_wnd < probe_size)
1266 /* Do we need to wait to drain cwnd? */
1267 pif = tcp_packets_in_flight(tp);
1268 if (pif + 2 > tp->snd_cwnd) {
1269 /* With no packets in flight, don't stall. */
1276 /* We're allowed to probe. Build it now. */
1277 if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
1279 sk_charge_skb(sk, nskb);
1281 skb = sk->sk_send_head;
1282 __skb_insert(nskb, skb->prev, skb, &sk->sk_write_queue);
1283 sk->sk_send_head = nskb;
1285 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1286 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
1287 TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
1288 TCP_SKB_CB(nskb)->sacked = 0;
1290 nskb->ip_summed = skb->ip_summed;
1293 while (len < probe_size) {
1296 copy = min_t(int, skb->len, probe_size - len);
1297 if (nskb->ip_summed)
1298 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1300 nskb->csum = skb_copy_and_csum_bits(skb, 0,
1301 skb_put(nskb, copy), copy, nskb->csum);
1303 if (skb->len <= copy) {
1304 /* We've eaten all the data from this skb.
1306 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
1307 __skb_unlink(skb, &sk->sk_write_queue);
1308 sk_stream_free_skb(sk, skb);
1310 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
1311 ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1312 if (!skb_shinfo(skb)->nr_frags) {
1313 skb_pull(skb, copy);
1314 if (skb->ip_summed != CHECKSUM_PARTIAL)
1315 skb->csum = csum_partial(skb->data, skb->len, 0);
1317 __pskb_trim_head(skb, copy);
1318 tcp_set_skb_tso_segs(sk, skb, mss_now);
1320 TCP_SKB_CB(skb)->seq += copy;
1326 tcp_init_tso_segs(sk, nskb, nskb->len);
1328 /* We're ready to send. If this fails, the probe will
1329 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
1330 TCP_SKB_CB(nskb)->when = tcp_time_stamp;
1331 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
1332 /* Decrement cwnd here because we are sending
1333 * effectively two packets. */
1335 update_send_head(sk, tp, nskb);
1337 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
1338 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
1339 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
1348 /* This routine writes packets to the network. It advances the
1349 * send_head. This happens as incoming acks open up the remote
1352 * Returns 1, if no segments are in flight and we have queued segments, but
1353 * cannot send anything now because of SWS or another problem.
1355 static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
1357 struct tcp_sock *tp = tcp_sk(sk);
1358 struct sk_buff *skb;
1359 unsigned int tso_segs, sent_pkts;
1363 /* If we are closed, the bytes will have to remain here.
1364 * In time closedown will finish, we empty the write queue and all
1367 if (unlikely(sk->sk_state == TCP_CLOSE))
1372 /* Do MTU probing. */
1373 if ((result = tcp_mtu_probe(sk)) == 0) {
1375 } else if (result > 0) {
1379 while ((skb = sk->sk_send_head)) {
1382 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1385 cwnd_quota = tcp_cwnd_test(tp, skb);
1389 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1392 if (tso_segs == 1) {
1393 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1394 (tcp_skb_is_last(sk, skb) ?
1395 nonagle : TCP_NAGLE_PUSH))))
1398 if (tcp_tso_should_defer(sk, tp, skb))
1404 limit = tcp_window_allows(tp, skb,
1405 mss_now, cwnd_quota);
1407 if (skb->len < limit) {
1408 unsigned int trim = skb->len % mss_now;
1411 limit = skb->len - trim;
1415 if (skb->len > limit &&
1416 unlikely(tso_fragment(sk, skb, limit, mss_now)))
1419 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1421 if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
1424 /* Advance the send_head. This one is sent out.
1425 * This call will increment packets_out.
1427 update_send_head(sk, tp, skb);
1429 tcp_minshall_update(tp, mss_now, skb);
1433 if (likely(sent_pkts)) {
1434 tcp_cwnd_validate(sk, tp);
1437 return !tp->packets_out && sk->sk_send_head;
1440 /* Push out any pending frames which were held back due to
1441 * TCP_CORK or attempt at coalescing tiny packets.
1442 * The socket must be locked by the caller.
1444 void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
1445 unsigned int cur_mss, int nonagle)
1447 struct sk_buff *skb = sk->sk_send_head;
1450 if (tcp_write_xmit(sk, cur_mss, nonagle))
1451 tcp_check_probe_timer(sk, tp);
1455 /* Send _single_ skb sitting at the send head. This function requires
1456 * a true push_pending_frames call to set up the probe timer etc.
1458 void tcp_push_one(struct sock *sk, unsigned int mss_now)
1460 struct tcp_sock *tp = tcp_sk(sk);
1461 struct sk_buff *skb = sk->sk_send_head;
1462 unsigned int tso_segs, cwnd_quota;
1464 BUG_ON(!skb || skb->len < mss_now);
1466 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1467 cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
1469 if (likely(cwnd_quota)) {
1476 limit = tcp_window_allows(tp, skb,
1477 mss_now, cwnd_quota);
1479 if (skb->len < limit) {
1480 unsigned int trim = skb->len % mss_now;
1483 limit = skb->len - trim;
1487 if (skb->len > limit &&
1488 unlikely(tso_fragment(sk, skb, limit, mss_now)))
1491 /* Send it out now. */
1492 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1494 if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
1495 update_send_head(sk, tp, skb);
1496 tcp_cwnd_validate(sk, tp);
1502 /* This function returns the amount that we can raise the
1503 * usable window based on the following constraints
1505 * 1. The window can never be shrunk once it is offered (RFC 793)
1506 * 2. We limit memory per socket
1509 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
1510 * RECV.NEXT + RCV.WIN fixed until:
1511 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
1513 * i.e. don't raise the right edge of the window until you can raise
1514 * it at least MSS bytes.
1516 * Unfortunately, the recommended algorithm breaks header prediction,
1517 * since header prediction assumes th->window stays fixed.
1519 * Strictly speaking, keeping th->window fixed violates the receiver
1520 * side SWS prevention criteria. The problem is that under this rule
1521 * a stream of single byte packets will cause the right side of the
1522 * window to always advance by a single byte.
1524 * Of course, if the sender implements sender side SWS prevention
1525 * then this will not be a problem.
1527 * BSD seems to make the following compromise:
1529 * If the free space is less than 1/4 of the maximum
1530 * space available and the free space is less than 1/2 mss,
1531 * then set the window to 0.
1532 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
1533 * Otherwise, just prevent the window from shrinking
1534 * and from being larger than the largest representable value.
1536 * This prevents incremental opening of the window in the regime
1537 * where TCP is limited by the speed of the reader side taking
1538 * data out of the TCP receive queue. It does nothing about
1539 * those cases where the window is constrained on the sender side
1540 * because the pipeline is full.
1542 * BSD also seems to "accidentally" limit itself to windows that are a
1543 * multiple of MSS, at least until the free space gets quite small.
1544 * This would appear to be a side effect of the mbuf implementation.
1545 * Combining these two algorithms results in the observed behavior
1546 * of having a fixed window size at almost all times.
1548 * Below we obtain similar behavior by forcing the offered window to
1549 * a multiple of the mss when it is feasible to do so.
1551 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
1552 * Regular options like TIMESTAMP are taken into account.
1554 u32 __tcp_select_window(struct sock *sk)
1556 struct inet_connection_sock *icsk = inet_csk(sk);
1557 struct tcp_sock *tp = tcp_sk(sk);
1558 /* MSS for the peer's data. Previous versions used mss_clamp
1559 * here. I don't know if the value based on our guesses
1560 * of peer's MSS is better for the performance. It's more correct
1561 * but may be worse for the performance because of rcv_mss
1562 * fluctuations. --SAW 1998/11/1
1564 int mss = icsk->icsk_ack.rcv_mss;
1565 int free_space = tcp_space(sk);
1566 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
1569 if (mss > full_space)
1572 if (free_space < full_space/2) {
1573 icsk->icsk_ack.quick = 0;
1575 if (tcp_memory_pressure)
1576 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
1578 if (free_space < mss)
1582 if (free_space > tp->rcv_ssthresh)
1583 free_space = tp->rcv_ssthresh;
1585 /* Don't do rounding if we are using window scaling, since the
1586 * scaled window will not line up with the MSS boundary anyway.
1588 window = tp->rcv_wnd;
1589 if (tp->rx_opt.rcv_wscale) {
1590 window = free_space;
1592 /* Advertise enough space so that it won't get scaled away.
1593 * Important case: prevent zero window announcement if
1594 * 1<<rcv_wscale > mss.
1596 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
1597 window = (((window >> tp->rx_opt.rcv_wscale) + 1)
1598 << tp->rx_opt.rcv_wscale);
1600 /* Get the largest window that is a nice multiple of mss.
1601 * Window clamp already applied above.
1602 * If our current window offering is within 1 mss of the
1603 * free space we just keep it. This prevents the divide
1604 * and multiply from happening most of the time.
1605 * We also don't do any window rounding when the free space
1608 if (window <= free_space - mss || window > free_space)
1609 window = (free_space/mss)*mss;
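/* Example (hypothetical sizes, no window scaling): with free_space = 10000
 * and the peer's rcv_mss = 1460, the advertised window is rounded down to
 * (10000 / 1460) * 1460 = 8760 bytes, unless the previously offered window
 * is already within one mss of the free space.
 */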
1615 /* Attempt to collapse two adjacent SKB's during retransmission. */
1616 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
1618 struct tcp_sock *tp = tcp_sk(sk);
1619 struct sk_buff *next_skb = skb->next;
1621 /* The first test we must make is that neither of these two
1622 * SKB's are still referenced by someone else.
1624 if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
1625 int skb_size = skb->len, next_skb_size = next_skb->len;
1626 u16 flags = TCP_SKB_CB(skb)->flags;
1628 /* Also punt if next skb has been SACK'd. */
1629 if(TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
1632 /* Next skb is out of window. */
1633 if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
1636 /* Punt if not enough space exists in the first SKB for
1637 * the data in the second, or the total combined payload
1638 * would exceed the MSS.
1640 if ((next_skb_size > skb_tailroom(skb)) ||
1641 ((skb_size + next_skb_size) > mss_now))
1644 BUG_ON(tcp_skb_pcount(skb) != 1 ||
1645 tcp_skb_pcount(next_skb) != 1);
1647 /* changing transmit queue under us so clear hints */
1648 clear_all_retrans_hints(tp);
1650 /* Ok. We will be able to collapse the packet. */
1651 __skb_unlink(next_skb, &sk->sk_write_queue);
1653 memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
1655 if (next_skb->ip_summed == CHECKSUM_PARTIAL)
1656 skb->ip_summed = CHECKSUM_PARTIAL;
1658 if (skb->ip_summed != CHECKSUM_PARTIAL)
1659 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
1661 /* Update sequence range on original skb. */
1662 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
1664 /* Merge over control information. */
1665 flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
1666 TCP_SKB_CB(skb)->flags = flags;
1668 /* All done, get rid of second SKB and account for it so
1669 * packet counting does not break.
1671 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
1672 if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
1673 tp->retrans_out -= tcp_skb_pcount(next_skb);
1674 if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) {
1675 tp->lost_out -= tcp_skb_pcount(next_skb);
1676 tp->left_out -= tcp_skb_pcount(next_skb);
1678 /* Reno case is special. Sigh... */
1679 if (!tp->rx_opt.sack_ok && tp->sacked_out) {
1680 tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
1681 tp->left_out -= tcp_skb_pcount(next_skb);
1684 /* Not quite right: it can be > snd.fack, but
1685 * it is better to underestimate fackets.
1687 tcp_dec_pcount_approx(&tp->fackets_out, next_skb);
1688 tcp_packets_out_dec(tp, next_skb);
1689 sk_stream_free_skb(sk, next_skb);
1693 /* Do a simple retransmit without using the backoff mechanisms in
1694 * tcp_timer. This is used for path mtu discovery.
1695 * The socket is already locked here.
1697 void tcp_simple_retransmit(struct sock *sk)
1699 const struct inet_connection_sock *icsk = inet_csk(sk);
1700 struct tcp_sock *tp = tcp_sk(sk);
1701 struct sk_buff *skb;
1702 unsigned int mss = tcp_current_mss(sk, 0);
1705 sk_stream_for_retrans_queue(skb, sk) {
1706 if (skb->len > mss &&
1707 !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
1708 if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
1709 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1710 tp->retrans_out -= tcp_skb_pcount(skb);
1712 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
1713 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1714 tp->lost_out += tcp_skb_pcount(skb);
1720 clear_all_retrans_hints(tp);
1725 tcp_sync_left_out(tp);
1727 /* Don't muck with the congestion window here.
1728 * The reason is that we do not increase the amount of _data_
1729 * in the network, but the units have changed and the effective
1730 * cwnd/ssthresh are really reduced now.
1732 if (icsk->icsk_ca_state != TCP_CA_Loss) {
1733 tp->high_seq = tp->snd_nxt;
1734 tp->snd_ssthresh = tcp_current_ssthresh(sk);
1735 tp->prior_ssthresh = 0;
1736 tp->undo_marker = 0;
1737 tcp_set_ca_state(sk, TCP_CA_Loss);
1739 tcp_xmit_retransmit_queue(sk);
1742 /* This retransmits one SKB. Policy decisions and retransmit queue
1743 * state updates are done by the caller. Returns non-zero if an
1744 * error occurred which prevented the send.
1746 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1748 struct tcp_sock *tp = tcp_sk(sk);
1749 struct inet_connection_sock *icsk = inet_csk(sk);
1750 unsigned int cur_mss = tcp_current_mss(sk, 0);
1753 /* Inconclusive MTU probe */
1754 if (icsk->icsk_mtup.probe_size) {
1755 icsk->icsk_mtup.probe_size = 0;
1758 /* Do not send more than we queued. 1/4 is reserved for possible
1759 * copying overhead: fragmentation, tunneling, mangling etc.
1761 if (atomic_read(&sk->sk_wmem_alloc) >
1762 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
1765 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
1766 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1768 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
1772 /* If the receiver has shrunk his window, and skb is out of
1773 * the new window, do not retransmit it. The exception is the
1774 * case when the window is shrunk to zero, in which case
1775 * our retransmit serves as a zero window probe.
1777 if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
1778 && TCP_SKB_CB(skb)->seq != tp->snd_una)
1781 if (skb->len > cur_mss) {
1782 if (tcp_fragment(sk, skb, cur_mss, cur_mss))
1783 return -ENOMEM; /* We'll try again later. */
1786 /* Collapse two adjacent packets if worthwhile and we can. */
1787 if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
1788 (skb->len < (cur_mss >> 1)) &&
1789 (skb->next != sk->sk_send_head) &&
1790 (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
1791 (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
1792 (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) &&
1793 (sysctl_tcp_retrans_collapse != 0))
1794 tcp_retrans_try_collapse(sk, skb, cur_mss);
1796 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
1797 return -EHOSTUNREACH; /* Routing failure or similar. */
1799 /* Some Solaris stacks overoptimize and ignore the FIN on a
1800 * retransmit when old data is attached. So strip it off
1801 * since it is cheap to do so and saves bytes on the network.
1804 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1805 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
1806 if (!pskb_trim(skb, 0)) {
1807 TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
1808 skb_shinfo(skb)->gso_segs = 1;
1809 skb_shinfo(skb)->gso_size = 0;
1810 skb_shinfo(skb)->gso_type = 0;
1811 skb->ip_summed = CHECKSUM_NONE;
1816 /* Make a copy, if the first transmission SKB clone we made
1817 * is still in somebody's hands, else make a clone.
1819 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1821 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
1824 /* Update global TCP statistics. */
1825 TCP_INC_STATS(TCP_MIB_RETRANSSEGS);
1827 tp->total_retrans++;
1829 #if FASTRETRANS_DEBUG > 0
1830 if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
1831 if (net_ratelimit())
1832 printk(KERN_DEBUG "retrans_out leaked.\n");
1835 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
1836 tp->retrans_out += tcp_skb_pcount(skb);
1838 /* Save stamp of the first retransmit. */
1839 if (!tp->retrans_stamp)
1840 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
1844 /* snd_nxt is stored to detect loss of retransmitted segment,
1845 * see tcp_input.c tcp_sacktag_write_queue().
1847 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
1852 /* This gets called after a retransmit timeout, and the initially
1853 * retransmitted data is acknowledged. It tries to continue
1854 * resending the rest of the retransmit queue, until either
1855 * we've sent it all or the congestion window limit is reached.
1856 * If doing SACK, the first ACK which comes back for a timeout
1857 * based retransmit packet might feed us FACK information again.
1858 * If so, we use it to avoid unnecessary retransmissions.
1860 void tcp_xmit_retransmit_queue(struct sock *sk)
1862 const struct inet_connection_sock *icsk = inet_csk(sk);
1863 struct tcp_sock *tp = tcp_sk(sk);
1864 struct sk_buff *skb;
1867 if (tp->retransmit_skb_hint) {
1868 skb = tp->retransmit_skb_hint;
1869 packet_cnt = tp->retransmit_cnt_hint;
1871 skb = sk->sk_write_queue.next;
1875 /* First pass: retransmit lost packets. */
1877 sk_stream_for_retrans_queue_from(skb, sk) {
1878 __u8 sacked = TCP_SKB_CB(skb)->sacked;
1880 /* we could do better than to assign each time */
1881 tp->retransmit_skb_hint = skb;
1882 tp->retransmit_cnt_hint = packet_cnt;
1884 /* Assume this retransmit will generate
1885 * only one packet for congestion window
1886 * calculation purposes. This works because
1887 * tcp_retransmit_skb() will chop up the
1888 * packet to be MSS sized and all the
1889 * packet counting works out.
1891 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
1894 if (sacked & TCPCB_LOST) {
1895 if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
1896 if (tcp_retransmit_skb(sk, skb)) {
1897 tp->retransmit_skb_hint = NULL;
1900 if (icsk->icsk_ca_state != TCP_CA_Loss)
1901 NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
1903 NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
1906 skb_peek(&sk->sk_write_queue))
1907 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1908 inet_csk(sk)->icsk_rto,
1912 packet_cnt += tcp_skb_pcount(skb);
1913 if (packet_cnt >= tp->lost_out)
1919 /* OK, demanded retransmission is finished. */
1921 /* Forward retransmissions are possible only during Recovery. */
1922 if (icsk->icsk_ca_state != TCP_CA_Recovery)
1925 /* No forward retransmissions in Reno are possible. */
1926 if (!tp->rx_opt.sack_ok)
1929 /* Yeah, we have to make a difficult choice between forward transmission
1930 * and retransmission... Both ways have their merits...
1932 * For now we do not retransmit anything, while we have some new
1936 if (tcp_may_send_now(sk, tp))
1939 if (tp->forward_skb_hint) {
1940 skb = tp->forward_skb_hint;
1941 packet_cnt = tp->forward_cnt_hint;
1943 skb = sk->sk_write_queue.next;
1947 sk_stream_for_retrans_queue_from(skb, sk) {
1948 tp->forward_cnt_hint = packet_cnt;
1949 tp->forward_skb_hint = skb;
1951 /* Similar to the retransmit loop above we
1952 * can pretend that the retransmitted SKB
1953 * we send out here will be composed of one
1954 * real MSS sized packet because tcp_retransmit_skb()
1955 * will fragment it if necessary.
1957 if (++packet_cnt > tp->fackets_out)
1960 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
1963 if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
1966 /* Ok, retransmit it. */
1967 if (tcp_retransmit_skb(sk, skb)) {
1968 tp->forward_skb_hint = NULL;
1972 if (skb == skb_peek(&sk->sk_write_queue))
1973 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1974 inet_csk(sk)->icsk_rto,
1977 NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
1982 /* Send a fin. The caller locks the socket for us. This cannot be
1983 * allowed to fail queueing a FIN frame under any circumstances.
1985 void tcp_send_fin(struct sock *sk)
1987 struct tcp_sock *tp = tcp_sk(sk);
1988 struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
1991 /* Optimization, tack on the FIN if we have a queue of
1992 * unsent frames. But be careful about outgoing SACKS
1995 mss_now = tcp_current_mss(sk, 1);
1997 if (sk->sk_send_head != NULL) {
1998 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
1999 TCP_SKB_CB(skb)->end_seq++;
2002 /* Socket is locked, keep trying until memory is available. */
2004 skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
2010 /* Reserve space for headers and prepare control bits. */
2011 skb_reserve(skb, MAX_TCP_HEADER);
2013 TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
2014 TCP_SKB_CB(skb)->sacked = 0;
2015 skb_shinfo(skb)->gso_segs = 1;
2016 skb_shinfo(skb)->gso_size = 0;
2017 skb_shinfo(skb)->gso_type = 0;
2019 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2020 TCP_SKB_CB(skb)->seq = tp->write_seq;
2021 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
2022 tcp_queue_skb(sk, skb);
2024 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
2027 /* We get here when a process closes a file descriptor (either due to
2028 * an explicit close() or as a byproduct of exit()'ing) and there
2029 * was unread data in the receive queue. This behavior is recommended
2030 * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM
2032 void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2034 struct tcp_sock *tp = tcp_sk(sk);
2035 struct sk_buff *skb;
2037 /* NOTE: No TCP options attached and we never retransmit this. */
2038 skb = alloc_skb(MAX_TCP_HEADER, priority);
2040 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
2044 /* Reserve space for headers and prepare control bits. */
2045 skb_reserve(skb, MAX_TCP_HEADER);
2047 TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
2048 TCP_SKB_CB(skb)->sacked = 0;
2049 skb_shinfo(skb)->gso_segs = 1;
2050 skb_shinfo(skb)->gso_size = 0;
2051 skb_shinfo(skb)->gso_type = 0;
2054 TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
2055 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
2056 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2057 if (tcp_transmit_skb(sk, skb, 0, priority))
2058 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
2061 /* WARNING: This routine must only be called when we have already sent
2062 * a SYN packet that crossed the incoming SYN that caused this routine
2063 * to get called. If this assumption fails then the initial rcv_wnd
2064 * and rcv_wscale values will not be correct.
2066 int tcp_send_synack(struct sock *sk)
2068 struct sk_buff* skb;
2070 skb = skb_peek(&sk->sk_write_queue);
2071 if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
2072 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
2073 return -EFAULT;
2074 }
2075 if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) {
2076 if (skb_cloned(skb)) {
2077 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2078 if (nskb == NULL)
2079 return -ENOMEM;
2080 __skb_unlink(skb, &sk->sk_write_queue);
2081 skb_header_release(nskb);
2082 __skb_queue_head(&sk->sk_write_queue, nskb);
2083 sk_stream_free_skb(sk, skb);
2084 sk_charge_skb(sk, nskb);
2085 skb = nskb;
2086 }
2088 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
2089 TCP_ECN_send_synack(tcp_sk(sk), skb);
2090 }
2091 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2092 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2093 }
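/* Editor's note: this path handles simultaneous open. The SYN already
 * sitting at the head of the write queue is upgraded to SYN|ACK; if that
 * skb is cloned (still referenced by the transmit path), it is first
 * replaced by a private copy so the flag change cannot affect the clone.
 */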
2095 /*
2096 * Prepare a SYN-ACK.
2097 */
2098 struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2099 struct request_sock *req)
2100 {
2101 struct inet_request_sock *ireq = inet_rsk(req);
2102 struct tcp_sock *tp = tcp_sk(sk);
2103 struct tcphdr *th;
2104 int tcp_header_size;
2105 struct sk_buff *skb;
2106 #ifdef CONFIG_TCP_MD5SIG
2107 struct tcp_md5sig_key *md5;
2108 __u8 *md5_hash_location;
2109 #endif
2111 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2112 if (skb == NULL)
2113 return NULL;
2115 /* Reserve space for headers. */
2116 skb_reserve(skb, MAX_TCP_HEADER);
2118 skb->dst = dst_clone(dst);
2120 tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
2121 (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
2122 (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
2123 /* SACK_PERM is in the place of NOP NOP of TS */
2124 ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
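/* Worked example (editor's illustration): with timestamps and window
 * scaling negotiated, tcp_header_size is
 *
 *   sizeof(struct tcphdr) + TCPOLEN_MSS + TCPOLEN_TSTAMP_ALIGNED +
 *   TCPOLEN_WSCALE_ALIGNED = 20 + 4 + 12 + 4 = 40 bytes,
 *
 * so th->doff below becomes 40 >> 2 = 10. SACK-permitted costs nothing
 * extra in that case because it rides in the two NOP bytes of the aligned
 * timestamp option; only when timestamps are off is
 * TCPOLEN_SACKPERM_ALIGNED (4 bytes) added.
 */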
2126 #ifdef CONFIG_TCP_MD5SIG
2127 /* Are we doing MD5 on this segment? If so - make room for it */
2128 md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
2129 if (md5)
2130 tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
2131 #endif
2132 skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
2134 memset(th, 0, sizeof(struct tcphdr));
2135 th->syn = 1;
2136 th->ack = 1;
2137 TCP_ECN_make_synack(req, th);
2138 th->source = inet_sk(sk)->sport;
2139 th->dest = ireq->rmt_port;
2140 TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
2141 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
2142 TCP_SKB_CB(skb)->sacked = 0;
2143 skb_shinfo(skb)->gso_segs = 1;
2144 skb_shinfo(skb)->gso_size = 0;
2145 skb_shinfo(skb)->gso_type = 0;
2146 th->seq = htonl(TCP_SKB_CB(skb)->seq);
2147 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
2148 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2149 __u8 rcv_wscale;
2150 /* Set this up on the first call only */
2151 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2152 /* tcp_full_space because it is guaranteed to be the first packet */
2153 tcp_select_initial_window(tcp_full_space(sk),
2154 dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2155 &req->rcv_wnd,
2156 &req->window_clamp,
2157 ireq->wscale_ok,
2158 &rcv_wscale);
2159 ireq->rcv_wscale = rcv_wscale;
2160 }
2162 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2163 th->window = htons(req->rcv_wnd);
2165 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2166 tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
2167 ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
2168 TCP_SKB_CB(skb)->when,
2169 req->ts_recent,
2171 #ifdef CONFIG_TCP_MD5SIG
2172 md5 ? &md5_hash_location :
2173 #endif
2174 NULL);
2178 th->doff = (tcp_header_size >> 2);
2179 TCP_INC_STATS(TCP_MIB_OUTSEGS);
2181 #ifdef CONFIG_TCP_MD5SIG
2182 /* Okay, we have all we need - do the md5 hash if needed */
2183 if (md5) {
2184 tp->af_specific->calc_md5_hash(md5_hash_location,
2185 md5,
2186 NULL, dst, req,
2187 skb->h.th, sk->sk_protocol,
2188 skb->len);
2189 }
2190 #endif
2192 return skb;
2193 }
2195 /*
2196 * Do all connect socket setups that can be done AF independent.
2197 */
2198 static void tcp_connect_init(struct sock *sk)
2199 {
2200 struct dst_entry *dst = __sk_dst_get(sk);
2201 struct tcp_sock *tp = tcp_sk(sk);
2202 __u8 rcv_wscale;
2204 /* We'll fix this up when we get a response from the other end.
2205 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2206 */
2207 tp->tcp_header_len = sizeof(struct tcphdr) +
2208 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
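/* Editor's illustration: with sysctl_tcp_timestamps enabled this yields
 * tcp_header_len = 20 + TCPOLEN_TSTAMP_ALIGNED = 20 + 12 = 32 bytes,
 * which tcp_select_initial_window() below subtracts from the advertised
 * MSS once a timestamp has actually been seen (ts_recent_stamp != 0).
 */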
2210 #ifdef CONFIG_TCP_MD5SIG
2211 if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2212 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2213 #endif
2215 /* If user gave his TCP_MAXSEG, record it to clamp */
2216 if (tp->rx_opt.user_mss)
2217 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2218 tp->max_window = 0;
2219 tcp_mtup_init(sk);
2220 tcp_sync_mss(sk, dst_mtu(dst));
2222 if (!tp->window_clamp)
2223 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2224 tp->advmss = dst_metric(dst, RTAX_ADVMSS);
2225 tcp_initialize_rcv_mss(sk);
2227 tcp_select_initial_window(tcp_full_space(sk),
2228 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2229 &tp->rcv_wnd,
2230 &tp->window_clamp,
2231 sysctl_tcp_window_scaling,
2232 &rcv_wscale);
2234 tp->rx_opt.rcv_wscale = rcv_wscale;
2235 tp->rcv_ssthresh = tp->rcv_wnd;
2238 sock_reset_flag(sk, SOCK_DONE);
2240 tcp_init_wl(tp, tp->write_seq, 0);
2241 tp->snd_una = tp->write_seq;
2242 tp->snd_sml = tp->write_seq;
2247 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2248 inet_csk(sk)->icsk_retransmits = 0;
2249 tcp_clear_retrans(tp);
2250 }
2252 /*
2253 * Build a SYN and send it off.
2254 */
2255 int tcp_connect(struct sock *sk)
2256 {
2257 struct tcp_sock *tp = tcp_sk(sk);
2258 struct sk_buff *buff;
2260 tcp_connect_init(sk);
2262 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
2263 if (unlikely(buff == NULL))
2264 return -ENOBUFS;
2266 /* Reserve space for headers. */
2267 skb_reserve(buff, MAX_TCP_HEADER);
2269 TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
2270 TCP_ECN_send_syn(sk, tp, buff);
2271 TCP_SKB_CB(buff)->sacked = 0;
2272 skb_shinfo(buff)->gso_segs = 1;
2273 skb_shinfo(buff)->gso_size = 0;
2274 skb_shinfo(buff)->gso_type = 0;
2276 tp->snd_nxt = tp->write_seq;
2277 TCP_SKB_CB(buff)->seq = tp->write_seq++;
2278 TCP_SKB_CB(buff)->end_seq = tp->write_seq;
2281 TCP_SKB_CB(buff)->when = tcp_time_stamp;
2282 tp->retrans_stamp = TCP_SKB_CB(buff)->when;
2283 skb_header_release(buff);
2284 __skb_queue_tail(&sk->sk_write_queue, buff);
2285 sk_charge_skb(sk, buff);
2286 tp->packets_out += tcp_skb_pcount(buff);
2287 tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
2289 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2290 * in order to make this packet get counted in tcpOutSegs.
2292 tp->snd_nxt = tp->write_seq;
2293 tp->pushed_seq = tp->write_seq;
2294 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
2296 /* Timer for repeating the SYN until an answer. */
2297 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2298 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
2299 return 0;
2300 }
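/* Editor's summary of the active-open sequence above: initialise the
 * connection state, build a SYN that consumes one sequence number
 * (write_seq++), transmit it, account it in packets_out, and arm the
 * retransmission timer with the initial RTO (TCP_TIMEOUT_INIT set in
 * tcp_connect_init()) so the SYN is retried until an answer arrives.
 * Address-family code such as tcp_v4_connect() is expected to call this
 * once the route and initial sequence number have been set up.
 */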
2302 /* Send out a delayed ack, the caller does the policy checking
2303 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
2304 * for details.
2305 */
2306 void tcp_send_delayed_ack(struct sock *sk)
2307 {
2308 struct inet_connection_sock *icsk = inet_csk(sk);
2309 int ato = icsk->icsk_ack.ato;
2310 unsigned long timeout;
2312 if (ato > TCP_DELACK_MIN) {
2313 const struct tcp_sock *tp = tcp_sk(sk);
2314 int max_ato = HZ / 2;
2316 if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
2317 max_ato = TCP_DELACK_MAX;
2319 /* Slow path, intersegment interval is "high". */
2321 /* If some rtt estimate is known, use it to bound delayed ack.
2322 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
2323 * directly.
2324 */
2325 if (tp->srtt) {
2326 int rtt = max(tp->srtt>>3, TCP_DELACK_MIN);
2328 if (rtt < max_ato)
2329 max_ato = rtt;
2330 }
2332 ato = min(ato, max_ato);
2333 }
2335 /* Stay within the limit we were given */
2336 timeout = jiffies + ato;
2338 /* Use new timeout only if there wasn't an older one earlier. */
2339 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
2340 /* If delack timer was blocked or is about to expire,
2341 * send ACK now.
2342 */
2343 if (icsk->icsk_ack.blocked ||
2344 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
2345 tcp_send_ack(sk);
2346 return;
2347 }
2349 if (!time_before(timeout, icsk->icsk_ack.timeout))
2350 timeout = icsk->icsk_ack.timeout;
2351 }
2352 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2353 icsk->icsk_ack.timeout = timeout;
2354 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
2355 }
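/* Worked example (editor's illustration, assuming HZ == 1000): with
 * ato == 200 ticks and srtt >> 3 == 40 ticks, max_ato starts at
 * HZ / 2 == 500, is lowered to max(40, TCP_DELACK_MIN) == 40 by the RTT
 * bound, so ato = min(200, 40) = 40 and the delayed-ACK timer is
 * (re)armed for roughly 40 ms, unless an already-pending timer would
 * fire sooner.
 */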
2357 /* This routine sends an ack and also updates the window. */
2358 void tcp_send_ack(struct sock *sk)
2359 {
2360 /* If we have been reset, we may not send again. */
2361 if (sk->sk_state != TCP_CLOSE) {
2362 struct tcp_sock *tp = tcp_sk(sk);
2363 struct sk_buff *buff;
2365 /* We are not putting this on the write queue, so
2366 * tcp_transmit_skb() will set the ownership to this
2367 * sock.
2368 */
2369 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2370 if (buff == NULL) {
2371 inet_csk_schedule_ack(sk);
2372 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
2373 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
2374 TCP_DELACK_MAX, TCP_RTO_MAX);
2375 return;
2376 }
2378 /* Reserve space for headers and prepare control bits. */
2379 skb_reserve(buff, MAX_TCP_HEADER);
2381 TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
2382 TCP_SKB_CB(buff)->sacked = 0;
2383 skb_shinfo(buff)->gso_segs = 1;
2384 skb_shinfo(buff)->gso_size = 0;
2385 skb_shinfo(buff)->gso_type = 0;
2387 /* Send it off, this clears delayed acks for us. */
2388 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
2389 TCP_SKB_CB(buff)->when = tcp_time_stamp;
2390 tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
2391 }
2392 }
2394 /* This routine sends a packet with an out of date sequence
2395 * number. It assumes the other end will try to ack it.
2397 * Question: what should we do in urgent mode?
2398 * 4.4BSD forces sending a single byte of data. We cannot send
2399 * out-of-window data, because we have SND.NXT==SND.MAX...
2400 *
2401 * Current solution: send TWO zero-length segments in urgent mode:
2402 * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, the other
2403 * out-of-date with SND.UNA-1 to probe the window.
2404 */
2405 static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2406 {
2407 struct tcp_sock *tp = tcp_sk(sk);
2408 struct sk_buff *skb;
2410 /* We don't queue it, tcp_transmit_skb() sets ownership. */
2411 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2412 if (skb == NULL)
2413 return -1;
2415 /* Reserve space for headers and set control bits. */
2416 skb_reserve(skb, MAX_TCP_HEADER);
2418 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
2419 TCP_SKB_CB(skb)->sacked = urgent;
2420 skb_shinfo(skb)->gso_segs = 1;
2421 skb_shinfo(skb)->gso_size = 0;
2422 skb_shinfo(skb)->gso_type = 0;
2424 /* Use a previous sequence. This should cause the other
2425 * end to send an ack. Don't queue or clone SKB, just
2426 * send it.
2427 */
2428 TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
2429 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
2430 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2431 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
2432 }
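/* Editor's illustration of the probe sequence numbers chosen above,
 * assuming snd_una == 5000: an urgent-mode probe (urgent != 0) is sent
 * with seq = 5000 so the urgent pointer is delivered, while an ordinary
 * window probe uses the already-acknowledged seq = 4999; both are zero
 * length, so the peer only answers with an ACK that reveals its current
 * window.
 */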
2434 int tcp_write_wakeup(struct sock *sk)
2435 {
2436 if (sk->sk_state != TCP_CLOSE) {
2437 struct tcp_sock *tp = tcp_sk(sk);
2438 struct sk_buff *skb;
2440 if ((skb = sk->sk_send_head) != NULL &&
2441 before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
2442 int err;
2443 unsigned int mss = tcp_current_mss(sk, 0);
2444 unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq;
2446 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
2447 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
2449 /* We are probing the opening of a window
2450 * but the window size is != 0,
2451 * which must be a result of sender-side SWS avoidance.
2452 */
2453 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
2454 skb->len > mss) {
2455 seg_size = min(seg_size, mss);
2456 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2457 if (tcp_fragment(sk, skb, seg_size, mss))
2458 return -1;
2459 } else if (!tcp_skb_pcount(skb))
2460 tcp_set_skb_tso_segs(sk, skb, mss);
2462 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2463 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2464 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2465 if (!err) {
2466 update_send_head(sk, tp, skb);
2467 }
2468 return err;
2469 } else {
2470 if (tp->urg_mode &&
2471 between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF))
2472 tcp_xmit_probe_skb(sk, TCPCB_URG);
2473 return tcp_xmit_probe_skb(sk, 0);
2474 }
2475 }
2476 return -1;
2477 }
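/* Editor's note: tcp_write_wakeup() prefers sending real data when the
 * peer's window admits at least one new byte, fragmenting the head skb
 * down to the usable window (or MSS) if necessary; only when nothing
 * fits does it fall back to the zero-length probes built by
 * tcp_xmit_probe_skb() above.
 */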
2479 /* A window probe timeout has occurred. If window is not closed send
2480 * a partial packet else a zero probe.
2481 */
2482 void tcp_send_probe0(struct sock *sk)
2483 {
2484 struct inet_connection_sock *icsk = inet_csk(sk);
2485 struct tcp_sock *tp = tcp_sk(sk);
2486 int err;
2488 err = tcp_write_wakeup(sk);
2490 if (tp->packets_out || !sk->sk_send_head) {
2491 /* Cancel probe timer, if it is not required. */
2492 icsk->icsk_probes_out = 0;
2493 icsk->icsk_backoff = 0;
2494 return;
2495 }
2497 if (err <= 0) {
2498 if (icsk->icsk_backoff < sysctl_tcp_retries2)
2499 icsk->icsk_backoff++;
2500 icsk->icsk_probes_out++;
2501 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2502 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2503 TCP_RTO_MAX);
2504 } else {
2505 /* If packet was not sent due to local congestion,
2506 * do not backoff and do not remember icsk_probes_out.
2507 * Let local senders fight for local resources.
2508 *
2509 * But still use the accumulated backoff.
2510 */
2511 if (!icsk->icsk_probes_out)
2512 icsk->icsk_probes_out = 1;
2513 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2514 min(icsk->icsk_rto << icsk->icsk_backoff,
2515 TCP_RESOURCE_PROBE_INTERVAL),
2516 TCP_RTO_MAX);
2517 }
2518 }
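/* Worked example (editor's illustration): with icsk_rto == 200 ms the
 * probe interval after successive unanswered probes is roughly
 * 200, 400, 800, 1600, ... ms (icsk_rto << icsk_backoff), clamped to
 * TCP_RTO_MAX; when a probe could not even be sent because of local
 * resource pressure, the timer is rearmed no later than
 * TCP_RESOURCE_PROBE_INTERVAL without increasing the backoff.
 */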
2520 EXPORT_SYMBOL(tcp_connect);
2521 EXPORT_SYMBOL(tcp_make_synack);
2522 EXPORT_SYMBOL(tcp_simple_retransmit);
2523 EXPORT_SYMBOL(tcp_sync_mss);
2524 EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
2525 EXPORT_SYMBOL(tcp_mtup_init);