2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol (TCP).
8 * Version: $Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
24 * Changes: Pedro Roque : Retransmit queue handled by TCP.
25 * : Fragmentation on mtu decrease
26 * : Segment collapse on retransmit
29 * Linus Torvalds : send_delayed_ack
30 * David S. Miller : Charge memory using the right skb
31 * during syn/ack processing.
32 * David S. Miller : Output engine completely rewritten.
33 * Andrea Arcangeli: SYNACK carry ts_recent in tsecr.
34 * Cacophonix Gaul : draft-minshall-nagle-01
35 * J Hadi Salim : ECN support
41 #include <linux/compiler.h>
42 #include <linux/module.h>
43 #include <linux/smp_lock.h>
45 /* People can turn this off for buggy TCP's found in printers etc. */
46 int sysctl_tcp_retrans_collapse __read_mostly = 1;
48 /* People can turn this on to work with those rare, broken TCPs that
49 * interpret the window field as a signed quantity.
51 int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
53 /* This limits the percentage of the congestion window which we
54 * will allow a single TSO frame to consume. Building TSO frames
55 * which are too large can cause TCP streams to be bursty.
57 int sysctl_tcp_tso_win_divisor __read_mostly = 3;
59 int sysctl_tcp_mtu_probing __read_mostly = 0;
60 int sysctl_tcp_base_mss __read_mostly = 512;
62 /* By default, RFC2861 behavior. */
63 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
65 static void update_send_head(struct sock *sk, struct tcp_sock *tp,
66 struct sk_buff *skb)
68 sk->sk_send_head = skb->next;
69 if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
70 sk->sk_send_head = NULL;
71 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
72 tcp_packets_out_inc(sk, tp, skb);
75 /* Use SND.NXT, if the window was not shrunk.
76 * If the window has been shrunk, what should we use? It is not clear at all.
77 * Using SND.UNA we will fail to open the window, SND.NXT is out of window. :-(
78 * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
79 * invalid. OK, let's make this for now:
81 static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
83 if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
84 return tp->snd_nxt;
85 else
86 return tp->snd_una+tp->snd_wnd;
89 /* Calculate mss to advertise in SYN segment.
90 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
92 * 1. It is independent of path mtu.
93 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
94 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
95 * attached devices, because some buggy hosts are confused by large MSSes.
97 * 4. We do not do 3; we advertise an MSS calculated from the first
98 * hop device MTU, but allow it to be raised to ip_rt_min_advmss.
99 * This may be overridden via information stored in routing table.
100 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
101 * probably even Jumbo".
103 static __u16 tcp_advertise_mss(struct sock *sk)
105 struct tcp_sock *tp = tcp_sk(sk);
106 struct dst_entry *dst = __sk_dst_get(sk);
107 int mss = tp->advmss;
109 if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
110 mss = dst_metric(dst, RTAX_ADVMSS);
117 /* RFC2861. Reset CWND after an idle period longer than RTO to the "restart window".
118 * This is the first part of the cwnd validation mechanism. */
119 static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
121 struct tcp_sock *tp = tcp_sk(sk);
122 s32 delta = tcp_time_stamp - tp->lsndtime;
123 u32 restart_cwnd = tcp_init_cwnd(tp, dst);
124 u32 cwnd = tp->snd_cwnd;
126 tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
128 tp->snd_ssthresh = tcp_current_ssthresh(sk);
129 restart_cwnd = min(restart_cwnd, cwnd);
131 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
132 cwnd >>= 1;
133 tp->snd_cwnd = max(cwnd, restart_cwnd);
134 tp->snd_cwnd_stamp = tcp_time_stamp;
135 tp->snd_cwnd_used = 0;
138 static void tcp_event_data_sent(struct tcp_sock *tp,
139 struct sk_buff *skb, struct sock *sk)
141 struct inet_connection_sock *icsk = inet_csk(sk);
142 const u32 now = tcp_time_stamp;
144 if (sysctl_tcp_slow_start_after_idle &&
145 (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
146 tcp_cwnd_restart(sk, __sk_dst_get(sk));
150 /* If it is a reply within ato after the last received
151 * packet, enter pingpong mode.
153 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
154 icsk->icsk_ack.pingpong = 1;
157 static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
159 tcp_dec_quickack_mode(sk, pkts);
160 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
163 /* Determine a window scaling and initial window to offer.
164 * Based on the assumption that the given amount of space
165 * will be offered. Store the results in the tp structure.
166 * NOTE: for smooth operation initial space offering should
167 * be a multiple of mss if possible. We assume here that mss >= 1.
168 * This MUST be enforced by all callers.
170 void tcp_select_initial_window(int __space, __u32 mss,
171 __u32 *rcv_wnd, __u32 *window_clamp,
172 int wscale_ok, __u8 *rcv_wscale)
174 unsigned int space = (__space < 0 ? 0 : __space);
176 /* If no clamp is set, set the clamp to the max possible scaled window */
177 if (*window_clamp == 0)
178 (*window_clamp) = (65535 << 14);
179 space = min(*window_clamp, space);
181 /* Quantize space offering to a multiple of mss if possible. */
182 if (space > mss)
183 space = (space / mss) * mss;
185 /* NOTE: offering an initial window larger than 32767
186 * will break some buggy TCP stacks. If the admin tells us
187 * it is likely we could be speaking with such a buggy stack
188 * we will truncate our initial window offering to 32K-1
189 * unless the remote has sent us a window scaling option,
190 * which we interpret as a sign the remote TCP is not
191 * misinterpreting the window field as a signed quantity.
193 if (sysctl_tcp_workaround_signed_windows)
194 (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
195 else
196 (*rcv_wnd) = space;
198 (*rcv_wscale) = 0;
200 /* Set window scaling on max possible window
201 * See RFC1323 for an explanation of the limit to 14
203 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
204 space = min_t(u32, space, *window_clamp);
205 while (space > 65535 && (*rcv_wscale) < 14) {
206 space >>= 1;
207 (*rcv_wscale)++;
208 }
211 /* Set the initial window to a value large enough for senders
212 * following RFC2414. Senders not following this RFC
213 * will be satisfied with 2.
215 if (mss > (1<<*rcv_wscale)) {
216 int init_cwnd = 4;
217 if (mss > 1460*3)
218 init_cwnd = 2;
219 else if (mss > 1460)
220 init_cwnd = 3;
221 if (*rcv_wnd > init_cwnd*mss)
222 *rcv_wnd = init_cwnd*mss;
225 /* Set the clamp no higher than max representable value */
226 (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
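/* Worked example of the scaling choice above (a sketch based on the loop
 * over "space"): with a 256 KB receive buffer, space starts at 262144 and
 * is halved until it fits in 16 bits, reaching 32768 after three shifts,
 * so rcv_wscale ends up as 3 and the peer interprets our 16-bit window
 * field as window << 3.
 */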
229 /* Choose a new window to advertise, update state in tcp_sock for the
230 * socket, and return the result with RFC1323 scaling applied. The return
231 * value can be stuffed directly into th->window for an outgoing frame.
234 static u16 tcp_select_window(struct sock *sk)
236 struct tcp_sock *tp = tcp_sk(sk);
237 u32 cur_win = tcp_receive_window(tp);
238 u32 new_win = __tcp_select_window(sk);
240 /* Never shrink the offered window */
241 if(new_win < cur_win) {
242 /* Danger Will Robinson!
243 * Don't update rcv_wup/rcv_wnd here or else
244 * we will not be able to advertise a zero
245 * window in time. --DaveM
247 * Relax Will Robinson.
251 tp->rcv_wnd = new_win;
252 tp->rcv_wup = tp->rcv_nxt;
254 /* Make sure we do not exceed the maximum possible
257 if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
258 new_win = min(new_win, MAX_TCP_WINDOW);
260 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
262 /* RFC1323 scaling applied */
263 new_win >>= tp->rx_opt.rcv_wscale;
265 /* If we advertise zero window, disable fast path. */
272 static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
273 __u32 tstamp, __u8 **md5_hash)
275 if (tp->rx_opt.tstamp_ok) {
276 *ptr++ = htonl((TCPOPT_NOP << 24) |
277 (TCPOPT_NOP << 16) |
278 (TCPOPT_TIMESTAMP << 8) |
279 TCPOLEN_TIMESTAMP);
280 *ptr++ = htonl(tstamp);
281 *ptr++ = htonl(tp->rx_opt.ts_recent);
283 if (tp->rx_opt.eff_sacks) {
284 struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
287 *ptr++ = htonl((TCPOPT_NOP << 24) |
288 (TCPOPT_NOP << 16) |
289 (TCPOPT_SACK << 8) |
290 (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
291 TCPOLEN_SACK_PERBLOCK)));
292 for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
293 *ptr++ = htonl(sp[this_sack].start_seq);
294 *ptr++ = htonl(sp[this_sack].end_seq);
296 if (tp->rx_opt.dsack) {
297 tp->rx_opt.dsack = 0;
298 tp->rx_opt.eff_sacks--;
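/* On the wire each SACK block written above is a pair of 32-bit sequence
 * numbers (left edge, right edge) after the 2-byte option header:
 * TCPOLEN_SACK_BASE plus eff_sacks * TCPOLEN_SACK_PERBLOCK is exactly that
 * length, and the two leading NOPs pad it to a 4-byte boundary.
 */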
301 #ifdef CONFIG_TCP_MD5SIG
303 *ptr++ = htonl((TCPOPT_NOP << 24) |
304 (TCPOPT_NOP << 16) |
305 (TCPOPT_MD5SIG << 8) |
306 TCPOLEN_MD5SIG);
307 *md5_hash = (__u8 *)ptr;
312 /* Construct a tcp options header for a SYN or SYN_ACK packet.
313 * If this is ever changed make sure to change the definition of
314 * MAX_SYN_SIZE to match the new maximum number of options that you
315 * can put in a SYN packet.
317 * Note - that with the RFC2385 TCP option, we make room for the
318 * 16 byte MD5 hash. This will be filled in later, so the pointer for the
319 * location to be filled is passed back up.
321 static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
322 int offer_wscale, int wscale, __u32 tstamp,
323 __u32 ts_recent, __u8 **md5_hash)
325 /* We always get an MSS option.
326 * The option bytes which will be seen in normal data
327 * packets should timestamps be used, must be in the MSS
328 * advertised. But we subtract them from tp->mss_cache so
329 * that calculations in tcp_sendmsg are simpler etc.
330 * So account for this fact here if necessary. If we
331 * don't do this correctly, as a receiver we won't
332 * recognize data packets as being full sized when we
333 * should, and thus we won't abide by the delayed ACK rules correctly.
335 * SACKs don't matter, we never delay an ACK when we
336 * have any of those going out.
338 *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
341 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
342 (TCPOLEN_SACK_PERM << 16) |
343 (TCPOPT_TIMESTAMP << 8) |
344 TCPOLEN_TIMESTAMP);
345 else
346 *ptr++ = htonl((TCPOPT_NOP << 24) |
347 (TCPOPT_NOP << 16) |
348 (TCPOPT_TIMESTAMP << 8) |
349 TCPOLEN_TIMESTAMP);
350 *ptr++ = htonl(tstamp); /* TSVAL */
351 *ptr++ = htonl(ts_recent); /* TSECR */
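/* The two words above carry the RFC1323 timestamp option: TSVAL is our
 * current clock, TSECR echoes the peer's most recent timestamp, and the
 * NOP padding keeps the 10-byte option aligned to the 12 bytes of
 * TCPOLEN_TSTAMP_ALIGNED.
 */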
352 } else if (sack)
353 *ptr++ = htonl((TCPOPT_NOP << 24) |
354 (TCPOPT_NOP << 16) |
355 (TCPOPT_SACK_PERM << 8) |
356 TCPOLEN_SACK_PERM);
357 if (offer_wscale)
358 *ptr++ = htonl((TCPOPT_NOP << 24) |
359 (TCPOPT_WINDOW << 16) |
360 (TCPOLEN_WINDOW << 8) |
361 (wscale));
362 #ifdef CONFIG_TCP_MD5SIG
364 * If MD5 is enabled, then we set the option, and include the size
365 * (always 18). The actual MD5 hash is added just before the
366 * packet is sent.
367 */
369 *ptr++ = htonl((TCPOPT_NOP << 24) |
370 (TCPOPT_NOP << 16) |
371 (TCPOPT_MD5SIG << 8) |
372 TCPOLEN_MD5SIG);
373 *md5_hash = (__u8 *) ptr;
378 /* This routine actually transmits TCP packets queued in by
379 * tcp_do_sendmsg(). This is used by both the initial
380 * transmission and possible later retransmissions.
381 * All SKB's seen here are completely headerless. It is our
382 * job to build the TCP header, and pass the packet down to
383 * IP so it can do the same plus pass the packet off to the device.
386 * We are working here with either a clone of the original
387 * SKB, or a fresh unique copy made by the retransmit engine.
389 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
391 const struct inet_connection_sock *icsk = inet_csk(sk);
392 struct inet_sock *inet;
394 struct tcp_skb_cb *tcb;
396 #ifdef CONFIG_TCP_MD5SIG
397 struct tcp_md5sig_key *md5;
398 __u8 *md5_hash_location;
404 BUG_ON(!skb || !tcp_skb_pcount(skb));
406 /* If congestion control is doing timestamping, we must
407 * take such a timestamp before we potentially clone/copy.
409 if (icsk->icsk_ca_ops->rtt_sample)
410 __net_timestamp(skb);
412 if (likely(clone_it)) {
413 if (unlikely(skb_cloned(skb)))
414 skb = pskb_copy(skb, gfp_mask);
416 skb = skb_clone(skb, gfp_mask);
423 tcb = TCP_SKB_CB(skb);
424 tcp_header_size = tp->tcp_header_len;
426 #define SYSCTL_FLAG_TSTAMPS 0x1
427 #define SYSCTL_FLAG_WSCALE 0x2
428 #define SYSCTL_FLAG_SACK 0x4
431 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
432 tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
433 if(sysctl_tcp_timestamps) {
434 tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
435 sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
437 if (sysctl_tcp_window_scaling) {
438 tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
439 sysctl_flags |= SYSCTL_FLAG_WSCALE;
441 if (sysctl_tcp_sack) {
442 sysctl_flags |= SYSCTL_FLAG_SACK;
443 if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
444 tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
446 } else if (unlikely(tp->rx_opt.eff_sacks)) {
447 /* A SACK is 2 pad bytes, a 2 byte header, plus
448 * 2 32-bit sequence numbers for each SACK block.
450 tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
451 (tp->rx_opt.eff_sacks *
452 TCPOLEN_SACK_PERBLOCK));
455 if (tcp_packets_in_flight(tp) == 0)
456 tcp_ca_event(sk, CA_EVENT_TX_START);
458 #ifdef CONFIG_TCP_MD5SIG
460 * Are we doing MD5 on this segment? If so - make room for it.
462 */
463 md5 = tp->af_specific->md5_lookup(sk, sk);
465 tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
468 th = (struct tcphdr *) skb_push(skb, tcp_header_size);
470 skb_set_owner_w(skb, sk);
472 /* Build TCP header and checksum it. */
473 th->source = inet->sport;
474 th->dest = inet->dport;
475 th->seq = htonl(tcb->seq);
476 th->ack_seq = htonl(tp->rcv_nxt);
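/* The next store writes the 16-bit word that holds both the data offset
 * and the flag bits: the header length in 32-bit words goes into the top
 * four bits, and the TCP flags from tcb->flags fill the low byte.
 */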
477 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
480 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
481 /* RFC1323: The window in SYN & SYN/ACK segments
482 * is never scaled.
483 */
484 th->window = htons(tp->rcv_wnd);
486 th->window = htons(tcp_select_window(sk));
491 if (unlikely(tp->urg_mode &&
492 between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
493 th->urg_ptr = htons(tp->snd_up-tcb->seq);
497 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
498 tcp_syn_build_options((__be32 *)(th + 1),
499 tcp_advertise_mss(sk),
500 (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
501 (sysctl_flags & SYSCTL_FLAG_SACK),
502 (sysctl_flags & SYSCTL_FLAG_WSCALE),
503 tp->rx_opt.rcv_wscale,
505 tp->rx_opt.ts_recent,
507 #ifdef CONFIG_TCP_MD5SIG
508 md5 ? &md5_hash_location :
512 tcp_build_and_update_options((__be32 *)(th + 1),
514 #ifdef CONFIG_TCP_MD5SIG
515 md5 ? &md5_hash_location :
518 TCP_ECN_send(sk, tp, skb, tcp_header_size);
521 #ifdef CONFIG_TCP_MD5SIG
522 /* Calculate the MD5 hash, as we have all we need now */
524 tp->af_specific->calc_md5_hash(md5_hash_location,
533 icsk->icsk_af_ops->send_check(sk, skb->len, skb);
535 if (likely(tcb->flags & TCPCB_FLAG_ACK))
536 tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
538 if (skb->len != tcp_header_size)
539 tcp_event_data_sent(tp, skb, sk);
541 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
542 TCP_INC_STATS(TCP_MIB_OUTSEGS);
544 err = icsk->icsk_af_ops->queue_xmit(skb, 0);
545 if (likely(err <= 0))
546 return err;
550 return net_xmit_eval(err);
552 #undef SYSCTL_FLAG_TSTAMPS
553 #undef SYSCTL_FLAG_WSCALE
554 #undef SYSCTL_FLAG_SACK
558 /* This routine just queues the buffer.
560 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
561 * otherwise socket can stall.
563 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
565 struct tcp_sock *tp = tcp_sk(sk);
567 /* Advance write_seq and place onto the write_queue. */
568 tp->write_seq = TCP_SKB_CB(skb)->end_seq;
569 skb_header_release(skb);
570 __skb_queue_tail(&sk->sk_write_queue, skb);
571 sk_charge_skb(sk, skb);
573 /* Queue it, remembering where we must start sending. */
574 if (sk->sk_send_head == NULL)
575 sk->sk_send_head = skb;
578 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
580 if (skb->len <= mss_now || !sk_can_gso(sk)) {
581 /* Avoid the costly divide in the normal
584 skb_shinfo(skb)->gso_segs = 1;
585 skb_shinfo(skb)->gso_size = 0;
586 skb_shinfo(skb)->gso_type = 0;
590 factor = skb->len + (mss_now - 1);
591 factor /= mss_now;
592 skb_shinfo(skb)->gso_segs = factor;
593 skb_shinfo(skb)->gso_size = mss_now;
594 skb_shinfo(skb)->gso_type = sk->sk_gso_type;
598 /* Function to create two new TCP segments. Shrinks the given segment
599 * to the specified size and appends a new segment with the rest of the
600 * packet to the list. This won't be called frequently, I hope.
601 * Remember, these are still headerless SKBs at this point.
603 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
605 struct tcp_sock *tp = tcp_sk(sk);
606 struct sk_buff *buff;
607 int nsize, old_factor;
611 BUG_ON(len > skb->len);
613 clear_all_retrans_hints(tp);
614 nsize = skb_headlen(skb) - len;
618 if (skb_cloned(skb) &&
619 skb_is_nonlinear(skb) &&
620 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
621 return -ENOMEM;
623 /* Get a new skb... force flag on. */
624 buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
625 if (buff == NULL)
626 return -ENOMEM; /* We'll just try again later. */
628 sk_charge_skb(sk, buff);
629 nlen = skb->len - len - nsize;
630 buff->truesize += nlen;
631 skb->truesize -= nlen;
633 /* Correct the sequence numbers. */
634 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
635 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
636 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
638 /* PSH and FIN should only be set in the second packet. */
639 flags = TCP_SKB_CB(skb)->flags;
640 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
641 TCP_SKB_CB(buff)->flags = flags;
642 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
643 TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;
645 if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
646 /* Copy and checksum data tail into the new buffer. */
647 buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
648 nsize, 0);
650 skb_trim(skb, len);
652 skb->csum = csum_block_sub(skb->csum, buff->csum, len);
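/* The csum_block_sub() above fixes up the original skb's software
 * checksum after the tail moved into buff: the checksum of the moved
 * bytes is subtracted, and the len argument keeps the odd/even byte
 * folding of the one's-complement sum correct.
 */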
654 skb->ip_summed = CHECKSUM_PARTIAL;
655 skb_split(skb, buff, len);
658 buff->ip_summed = skb->ip_summed;
660 /* Looks stupid, but our code really uses the 'when' field of
661 * skbs which it has never sent before. --ANK
663 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
664 buff->tstamp = skb->tstamp;
666 old_factor = tcp_skb_pcount(skb);
668 /* Fix up tso_factor for both original and new SKB. */
669 tcp_set_skb_tso_segs(sk, skb, mss_now);
670 tcp_set_skb_tso_segs(sk, buff, mss_now);
672 /* If this packet has been sent out already, we must
673 * adjust the various packet counters.
675 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
676 int diff = old_factor - tcp_skb_pcount(skb) -
677 tcp_skb_pcount(buff);
679 tp->packets_out -= diff;
681 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
682 tp->sacked_out -= diff;
683 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
684 tp->retrans_out -= diff;
686 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
687 tp->lost_out -= diff;
688 tp->left_out -= diff;
692 /* Adjust Reno SACK estimate. */
693 if (!tp->rx_opt.sack_ok) {
694 tp->sacked_out -= diff;
695 if ((int)tp->sacked_out < 0)
697 tcp_sync_left_out(tp);
700 tp->fackets_out -= diff;
701 if ((int)tp->fackets_out < 0)
706 /* Link BUFF into the send queue. */
707 skb_header_release(buff);
708 __skb_append(skb, buff, &sk->sk_write_queue);
713 /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
714 * eventually). The difference is that the pulled data is not copied, but
715 * immediately discarded.
717 static void __pskb_trim_head(struct sk_buff *skb, int len)
723 for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
724 if (skb_shinfo(skb)->frags[i].size <= eat) {
725 put_page(skb_shinfo(skb)->frags[i].page);
726 eat -= skb_shinfo(skb)->frags[i].size;
728 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
730 skb_shinfo(skb)->frags[k].page_offset += eat;
731 skb_shinfo(skb)->frags[k].size -= eat;
737 skb_shinfo(skb)->nr_frags = k;
739 skb->tail = skb->data;
740 skb->data_len -= len;
741 skb->len = skb->data_len;
744 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
746 if (skb_cloned(skb) &&
747 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
748 return -ENOMEM;
750 /* If len == headlen, we avoid __skb_pull to preserve alignment. */
751 if (unlikely(len < skb_headlen(skb)))
752 __skb_pull(skb, len);
753 else
754 __pskb_trim_head(skb, len - skb_headlen(skb));
756 TCP_SKB_CB(skb)->seq += len;
757 skb->ip_summed = CHECKSUM_PARTIAL;
759 skb->truesize -= len;
760 sk->sk_wmem_queued -= len;
761 sk->sk_forward_alloc += len;
762 sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
764 /* Any change of skb->len requires recalculation of tso
767 if (tcp_skb_pcount(skb) > 1)
768 tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
773 /* Not accounting for SACKs here. */
774 int tcp_mtu_to_mss(struct sock *sk, int pmtu)
776 struct tcp_sock *tp = tcp_sk(sk);
777 struct inet_connection_sock *icsk = inet_csk(sk);
780 /* Calculate base mss without TCP options:
781 It is MMS_S - sizeof(tcphdr) of rfc1122
783 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
785 /* Clamp it (mss_clamp does not include tcp options) */
786 if (mss_now > tp->rx_opt.mss_clamp)
787 mss_now = tp->rx_opt.mss_clamp;
789 /* Now subtract optional transport overhead */
790 mss_now -= icsk->icsk_ext_hdr_len;
792 /* Then reserve room for full set of TCP options and 8 bytes of data */
793 if (mss_now < 48)
794 mss_now = 48;
796 /* Now subtract TCP options size, not including SACKs */
797 mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
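/* As a rough example (IPv4, no IP or TCP options): a PMTU of 1500 gives
 * 1500 - 20 (IP) - 20 (TCP) = 1460 bytes of MSS before the clamping and
 * option adjustments above.
 */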
802 /* Inverse of above */
803 int tcp_mss_to_mtu(struct sock *sk, int mss)
805 struct tcp_sock *tp = tcp_sk(sk);
806 struct inet_connection_sock *icsk = inet_csk(sk);
807 int mtu;
809 mtu = mss +
810 tp->tcp_header_len +
811 icsk->icsk_ext_hdr_len +
812 icsk->icsk_af_ops->net_header_len;
817 void tcp_mtup_init(struct sock *sk)
819 struct tcp_sock *tp = tcp_sk(sk);
820 struct inet_connection_sock *icsk = inet_csk(sk);
822 icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
823 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
824 icsk->icsk_af_ops->net_header_len;
825 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
826 icsk->icsk_mtup.probe_size = 0;
829 /* This function synchronizes snd mss to the current pmtu/exthdr set.
831 tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT account
832 for TCP options, but includes only the bare TCP header.
834 tp->rx_opt.mss_clamp is mss negotiated at connection setup.
835 It is minimum of user_mss and mss received with SYN.
836 It also does not include TCP options.
838 inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
840 tp->mss_cache is current effective sending mss, including
841 all tcp options except for SACKs. It is evaluated,
842 taking into account current pmtu, but never exceeds
843 tp->rx_opt.mss_clamp.
845 NOTE1. rfc1122 clearly states that advertised MSS
846 DOES NOT include either tcp or ip options.
848 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
849 are READ ONLY outside this function. --ANK (980731)
852 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
854 struct tcp_sock *tp = tcp_sk(sk);
855 struct inet_connection_sock *icsk = inet_csk(sk);
858 if (icsk->icsk_mtup.search_high > pmtu)
859 icsk->icsk_mtup.search_high = pmtu;
861 mss_now = tcp_mtu_to_mss(sk, pmtu);
863 /* Bound mss with half of window */
864 if (tp->max_window && mss_now > (tp->max_window>>1))
865 mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);
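/* The 68 above is the minimum datagram size that every IPv4 module must
 * be able to forward without fragmentation (RFC 791), so the sending MSS
 * never drops below what fits in such a datagram minus the TCP header.
 */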
867 /* And store cached results */
868 icsk->icsk_pmtu_cookie = pmtu;
869 if (icsk->icsk_mtup.enabled)
870 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
871 tp->mss_cache = mss_now;
876 /* Compute the current effective MSS, taking SACKs and IP options,
877 * and even PMTU discovery events into account.
879 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
880 * cannot be large. However, taking into account rare use of URG, this
881 * is not a big flaw.
883 unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
885 struct tcp_sock *tp = tcp_sk(sk);
886 struct dst_entry *dst = __sk_dst_get(sk);
891 mss_now = tp->mss_cache;
893 if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
897 u32 mtu = dst_mtu(dst);
898 if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
899 mss_now = tcp_sync_mss(sk, mtu);
902 if (tp->rx_opt.eff_sacks)
903 mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
904 (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
906 #ifdef CONFIG_TCP_MD5SIG
907 if (tp->af_specific->md5_lookup(sk, sk))
908 mss_now -= TCPOLEN_MD5SIG_ALIGNED;
911 xmit_size_goal = mss_now;
914 xmit_size_goal = (65535 -
915 inet_csk(sk)->icsk_af_ops->net_header_len -
916 inet_csk(sk)->icsk_ext_hdr_len -
919 if (tp->max_window &&
920 (xmit_size_goal > (tp->max_window >> 1)))
921 xmit_size_goal = max((tp->max_window >> 1),
922 68U - tp->tcp_header_len);
924 xmit_size_goal -= (xmit_size_goal % mss_now);
926 tp->xmit_size_goal = xmit_size_goal;
931 /* Congestion window validation. (RFC2861) */
933 static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
935 __u32 packets_out = tp->packets_out;
937 if (packets_out >= tp->snd_cwnd) {
938 /* Network is fed fully. */
939 tp->snd_cwnd_used = 0;
940 tp->snd_cwnd_stamp = tcp_time_stamp;
942 /* Network starves. */
943 if (tp->packets_out > tp->snd_cwnd_used)
944 tp->snd_cwnd_used = tp->packets_out;
946 if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
947 tcp_cwnd_application_limited(sk);
951 static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
953 u32 window, cwnd_len;
955 window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
956 cwnd_len = mss_now * cwnd;
957 return min(window, cwnd_len);
960 /* Can at least one segment of SKB be sent right now, according to the
961 * congestion window rules? If so, return how many segments are allowed.
963 static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
967 /* Don't be strict about the congestion window for the final FIN. */
968 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
969 return 1;
971 in_flight = tcp_packets_in_flight(tp);
972 cwnd = tp->snd_cwnd;
973 if (in_flight < cwnd)
974 return (cwnd - in_flight);
979 /* This must be invoked the first time we consider transmitting
982 static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
984 int tso_segs = tcp_skb_pcount(skb);
986 if (!tso_segs ||
987 (tso_segs > 1 &&
988 tcp_skb_mss(skb) != mss_now)) {
989 tcp_set_skb_tso_segs(sk, skb, mss_now);
990 tso_segs = tcp_skb_pcount(skb);
995 static inline int tcp_minshall_check(const struct tcp_sock *tp)
997 return after(tp->snd_sml,tp->snd_una) &&
998 !after(tp->snd_sml, tp->snd_nxt);
1001 /* Return 0, if packet can be sent now without violating Nagle's rules:
1002 * 1. It is full sized.
1003 * 2. Or it contains FIN. (already checked by caller)
1004 * 3. Or TCP_NODELAY was set.
1005 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1006 * With Minshall's modification: all sent small packets are ACKed.
1009 static inline int tcp_nagle_check(const struct tcp_sock *tp,
1010 const struct sk_buff *skb,
1011 unsigned mss_now, int nonagle)
1013 return (skb->len < mss_now &&
1014 ((nonagle&TCP_NAGLE_CORK) ||
1015 (!nonagle &&
1016 tp->packets_out &&
1017 tcp_minshall_check(tp))));
1020 /* Return non-zero if the Nagle test allows this packet to be
1023 static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
1024 unsigned int cur_mss, int nonagle)
1026 /* Nagle rule does not apply to frames which sit in the middle of the
1027 * write_queue (they have no chance to get new data).
1029 * This is implemented in the callers, where they modify the 'nonagle'
1030 * argument based upon the location of SKB in the send queue.
1032 if (nonagle & TCP_NAGLE_PUSH)
1035 /* Don't use the nagle rule for urgent data (or for the final FIN). */
1036 if (tp->urg_mode ||
1037 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
1038 return 1;
1040 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1046 /* Does at least the first segment of SKB fit into the send window? */
1047 static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
1049 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1051 if (skb->len > cur_mss)
1052 end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1054 return !after(end_seq, tp->snd_una + tp->snd_wnd);
1057 /* This checks if the data bearing packet SKB (usually sk->sk_send_head)
1058 * should be put on the wire right now. If so, it returns the number of
1059 * packets allowed by the congestion window.
1061 static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
1062 unsigned int cur_mss, int nonagle)
1064 struct tcp_sock *tp = tcp_sk(sk);
1065 unsigned int cwnd_quota;
1067 tcp_init_tso_segs(sk, skb, cur_mss);
1069 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1070 return 0;
1072 cwnd_quota = tcp_cwnd_test(tp, skb);
1073 if (cwnd_quota &&
1074 !tcp_snd_wnd_test(tp, skb, cur_mss))
1075 cwnd_quota = 0;
1080 static inline int tcp_skb_is_last(const struct sock *sk,
1081 const struct sk_buff *skb)
1083 return skb->next == (struct sk_buff *)&sk->sk_write_queue;
1086 int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
1088 struct sk_buff *skb = sk->sk_send_head;
1090 return (skb &&
1091 tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
1092 (tcp_skb_is_last(sk, skb) ?
1093 tp->nonagle : TCP_NAGLE_PUSH)));
1097 /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1098 * which is put after SKB on the list. It is very much like
1099 * tcp_fragment() except that it may make several kinds of assumptions
1100 * in order to speed up the splitting operation. In particular, we
1101 * know that all the data is in scatter-gather pages, and that the
1102 * packet has never been sent out before (and thus is not cloned).
1104 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
1106 struct sk_buff *buff;
1107 int nlen = skb->len - len;
1110 /* All of a TSO frame must be composed of paged data. */
1111 if (skb->len != skb->data_len)
1112 return tcp_fragment(sk, skb, len, mss_now);
1114 buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
1115 if (unlikely(buff == NULL))
1118 sk_charge_skb(sk, buff);
1119 buff->truesize += nlen;
1120 skb->truesize -= nlen;
1122 /* Correct the sequence numbers. */
1123 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1124 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1125 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1127 /* PSH and FIN should only be set in the second packet. */
1128 flags = TCP_SKB_CB(skb)->flags;
1129 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1130 TCP_SKB_CB(buff)->flags = flags;
1132 /* This packet was never sent out yet, so no SACK bits. */
1133 TCP_SKB_CB(buff)->sacked = 0;
1135 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1136 skb_split(skb, buff, len);
1138 /* Fix up tso_factor for both original and new SKB. */
1139 tcp_set_skb_tso_segs(sk, skb, mss_now);
1140 tcp_set_skb_tso_segs(sk, buff, mss_now);
1142 /* Link BUFF into the send queue. */
1143 skb_header_release(buff);
1144 __skb_append(skb, buff, &sk->sk_write_queue);
1149 /* Try to defer sending, if possible, in order to minimize the amount
1150 * of TSO splitting we do. View it as a kind of TSO Nagle test.
1152 * This algorithm is from John Heffner.
1154 static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
1156 const struct inet_connection_sock *icsk = inet_csk(sk);
1157 u32 send_win, cong_win, limit, in_flight;
1159 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
1162 if (icsk->icsk_ca_state != TCP_CA_Open)
1165 /* Defer for less than two clock ticks. */
1166 if (!tp->tso_deferred && ((jiffies<<1)>>1) - (tp->tso_deferred>>1) > 1)
1169 in_flight = tcp_packets_in_flight(tp);
1171 BUG_ON(tcp_skb_pcount(skb) <= 1 ||
1172 (tp->snd_cwnd <= in_flight));
1174 send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;
1176 /* From in_flight test above, we know that cwnd > in_flight. */
1177 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1179 limit = min(send_win, cong_win);
1181 /* If a full-sized TSO skb can be sent, do it. */
1185 if (sysctl_tcp_tso_win_divisor) {
1186 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1188 /* If at least some fraction of a window is available,
1189 * just use it.
1191 chunk /= sysctl_tcp_tso_win_divisor;
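/* With the default divisor of 3 (sysctl_tcp_tso_win_divisor above), a
 * deferred TSO frame is allowed to grow to at most a third of the
 * current window before we stop waiting and send.
 */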
1195 /* Different approach, try not to defer past a single
1196 * ACK. Receiver should ACK every other full sized
1197 * frame, so if we have space for more than 3 frames
1198 * then send now.
1200 if (limit > tcp_max_burst(tp) * tp->mss_cache)
1204 /* Ok, it looks like it is advisable to defer. */
1205 tp->tso_deferred = 1 | (jiffies<<1);
1210 tp->tso_deferred = 0;
1214 /* Create a new MTU probe if we are ready.
1215 * Returns 0 if we should wait to probe (no cwnd available),
1216 * 1 if a probe was sent,
1217 * -1 otherwise */
1218 static int tcp_mtu_probe(struct sock *sk)
1220 struct tcp_sock *tp = tcp_sk(sk);
1221 struct inet_connection_sock *icsk = inet_csk(sk);
1222 struct sk_buff *skb, *nskb, *next;
1229 /* Not currently probing/verifying,
1230 * not in recovery,
1231 * have enough cwnd, and
1232 * not SACKing (the variable headers throw things off) */
1233 if (!icsk->icsk_mtup.enabled ||
1234 icsk->icsk_mtup.probe_size ||
1235 inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
1236 tp->snd_cwnd < 11 ||
1237 tp->rx_opt.eff_sacks)
1240 /* Very simple search strategy: just double the MSS. */
1241 mss_now = tcp_current_mss(sk, 0);
1242 probe_size = 2*tp->mss_cache;
1243 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
1244 /* TODO: set timer for probe_converge_event */
1248 /* Have enough data in the send queue to probe? */
1250 if ((skb = sk->sk_send_head) == NULL)
1252 while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb))
1254 if (len < probe_size)
1257 /* Receive window check. */
1258 if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) {
1259 if (tp->snd_wnd < probe_size)
1265 /* Do we need to wait to drain cwnd? */
1266 pif = tcp_packets_in_flight(tp);
1267 if (pif + 2 > tp->snd_cwnd) {
1268 /* With no packets in flight, don't stall. */
1275 /* We're allowed to probe. Build it now. */
1276 if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
1278 sk_charge_skb(sk, nskb);
1280 skb = sk->sk_send_head;
1281 __skb_insert(nskb, skb->prev, skb, &sk->sk_write_queue);
1282 sk->sk_send_head = nskb;
1284 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1285 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
1286 TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
1287 TCP_SKB_CB(nskb)->sacked = 0;
1289 nskb->ip_summed = skb->ip_summed;
1292 while (len < probe_size) {
1295 copy = min_t(int, skb->len, probe_size - len);
1296 if (nskb->ip_summed)
1297 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1299 nskb->csum = skb_copy_and_csum_bits(skb, 0,
1300 skb_put(nskb, copy), copy, nskb->csum);
1302 if (skb->len <= copy) {
1303 /* We've eaten all the data from this skb.
1305 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
1306 __skb_unlink(skb, &sk->sk_write_queue);
1307 sk_stream_free_skb(sk, skb);
1309 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
1310 ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1311 if (!skb_shinfo(skb)->nr_frags) {
1312 skb_pull(skb, copy);
1313 if (skb->ip_summed != CHECKSUM_PARTIAL)
1314 skb->csum = csum_partial(skb->data, skb->len, 0);
1316 __pskb_trim_head(skb, copy);
1317 tcp_set_skb_tso_segs(sk, skb, mss_now);
1319 TCP_SKB_CB(skb)->seq += copy;
1325 tcp_init_tso_segs(sk, nskb, nskb->len);
1327 /* We're ready to send. If this fails, the probe will
1328 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
1329 TCP_SKB_CB(nskb)->when = tcp_time_stamp;
1330 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
1331 /* Decrement cwnd here because we are sending
1332 * effectively two packets. */
1334 update_send_head(sk, tp, nskb);
1336 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
1337 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
1338 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
1347 /* This routine writes packets to the network. It advances the
1348 * send_head. This happens as incoming acks open up the remote
1351 * Returns 1, if no segments are in flight and we have queued segments, but
1352 * cannot send anything now because of SWS or another problem.
1354 static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
1356 struct tcp_sock *tp = tcp_sk(sk);
1357 struct sk_buff *skb;
1358 unsigned int tso_segs, sent_pkts;
1362 /* If we are closed, the bytes will have to remain here.
1363 * In time closedown will finish, we empty the write queue and all
1366 if (unlikely(sk->sk_state == TCP_CLOSE))
1371 /* Do MTU probing. */
1372 if ((result = tcp_mtu_probe(sk)) == 0) {
1374 } else if (result > 0) {
1378 while ((skb = sk->sk_send_head)) {
1381 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1384 cwnd_quota = tcp_cwnd_test(tp, skb);
1388 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1391 if (tso_segs == 1) {
1392 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1393 (tcp_skb_is_last(sk, skb) ?
1394 nonagle : TCP_NAGLE_PUSH))))
1397 if (tcp_tso_should_defer(sk, tp, skb))
1403 limit = tcp_window_allows(tp, skb,
1404 mss_now, cwnd_quota);
1406 if (skb->len < limit) {
1407 unsigned int trim = skb->len % mss_now;
1410 limit = skb->len - trim;
1414 if (skb->len > limit &&
1415 unlikely(tso_fragment(sk, skb, limit, mss_now)))
1418 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1420 if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
1423 /* Advance the send_head. This one is sent out.
1424 * This call will increment packets_out.
1426 update_send_head(sk, tp, skb);
1428 tcp_minshall_update(tp, mss_now, skb);
1432 if (likely(sent_pkts)) {
1433 tcp_cwnd_validate(sk, tp);
1436 return !tp->packets_out && sk->sk_send_head;
1439 /* Push out any pending frames which were held back due to
1440 * TCP_CORK or attempt at coalescing tiny packets.
1441 * The socket must be locked by the caller.
1443 void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
1444 unsigned int cur_mss, int nonagle)
1446 struct sk_buff *skb = sk->sk_send_head;
1449 if (tcp_write_xmit(sk, cur_mss, nonagle))
1450 tcp_check_probe_timer(sk, tp);
1454 /* Send _single_ skb sitting at the send head. This function requires
1455 * true push pending frames to setup probe timer etc.
1457 void tcp_push_one(struct sock *sk, unsigned int mss_now)
1459 struct tcp_sock *tp = tcp_sk(sk);
1460 struct sk_buff *skb = sk->sk_send_head;
1461 unsigned int tso_segs, cwnd_quota;
1463 BUG_ON(!skb || skb->len < mss_now);
1465 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1466 cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
1468 if (likely(cwnd_quota)) {
1475 limit = tcp_window_allows(tp, skb,
1476 mss_now, cwnd_quota);
1478 if (skb->len < limit) {
1479 unsigned int trim = skb->len % mss_now;
1482 limit = skb->len - trim;
1486 if (skb->len > limit &&
1487 unlikely(tso_fragment(sk, skb, limit, mss_now)))
1490 /* Send it out now. */
1491 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1493 if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
1494 update_send_head(sk, tp, skb);
1495 tcp_cwnd_validate(sk, tp);
1501 /* This function returns the amount that we can raise the
1502 * usable window based on the following constraints
1504 * 1. The window can never be shrunk once it is offered (RFC 793)
1505 * 2. We limit memory per socket
1508 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
1509 * RECV.NEXT + RCV.WIN fixed until:
1510 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
1512 * i.e. don't raise the right edge of the window until you can raise
1513 * it at least MSS bytes.
1515 * Unfortunately, the recommended algorithm breaks header prediction,
1516 * since header prediction assumes th->window stays fixed.
1518 * Strictly speaking, keeping th->window fixed violates the receiver
1519 * side SWS prevention criteria. The problem is that under this rule
1520 * a stream of single byte packets will cause the right side of the
1521 * window to always advance by a single byte.
1523 * Of course, if the sender implements sender side SWS prevention
1524 * then this will not be a problem.
1526 * BSD seems to make the following compromise:
1528 * If the free space is less than the 1/4 of the maximum
1529 * space available and the free space is less than 1/2 mss,
1530 * then set the window to 0.
1531 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
1532 * Otherwise, just prevent the window from shrinking
1533 * and from being larger than the largest representable value.
1535 * This prevents incremental opening of the window in the regime
1536 * where TCP is limited by the speed of the reader side taking
1537 * data out of the TCP receive queue. It does nothing about
1538 * those cases where the window is constrained on the sender side
1539 * because the pipeline is full.
1541 * BSD also seems to "accidentally" limit itself to windows that are a
1542 * multiple of MSS, at least until the free space gets quite small.
1543 * This would appear to be a side effect of the mbuf implementation.
1544 * Combining these two algorithms results in the observed behavior
1545 * of having a fixed window size at almost all times.
1547 * Below we obtain similar behavior by forcing the offered window to
1548 * a multiple of the mss when it is feasible to do so.
1550 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
1551 * Regular options like TIMESTAMP are taken into account.
1553 u32 __tcp_select_window(struct sock *sk)
1555 struct inet_connection_sock *icsk = inet_csk(sk);
1556 struct tcp_sock *tp = tcp_sk(sk);
1557 /* MSS for the peer's data. Previous versions used mss_clamp
1558 * here. I don't know if the value based on our guesses
1559 * of peer's MSS is better for the performance. It's more correct
1560 * but may be worse for the performance because of rcv_mss
1561 * fluctuations. --SAW 1998/11/1
1563 int mss = icsk->icsk_ack.rcv_mss;
1564 int free_space = tcp_space(sk);
1565 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
1568 if (mss > full_space)
1571 if (free_space < full_space/2) {
1572 icsk->icsk_ack.quick = 0;
1574 if (tcp_memory_pressure)
1575 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
1577 if (free_space < mss)
1581 if (free_space > tp->rcv_ssthresh)
1582 free_space = tp->rcv_ssthresh;
1584 /* Don't do rounding if we are using window scaling, since the
1585 * scaled window will not line up with the MSS boundary anyway.
1587 window = tp->rcv_wnd;
1588 if (tp->rx_opt.rcv_wscale) {
1589 window = free_space;
1591 /* Advertise enough space so that it won't get scaled away.
1592 * Important case: prevent zero window announcement if
1593 * 1<<rcv_wscale > mss.
1595 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
1596 window = (((window >> tp->rx_opt.rcv_wscale) + 1)
1597 << tp->rx_opt.rcv_wscale);
1599 /* Get the largest window that is a nice multiple of mss.
1600 * Window clamp already applied above.
1601 * If our current window offering is within 1 mss of the
1602 * free space we just keep it. This prevents the divide
1603 * and multiply from happening most of the time.
1604 * We also don't do any window rounding when the free space
1605 * is too small.
1607 if (window <= free_space - mss || window > free_space)
1608 window = (free_space/mss)*mss;
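/* Example of the rounding above: with free_space of 10000 bytes and an
 * mss of 1460, the offered window becomes 6 * 1460 = 8760, keeping the
 * advertisement on an mss boundary as discussed at the top of this
 * function.
 */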
1614 /* Attempt to collapse two adjacent SKB's during retransmission. */
1615 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
1617 struct tcp_sock *tp = tcp_sk(sk);
1618 struct sk_buff *next_skb = skb->next;
1620 /* The first test we must make is that neither of these two
1621 * SKB's are still referenced by someone else.
1623 if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
1624 int skb_size = skb->len, next_skb_size = next_skb->len;
1625 u16 flags = TCP_SKB_CB(skb)->flags;
1627 /* Also punt if next skb has been SACK'd. */
1628 if(TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
1631 /* Next skb is out of window. */
1632 if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
1635 /* Punt if not enough space exists in the first SKB for
1636 * the data in the second, or the total combined payload
1637 * would exceed the MSS.
1639 if ((next_skb_size > skb_tailroom(skb)) ||
1640 ((skb_size + next_skb_size) > mss_now))
1643 BUG_ON(tcp_skb_pcount(skb) != 1 ||
1644 tcp_skb_pcount(next_skb) != 1);
1646 /* changing transmit queue under us so clear hints */
1647 clear_all_retrans_hints(tp);
1649 /* Ok. We will be able to collapse the packet. */
1650 __skb_unlink(next_skb, &sk->sk_write_queue);
1652 memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
1654 if (next_skb->ip_summed == CHECKSUM_PARTIAL)
1655 skb->ip_summed = CHECKSUM_PARTIAL;
1657 if (skb->ip_summed != CHECKSUM_PARTIAL)
1658 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
1660 /* Update sequence range on original skb. */
1661 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
1663 /* Merge over control information. */
1664 flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
1665 TCP_SKB_CB(skb)->flags = flags;
1667 /* All done, get rid of second SKB and account for it so
1668 * packet counting does not break.
1670 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
1671 if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
1672 tp->retrans_out -= tcp_skb_pcount(next_skb);
1673 if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) {
1674 tp->lost_out -= tcp_skb_pcount(next_skb);
1675 tp->left_out -= tcp_skb_pcount(next_skb);
1677 /* Reno case is special. Sigh... */
1678 if (!tp->rx_opt.sack_ok && tp->sacked_out) {
1679 tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
1680 tp->left_out -= tcp_skb_pcount(next_skb);
1683 /* Not quite right: it can be > snd.fack, but
1684 * it is better to underestimate fackets.
1686 tcp_dec_pcount_approx(&tp->fackets_out, next_skb);
1687 tcp_packets_out_dec(tp, next_skb);
1688 sk_stream_free_skb(sk, next_skb);
1692 /* Do a simple retransmit without using the backoff mechanisms in
1693 * tcp_timer. This is used for path mtu discovery.
1694 * The socket is already locked here.
1696 void tcp_simple_retransmit(struct sock *sk)
1698 const struct inet_connection_sock *icsk = inet_csk(sk);
1699 struct tcp_sock *tp = tcp_sk(sk);
1700 struct sk_buff *skb;
1701 unsigned int mss = tcp_current_mss(sk, 0);
1704 sk_stream_for_retrans_queue(skb, sk) {
1705 if (skb->len > mss &&
1706 !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
1707 if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
1708 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1709 tp->retrans_out -= tcp_skb_pcount(skb);
1711 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
1712 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1713 tp->lost_out += tcp_skb_pcount(skb);
1719 clear_all_retrans_hints(tp);
1724 tcp_sync_left_out(tp);
1726 /* Don't muck with the congestion window here.
1727 * The reason is that we do not increase the amount of _data_
1728 * in the network, but the units have changed and the effective
1729 * cwnd/ssthresh are really reduced now.
1731 if (icsk->icsk_ca_state != TCP_CA_Loss) {
1732 tp->high_seq = tp->snd_nxt;
1733 tp->snd_ssthresh = tcp_current_ssthresh(sk);
1734 tp->prior_ssthresh = 0;
1735 tp->undo_marker = 0;
1736 tcp_set_ca_state(sk, TCP_CA_Loss);
1738 tcp_xmit_retransmit_queue(sk);
1741 /* This retransmits one SKB. Policy decisions and retransmit queue
1742 * state updates are done by the caller. Returns non-zero if an
1743 * error occurred which prevented the send.
1745 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1747 struct tcp_sock *tp = tcp_sk(sk);
1748 struct inet_connection_sock *icsk = inet_csk(sk);
1749 unsigned int cur_mss = tcp_current_mss(sk, 0);
1752 /* Inconclusive MTU probe */
1753 if (icsk->icsk_mtup.probe_size) {
1754 icsk->icsk_mtup.probe_size = 0;
1757 /* Do not send more than we queued. 1/4 is reserved for possible
1758 * copying overhead: fragmentation, tunneling, mangling etc.
1760 if (atomic_read(&sk->sk_wmem_alloc) >
1761 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
1764 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
1765 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1767 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
1771 /* If the receiver has shrunk its window, and skb is out of
1772 * the new window, do not retransmit it. The exception is the
1773 * case when the window is shrunk to zero, in which case
1774 * our retransmit serves as a zero window probe.
1776 if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
1777 && TCP_SKB_CB(skb)->seq != tp->snd_una)
1780 if (skb->len > cur_mss) {
1781 if (tcp_fragment(sk, skb, cur_mss, cur_mss))
1782 return -ENOMEM; /* We'll try again later. */
1785 /* Collapse two adjacent packets if worthwhile and we can. */
1786 if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
1787 (skb->len < (cur_mss >> 1)) &&
1788 (skb->next != sk->sk_send_head) &&
1789 (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
1790 (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
1791 (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) &&
1792 (sysctl_tcp_retrans_collapse != 0))
1793 tcp_retrans_try_collapse(sk, skb, cur_mss);
1795 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
1796 return -EHOSTUNREACH; /* Routing failure or similar. */
1798 /* Some Solaris stacks overoptimize and ignore the FIN on a
1799 * retransmit when old data is attached. So strip it off
1800 * since it is cheap to do so and saves bytes on the network.
1803 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1804 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
1805 if (!pskb_trim(skb, 0)) {
1806 TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
1807 skb_shinfo(skb)->gso_segs = 1;
1808 skb_shinfo(skb)->gso_size = 0;
1809 skb_shinfo(skb)->gso_type = 0;
1810 skb->ip_summed = CHECKSUM_NONE;
1815 /* Make a copy, if the first transmission SKB clone we made
1816 * is still in somebody's hands, else make a clone.
1818 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1820 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
1823 /* Update global TCP statistics. */
1824 TCP_INC_STATS(TCP_MIB_RETRANSSEGS);
1826 tp->total_retrans++;
1828 #if FASTRETRANS_DEBUG > 0
1829 if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
1830 if (net_ratelimit())
1831 printk(KERN_DEBUG "retrans_out leaked.\n");
1834 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
1835 tp->retrans_out += tcp_skb_pcount(skb);
1837 /* Save stamp of the first retransmit. */
1838 if (!tp->retrans_stamp)
1839 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
1843 /* snd_nxt is stored to detect loss of retransmitted segment,
1844 * see tcp_input.c tcp_sacktag_write_queue().
1846 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
1851 /* This gets called after a retransmit timeout, and the initially
1852 * retransmitted data is acknowledged. It tries to continue
1853 * resending the rest of the retransmit queue, until either
1854 * we've sent it all or the congestion window limit is reached.
1855 * If doing SACK, the first ACK which comes back for a timeout
1856 * based retransmit packet might feed us FACK information again.
1857 * If so, we use it to avoid unnecessary retransmissions.
1859 void tcp_xmit_retransmit_queue(struct sock *sk)
1861 const struct inet_connection_sock *icsk = inet_csk(sk);
1862 struct tcp_sock *tp = tcp_sk(sk);
1863 struct sk_buff *skb;
1866 if (tp->retransmit_skb_hint) {
1867 skb = tp->retransmit_skb_hint;
1868 packet_cnt = tp->retransmit_cnt_hint;
1870 skb = sk->sk_write_queue.next;
1874 /* First pass: retransmit lost packets. */
1876 sk_stream_for_retrans_queue_from(skb, sk) {
1877 __u8 sacked = TCP_SKB_CB(skb)->sacked;
1879 /* we could do better than to assign each time */
1880 tp->retransmit_skb_hint = skb;
1881 tp->retransmit_cnt_hint = packet_cnt;
1883 /* Assume this retransmit will generate
1884 * only one packet for congestion window
1885 * calculation purposes. This works because
1886 * tcp_retransmit_skb() will chop up the
1887 * packet to be MSS sized and all the
1888 * packet counting works out.
1890 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
1893 if (sacked & TCPCB_LOST) {
1894 if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
1895 if (tcp_retransmit_skb(sk, skb)) {
1896 tp->retransmit_skb_hint = NULL;
1899 if (icsk->icsk_ca_state != TCP_CA_Loss)
1900 NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
1902 NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
1905 skb_peek(&sk->sk_write_queue))
1906 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1907 inet_csk(sk)->icsk_rto,
1911 packet_cnt += tcp_skb_pcount(skb);
1912 if (packet_cnt >= tp->lost_out)
1918 /* OK, demanded retransmission is finished. */
1920 /* Forward retransmissions are possible only during Recovery. */
1921 if (icsk->icsk_ca_state != TCP_CA_Recovery)
1924 /* No forward retransmissions in Reno are possible. */
1925 if (!tp->rx_opt.sack_ok)
1928 /* Yeah, we have to make a difficult choice between forward transmission
1929 * and retransmission... Both ways have their merits...
1931 * For now we do not retransmit anything, while we have some new
1932 * segments to send.
1935 if (tcp_may_send_now(sk, tp))
1938 if (tp->forward_skb_hint) {
1939 skb = tp->forward_skb_hint;
1940 packet_cnt = tp->forward_cnt_hint;
1942 skb = sk->sk_write_queue.next;
1946 sk_stream_for_retrans_queue_from(skb, sk) {
1947 tp->forward_cnt_hint = packet_cnt;
1948 tp->forward_skb_hint = skb;
1950 /* Similar to the retransmit loop above we
1951 * can pretend that the retransmitted SKB
1952 * we send out here will be composed of one
1953 * real MSS sized packet because tcp_retransmit_skb()
1954 * will fragment it if necessary.
1956 if (++packet_cnt > tp->fackets_out)
1959 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
1962 if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
1965 /* Ok, retransmit it. */
1966 if (tcp_retransmit_skb(sk, skb)) {
1967 tp->forward_skb_hint = NULL;
1971 if (skb == skb_peek(&sk->sk_write_queue))
1972 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1973 inet_csk(sk)->icsk_rto,
1976 NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
1981 /* Send a fin. The caller locks the socket for us. This cannot be
1982 * allowed to fail queueing a FIN frame under any circumstances.
1984 void tcp_send_fin(struct sock *sk)
1986 struct tcp_sock *tp = tcp_sk(sk);
1987 struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
1990 /* Optimization, tack on the FIN if we have a queue of
1991 * unsent frames. But be careful about outgoing SACKS
1992 * and IP options.
1994 mss_now = tcp_current_mss(sk, 1);
1996 if (sk->sk_send_head != NULL) {
1997 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
1998 TCP_SKB_CB(skb)->end_seq++;
2001 /* Socket is locked, keep trying until memory is available. */
2003 skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
2009 /* Reserve space for headers and prepare control bits. */
2010 skb_reserve(skb, MAX_TCP_HEADER);
2012 TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
2013 TCP_SKB_CB(skb)->sacked = 0;
2014 skb_shinfo(skb)->gso_segs = 1;
2015 skb_shinfo(skb)->gso_size = 0;
2016 skb_shinfo(skb)->gso_type = 0;
2018 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2019 TCP_SKB_CB(skb)->seq = tp->write_seq;
2020 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
2021 tcp_queue_skb(sk, skb);
2023 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
2026 /* We get here when a process closes a file descriptor (either due to
2027 * an explicit close() or as a byproduct of exit()'ing) and there
2028 * was unread data in the receive queue. This behavior is recommended
2029 * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM
2031 void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2033 struct tcp_sock *tp = tcp_sk(sk);
2034 struct sk_buff *skb;
2036 /* NOTE: No TCP options attached and we never retransmit this. */
2037 skb = alloc_skb(MAX_TCP_HEADER, priority);
2039 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
2043 /* Reserve space for headers and prepare control bits. */
2044 skb_reserve(skb, MAX_TCP_HEADER);
2046 TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
2047 TCP_SKB_CB(skb)->sacked = 0;
2048 skb_shinfo(skb)->gso_segs = 1;
2049 skb_shinfo(skb)->gso_size = 0;
2050 skb_shinfo(skb)->gso_type = 0;
2053 TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
2054 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
2055 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2056 if (tcp_transmit_skb(sk, skb, 0, priority))
2057 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
2060 /* WARNING: This routine must only be called when we have already sent
2061 * a SYN packet that crossed the incoming SYN that caused this routine
2062 * to get called. If this assumption fails then the initial rcv_wnd
2063 * and rcv_wscale values will not be correct.
2064 */
2065 int tcp_send_synack(struct sock *sk)
2066 {
2067 struct sk_buff* skb;
2069 skb = skb_peek(&sk->sk_write_queue);
2070 if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
2071 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
2072 return -EFAULT;
2073 }
2074 if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) {
2075 if (skb_cloned(skb)) {
2076 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2077 if (nskb == NULL)
2078 return -ENOMEM;
2079 __skb_unlink(skb, &sk->sk_write_queue);
2080 skb_header_release(nskb);
2081 __skb_queue_head(&sk->sk_write_queue, nskb);
2082 sk_stream_free_skb(sk, skb);
2083 sk_charge_skb(sk, nskb);
2084 skb = nskb;
2085 }
2087 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
2088 TCP_ECN_send_synack(tcp_sk(sk), skb);
2089 }
2090 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2091 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2092 }
2094 /*
2095 * Prepare a SYN-ACK.
2096 */
2097 struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2098 struct request_sock *req)
2099 {
2100 struct inet_request_sock *ireq = inet_rsk(req);
2101 struct tcp_sock *tp = tcp_sk(sk);
2102 struct tcphdr *th;
2103 int tcp_header_size;
2104 struct sk_buff *skb;
2105 #ifdef CONFIG_TCP_MD5SIG
2106 struct tcp_md5sig_key *md5;
2107 __u8 *md5_hash_location;
2108 #endif
2110 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2111 if (skb == NULL)
2112 return NULL;
2114 /* Reserve space for headers. */
2115 skb_reserve(skb, MAX_TCP_HEADER);
2117 skb->dst = dst_clone(dst);
2119 tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
2120 (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
2121 (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
2122 /* SACK_PERM is in the place of NOP NOP of TS */
2123 ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
2125 #ifdef CONFIG_TCP_MD5SIG
2126 /* Are we doing MD5 on this segment? If so - make room for it */
2127 md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
2128 if (md5)
2129 tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
2130 #endif
2131 skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
2133 memset(th, 0, sizeof(struct tcphdr));
2134 th->syn = 1;
2135 th->ack = 1;
2136 TCP_ECN_make_synack(req, th);
2137 th->source = inet_sk(sk)->sport;
2138 th->dest = ireq->rmt_port;
2139 TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
2140 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
2141 TCP_SKB_CB(skb)->sacked = 0;
2142 skb_shinfo(skb)->gso_segs = 1;
2143 skb_shinfo(skb)->gso_size = 0;
2144 skb_shinfo(skb)->gso_type = 0;
2145 th->seq = htonl(TCP_SKB_CB(skb)->seq);
2146 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
2147 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2148 __u8 rcv_wscale;
2149 /* Set this up on the first call only */
2150 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2151 /* tcp_full_space because it is guaranteed to be the first packet */
2152 tcp_select_initial_window(tcp_full_space(sk),
2153 dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2154 &req->rcv_wnd,
2155 &req->window_clamp,
2156 ireq->wscale_ok,
2157 &rcv_wscale);
2158 ireq->rcv_wscale = rcv_wscale;
2159 }
2161 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2162 th->window = htons(req->rcv_wnd);
2164 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2165 tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
2166 ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
2167 TCP_SKB_CB(skb)->when,
2168 req->ts_recent,
2169 (
2170 #ifdef CONFIG_TCP_MD5SIG
2171 md5 ? &md5_hash_location :
2172 #endif
2173 NULL)
2174 );
2176 skb->csum = 0;
2177 th->doff = (tcp_header_size >> 2);
2178 TCP_INC_STATS(TCP_MIB_OUTSEGS);
2180 #ifdef CONFIG_TCP_MD5SIG
2181 /* Okay, we have all we need - do the md5 hash if needed */
2182 if (md5) {
2183 tp->af_specific->calc_md5_hash(md5_hash_location,
2184 md5,
2185 NULL, dst, req,
2186 skb->h.th, sk->sk_protocol,
2187 skb->len);
2188 }
2189 #endif
2191 return skb;
2192 }
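/* Editor's sketch (not part of the original tcp_output.c): it works the
 * SYN-ACK header-size arithmetic above through for a common case, assuming
 * the usual option sizes (TCPOLEN_MSS = 4, TCPOLEN_TSTAMP_ALIGNED = 12,
 * TCPOLEN_WSCALE_ALIGNED = 4, TCPOLEN_SACKPERM_ALIGNED = 4,
 * TCPOLEN_MD5SIG_ALIGNED = 20). example_synack_header_size is an invented
 * name used only here.
 */
#if 0
static int example_synack_header_size(int tstamp_ok, int wscale_ok,
				      int sack_ok, int md5)
{
	int size = 20 + 4;			/* bare tcphdr + MSS option   */

	size += tstamp_ok ? 12 : 0;		/* timestamps (with 2 NOPs)   */
	size += wscale_ok ? 4 : 0;		/* window scale               */
	size += (sack_ok && !tstamp_ok) ? 4 : 0;/* SACK_PERM takes the place
						 * of the TS NOPs when TS is
						 * off                        */
	size += md5 ? 20 : 0;			/* MD5 signature option       */

	/* e.g. TS + wscale + SACK, no MD5: 20 + 4 + 12 + 4 = 40 bytes */
	return size;
}
#endif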
2194 /*
2195 * Do all connect socket setups that can be done AF independent.
2196 */
2197 static void tcp_connect_init(struct sock *sk)
2198 {
2199 struct dst_entry *dst = __sk_dst_get(sk);
2200 struct tcp_sock *tp = tcp_sk(sk);
2201 __u8 rcv_wscale;
2203 /* We'll fix this up when we get a response from the other end.
2204 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2205 */
2206 tp->tcp_header_len = sizeof(struct tcphdr) +
2207 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2209 #ifdef CONFIG_TCP_MD5SIG
2210 if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2211 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2212 #endif
2214 /* If user gave his TCP_MAXSEG, record it to clamp */
2215 if (tp->rx_opt.user_mss)
2216 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2217 tp->max_window = 0;
2218 tcp_mtup_init(sk);
2219 tcp_sync_mss(sk, dst_mtu(dst));
2221 if (!tp->window_clamp)
2222 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2223 tp->advmss = dst_metric(dst, RTAX_ADVMSS);
2224 tcp_initialize_rcv_mss(sk);
2226 tcp_select_initial_window(tcp_full_space(sk),
2227 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2228 &tp->rcv_wnd,
2229 &tp->window_clamp,
2230 sysctl_tcp_window_scaling,
2231 &rcv_wscale);
2233 tp->rx_opt.rcv_wscale = rcv_wscale;
2234 tp->rcv_ssthresh = tp->rcv_wnd;
2236 sk->sk_err = 0;
2237 sock_reset_flag(sk, SOCK_DONE);
2238 tp->snd_wnd = 0;
2239 tcp_init_wl(tp, tp->write_seq, 0);
2240 tp->snd_una = tp->write_seq;
2241 tp->snd_sml = tp->write_seq;
2242 tp->rcv_nxt = 0;
2243 tp->rcv_wup = 0;
2244 tp->copied_seq = 0;
2246 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2247 inet_csk(sk)->icsk_retransmits = 0;
2248 tcp_clear_retrans(tp);
2249 }
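/* Editor's sketch (not part of the original tcp_output.c): how the expected
 * header length computed above feeds into the MSS used when choosing the
 * initial receive window. When cached state says timestamps will be used,
 * the option bytes beyond the bare 20-byte header are subtracted from the
 * advertised MSS. The numeric sizes and the name example_effective_advmss
 * are assumptions of this sketch.
 */
#if 0
static int example_effective_advmss(int advmss, int timestamps_in_use)
{
	int tcp_header_len = 20 + (timestamps_in_use ? 12 : 0);

	/* Mirrors "tp->advmss - (tp->tcp_header_len - sizeof(struct tcphdr))". */
	return advmss - (tcp_header_len - 20);
}
#endif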
2251 /*
2252 * Build a SYN and send it off.
2253 */
2254 int tcp_connect(struct sock *sk)
2255 {
2256 struct tcp_sock *tp = tcp_sk(sk);
2257 struct sk_buff *buff;
2259 tcp_connect_init(sk);
2261 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
2262 if (unlikely(buff == NULL))
2263 return -ENOBUFS;
2265 /* Reserve space for headers. */
2266 skb_reserve(buff, MAX_TCP_HEADER);
2268 TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
2269 TCP_ECN_send_syn(sk, tp, buff);
2270 TCP_SKB_CB(buff)->sacked = 0;
2271 skb_shinfo(buff)->gso_segs = 1;
2272 skb_shinfo(buff)->gso_size = 0;
2273 skb_shinfo(buff)->gso_type = 0;
2274 buff->csum = 0;
2275 tp->snd_nxt = tp->write_seq;
2276 TCP_SKB_CB(buff)->seq = tp->write_seq++;
2277 TCP_SKB_CB(buff)->end_seq = tp->write_seq;
2279 /* Send it off. */
2280 TCP_SKB_CB(buff)->when = tcp_time_stamp;
2281 tp->retrans_stamp = TCP_SKB_CB(buff)->when;
2282 skb_header_release(buff);
2283 __skb_queue_tail(&sk->sk_write_queue, buff);
2284 sk_charge_skb(sk, buff);
2285 tp->packets_out += tcp_skb_pcount(buff);
2286 tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
2288 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2289 * in order to make this packet get counted in tcpOutSegs.
2290 */
2291 tp->snd_nxt = tp->write_seq;
2292 tp->pushed_seq = tp->write_seq;
2293 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
2295 /* Timer for repeating the SYN until an answer. */
2296 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2297 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
2298 return 0;
2299 }
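/* Editor's sketch (not part of the original tcp_output.c): the initial
 * sequence bookkeeping performed above. Like a FIN, the SYN occupies one
 * sequence number, so write_seq advances by one when the SYN is queued, and
 * snd_nxt is only raised to the new write_seq after tcp_transmit_skb() has
 * run, so the segment is counted in tcpOutSegs. The struct and the name
 * example_account_syn are invented for this illustration.
 */
#if 0
struct example_syn_seq {
	__u32 seq;		/* sequence number carried by the SYN   */
	__u32 end_seq;		/* seq + 1: the SYN consumes one number  */
};

static struct example_syn_seq example_account_syn(__u32 write_seq)
{
	struct example_syn_seq s = { .seq = write_seq, .end_seq = write_seq + 1 };

	return s;
}
#endif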
2301 /* Send out a delayed ack, the caller does the policy checking
2302 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
2303 * for details.
2304 */
2305 void tcp_send_delayed_ack(struct sock *sk)
2306 {
2307 struct inet_connection_sock *icsk = inet_csk(sk);
2308 int ato = icsk->icsk_ack.ato;
2309 unsigned long timeout;
2311 if (ato > TCP_DELACK_MIN) {
2312 const struct tcp_sock *tp = tcp_sk(sk);
2313 int max_ato = HZ/2;
2315 if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
2316 max_ato = TCP_DELACK_MAX;
2318 /* Slow path, intersegment interval is "high". */
2320 /* If some rtt estimate is known, use it to bound delayed ack.
2321 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
2322 * directly.
2323 */
2324 if (tp->srtt) {
2325 int rtt = max(tp->srtt>>3, TCP_DELACK_MIN);
2327 if (rtt < max_ato)
2328 max_ato = rtt;
2329 }
2331 ato = min(ato, max_ato);
2332 }
2334 /* Stay within the limit we were given */
2335 timeout = jiffies + ato;
2337 /* Use new timeout only if there wasn't an older one already. */
2338 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
2339 /* If delack timer was blocked or is about to expire,
2340 * send ACK now.
2341 */
2342 if (icsk->icsk_ack.blocked ||
2343 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
2344 tcp_send_ack(sk);
2345 return;
2346 }
2348 if (!time_before(timeout, icsk->icsk_ack.timeout))
2349 timeout = icsk->icsk_ack.timeout;
2350 }
2351 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2352 icsk->icsk_ack.timeout = timeout;
2353 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
2354 }
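/* Editor's sketch (not part of the original tcp_output.c): the clamping done
 * above on the delayed-ack interval. "ato" is bounded by HZ/2 (or
 * TCP_DELACK_MAX in pingpong mode) and, when an RTT estimate exists, also by
 * the smoothed RTT, which is itself never taken below TCP_DELACK_MIN. Plain
 * parameters stand in for the icsk/tp fields; example_clamp_ato is an
 * invented name.
 */
#if 0
static unsigned long example_clamp_ato(unsigned long ato, unsigned long max_ato,
				       unsigned long srtt_jiffies,
				       unsigned long delack_min)
{
	if (srtt_jiffies) {
		unsigned long rtt = srtt_jiffies > delack_min ?
				    srtt_jiffies : delack_min;

		if (rtt < max_ato)
			max_ato = rtt;
	}
	return ato < max_ato ? ato : max_ato;
}
#endif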
2356 /* This routine sends an ack and also updates the window. */
2357 void tcp_send_ack(struct sock *sk)
2358 {
2359 /* If we have been reset, we may not send again. */
2360 if (sk->sk_state != TCP_CLOSE) {
2361 struct tcp_sock *tp = tcp_sk(sk);
2362 struct sk_buff *buff;
2364 /* We are not putting this on the write queue, so
2365 * tcp_transmit_skb() will set the ownership to this
2366 * sock.
2367 */
2368 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2369 if (buff == NULL) {
2370 inet_csk_schedule_ack(sk);
2371 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
2372 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
2373 TCP_DELACK_MAX, TCP_RTO_MAX);
2374 return;
2375 }
2377 /* Reserve space for headers and prepare control bits. */
2378 skb_reserve(buff, MAX_TCP_HEADER);
2379 buff->csum = 0;
2380 TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
2381 TCP_SKB_CB(buff)->sacked = 0;
2382 skb_shinfo(buff)->gso_segs = 1;
2383 skb_shinfo(buff)->gso_size = 0;
2384 skb_shinfo(buff)->gso_type = 0;
2386 /* Send it off, this clears delayed acks for us. */
2387 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
2388 TCP_SKB_CB(buff)->when = tcp_time_stamp;
2389 tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
2390 }
2391 }
2393 /* This routine sends a packet with an out of date sequence
2394 * number. It assumes the other end will try to ack it.
2395 *
2396 * Question: what should we do in urgent mode?
2397 * 4.4BSD forces sending single byte of data. We cannot send
2398 * out of window data, because we have SND.NXT==SND.MAX...
2399 *
2400 * Current solution: to send TWO zero-length segments in urgent mode:
2401 * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
2402 * out-of-date with SND.UNA-1, to probe the window.
2403 */
2404 static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2405 {
2406 struct tcp_sock *tp = tcp_sk(sk);
2407 struct sk_buff *skb;
2409 /* We don't queue it, tcp_transmit_skb() sets ownership. */
2410 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2411 if (skb == NULL)
2412 return -1;
2414 /* Reserve space for headers and set control bits. */
2415 skb_reserve(skb, MAX_TCP_HEADER);
2416 skb->csum = 0;
2417 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
2418 TCP_SKB_CB(skb)->sacked = urgent;
2419 skb_shinfo(skb)->gso_segs = 1;
2420 skb_shinfo(skb)->gso_size = 0;
2421 skb_shinfo(skb)->gso_type = 0;
2423 /* Use a previous sequence. This should cause the other
2424 * end to send an ack. Don't queue or clone SKB, just
2425 * send it.
2426 */
2427 TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
2428 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
2429 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2430 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
2431 }
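/* Editor's sketch (not part of the original tcp_output.c): the sequence
 * number chosen for the probe above. A normal window probe reuses an
 * already-acknowledged sequence (SND.UNA - 1) so the peer will ack it, while
 * the urgent-mode companion segment sits at SND.UNA to deliver the urgent
 * pointer. example_probe_seq is an invented name.
 */
#if 0
static __u32 example_probe_seq(__u32 snd_una, int urgent)
{
	return urgent ? snd_una : snd_una - 1;
}
#endif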
2433 int tcp_write_wakeup(struct sock *sk)
2434 {
2435 if (sk->sk_state != TCP_CLOSE) {
2436 struct tcp_sock *tp = tcp_sk(sk);
2437 struct sk_buff *skb;
2439 if ((skb = sk->sk_send_head) != NULL &&
2440 before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
2441 int err;
2442 unsigned int mss = tcp_current_mss(sk, 0);
2443 unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq;
2445 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
2446 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
2448 /* We are probing the opening of a window
2449 * but the window size is != 0;
2450 * this must be a result of sender-side SWS avoidance.
2451 */
2452 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
2453 skb->len > mss) {
2454 seg_size = min(seg_size, mss);
2455 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2456 if (tcp_fragment(sk, skb, seg_size, mss))
2457 return -1;
2458 } else if (!tcp_skb_pcount(skb))
2459 tcp_set_skb_tso_segs(sk, skb, mss);
2461 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2462 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2463 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2464 if (!err) {
2465 update_send_head(sk, tp, skb);
2466 }
2467 return err;
2468 } else {
2469 if (tp->urg_mode &&
2470 between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF))
2471 tcp_xmit_probe_skb(sk, TCPCB_URG);
2472 return tcp_xmit_probe_skb(sk, 0);
2473 }
2474 }
2475 return -1;
2476 }
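/* Editor's sketch (not part of the original tcp_output.c): how much of the
 * head-of-queue segment the wakeup above may send. The usable space is what
 * remains up to the right window edge (SND.UNA + SND.WND - SEG.SEQ), further
 * capped at one MSS when the segment has to be fragmented. The
 * wraparound-safe before() comparisons are omitted for brevity;
 * example_probe_payload is an invented name.
 */
#if 0
static unsigned int example_probe_payload(__u32 snd_una, __u32 snd_wnd,
					  __u32 skb_seq, unsigned int mss)
{
	unsigned int seg_size = snd_una + snd_wnd - skb_seq;

	return seg_size < mss ? seg_size : mss;
}
#endif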
2478 /* A window probe timeout has occurred. If window is not closed send
2479 * a partial packet else a zero probe.
2480 */
2481 void tcp_send_probe0(struct sock *sk)
2482 {
2483 struct inet_connection_sock *icsk = inet_csk(sk);
2484 struct tcp_sock *tp = tcp_sk(sk);
2485 int err;
2487 err = tcp_write_wakeup(sk);
2489 if (tp->packets_out || !sk->sk_send_head) {
2490 /* Cancel probe timer, if it is not required. */
2491 icsk->icsk_probes_out = 0;
2492 icsk->icsk_backoff = 0;
2493 return;
2494 }
2496 if (err <= 0) {
2497 if (icsk->icsk_backoff < sysctl_tcp_retries2)
2498 icsk->icsk_backoff++;
2499 icsk->icsk_probes_out++;
2500 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2501 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2502 TCP_RTO_MAX);
2503 } else {
2504 /* If the packet was not sent due to local congestion,
2505 * do not back off and do not remember icsk_probes_out.
2506 * Let local senders fight for local resources.
2507 *
2508 * Still use the accumulated backoff, though.
2509 */
2510 if (!icsk->icsk_probes_out)
2511 icsk->icsk_probes_out = 1;
2512 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2513 min(icsk->icsk_rto << icsk->icsk_backoff,
2514 TCP_RESOURCE_PROBE_INTERVAL),
2515 TCP_RTO_MAX);
2516 }
2517 }
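/* Editor's sketch (not part of the original tcp_output.c): the timer arming
 * done above. When the probe went out (or failed outright), the backoff
 * counter is bumped and the next probe fires after min(rto << backoff,
 * TCP_RTO_MAX); when it was swallowed by local congestion, the accumulated
 * backoff is kept but the much smaller TCP_RESOURCE_PROBE_INTERVAL is used
 * as the clamp. example_probe0_timeout is an invented name.
 */
#if 0
static unsigned long example_probe0_timeout(unsigned long rto, int backoff,
					    unsigned long clamp)
{
	unsigned long when = rto << backoff;

	return when < clamp ? when : clamp;
}
#endif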
2519 EXPORT_SYMBOL(tcp_connect);
2520 EXPORT_SYMBOL(tcp_make_synack);
2521 EXPORT_SYMBOL(tcp_simple_retransmit);
2522 EXPORT_SYMBOL(tcp_sync_mss);
2523 EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
2524 EXPORT_SYMBOL(tcp_mtup_init);