2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
10 * IPv4 specific functions
15 * linux/ipv4/tcp_input.c
16 * linux/ipv4/tcp_output.c
18 * See tcp.c for author information
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
28 * David S. Miller : New socket lookup architecture.
29 * This code is dedicated to John Dyson.
30 * David S. Miller : Change semantics of established hash,
31 * half is devoted to TIME_WAIT sockets
32 * and the rest go in the other half.
33 * Andi Kleen : Add support for syncookies and fixed
34 * some bugs: ip options weren't passed to
35 * the TCP layer, missed a check for an
37 * Andi Kleen : Implemented fast path mtu discovery.
38 * Fixed many serious bugs in the
39 * request_sock handling and moved
40 * most of it into the af independent code.
41 * Added tail drop and some other bugfixes.
42 * Added new listen semantics.
43 * Mike McLagan : Routing by source
44 * Juan Jose Ciarlante: ip_dynaddr bits
45 * Andi Kleen: various fixes.
46 * Vitaly E. Lavrov : Transparent proxy revived after year
48 * Andi Kleen : Fix new listen.
49 * Andi Kleen : Fix accept error reporting.
 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
 * Alexey Kuznetsov               allows both IPv4 and IPv6 sockets to bind
 *                                to a single port at the same time.
55 #include <linux/config.h>
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
67 #include <net/inet_hashtables.h>
69 #include <net/transp_v6.h>
71 #include <net/inet_common.h>
74 #include <linux/inet.h>
75 #include <linux/ipv6.h>
76 #include <linux/stddef.h>
77 #include <linux/proc_fs.h>
78 #include <linux/seq_file.h>
80 int sysctl_tcp_tw_reuse;
81 int sysctl_tcp_low_latency;
83 /* Check TCP sequence numbers in ICMP packets. */
84 #define ICMP_MIN_LENGTH 8
86 /* Socket used for sending RSTs */
87 static struct socket *tcp_socket;
89 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
92 struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
93 .lhash_lock = RW_LOCK_UNLOCKED,
94 .lhash_users = ATOMIC_INIT(0),
95 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
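/* Port allocation and (un)hashing are thin wrappers around the generic
 * inet_connection_sock / inet_hashtables helpers; TCP only supplies its
 * own hash table and bind-conflict policy.
 */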
98 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
100 return inet_csk_get_port(&tcp_hashinfo, sk, snum,
101 inet_csk_bind_conflict);
104 static void tcp_v4_hash(struct sock *sk)
106 inet_hash(&tcp_hashinfo, sk);
109 void tcp_unhash(struct sock *sk)
111 inet_unhash(&tcp_hashinfo, sk);
114 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
116 return secure_tcp_sequence_number(skb->nh.iph->daddr,
122 /* called with local bh disabled */
123 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
124 struct inet_timewait_sock **twp)
126 struct inet_sock *inet = inet_sk(sk);
127 u32 daddr = inet->rcv_saddr;
128 u32 saddr = inet->daddr;
129 int dif = sk->sk_bound_dev_if;
130 INET_ADDR_COOKIE(acookie, saddr, daddr)
131 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
132 unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
133 struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash);
135 const struct hlist_node *node;
136 struct inet_timewait_sock *tw;
138 prefetch(head->chain.first);
139 write_lock(&head->lock);
141 /* Check TIME-WAIT sockets first. */
142 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
145 if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) {
146 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
147 struct tcp_sock *tp = tcp_sk(sk);
149 /* With PAWS, it is safe from the viewpoint
150 of data integrity. Even without PAWS it
151 is safe provided sequence spaces do not
152 overlap i.e. at data rates <= 80Mbit/sec.
Actually, the idea is close to VJ's: only the timestamp cache is
held not per host but per port pair, and the TW bucket is used
as state holder.
159 If TW bucket has been already destroyed we
160 fall back to VJ's scheme and use initial
161 timestamp retrieved from peer table.
163 if (tcptw->tw_ts_recent_stamp &&
164 (!twp || (sysctl_tcp_tw_reuse &&
166 tcptw->tw_ts_recent_stamp > 1))) {
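/* Reuse the TIME-WAIT pair: start the new sequence space well beyond
 * where the old connection left off, so stray duplicates from the
 * previous incarnation cannot be mistaken for new data.
 */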
167 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
if (tp->write_seq == 0)
	tp->write_seq = 1;
170 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
171 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
180 /* And established part... */
181 sk_for_each(sk2, node, &head->chain) {
182 if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
/* Must record num and sport now. Otherwise we will see
 * in the hash table a socket with a funny identity. */
inet->num = lport;
inet->sport = htons(lport);
192 BUG_TRAP(sk_unhashed(sk));
193 __sk_add_node(sk, &head->chain);
194 sock_prot_inc_use(sk->sk_prot);
195 write_unlock(&head->lock);
199 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
201 /* Silly. Should hash-dance instead... */
202 inet_twsk_deschedule(tw, &tcp_death_row);
203 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
211 write_unlock(&head->lock);
212 return -EADDRNOTAVAIL;
215 static inline u32 connect_port_offset(const struct sock *sk)
217 const struct inet_sock *inet = inet_sk(sk);
219 return secure_tcp_port_ephemeral(inet->rcv_saddr, inet->daddr,
224 * Bind a port for a connect operation and hash it.
226 static inline int tcp_v4_hash_connect(struct sock *sk)
228 const unsigned short snum = inet_sk(sk)->num;
229 struct inet_bind_hashbucket *head;
230 struct inet_bind_bucket *tb;
234 int low = sysctl_local_port_range[0];
235 int high = sysctl_local_port_range[1];
236 int range = high - low;
240 u32 offset = hint + connect_port_offset(sk);
241 struct hlist_node *node;
242 struct inet_timewait_sock *tw = NULL;
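/* Walk the ephemeral range starting at a connection-specific offset.
 * A candidate port already present in the bind hash is skipped if
 * tb->fastreuse >= 0 (it belongs to bind()/listen() users); otherwise
 * it is taken only if the full four-tuple is still unique in the
 * established hash.
 */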
245 for (i = 1; i <= range; i++) {
246 port = low + (i + offset) % range;
247 head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
248 spin_lock(&head->lock);
250 /* Does not bother with rcv_saddr checks,
251 * because the established check is already
254 inet_bind_bucket_for_each(tb, node, &head->chain) {
255 if (tb->port == port) {
256 BUG_TRAP(!hlist_empty(&tb->owners));
257 if (tb->fastreuse >= 0)
259 if (!__tcp_v4_check_established(sk,
267 tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
269 spin_unlock(&head->lock);
276 spin_unlock(&head->lock);
280 return -EADDRNOTAVAIL;
285 /* Head lock still held and bh's disabled */
286 inet_bind_hash(sk, tb, port);
287 if (sk_unhashed(sk)) {
288 inet_sk(sk)->sport = htons(port);
289 __inet_hash(&tcp_hashinfo, sk, 0);
291 spin_unlock(&head->lock);
inet_twsk_deschedule(tw, &tcp_death_row);
302 head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
303 tb = inet_csk(sk)->icsk_bind_hash;
304 spin_lock_bh(&head->lock);
305 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
306 __inet_hash(&tcp_hashinfo, sk, 0);
307 spin_unlock_bh(&head->lock);
310 spin_unlock(&head->lock);
311 /* No definite answer... Walk to established hash table */
312 ret = __tcp_v4_check_established(sk, snum, NULL);
319 /* This will initiate an outgoing connection. */
320 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
322 struct inet_sock *inet = inet_sk(sk);
323 struct tcp_sock *tp = tcp_sk(sk);
324 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
330 if (addr_len < sizeof(struct sockaddr_in))
333 if (usin->sin_family != AF_INET)
334 return -EAFNOSUPPORT;
336 nexthop = daddr = usin->sin_addr.s_addr;
337 if (inet->opt && inet->opt->srr) {
340 nexthop = inet->opt->faddr;
343 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
344 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
346 inet->sport, usin->sin_port, sk);
350 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
355 if (!inet->opt || !inet->opt->srr)
359 inet->saddr = rt->rt_src;
360 inet->rcv_saddr = inet->saddr;
362 if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
363 /* Reset inherited state */
364 tp->rx_opt.ts_recent = 0;
365 tp->rx_opt.ts_recent_stamp = 0;
369 if (tcp_death_row.sysctl_tw_recycle &&
370 !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
371 struct inet_peer *peer = rt_get_peer(rt);
373 /* VJ's idea. We save last timestamp seen from
374 * the destination in peer table, when entering state TIME-WAIT
375 * and initialize rx_opt.ts_recent from it, when trying new connection.
378 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
379 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
380 tp->rx_opt.ts_recent = peer->tcp_ts;
384 inet->dport = usin->sin_port;
387 tp->ext_header_len = 0;
389 tp->ext_header_len = inet->opt->optlen;
391 tp->rx_opt.mss_clamp = 536;
/* Socket identity is still unknown (sport may be zero).
 * However we set state to SYN-SENT and, without releasing the socket
 * lock, select a source port, enter ourselves into the hash tables and
 * complete initialization after this.
 */
398 tcp_set_state(sk, TCP_SYN_SENT);
399 err = tcp_v4_hash_connect(sk);
403 err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
407 /* OK, now commit destination to socket. */
408 sk_setup_caps(sk, &rt->u.dst);
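/* Choose an initial sequence number derived from the connection's
 * four-tuple, so successive incarnations of the same connection start
 * far apart in sequence space.
 */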
411 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
416 inet->id = tp->write_seq ^ jiffies;
418 err = tcp_connect(sk);
426 /* This unhashes the socket and releases the local port, if necessary. */
427 tcp_set_state(sk, TCP_CLOSE);
429 sk->sk_route_caps = 0;
435 * This routine does path mtu discovery as defined in RFC1191.
437 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
440 struct dst_entry *dst;
441 struct inet_sock *inet = inet_sk(sk);
442 struct tcp_sock *tp = tcp_sk(sk);
/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
 * sent out by Linux are always < 576 bytes, so they should go through
 * unfragmented).
 */
if (sk->sk_state == TCP_LISTEN)
	return;
/* We don't check in the dst entry if pmtu discovery is forbidden
 * on this route. We just assume that no packet-too-big packets
 * are sent back when pmtu discovery is not active.
 * There is a small race when the user changes this flag in the
 * route, but I think that's acceptable.
 */
457 if ((dst = __sk_dst_check(sk, 0)) == NULL)
460 dst->ops->update_pmtu(dst, mtu);
/* Something is about to go wrong... Remember the soft error
 * for the case that this connection is not able to recover.
 */
465 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
466 sk->sk_err_soft = EMSGSIZE;
470 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
471 tp->pmtu_cookie > mtu) {
472 tcp_sync_mss(sk, mtu);
/* Resend the TCP packet because it's
 * clear that the old packet has been
 * dropped. This is the new "fast" path mtu
 * discovery.
 */
479 tcp_simple_retransmit(sk);
480 } /* else let the usual retransmit timer handle it */
484 * This routine is called by the ICMP module when it gets some
485 * sort of error condition. If err < 0 then the socket should
486 * be closed and the error returned to the user. If err > 0
487 * it's just the icmp type << 8 | icmp code. After adjustment
488 * header points to the first 8 bytes of the tcp header. We need
489 * to find the appropriate port.
491 * The locking strategy used here is very "optimistic". When
492 * someone else accesses the socket the ICMP is just dropped
493 * and for some paths there is no check at all.
494 * A more general error queue to queue errors for later handling
495 * is probably better.
499 void tcp_v4_err(struct sk_buff *skb, u32 info)
501 struct iphdr *iph = (struct iphdr *)skb->data;
502 struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
504 struct inet_sock *inet;
505 int type = skb->h.icmph->type;
506 int code = skb->h.icmph->code;
511 if (skb->len < (iph->ihl << 2) + 8) {
512 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
516 sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
517 th->source, inet_iif(skb));
519 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
522 if (sk->sk_state == TCP_TIME_WAIT) {
523 inet_twsk_put((struct inet_timewait_sock *)sk);
528 /* If too many ICMPs get dropped on busy
529 * servers this needs to be solved differently.
531 if (sock_owned_by_user(sk))
532 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
534 if (sk->sk_state == TCP_CLOSE)
538 seq = ntohl(th->seq);
539 if (sk->sk_state != TCP_LISTEN &&
540 !between(seq, tp->snd_una, tp->snd_nxt)) {
541 NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
546 case ICMP_SOURCE_QUENCH:
547 /* Just silently ignore these. */
549 case ICMP_PARAMETERPROB:
552 case ICMP_DEST_UNREACH:
553 if (code > NR_ICMP_UNREACH)
556 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
557 if (!sock_owned_by_user(sk))
558 do_pmtu_discovery(sk, iph, info);
562 err = icmp_err_convert[code].errno;
564 case ICMP_TIME_EXCEEDED:
571 switch (sk->sk_state) {
	struct request_sock *req, **prev;
case TCP_LISTEN:
	if (sock_owned_by_user(sk))
		goto out;
577 req = inet_csk_search_req(sk, &prev, th->dest,
578 iph->daddr, iph->saddr);
582 /* ICMPs are not backlogged, hence we cannot get
583 an established socket here.
587 if (seq != tcp_rsk(req)->snt_isn) {
588 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
593 * Still in SYN_RECV, just remove it silently.
594 * There is no good way to pass the error to the newly
595 * created socket, and POSIX does not want network
596 * errors returned from accept().
598 inet_csk_reqsk_queue_drop(sk, req, prev);
case TCP_SYN_RECV: /* Cannot happen.
                      It can happen, e.g., if SYNs crossed. */
605 if (!sock_owned_by_user(sk)) {
606 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
609 sk->sk_error_report(sk);
613 sk->sk_err_soft = err;
618 /* If we've already connected we will keep trying
619 * until we time out, or the user gives up.
 * RFC 1122 4.2.3.9 allows us to consider as hard errors
 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
 * but it is obsoleted by pmtu discovery).
 *
 * Note that in the modern internet, where routing is unreliable
 * and broken firewalls sit in every dark corner sending random
 * errors ordered by their masters, even these two messages finally lose
 * their original sense (even Linux sends invalid PORT_UNREACHs).
 *
 * Now we are in compliance with the RFCs.
 */
635 if (!sock_owned_by_user(sk) && inet->recverr) {
637 sk->sk_error_report(sk);
638 } else { /* Only an error on timeout */
639 sk->sk_err_soft = err;
647 /* This routine computes an IPv4 TCP checksum. */
648 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
651 struct inet_sock *inet = inet_sk(sk);
653 if (skb->ip_summed == CHECKSUM_HW) {
654 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
655 skb->csum = offsetof(struct tcphdr, check);
657 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
658 csum_partial((char *)th,
665 * This routine will send an RST to the other tcp.
 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 * for the reset?
 * Answer: if a packet caused an RST, it is not for a socket
 * existing in our system; if it is matched to a socket,
 * it is just a duplicate segment or a bug in the other side's TCP.
 * So we build the reply based only on the parameters that
 * arrived with the segment.
 * Exception: precedence violation. We do not implement it in any case.
 */
677 static void tcp_v4_send_reset(struct sk_buff *skb)
679 struct tcphdr *th = skb->h.th;
681 struct ip_reply_arg arg;
683 /* Never send a reset in response to a reset. */
687 if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
690 /* Swap the send and the receive. */
691 memset(&rth, 0, sizeof(struct tcphdr));
692 rth.dest = th->source;
693 rth.source = th->dest;
694 rth.doff = sizeof(struct tcphdr) / 4;
698 rth.seq = th->ack_seq;
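/* The RST's ACK covers everything the offending segment occupied in
 * sequence space (SYN and FIN each count for one).
 */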
701 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
702 skb->len - (th->doff << 2));
705 memset(&arg, 0, sizeof arg);
706 arg.iov[0].iov_base = (unsigned char *)&rth;
707 arg.iov[0].iov_len = sizeof rth;
708 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
709 skb->nh.iph->saddr, /*XXX*/
710 sizeof(struct tcphdr), IPPROTO_TCP, 0);
711 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
713 ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
715 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
716 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
/* The code following below, sending ACKs in SYN-RECV and TIME-WAIT states
   outside of socket context, is ugly, certainly. What can I do?
 */
723 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
726 struct tcphdr *th = skb->h.th;
731 struct ip_reply_arg arg;
733 memset(&rep.th, 0, sizeof(struct tcphdr));
734 memset(&arg, 0, sizeof arg);
736 arg.iov[0].iov_base = (unsigned char *)&rep;
737 arg.iov[0].iov_len = sizeof(rep.th);
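/* If a timestamp was supplied, append a TCP timestamp option (padded
 * with NOPs) so the peer's PAWS checks keep working.
 */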
739 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
740 (TCPOPT_TIMESTAMP << 8) |
742 rep.tsopt[1] = htonl(tcp_time_stamp);
743 rep.tsopt[2] = htonl(ts);
744 arg.iov[0].iov_len = sizeof(rep);
747 /* Swap the send and the receive. */
748 rep.th.dest = th->source;
749 rep.th.source = th->dest;
750 rep.th.doff = arg.iov[0].iov_len / 4;
751 rep.th.seq = htonl(seq);
752 rep.th.ack_seq = htonl(ack);
754 rep.th.window = htons(win);
756 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
757 skb->nh.iph->saddr, /*XXX*/
758 arg.iov[0].iov_len, IPPROTO_TCP, 0);
759 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
761 ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
763 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
766 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
768 struct inet_timewait_sock *tw = inet_twsk(sk);
769 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
771 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
772 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent);
777 static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
779 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
 * Send a SYN-ACK in response to a received SYN (this is also used to
 * retransmit SYN-ACKs). It still operates on a request_sock only, not on a big
 * socket.
 */
788 static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
789 struct dst_entry *dst)
791 const struct inet_request_sock *ireq = inet_rsk(req);
793 struct sk_buff * skb;
795 /* First, grab a route. */
796 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
799 skb = tcp_make_synack(sk, dst, req);
802 struct tcphdr *th = skb->h.th;
804 th->check = tcp_v4_check(th, skb->len,
807 csum_partial((char *)th, skb->len,
810 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
813 if (err == NET_XMIT_CN)
823 * IPv4 request_sock destructor.
825 static void tcp_v4_reqsk_destructor(struct request_sock *req)
827 kfree(inet_rsk(req)->opt);
830 static inline void syn_flood_warning(struct sk_buff *skb)
832 static unsigned long warntime;
834 if (time_after(jiffies, (warntime + HZ * 60))) {
837 "possible SYN flooding on port %d. Sending cookies.\n",
838 ntohs(skb->h.th->dest));
843 * Save and compile IPv4 options into the request_sock if needed.
845 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
848 struct ip_options *opt = &(IPCB(skb)->opt);
849 struct ip_options *dopt = NULL;
851 if (opt && opt->optlen) {
852 int opt_size = optlength(opt);
853 dopt = kmalloc(opt_size, GFP_ATOMIC);
855 if (ip_options_echo(dopt, skb)) {
864 struct request_sock_ops tcp_request_sock_ops = {
866 .obj_size = sizeof(struct tcp_request_sock),
867 .rtx_syn_ack = tcp_v4_send_synack,
868 .send_ack = tcp_v4_reqsk_send_ack,
869 .destructor = tcp_v4_reqsk_destructor,
870 .send_reset = tcp_v4_send_reset,
873 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
875 struct inet_request_sock *ireq;
876 struct tcp_options_received tmp_opt;
877 struct request_sock *req;
878 __u32 saddr = skb->nh.iph->saddr;
879 __u32 daddr = skb->nh.iph->daddr;
880 __u32 isn = TCP_SKB_CB(skb)->when;
881 struct dst_entry *dst = NULL;
882 #ifdef CONFIG_SYN_COOKIES
885 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
/* Never answer SYNs sent to broadcast or multicast addresses. */
889 if (((struct rtable *)skb->dst)->rt_flags &
890 (RTCF_BROADCAST | RTCF_MULTICAST))
/* TW buckets are converted to open requests without
 * limitations; they conserve resources, and the peer is
 * evidently a real one.
 */
897 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
898 #ifdef CONFIG_SYN_COOKIES
899 if (sysctl_tcp_syncookies) {
/* Accept backlog is full. If we have already queued enough
 * warm entries in the syn queue, drop this request. That is better than
 * clogging the syn queue with openreqs with exponentially increasing
 * timeout.
 */
911 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
914 req = reqsk_alloc(&tcp_request_sock_ops);
918 tcp_clear_options(&tmp_opt);
919 tmp_opt.mss_clamp = 536;
920 tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
922 tcp_parse_options(skb, &tmp_opt, 0);
925 tcp_clear_options(&tmp_opt);
926 tmp_opt.saw_tstamp = 0;
929 if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
/* Some OSes (unknown ones, but I see them on a web server which
 * contains information interesting only for Windows
 * users) do not send their stamp in the SYN. It is an easy case:
 * we simply do not advertise TS support.
 */
935 tmp_opt.saw_tstamp = 0;
936 tmp_opt.tstamp_ok = 0;
938 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
940 tcp_openreq_init(req, &tmp_opt, skb);
942 ireq = inet_rsk(req);
943 ireq->loc_addr = daddr;
944 ireq->rmt_addr = saddr;
945 ireq->opt = tcp_v4_save_options(sk, skb);
947 TCP_ECN_create_request(req, skb->h.th);
950 #ifdef CONFIG_SYN_COOKIES
951 syn_flood_warning(skb);
953 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
955 struct inet_peer *peer = NULL;
/* VJ's idea. We save the last timestamp seen
 * from the destination in the peer table, when entering
 * TIME-WAIT state, and check against it before
 * accepting a new connection request.
 *
 * If "isn" is not zero, this request hit an alive
 * timewait bucket, so all the necessary checks
 * are made by the code processing the timewait state.
 */
966 if (tmp_opt.saw_tstamp &&
967 tcp_death_row.sysctl_tw_recycle &&
968 (dst = inet_csk_route_req(sk, req)) != NULL &&
969 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
970 peer->v4daddr == saddr) {
971 if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
972 (s32)(peer->tcp_ts - req->ts_recent) >
974 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
979 /* Kill the following clause, if you dislike this way. */
980 else if (!sysctl_tcp_syncookies &&
981 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
982 (sysctl_max_syn_backlog >> 2)) &&
983 (!peer || !peer->tcp_ts_stamp) &&
984 (!dst || !dst_metric(dst, RTAX_RTT))) {
/* Without syncookies the last quarter of the
 * backlog is filled only with destinations
 * proven to be alive.
 * It means that we continue to communicate
 * only with destinations already remembered
 * at the moment the synflood started.
 */
992 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
993 "request from %u.%u.%u.%u/%u\n",
995 ntohs(skb->h.th->source));
1000 isn = tcp_v4_init_sequence(sk, skb);
1002 tcp_rsk(req)->snt_isn = isn;
1004 if (tcp_v4_send_synack(sk, req, dst))
1010 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1017 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
 * The three-way handshake has completed - we received a valid final ACK -
 * now create the new socket.
1026 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1027 struct request_sock *req,
1028 struct dst_entry *dst)
1030 struct inet_request_sock *ireq;
1031 struct inet_sock *newinet;
1032 struct tcp_sock *newtp;
1035 if (sk_acceptq_is_full(sk))
1038 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
1041 newsk = tcp_create_openreq_child(sk, req, skb);
1045 sk_setup_caps(newsk, dst);
1047 newtp = tcp_sk(newsk);
1048 newinet = inet_sk(newsk);
1049 ireq = inet_rsk(req);
1050 newinet->daddr = ireq->rmt_addr;
1051 newinet->rcv_saddr = ireq->loc_addr;
1052 newinet->saddr = ireq->loc_addr;
1053 newinet->opt = ireq->opt;
1055 newinet->mc_index = inet_iif(skb);
1056 newinet->mc_ttl = skb->nh.iph->ttl;
1057 newtp->ext_header_len = 0;
1059 newtp->ext_header_len = newinet->opt->optlen;
1060 newinet->id = newtp->write_seq ^ jiffies;
1062 tcp_sync_mss(newsk, dst_mtu(dst));
1063 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1064 tcp_initialize_rcv_mss(newsk);
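/* The child socket is fully initialized: publish it in the established
 * hash and let it inherit the parent's bound local port.
 */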
1066 __inet_hash(&tcp_hashinfo, newsk, 0);
1067 __inet_inherit_port(&tcp_hashinfo, sk, newsk);
1072 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1074 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1079 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1081 struct tcphdr *th = skb->h.th;
1082 struct iphdr *iph = skb->nh.iph;
1084 struct request_sock **prev;
1085 /* Find possible connection requests. */
1086 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1087 iph->saddr, iph->daddr);
1089 return tcp_check_req(sk, skb, req, prev);
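/* No pending open request matched. The segment may belong to an
 * already established child connection, so check the established
 * hash as well.
 */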
1091 nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
1092 th->source, skb->nh.iph->daddr,
1093 ntohs(th->dest), inet_iif(skb));
1096 if (nsk->sk_state != TCP_TIME_WAIT) {
1100 inet_twsk_put((struct inet_timewait_sock *)nsk);
1104 #ifdef CONFIG_SYN_COOKIES
1105 if (!th->rst && !th->syn && th->ack)
1106 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1111 static int tcp_v4_checksum_init(struct sk_buff *skb)
1113 if (skb->ip_summed == CHECKSUM_HW) {
1114 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1115 skb->nh.iph->daddr, skb->csum)) {
1116 skb->ip_summed = CHECKSUM_UNNECESSARY;
1121 skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr,
1122 skb->len, IPPROTO_TCP, 0);
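/* Short segments are verified immediately; longer ones are left to be
 * checksummed later in the receive path.
 */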
1124 if (skb->len <= 76) {
1125 return __skb_checksum_complete(skb);
/* The socket must have its spinlock held when we get
 * here.
 *
1134 * We have a potential double-lock case here, so even when
1135 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1139 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1141 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1142 TCP_CHECK_TIMER(sk);
1143 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1145 TCP_CHECK_TIMER(sk);
1149 if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1152 if (sk->sk_state == TCP_LISTEN) {
1153 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1158 if (tcp_child_process(sk, nsk, skb))
1164 TCP_CHECK_TIMER(sk);
1165 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1167 TCP_CHECK_TIMER(sk);
1171 tcp_v4_send_reset(skb);
1174 /* Be careful here. If this function gets more complicated and
1175 * gcc suffers from register pressure on the x86, sk (in %ebx)
1176 * might be destroyed here. This current version compiles correctly,
1177 * but you have been warned.
1182 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1190 int tcp_v4_rcv(struct sk_buff *skb)
1196 if (skb->pkt_type != PACKET_HOST)
1199 /* Count it even if it's bad */
1200 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1202 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1207 if (th->doff < sizeof(struct tcphdr) / 4)
1209 if (!pskb_may_pull(skb, th->doff * 4))
/* An explanation is required here, I think.
 * Packet length and doff are validated by header prediction,
 * provided the case of th->doff == 0 is eliminated.
 * So, we defer the checks. */
1216 if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1217 tcp_v4_checksum_init(skb)))
1221 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1222 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1223 skb->len - th->doff * 4);
1224 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1225 TCP_SKB_CB(skb)->when = 0;
1226 TCP_SKB_CB(skb)->flags = skb->nh.iph->tos;
1227 TCP_SKB_CB(skb)->sacked = 0;
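/* Demultiplex by the full four-tuple; __inet_lookup falls back to the
 * listening hash when no established socket matches.
 */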
1229 sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
1230 skb->nh.iph->daddr, ntohs(th->dest),
1237 if (sk->sk_state == TCP_TIME_WAIT)
1240 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1241 goto discard_and_relse;
1243 if (sk_filter(sk, skb, 0))
1244 goto discard_and_relse;
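/* If no process currently owns the socket, try the prequeue and fall
 * back to direct processing; otherwise defer the segment to the
 * backlog until the owner releases the socket lock.
 */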
1250 if (!sock_owned_by_user(sk)) {
1251 if (!tcp_prequeue(sk, skb))
1252 ret = tcp_v4_do_rcv(sk, skb);
1254 sk_add_backlog(sk, skb);
1262 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1265 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1267 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1269 tcp_v4_send_reset(skb);
1273 /* Discard frame. */
1282 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1283 inet_twsk_put((struct inet_timewait_sock *) sk);
1287 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1288 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1289 inet_twsk_put((struct inet_timewait_sock *) sk);
1292 switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
1295 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
1300 inet_twsk_deschedule((struct inet_timewait_sock *)sk,
1302 inet_twsk_put((struct inet_timewait_sock *)sk);
1306 /* Fall through to ACK */
1309 tcp_v4_timewait_ack(sk, skb);
1313 case TCP_TW_SUCCESS:;
1318 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
1320 struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
1321 struct inet_sock *inet = inet_sk(sk);
1323 sin->sin_family = AF_INET;
1324 sin->sin_addr.s_addr = inet->daddr;
1325 sin->sin_port = inet->dport;
/* VJ's idea. Save the last timestamp seen from this destination
 * and hold it at least for the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections, before they enter the
 * synchronized state.
 */
1334 int tcp_v4_remember_stamp(struct sock *sk)
1336 struct inet_sock *inet = inet_sk(sk);
1337 struct tcp_sock *tp = tcp_sk(sk);
1338 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1339 struct inet_peer *peer = NULL;
1342 if (!rt || rt->rt_dst != inet->daddr) {
1343 peer = inet_getpeer(inet->daddr, 1);
1347 rt_bind_peer(rt, 1);
1352 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
1353 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
1354 peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
1355 peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
1356 peer->tcp_ts = tp->rx_opt.ts_recent;
1366 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1368 struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
1371 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1373 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
1374 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
1375 peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
1376 peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
1377 peer->tcp_ts = tcptw->tw_ts_recent;
1386 struct tcp_func ipv4_specific = {
1387 .queue_xmit = ip_queue_xmit,
1388 .send_check = tcp_v4_send_check,
1389 .rebuild_header = inet_sk_rebuild_header,
1390 .conn_request = tcp_v4_conn_request,
1391 .syn_recv_sock = tcp_v4_syn_recv_sock,
1392 .remember_stamp = tcp_v4_remember_stamp,
1393 .net_header_len = sizeof(struct iphdr),
1394 .setsockopt = ip_setsockopt,
1395 .getsockopt = ip_getsockopt,
1396 .addr2sockaddr = v4_addr2sockaddr,
1397 .sockaddr_len = sizeof(struct sockaddr_in),
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
1403 static int tcp_v4_init_sock(struct sock *sk)
1405 struct inet_connection_sock *icsk = inet_csk(sk);
1406 struct tcp_sock *tp = tcp_sk(sk);
1408 skb_queue_head_init(&tp->out_of_order_queue);
1409 tcp_init_xmit_timers(sk);
1410 tcp_prequeue_init(tp);
1412 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1413 tp->mdev = TCP_TIMEOUT_INIT;
1415 /* So many TCP implementations out there (incorrectly) count the
1416 * initial SYN frame in their delayed-ACK and congestion control
1417 * algorithms that we must have the following bandaid to talk
1418 * efficiently to them. -DaveM
1422 /* See draft-stevens-tcpca-spec-01 for discussion of the
1423 * initialization of these values.
1425 tp->snd_ssthresh = 0x7fffffff; /* Infinity */
1426 tp->snd_cwnd_clamp = ~0;
1427 tp->mss_cache = 536;
1429 tp->reordering = sysctl_tcp_reordering;
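/* Start out with the minimal placeholder congestion control ops; the
 * real congestion control module is attached once the connection
 * reaches established state.
 */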
1430 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1432 sk->sk_state = TCP_CLOSE;
1434 sk->sk_write_space = sk_stream_write_space;
1435 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1437 tp->af_specific = &ipv4_specific;
1439 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1440 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1442 atomic_inc(&tcp_sockets_allocated);
1447 int tcp_v4_destroy_sock(struct sock *sk)
1449 struct tcp_sock *tp = tcp_sk(sk);
1451 tcp_clear_xmit_timers(sk);
1453 tcp_cleanup_congestion_control(sk);
/* Clean up the write buffer. */
1456 sk_stream_writequeue_purge(sk);
1458 /* Cleans up our, hopefully empty, out_of_order_queue. */
1459 __skb_queue_purge(&tp->out_of_order_queue);
/* Clean the prequeue; it really must be empty. */
1462 __skb_queue_purge(&tp->ucopy.prequeue);
1464 /* Clean up a referenced TCP bind bucket. */
1465 if (inet_csk(sk)->icsk_bind_hash)
1466 inet_put_port(&tcp_hashinfo, sk);
1469 * If sendmsg cached page exists, toss it.
1471 if (sk->sk_sndmsg_page) {
1472 __free_page(sk->sk_sndmsg_page);
1473 sk->sk_sndmsg_page = NULL;
1476 atomic_dec(&tcp_sockets_allocated);
1481 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1483 #ifdef CONFIG_PROC_FS
1484 /* Proc filesystem TCP sock list dumping. */
1486 static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
1488 return hlist_empty(head) ? NULL :
1489 list_entry(head->first, struct inet_timewait_sock, tw_node);
1492 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1494 return tw->tw_node.next ?
1495 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
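/* The seq_file iterator walks the listening hash first (descending into
 * each listener's SYN queue) and then the established and TIME-WAIT
 * chains; st->state records which phase the walk is in.
 */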
1498 static void *listening_get_next(struct seq_file *seq, void *cur)
1500 struct inet_connection_sock *icsk;
1501 struct hlist_node *node;
1502 struct sock *sk = cur;
1503 struct tcp_iter_state* st = seq->private;
1507 sk = sk_head(&tcp_hashinfo.listening_hash[0]);
1513 if (st->state == TCP_SEQ_STATE_OPENREQ) {
1514 struct request_sock *req = cur;
1516 icsk = inet_csk(st->syn_wait_sk);
1520 if (req->rsk_ops->family == st->family) {
1526 if (++st->sbucket >= TCP_SYNQ_HSIZE)
1529 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1531 sk = sk_next(st->syn_wait_sk);
1532 st->state = TCP_SEQ_STATE_LISTENING;
1533 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1535 icsk = inet_csk(sk);
1536 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1537 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1539 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1543 sk_for_each_from(sk, node) {
1544 if (sk->sk_family == st->family) {
1548 icsk = inet_csk(sk);
1549 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1550 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1552 st->uid = sock_i_uid(sk);
1553 st->syn_wait_sk = sk;
1554 st->state = TCP_SEQ_STATE_OPENREQ;
1558 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1560 if (++st->bucket < INET_LHTABLE_SIZE) {
1561 sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
1569 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1571 void *rc = listening_get_next(seq, NULL);
1573 while (rc && *pos) {
1574 rc = listening_get_next(seq, rc);
1580 static void *established_get_first(struct seq_file *seq)
1582 struct tcp_iter_state* st = seq->private;
1585 for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
1587 struct hlist_node *node;
1588 struct inet_timewait_sock *tw;
1590 /* We can reschedule _before_ having picked the target: */
1591 cond_resched_softirq();
1593 read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
1594 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1595 if (sk->sk_family != st->family) {
1601 st->state = TCP_SEQ_STATE_TIME_WAIT;
1602 inet_twsk_for_each(tw, node,
1603 &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
1604 if (tw->tw_family != st->family) {
1610 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
1611 st->state = TCP_SEQ_STATE_ESTABLISHED;
1617 static void *established_get_next(struct seq_file *seq, void *cur)
1619 struct sock *sk = cur;
1620 struct inet_timewait_sock *tw;
1621 struct hlist_node *node;
1622 struct tcp_iter_state* st = seq->private;
1626 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
1630 while (tw && tw->tw_family != st->family) {
1637 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
1638 st->state = TCP_SEQ_STATE_ESTABLISHED;
1640 /* We can reschedule between buckets: */
1641 cond_resched_softirq();
1643 if (++st->bucket < tcp_hashinfo.ehash_size) {
1644 read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
1645 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
1653 sk_for_each_from(sk, node) {
1654 if (sk->sk_family == st->family)
1658 st->state = TCP_SEQ_STATE_TIME_WAIT;
1659 tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
1667 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1669 void *rc = established_get_first(seq);
1672 rc = established_get_next(seq, rc);
1678 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1681 struct tcp_iter_state* st = seq->private;
1683 inet_listen_lock(&tcp_hashinfo);
1684 st->state = TCP_SEQ_STATE_LISTENING;
1685 rc = listening_get_idx(seq, &pos);
1688 inet_listen_unlock(&tcp_hashinfo);
1690 st->state = TCP_SEQ_STATE_ESTABLISHED;
1691 rc = established_get_idx(seq, pos);
1697 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
1699 struct tcp_iter_state* st = seq->private;
1700 st->state = TCP_SEQ_STATE_LISTENING;
1702 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1705 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1708 struct tcp_iter_state* st;
1710 if (v == SEQ_START_TOKEN) {
1711 rc = tcp_get_idx(seq, 0);
1716 switch (st->state) {
1717 case TCP_SEQ_STATE_OPENREQ:
1718 case TCP_SEQ_STATE_LISTENING:
1719 rc = listening_get_next(seq, v);
1721 inet_listen_unlock(&tcp_hashinfo);
1723 st->state = TCP_SEQ_STATE_ESTABLISHED;
1724 rc = established_get_first(seq);
1727 case TCP_SEQ_STATE_ESTABLISHED:
1728 case TCP_SEQ_STATE_TIME_WAIT:
1729 rc = established_get_next(seq, v);
1737 static void tcp_seq_stop(struct seq_file *seq, void *v)
1739 struct tcp_iter_state* st = seq->private;
1741 switch (st->state) {
1742 case TCP_SEQ_STATE_OPENREQ:
1744 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
1745 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1747 case TCP_SEQ_STATE_LISTENING:
1748 if (v != SEQ_START_TOKEN)
1749 inet_listen_unlock(&tcp_hashinfo);
1751 case TCP_SEQ_STATE_TIME_WAIT:
1752 case TCP_SEQ_STATE_ESTABLISHED:
1754 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
1760 static int tcp_seq_open(struct inode *inode, struct file *file)
1762 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
1763 struct seq_file *seq;
1764 struct tcp_iter_state *s;
1767 if (unlikely(afinfo == NULL))
1770 s = kmalloc(sizeof(*s), GFP_KERNEL);
1773 memset(s, 0, sizeof(*s));
1774 s->family = afinfo->family;
1775 s->seq_ops.start = tcp_seq_start;
1776 s->seq_ops.next = tcp_seq_next;
1777 s->seq_ops.show = afinfo->seq_show;
1778 s->seq_ops.stop = tcp_seq_stop;
1780 rc = seq_open(file, &s->seq_ops);
1783 seq = file->private_data;
1792 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
1795 struct proc_dir_entry *p;
1799 afinfo->seq_fops->owner = afinfo->owner;
1800 afinfo->seq_fops->open = tcp_seq_open;
1801 afinfo->seq_fops->read = seq_read;
1802 afinfo->seq_fops->llseek = seq_lseek;
1803 afinfo->seq_fops->release = seq_release_private;
1805 p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
1813 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
1817 proc_net_remove(afinfo->name);
1818 memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
1821 static void get_openreq4(struct sock *sk, struct request_sock *req,
1822 char *tmpbuf, int i, int uid)
1824 const struct inet_request_sock *ireq = inet_rsk(req);
1825 int ttd = req->expires - jiffies;
1827 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
1828 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
1831 ntohs(inet_sk(sk)->sport),
1833 ntohs(ireq->rmt_port),
1835 0, 0, /* could print option size, but that is af dependent. */
1836 1, /* timers active (only the expire timer) */
1837 jiffies_to_clock_t(ttd),
1840 0, /* non standard timer */
1841 0, /* open_requests have no inode */
1842 atomic_read(&sk->sk_refcnt),
1846 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
1849 unsigned long timer_expires;
1850 struct tcp_sock *tp = tcp_sk(sp);
1851 const struct inet_connection_sock *icsk = inet_csk(sp);
1852 struct inet_sock *inet = inet_sk(sp);
1853 unsigned int dest = inet->daddr;
1854 unsigned int src = inet->rcv_saddr;
1855 __u16 destp = ntohs(inet->dport);
1856 __u16 srcp = ntohs(inet->sport);
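/* Work out which timer (retransmit, zero-window probe or keepalive)
 * is currently pending so it can be reported in /proc.
 */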
1858 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1860 timer_expires = icsk->icsk_timeout;
1861 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1863 timer_expires = icsk->icsk_timeout;
1864 } else if (timer_pending(&sp->sk_timer)) {
1866 timer_expires = sp->sk_timer.expires;
1869 timer_expires = jiffies;
1872 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
1873 "%08X %5d %8d %lu %d %p %u %u %u %u %d",
1874 i, src, srcp, dest, destp, sp->sk_state,
1875 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
1877 jiffies_to_clock_t(timer_expires - jiffies),
1878 icsk->icsk_retransmits,
1880 icsk->icsk_probes_out,
1882 atomic_read(&sp->sk_refcnt), sp,
1885 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1887 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
1890 static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
1892 unsigned int dest, src;
1894 int ttd = tw->tw_ttd - jiffies;
1899 dest = tw->tw_daddr;
1900 src = tw->tw_rcv_saddr;
1901 destp = ntohs(tw->tw_dport);
1902 srcp = ntohs(tw->tw_sport);
1904 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
1905 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
1906 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
1907 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1908 atomic_read(&tw->tw_refcnt), tw);
1913 static int tcp4_seq_show(struct seq_file *seq, void *v)
1915 struct tcp_iter_state* st;
1916 char tmpbuf[TMPSZ + 1];
1918 if (v == SEQ_START_TOKEN) {
1919 seq_printf(seq, "%-*s\n", TMPSZ - 1,
1920 " sl local_address rem_address st tx_queue "
1921 "rx_queue tr tm->when retrnsmt uid timeout "
1927 switch (st->state) {
1928 case TCP_SEQ_STATE_LISTENING:
1929 case TCP_SEQ_STATE_ESTABLISHED:
1930 get_tcp4_sock(v, tmpbuf, st->num);
1932 case TCP_SEQ_STATE_OPENREQ:
1933 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
1935 case TCP_SEQ_STATE_TIME_WAIT:
1936 get_timewait4_sock(v, tmpbuf, st->num);
1939 seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
1944 static struct file_operations tcp4_seq_fops;
1945 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1946 .owner = THIS_MODULE,
1949 .seq_show = tcp4_seq_show,
1950 .seq_fops = &tcp4_seq_fops,
1953 int __init tcp4_proc_init(void)
1955 return tcp_proc_register(&tcp4_seq_afinfo);
1958 void tcp4_proc_exit(void)
1960 tcp_proc_unregister(&tcp4_seq_afinfo);
1962 #endif /* CONFIG_PROC_FS */
1964 struct proto tcp_prot = {
1966 .owner = THIS_MODULE,
1968 .connect = tcp_v4_connect,
1969 .disconnect = tcp_disconnect,
1970 .accept = inet_csk_accept,
1972 .init = tcp_v4_init_sock,
1973 .destroy = tcp_v4_destroy_sock,
1974 .shutdown = tcp_shutdown,
1975 .setsockopt = tcp_setsockopt,
1976 .getsockopt = tcp_getsockopt,
1977 .sendmsg = tcp_sendmsg,
1978 .recvmsg = tcp_recvmsg,
1979 .backlog_rcv = tcp_v4_do_rcv,
1980 .hash = tcp_v4_hash,
1981 .unhash = tcp_unhash,
1982 .get_port = tcp_v4_get_port,
1983 .enter_memory_pressure = tcp_enter_memory_pressure,
1984 .sockets_allocated = &tcp_sockets_allocated,
1985 .orphan_count = &tcp_orphan_count,
1986 .memory_allocated = &tcp_memory_allocated,
1987 .memory_pressure = &tcp_memory_pressure,
1988 .sysctl_mem = sysctl_tcp_mem,
1989 .sysctl_wmem = sysctl_tcp_wmem,
1990 .sysctl_rmem = sysctl_tcp_rmem,
1991 .max_header = MAX_TCP_HEADER,
1992 .obj_size = sizeof(struct tcp_sock),
1993 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1994 .rsk_prot = &tcp_request_sock_ops,
1999 void __init tcp_v4_init(struct net_proto_family *ops)
2001 int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2003 panic("Failed to create the TCP control socket.\n");
2004 tcp_socket->sk->sk_allocation = GFP_ATOMIC;
2005 inet_sk(tcp_socket->sk)->uc_ttl = -1;
2007 /* Unhash it so that IP input processing does not even
2008 * see it, we do not wish this socket to see incoming
2011 tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2014 EXPORT_SYMBOL(ipv4_specific);
2015 EXPORT_SYMBOL(inet_bind_bucket_create);
2016 EXPORT_SYMBOL(tcp_hashinfo);
2017 EXPORT_SYMBOL(tcp_prot);
2018 EXPORT_SYMBOL(tcp_unhash);
2019 EXPORT_SYMBOL(tcp_v4_conn_request);
2020 EXPORT_SYMBOL(tcp_v4_connect);
2021 EXPORT_SYMBOL(tcp_v4_do_rcv);
2022 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2023 EXPORT_SYMBOL(tcp_v4_send_check);
2024 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2026 #ifdef CONFIG_PROC_FS
2027 EXPORT_SYMBOL(tcp_proc_register);
2028 EXPORT_SYMBOL(tcp_proc_unregister);
2030 EXPORT_SYMBOL(sysctl_local_port_range);
2031 EXPORT_SYMBOL(sysctl_tcp_low_latency);
2032 EXPORT_SYMBOL(sysctl_tcp_tw_reuse);