2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
10 * IPv4 specific functions
15 * linux/ipv4/tcp_input.c
16 * linux/ipv4/tcp_output.c
18 * See tcp.c for author information
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
28 * David S. Miller : New socket lookup architecture.
29 * This code is dedicated to John Dyson.
30 * David S. Miller : Change semantics of established hash,
31 * half is devoted to TIME_WAIT sockets
32 * and the rest go in the other half.
33 * Andi Kleen : Add support for syncookies and fixed
34 * some bugs: ip options weren't passed to
35 * the TCP layer, missed a check for an ACK bit.
37 * Andi Kleen : Implemented fast path mtu discovery.
38 * Fixed many serious bugs in the
39 * request_sock handling and moved
40 * most of it into the af independent code.
41 * Added tail drop and some other bugfixes.
42 * Added new listen semantics.
43 * Mike McLagan : Routing by source
44 * Juan Jose Ciarlante: ip_dynaddr bits
45 * Andi Kleen: various fixes.
46 * Vitaly E. Lavrov : Transparent proxy revived after a year-long coma.
48 * Andi Kleen : Fix new listen.
49 * Andi Kleen : Fix accept error reporting.
50 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
51 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
52 * a single port at the same time.
55 #include <linux/config.h>
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
67 #include <net/inet_hashtables.h>
69 #include <net/transp_v6.h>
71 #include <net/inet_common.h>
74 #include <linux/inet.h>
75 #include <linux/ipv6.h>
76 #include <linux/stddef.h>
77 #include <linux/proc_fs.h>
78 #include <linux/seq_file.h>
80 int sysctl_tcp_tw_reuse;
81 int sysctl_tcp_low_latency;
83 /* Check TCP sequence numbers in ICMP packets. */
84 #define ICMP_MIN_LENGTH 8
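/* An ICMP error must quote at least the first 8 bytes of the offending
 * TCP header (source/destination ports and sequence number), which is all
 * tcp_v4_err() needs to locate the socket and sanity-check the sequence.
 */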
86 /* Socket used for sending RSTs */
87 static struct socket *tcp_socket;
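/* This kernel-internal socket is only used as a transmit context for
 * ip_send_reply() in tcp_v4_send_reset() and tcp_v4_send_ack(); it is
 * unhashed in tcp_v4_init() so it never receives packets itself.
 */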
89 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
92 struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
93 .lhash_lock = RW_LOCK_UNLOCKED,
94 .lhash_users = ATOMIC_INIT(0),
95 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
96 .portalloc_lock = SPIN_LOCK_UNLOCKED,
97 .port_rover = 1024 - 1,
100 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
102 return inet_csk_get_port(&tcp_hashinfo, sk, snum);
105 static void tcp_v4_hash(struct sock *sk)
107 inet_hash(&tcp_hashinfo, sk);
110 void tcp_unhash(struct sock *sk)
112 inet_unhash(&tcp_hashinfo, sk);
115 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
117 return secure_tcp_sequence_number(skb->nh.iph->daddr,
123 /* called with local bh disabled */
124 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
125 struct inet_timewait_sock **twp)
127 struct inet_sock *inet = inet_sk(sk);
128 u32 daddr = inet->rcv_saddr;
129 u32 saddr = inet->daddr;
130 int dif = sk->sk_bound_dev_if;
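/* Note the deliberate swap above: the local address goes into the "daddr"
 * slot and the remote address into the "saddr" slot, i.e. the tuple is
 * expressed from the viewpoint of an incoming packet, so the same hash
 * function and INET_MATCH()/INET_TW_MATCH() macros used on the receive
 * path can be reused here unchanged.
 */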
131 INET_ADDR_COOKIE(acookie, saddr, daddr)
132 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
133 unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
134 struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash);
136 const struct hlist_node *node;
137 struct inet_timewait_sock *tw;
139 prefetch(head->chain.first);
140 write_lock(&head->lock);
142 /* Check TIME-WAIT sockets first. */
143 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
146 if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) {
147 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
148 struct tcp_sock *tp = tcp_sk(sk);
150 /* With PAWS, it is safe from the viewpoint
151 of data integrity. Even without PAWS it
152 is safe provided sequence spaces do not
153 overlap i.e. at data rates <= 80Mbit/sec.
155 Actually, the idea is close to VJ's one, only the
156 timestamp cache is held not per host but per port
157 pair, and the TW bucket is used as the state holder.
160 If the TW bucket has already been destroyed we
161 fall back to VJ's scheme and use the initial
162 timestamp retrieved from the peer table.
164 if (tcptw->tw_ts_recent_stamp &&
165 (!twp || (sysctl_tcp_tw_reuse &&
167 tcptw->tw_ts_recent_stamp > 1))) {
168 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
169 if (tp->write_seq == 0)
171 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
172 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
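/* Reusing a TIME-WAIT port is only safe if the new connection starts its
 * sequence numbers beyond anything the old incarnation could have sent;
 * the "+ 65535 + 2" above pushes write_seq a full maximum window (plus
 * SYN/FIN) past tw_snd_nxt, and the recovered ts_recent keeps PAWS
 * effective from the first segment.
 */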
181 /* And established part... */
182 sk_for_each(sk2, node, &head->chain) {
183 if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
188 /* Must record num and sport now. Otherwise we will see
189 * a socket with a funny identity in the hash table. */
191 inet->sport = htons(lport);
193 BUG_TRAP(sk_unhashed(sk));
194 __sk_add_node(sk, &head->chain);
195 sock_prot_inc_use(sk->sk_prot);
196 write_unlock(&head->lock);
200 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
202 /* Silly. Should hash-dance instead... */
203 inet_twsk_deschedule(tw, &tcp_death_row);
204 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
212 write_unlock(&head->lock);
213 return -EADDRNOTAVAIL;
216 static inline u32 connect_port_offset(const struct sock *sk)
218 const struct inet_sock *inet = inet_sk(sk);
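/* This per-connection offset is derived from a secret hash of the
 * connection endpoints, so the ephemeral-port search in
 * tcp_v4_hash_connect() starts at a different, unpredictable position
 * for each destination.
 */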
220 return secure_tcp_port_ephemeral(inet->rcv_saddr, inet->daddr,
225 * Bind a port for a connect operation and hash it.
227 static inline int tcp_v4_hash_connect(struct sock *sk)
229 const unsigned short snum = inet_sk(sk)->num;
230 struct inet_bind_hashbucket *head;
231 struct inet_bind_bucket *tb;
235 int low = sysctl_local_port_range[0];
236 int high = sysctl_local_port_range[1];
237 int range = high - low;
241 u32 offset = hint + connect_port_offset(sk);
242 struct hlist_node *node;
243 struct inet_timewait_sock *tw = NULL;
246 for (i = 1; i <= range; i++) {
247 port = low + (i + offset) % range;
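/* Candidate ports are low + (i + offset) % range for i = 1..range, so every
 * port in [low, low + range) is visited exactly once, just starting from a
 * rotated position determined by connect_port_offset().
 */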
248 head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
249 spin_lock(&head->lock);
251 /* Does not bother with rcv_saddr checks,
252 * because the established check is already unique enough.
255 inet_bind_bucket_for_each(tb, node, &head->chain) {
256 if (tb->port == port) {
257 BUG_TRAP(!hlist_empty(&tb->owners));
258 if (tb->fastreuse >= 0)
260 if (!__tcp_v4_check_established(sk,
268 tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
270 spin_unlock(&head->lock);
277 spin_unlock(&head->lock);
281 return -EADDRNOTAVAIL;
286 /* Head lock still held and bh's disabled */
287 inet_bind_hash(sk, tb, port);
288 if (sk_unhashed(sk)) {
289 inet_sk(sk)->sport = htons(port);
290 __inet_hash(&tcp_hashinfo, sk, 0);
292 spin_unlock(&head->lock);
295 inet_twsk_deschedule(tw, &tcp_death_row);
303 head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
304 tb = inet_csk(sk)->icsk_bind_hash;
305 spin_lock_bh(&head->lock);
306 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
307 __inet_hash(&tcp_hashinfo, sk, 0);
308 spin_unlock_bh(&head->lock);
311 spin_unlock(&head->lock);
312 /* No definite answer... Walk to established hash table */
313 ret = __tcp_v4_check_established(sk, snum, NULL);
320 /* This will initiate an outgoing connection. */
321 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
323 struct inet_sock *inet = inet_sk(sk);
324 struct tcp_sock *tp = tcp_sk(sk);
325 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
331 if (addr_len < sizeof(struct sockaddr_in))
334 if (usin->sin_family != AF_INET)
335 return -EAFNOSUPPORT;
337 nexthop = daddr = usin->sin_addr.s_addr;
338 if (inet->opt && inet->opt->srr) {
341 nexthop = inet->opt->faddr;
344 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
345 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
347 inet->sport, usin->sin_port, sk);
351 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
356 if (!inet->opt || !inet->opt->srr)
360 inet->saddr = rt->rt_src;
361 inet->rcv_saddr = inet->saddr;
363 if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
364 /* Reset inherited state */
365 tp->rx_opt.ts_recent = 0;
366 tp->rx_opt.ts_recent_stamp = 0;
370 if (tcp_death_row.sysctl_tw_recycle &&
371 !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
372 struct inet_peer *peer = rt_get_peer(rt);
374 /* VJ's idea. We save the last timestamp seen from
375 * the destination in the peer table when entering TIME-WAIT,
376 * and initialize rx_opt.ts_recent from it when trying a new connection.
379 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
380 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
381 tp->rx_opt.ts_recent = peer->tcp_ts;
385 inet->dport = usin->sin_port;
388 tp->ext_header_len = 0;
390 tp->ext_header_len = inet->opt->optlen;
392 tp->rx_opt.mss_clamp = 536;
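/* 536 is the RFC 1122 default MSS: the 576-byte minimum IPv4 reassembly
 * buffer minus 40 bytes of IP and TCP headers. It stays in effect until
 * the peer's MSS option (if any) is parsed.
 */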
394 /* Socket identity is still unknown (sport may be zero).
395 * However, we set the state to SYN-SENT and, without releasing the socket
396 * lock, select a source port, enter ourselves into the hash tables and
397 * complete initialization after this.
399 tcp_set_state(sk, TCP_SYN_SENT);
400 err = tcp_v4_hash_connect(sk);
404 err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
408 /* OK, now commit destination to socket. */
409 sk_setup_caps(sk, &rt->u.dst);
412 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
417 inet->id = tp->write_seq ^ jiffies;
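/* inet->id is seeded from the initial sequence number mixed with jiffies,
 * so consecutive connections do not start from a trivially predictable
 * IP identification value.
 */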
419 err = tcp_connect(sk);
427 /* This unhashes the socket and releases the local port, if necessary. */
428 tcp_set_state(sk, TCP_CLOSE);
430 sk->sk_route_caps = 0;
436 * This routine does path mtu discovery as defined in RFC1191.
438 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
441 struct dst_entry *dst;
442 struct inet_sock *inet = inet_sk(sk);
443 struct tcp_sock *tp = tcp_sk(sk);
445 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
446 * sent out by Linux are always < 576 bytes so they should go through
449 if (sk->sk_state == TCP_LISTEN)
452 /* We don't check in the dst entry whether pmtu discovery is forbidden
453 * on this route. We just assume that no packet-too-big packets
454 * are sent back when pmtu discovery is not active.
455 * There is a small race when the user changes this flag in the
456 * route, but I think that's acceptable.
458 if ((dst = __sk_dst_check(sk, 0)) == NULL)
461 dst->ops->update_pmtu(dst, mtu);
463 /* Something is about to go wrong... Remember the soft error
464 * in case this connection is not able to recover.
466 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
467 sk->sk_err_soft = EMSGSIZE;
471 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
472 tp->pmtu_cookie > mtu) {
473 tcp_sync_mss(sk, mtu);
475 /* Resend the TCP packet because it's
476 * clear that the old packet has been
477 * dropped. This is the new "fast" path mtu
480 tcp_simple_retransmit(sk);
481 } /* else let the usual retransmit timer handle it */
485 * This routine is called by the ICMP module when it gets some
486 * sort of error condition. If err < 0 then the socket should
487 * be closed and the error returned to the user. If err > 0
488 * it's just the icmp type << 8 | icmp code. After adjustment
489 * header points to the first 8 bytes of the tcp header. We need
490 * to find the appropriate port.
492 * The locking strategy used here is very "optimistic". When
493 * someone else accesses the socket the ICMP is just dropped
494 * and for some paths there is no check at all.
495 * A more general error queue to queue errors for later handling
496 * is probably better.
500 void tcp_v4_err(struct sk_buff *skb, u32 info)
502 struct iphdr *iph = (struct iphdr *)skb->data;
503 struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
505 struct inet_sock *inet;
506 int type = skb->h.icmph->type;
507 int code = skb->h.icmph->code;
512 if (skb->len < (iph->ihl << 2) + 8) {
513 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
517 sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
518 th->source, inet_iif(skb));
520 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
523 if (sk->sk_state == TCP_TIME_WAIT) {
524 inet_twsk_put((struct inet_timewait_sock *)sk);
529 /* If too many ICMPs get dropped on busy
530 * servers this needs to be solved differently.
532 if (sock_owned_by_user(sk))
533 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
535 if (sk->sk_state == TCP_CLOSE)
539 seq = ntohl(th->seq);
540 if (sk->sk_state != TCP_LISTEN &&
541 !between(seq, tp->snd_una, tp->snd_nxt)) {
542 NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
547 case ICMP_SOURCE_QUENCH:
548 /* Just silently ignore these. */
550 case ICMP_PARAMETERPROB:
553 case ICMP_DEST_UNREACH:
554 if (code > NR_ICMP_UNREACH)
557 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
558 if (!sock_owned_by_user(sk))
559 do_pmtu_discovery(sk, iph, info);
563 err = icmp_err_convert[code].errno;
565 case ICMP_TIME_EXCEEDED:
572 switch (sk->sk_state) {
573 struct request_sock *req, **prev;
575 if (sock_owned_by_user(sk))
578 req = inet_csk_search_req(sk, &prev, th->dest,
579 iph->daddr, iph->saddr);
583 /* ICMPs are not backlogged, hence we cannot get
584 an established socket here.
588 if (seq != tcp_rsk(req)->snt_isn) {
589 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
594 * Still in SYN_RECV, just remove it silently.
595 * There is no good way to pass the error to the newly
596 * created socket, and POSIX does not want network
597 * errors returned from accept().
599 inet_csk_reqsk_queue_drop(sk, req, prev);
603 case TCP_SYN_RECV: /* Cannot happen.
604 It can, e.g., if SYNs crossed.
606 if (!sock_owned_by_user(sk)) {
607 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
610 sk->sk_error_report(sk);
614 sk->sk_err_soft = err;
619 /* If we've already connected we will keep trying
620 * until we time out, or the user gives up.
622 * RFC 1122 4.2.3.9 allows us to treat as hard errors
623 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
624 * but it is obsoleted by pmtu discovery).
626 * Note that in the modern internet, where routing is unreliable
627 * and broken firewalls sit in every dark corner, sending random
628 * errors ordered by their masters, even these two messages finally lose
629 * their original sense (even Linux sends invalid PORT_UNREACHs).
631 * Now we are in compliance with RFCs.
636 if (!sock_owned_by_user(sk) && inet->recverr) {
638 sk->sk_error_report(sk);
639 } else { /* Only an error on timeout */
640 sk->sk_err_soft = err;
648 /* This routine computes an IPv4 TCP checksum. */
649 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
652 struct inet_sock *inet = inet_sk(sk);
654 if (skb->ip_summed == CHECKSUM_HW) {
655 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
656 skb->csum = offsetof(struct tcphdr, check);
658 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
659 csum_partial((char *)th,
666 * This routine will send an RST to the other tcp.
668 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)?
670 * Answer: if a packet caused an RST, it is not for a socket
671 * existing in our system; if it is matched to a socket,
672 * it is just a duplicate segment or a bug in the other side's TCP.
673 * So we build the reply based only on the parameters
674 * that arrived with the segment.
675 * Exception: precedence violation. We do not implement it in any case.
678 static void tcp_v4_send_reset(struct sk_buff *skb)
680 struct tcphdr *th = skb->h.th;
682 struct ip_reply_arg arg;
684 /* Never send a reset in response to a reset. */
688 if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
691 /* Swap the send and the receive. */
692 memset(&rth, 0, sizeof(struct tcphdr));
693 rth.dest = th->source;
694 rth.source = th->dest;
695 rth.doff = sizeof(struct tcphdr) / 4;
699 rth.seq = th->ack_seq;
702 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
703 skb->len - (th->doff << 2));
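/* The RST acknowledges exactly the sequence space the offending segment
 * consumed: its payload length plus one each for SYN and FIN.
 */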
706 memset(&arg, 0, sizeof arg);
707 arg.iov[0].iov_base = (unsigned char *)&rth;
708 arg.iov[0].iov_len = sizeof rth;
709 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
710 skb->nh.iph->saddr, /*XXX*/
711 sizeof(struct tcphdr), IPPROTO_TCP, 0);
712 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
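/* ip_send_reply() finishes the checksum for us: arg.csum seeds it with the
 * pseudo-header sum and csumoffset (in 16-bit words) tells it where in the
 * TCP header to store the folded result.
 */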
714 ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
716 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
717 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
720 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
721 outside of socket context, is certainly ugly. What can I do?
724 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
727 struct tcphdr *th = skb->h.th;
732 struct ip_reply_arg arg;
734 memset(&rep.th, 0, sizeof(struct tcphdr));
735 memset(&arg, 0, sizeof arg);
737 arg.iov[0].iov_base = (unsigned char *)&rep;
738 arg.iov[0].iov_len = sizeof(rep.th);
740 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
741 (TCPOPT_TIMESTAMP << 8) |
743 rep.tsopt[1] = htonl(tcp_time_stamp);
744 rep.tsopt[2] = htonl(ts);
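/* Hand-built timestamp option: two NOPs for alignment, then the
 * kind/length byte pair, then TSval (our clock) and TSecr echoing the
 * peer's most recent timestamp.
 */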
745 arg.iov[0].iov_len = sizeof(rep);
748 /* Swap the send and the receive. */
749 rep.th.dest = th->source;
750 rep.th.source = th->dest;
751 rep.th.doff = arg.iov[0].iov_len / 4;
752 rep.th.seq = htonl(seq);
753 rep.th.ack_seq = htonl(ack);
755 rep.th.window = htons(win);
757 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
758 skb->nh.iph->saddr, /*XXX*/
759 arg.iov[0].iov_len, IPPROTO_TCP, 0);
760 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
762 ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
764 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
767 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
769 struct inet_timewait_sock *tw = inet_twsk(sk);
770 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
772 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
773 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent);
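/* tcp_v4_reqsk_send_ack(): for a request still in SYN_RECV we advertise
 * snt_isn + 1 as our sequence number (the SYN-ACK consumed one) and
 * acknowledge rcv_isn + 1 (the peer's SYN).
 */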
778 static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
780 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
785 * Send a SYN-ACK after having received a SYN.
786 * This still operates on a request_sock only, not on a big socket.
789 static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
790 struct dst_entry *dst)
792 const struct inet_request_sock *ireq = inet_rsk(req);
794 struct sk_buff * skb;
796 /* First, grab a route. */
797 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
800 skb = tcp_make_synack(sk, dst, req);
803 struct tcphdr *th = skb->h.th;
805 th->check = tcp_v4_check(th, skb->len,
808 csum_partial((char *)th, skb->len,
811 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
814 if (err == NET_XMIT_CN)
824 * IPv4 request_sock destructor.
826 static void tcp_v4_reqsk_destructor(struct request_sock *req)
828 if (inet_rsk(req)->opt)
829 kfree(inet_rsk(req)->opt);
832 static inline void syn_flood_warning(struct sk_buff *skb)
834 static unsigned long warntime;
836 if (time_after(jiffies, (warntime + HZ * 60))) {
839 "possible SYN flooding on port %d. Sending cookies.\n",
840 ntohs(skb->h.th->dest));
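/* The printk above is rate-limited to at most one warning per minute
 * (HZ * 60 jiffies), however heavy the flood.
 */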
845 * Save and compile IPv4 options into the request_sock if needed.
847 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
850 struct ip_options *opt = &(IPCB(skb)->opt);
851 struct ip_options *dopt = NULL;
853 if (opt && opt->optlen) {
854 int opt_size = optlength(opt);
855 dopt = kmalloc(opt_size, GFP_ATOMIC);
857 if (ip_options_echo(dopt, skb)) {
866 struct request_sock_ops tcp_request_sock_ops = {
868 .obj_size = sizeof(struct tcp_request_sock),
869 .rtx_syn_ack = tcp_v4_send_synack,
870 .send_ack = tcp_v4_reqsk_send_ack,
871 .destructor = tcp_v4_reqsk_destructor,
872 .send_reset = tcp_v4_send_reset,
875 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
877 struct inet_request_sock *ireq;
878 struct tcp_options_received tmp_opt;
879 struct request_sock *req;
880 __u32 saddr = skb->nh.iph->saddr;
881 __u32 daddr = skb->nh.iph->daddr;
882 __u32 isn = TCP_SKB_CB(skb)->when;
883 struct dst_entry *dst = NULL;
884 #ifdef CONFIG_SYN_COOKIES
887 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
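/* With CONFIG_SYN_COOKIES disabled, this compile-time zero stands in for
 * the local want_cookie variable used in the #ifdef branch above, so the
 * cookie paths later in tcp_v4_conn_request() are dead code (even if, as
 * the comment grumbles, gcc does not always eliminate them cleanly).
 */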
890 /* Never answer SYNs sent to broadcast or multicast addresses */
891 if (((struct rtable *)skb->dst)->rt_flags &
892 (RTCF_BROADCAST | RTCF_MULTICAST))
895 /* TW buckets are converted to open requests without
896 * limitations: they conserve resources and the peer is
897 * evidently a real one.
899 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
900 #ifdef CONFIG_SYN_COOKIES
901 if (sysctl_tcp_syncookies) {
908 /* Accept backlog is full. If we have already queued enough
909 * warm entries in the syn queue, drop the request. It is better than
910 * clogging the syn queue with openreqs with an exponentially increasing timeout.
913 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
916 req = reqsk_alloc(&tcp_request_sock_ops);
920 tcp_clear_options(&tmp_opt);
921 tmp_opt.mss_clamp = 536;
922 tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
924 tcp_parse_options(skb, &tmp_opt, 0);
927 tcp_clear_options(&tmp_opt);
928 tmp_opt.saw_tstamp = 0;
931 if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
932 /* Some OSes (unknown ones, but I see them on a web server, which
933 * contains information interesting only for Windows
934 * users) do not send their timestamp in the SYN. It is an easy case:
935 * we simply do not advertise TS support.
937 tmp_opt.saw_tstamp = 0;
938 tmp_opt.tstamp_ok = 0;
940 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
942 tcp_openreq_init(req, &tmp_opt, skb);
944 ireq = inet_rsk(req);
945 ireq->loc_addr = daddr;
946 ireq->rmt_addr = saddr;
947 ireq->opt = tcp_v4_save_options(sk, skb);
949 TCP_ECN_create_request(req, skb->h.th);
952 #ifdef CONFIG_SYN_COOKIES
953 syn_flood_warning(skb);
955 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
957 struct inet_peer *peer = NULL;
959 /* VJ's idea. We save the last timestamp seen
960 * from the destination in the peer table when entering
961 * TIME-WAIT state, and check against it before
962 * accepting a new connection request.
964 * If "isn" is not zero, this request hit an alive
965 * timewait bucket, so all the necessary checks
966 * were already made by the code processing the timewait state.
968 if (tmp_opt.saw_tstamp &&
969 tcp_death_row.sysctl_tw_recycle &&
970 (dst = inet_csk_route_req(sk, req)) != NULL &&
971 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
972 peer->v4daddr == saddr) {
973 if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
974 (s32)(peer->tcp_ts - req->ts_recent) >
976 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
981 /* Kill the following clause, if you dislike this way. */
982 else if (!sysctl_tcp_syncookies &&
983 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
984 (sysctl_max_syn_backlog >> 2)) &&
985 (!peer || !peer->tcp_ts_stamp) &&
986 (!dst || !dst_metric(dst, RTAX_RTT))) {
987 /* Without syncookies the last quarter of the
988 * backlog is filled only with destinations
989 * proven to be alive.
990 * It means that we continue to communicate
991 * with destinations we already remembered
992 * at the moment of the synflood.
994 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
995 "request from %u.%u.%u.%u/%u\n",
997 ntohs(skb->h.th->source));
1002 isn = tcp_v4_init_sequence(sk, skb);
1004 tcp_rsk(req)->snt_isn = isn;
1006 if (tcp_v4_send_synack(sk, req, dst))
1012 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1019 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1025 * The three way handshake has completed - we got a valid synack -
1026 * now create the new socket.
1028 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1029 struct request_sock *req,
1030 struct dst_entry *dst)
1032 struct inet_request_sock *ireq;
1033 struct inet_sock *newinet;
1034 struct tcp_sock *newtp;
1037 if (sk_acceptq_is_full(sk))
1040 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
1043 newsk = tcp_create_openreq_child(sk, req, skb);
1047 sk_setup_caps(newsk, dst);
1049 newtp = tcp_sk(newsk);
1050 newinet = inet_sk(newsk);
1051 ireq = inet_rsk(req);
1052 newinet->daddr = ireq->rmt_addr;
1053 newinet->rcv_saddr = ireq->loc_addr;
1054 newinet->saddr = ireq->loc_addr;
1055 newinet->opt = ireq->opt;
1057 newinet->mc_index = inet_iif(skb);
1058 newinet->mc_ttl = skb->nh.iph->ttl;
1059 newtp->ext_header_len = 0;
1061 newtp->ext_header_len = newinet->opt->optlen;
1062 newinet->id = newtp->write_seq ^ jiffies;
1064 tcp_sync_mss(newsk, dst_mtu(dst));
1065 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1066 tcp_initialize_rcv_mss(newsk);
1068 __inet_hash(&tcp_hashinfo, newsk, 0);
1069 __inet_inherit_port(&tcp_hashinfo, sk, newsk);
1074 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1076 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1081 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1083 struct tcphdr *th = skb->h.th;
1084 struct iphdr *iph = skb->nh.iph;
1086 struct request_sock **prev;
1087 /* Find possible connection requests. */
1088 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1089 iph->saddr, iph->daddr);
1091 return tcp_check_req(sk, skb, req, prev);
1093 nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
1094 th->source, skb->nh.iph->daddr,
1095 ntohs(th->dest), inet_iif(skb));
1098 if (nsk->sk_state != TCP_TIME_WAIT) {
1102 inet_twsk_put((struct inet_timewait_sock *)nsk);
1106 #ifdef CONFIG_SYN_COOKIES
1107 if (!th->rst && !th->syn && th->ack)
1108 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1113 static int tcp_v4_checksum_init(struct sk_buff *skb)
1115 if (skb->ip_summed == CHECKSUM_HW) {
1116 skb->ip_summed = CHECKSUM_UNNECESSARY;
1117 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1118 skb->nh.iph->daddr, skb->csum))
1121 LIMIT_NETDEBUG(KERN_DEBUG "hw tcp v4 csum failed\n");
1122 skb->ip_summed = CHECKSUM_NONE;
1124 if (skb->len <= 76) {
1125 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1127 skb_checksum(skb, 0, skb->len, 0)))
1129 skb->ip_summed = CHECKSUM_UNNECESSARY;
1131 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1133 skb->nh.iph->daddr, 0);
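/* For longer packets verification is deferred: skb->csum is primed here
 * with the complemented pseudo-header checksum, so a later
 * tcp_checksum_complete() only has to fold in the TCP header and data.
 */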
1139 /* The socket must have its spinlock held when we get here.
1142 * We have a potential double-lock case here, so even when
1143 * doing backlog processing we use the BH locking scheme.
1144 * This is because we cannot sleep with the original spinlock held.
1147 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1149 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1150 TCP_CHECK_TIMER(sk);
1151 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1153 TCP_CHECK_TIMER(sk);
1157 if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1160 if (sk->sk_state == TCP_LISTEN) {
1161 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1166 if (tcp_child_process(sk, nsk, skb))
1172 TCP_CHECK_TIMER(sk);
1173 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1175 TCP_CHECK_TIMER(sk);
1179 tcp_v4_send_reset(skb);
1182 /* Be careful here. If this function gets more complicated and
1183 * gcc suffers from register pressure on the x86, sk (in %ebx)
1184 * might be destroyed here. This current version compiles correctly,
1185 * but you have been warned.
1190 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1198 int tcp_v4_rcv(struct sk_buff *skb)
1204 if (skb->pkt_type != PACKET_HOST)
1207 /* Count it even if it's bad */
1208 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1210 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1215 if (th->doff < sizeof(struct tcphdr) / 4)
1217 if (!pskb_may_pull(skb, th->doff * 4))
1220 /* An explanation is required here, I think.
1221 * Packet length and doff are validated by header prediction,
1222 * provided the case of th->doff==0 is eliminated.
1223 * So, we defer the checks. */
1224 if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1225 tcp_v4_checksum_init(skb) < 0))
1229 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1230 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1231 skb->len - th->doff * 4);
1232 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1233 TCP_SKB_CB(skb)->when = 0;
1234 TCP_SKB_CB(skb)->flags = skb->nh.iph->tos;
1235 TCP_SKB_CB(skb)->sacked = 0;
1237 sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
1238 skb->nh.iph->daddr, ntohs(th->dest),
1245 if (sk->sk_state == TCP_TIME_WAIT)
1248 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1249 goto discard_and_relse;
1251 if (sk_filter(sk, skb, 0))
1252 goto discard_and_relse;
1258 if (!sock_owned_by_user(sk)) {
1259 if (!tcp_prequeue(sk, skb))
1260 ret = tcp_v4_do_rcv(sk, skb);
1262 sk_add_backlog(sk, skb);
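/* Three-way dispatch: if no user process holds the socket we first try the
 * prequeue (deferring work to a reader blocked in recvmsg) and otherwise
 * process the segment directly in softirq context; if the user owns the
 * socket, the segment goes to the backlog and is handled by tcp_v4_do_rcv()
 * when the lock is released.
 */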
1270 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1273 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1275 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1277 tcp_v4_send_reset(skb);
1281 /* Discard frame. */
1290 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1291 inet_twsk_put((struct inet_timewait_sock *) sk);
1295 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1296 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1297 inet_twsk_put((struct inet_timewait_sock *) sk);
1300 switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
1303 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
1308 inet_twsk_deschedule((struct inet_timewait_sock *)sk,
1310 inet_twsk_put((struct inet_timewait_sock *)sk);
1314 /* Fall through to ACK */
1317 tcp_v4_timewait_ack(sk, skb);
1321 case TCP_TW_SUCCESS:;
1326 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
1328 struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
1329 struct inet_sock *inet = inet_sk(sk);
1331 sin->sin_family = AF_INET;
1332 sin->sin_addr.s_addr = inet->daddr;
1333 sin->sin_port = inet->dport;
1336 /* VJ's idea. Save the last timestamp seen from this destination
1337 * and hold it at least for the normal timewait interval, to use for duplicate
1338 * segment detection in subsequent connections, before they enter the synchronized state.
1342 int tcp_v4_remember_stamp(struct sock *sk)
1344 struct inet_sock *inet = inet_sk(sk);
1345 struct tcp_sock *tp = tcp_sk(sk);
1346 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1347 struct inet_peer *peer = NULL;
1350 if (!rt || rt->rt_dst != inet->daddr) {
1351 peer = inet_getpeer(inet->daddr, 1);
1355 rt_bind_peer(rt, 1);
1360 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
1361 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
1362 peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
1363 peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
1364 peer->tcp_ts = tp->rx_opt.ts_recent;
1374 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1376 struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
1379 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1381 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
1382 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
1383 peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
1384 peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
1385 peer->tcp_ts = tcptw->tw_ts_recent;
1394 struct tcp_func ipv4_specific = {
1395 .queue_xmit = ip_queue_xmit,
1396 .send_check = tcp_v4_send_check,
1397 .rebuild_header = inet_sk_rebuild_header,
1398 .conn_request = tcp_v4_conn_request,
1399 .syn_recv_sock = tcp_v4_syn_recv_sock,
1400 .remember_stamp = tcp_v4_remember_stamp,
1401 .net_header_len = sizeof(struct iphdr),
1402 .setsockopt = ip_setsockopt,
1403 .getsockopt = ip_getsockopt,
1404 .addr2sockaddr = v4_addr2sockaddr,
1405 .sockaddr_len = sizeof(struct sockaddr_in),
1408 /* NOTE: A lot of things are set to zero explicitly by the call to
1409 * sk_alloc(), so they need not be done here.
1411 static int tcp_v4_init_sock(struct sock *sk)
1413 struct inet_connection_sock *icsk = inet_csk(sk);
1414 struct tcp_sock *tp = tcp_sk(sk);
1416 skb_queue_head_init(&tp->out_of_order_queue);
1417 tcp_init_xmit_timers(sk);
1418 tcp_prequeue_init(tp);
1420 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1421 tp->mdev = TCP_TIMEOUT_INIT;
1423 /* So many TCP implementations out there (incorrectly) count the
1424 * initial SYN frame in their delayed-ACK and congestion control
1425 * algorithms that we must have the following bandaid to talk
1426 * efficiently to them. -DaveM
1430 /* See draft-stevens-tcpca-spec-01 for discussion of the
1431 * initialization of these values.
1433 tp->snd_ssthresh = 0x7fffffff; /* Infinity */
1434 tp->snd_cwnd_clamp = ~0;
1435 tp->mss_cache = 536;
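/* mss_cache starts at the conservative 536-byte default; tcp_sync_mss()
 * raises it once the route and its MTU are known.
 */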
1437 tp->reordering = sysctl_tcp_reordering;
1438 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1440 sk->sk_state = TCP_CLOSE;
1442 sk->sk_write_space = sk_stream_write_space;
1443 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1445 tp->af_specific = &ipv4_specific;
1447 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1448 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1450 atomic_inc(&tcp_sockets_allocated);
1455 int tcp_v4_destroy_sock(struct sock *sk)
1457 struct tcp_sock *tp = tcp_sk(sk);
1459 tcp_clear_xmit_timers(sk);
1461 tcp_cleanup_congestion_control(sk);
1463 /* Cleanup up the write buffer. */
1464 sk_stream_writequeue_purge(sk);
1466 /* Cleans up our, hopefully empty, out_of_order_queue. */
1467 __skb_queue_purge(&tp->out_of_order_queue);
1469 /* Clean the prequeue; it really must be empty */
1470 __skb_queue_purge(&tp->ucopy.prequeue);
1472 /* Clean up a referenced TCP bind bucket. */
1473 if (inet_csk(sk)->icsk_bind_hash)
1474 inet_put_port(&tcp_hashinfo, sk);
1477 * If sendmsg cached page exists, toss it.
1479 if (sk->sk_sndmsg_page) {
1480 __free_page(sk->sk_sndmsg_page);
1481 sk->sk_sndmsg_page = NULL;
1484 atomic_dec(&tcp_sockets_allocated);
1489 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1491 #ifdef CONFIG_PROC_FS
1492 /* Proc filesystem TCP sock list dumping. */
1494 static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
1496 return hlist_empty(head) ? NULL :
1497 list_entry(head->first, struct inet_timewait_sock, tw_node);
1500 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1502 return tw->tw_node.next ?
1503 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1506 static void *listening_get_next(struct seq_file *seq, void *cur)
1508 struct inet_connection_sock *icsk;
1509 struct hlist_node *node;
1510 struct sock *sk = cur;
1511 struct tcp_iter_state* st = seq->private;
1515 sk = sk_head(&tcp_hashinfo.listening_hash[0]);
1521 if (st->state == TCP_SEQ_STATE_OPENREQ) {
1522 struct request_sock *req = cur;
1524 icsk = inet_csk(st->syn_wait_sk);
1528 if (req->rsk_ops->family == st->family) {
1534 if (++st->sbucket >= TCP_SYNQ_HSIZE)
1537 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1539 sk = sk_next(st->syn_wait_sk);
1540 st->state = TCP_SEQ_STATE_LISTENING;
1541 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1543 icsk = inet_csk(sk);
1544 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1545 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1547 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1551 sk_for_each_from(sk, node) {
1552 if (sk->sk_family == st->family) {
1556 icsk = inet_csk(sk);
1557 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1558 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1560 st->uid = sock_i_uid(sk);
1561 st->syn_wait_sk = sk;
1562 st->state = TCP_SEQ_STATE_OPENREQ;
1566 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1568 if (++st->bucket < INET_LHTABLE_SIZE) {
1569 sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
1577 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1579 void *rc = listening_get_next(seq, NULL);
1581 while (rc && *pos) {
1582 rc = listening_get_next(seq, rc);
1588 static void *established_get_first(struct seq_file *seq)
1590 struct tcp_iter_state* st = seq->private;
1593 for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
1595 struct hlist_node *node;
1596 struct inet_timewait_sock *tw;
1598 /* We can reschedule _before_ having picked the target: */
1599 cond_resched_softirq();
1601 read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
1602 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1603 if (sk->sk_family != st->family) {
1609 st->state = TCP_SEQ_STATE_TIME_WAIT;
1610 inet_twsk_for_each(tw, node,
1611 &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
1612 if (tw->tw_family != st->family) {
1618 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
1619 st->state = TCP_SEQ_STATE_ESTABLISHED;
1625 static void *established_get_next(struct seq_file *seq, void *cur)
1627 struct sock *sk = cur;
1628 struct inet_timewait_sock *tw;
1629 struct hlist_node *node;
1630 struct tcp_iter_state* st = seq->private;
1634 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
1638 while (tw && tw->tw_family != st->family) {
1645 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
1646 st->state = TCP_SEQ_STATE_ESTABLISHED;
1648 /* We can reschedule between buckets: */
1649 cond_resched_softirq();
1651 if (++st->bucket < tcp_hashinfo.ehash_size) {
1652 read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
1653 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
1661 sk_for_each_from(sk, node) {
1662 if (sk->sk_family == st->family)
1666 st->state = TCP_SEQ_STATE_TIME_WAIT;
1667 tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
1675 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1677 void *rc = established_get_first(seq);
1680 rc = established_get_next(seq, rc);
1686 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1689 struct tcp_iter_state* st = seq->private;
1691 inet_listen_lock(&tcp_hashinfo);
1692 st->state = TCP_SEQ_STATE_LISTENING;
1693 rc = listening_get_idx(seq, &pos);
1696 inet_listen_unlock(&tcp_hashinfo);
1698 st->state = TCP_SEQ_STATE_ESTABLISHED;
1699 rc = established_get_idx(seq, pos);
1705 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
1707 struct tcp_iter_state* st = seq->private;
1708 st->state = TCP_SEQ_STATE_LISTENING;
1710 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1713 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1716 struct tcp_iter_state* st;
1718 if (v == SEQ_START_TOKEN) {
1719 rc = tcp_get_idx(seq, 0);
1724 switch (st->state) {
1725 case TCP_SEQ_STATE_OPENREQ:
1726 case TCP_SEQ_STATE_LISTENING:
1727 rc = listening_get_next(seq, v);
1729 inet_listen_unlock(&tcp_hashinfo);
1731 st->state = TCP_SEQ_STATE_ESTABLISHED;
1732 rc = established_get_first(seq);
1735 case TCP_SEQ_STATE_ESTABLISHED:
1736 case TCP_SEQ_STATE_TIME_WAIT:
1737 rc = established_get_next(seq, v);
1745 static void tcp_seq_stop(struct seq_file *seq, void *v)
1747 struct tcp_iter_state* st = seq->private;
1749 switch (st->state) {
1750 case TCP_SEQ_STATE_OPENREQ:
1752 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
1753 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1755 case TCP_SEQ_STATE_LISTENING:
1756 if (v != SEQ_START_TOKEN)
1757 inet_listen_unlock(&tcp_hashinfo);
1759 case TCP_SEQ_STATE_TIME_WAIT:
1760 case TCP_SEQ_STATE_ESTABLISHED:
1762 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
1768 static int tcp_seq_open(struct inode *inode, struct file *file)
1770 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
1771 struct seq_file *seq;
1772 struct tcp_iter_state *s;
1775 if (unlikely(afinfo == NULL))
1778 s = kmalloc(sizeof(*s), GFP_KERNEL);
1781 memset(s, 0, sizeof(*s));
1782 s->family = afinfo->family;
1783 s->seq_ops.start = tcp_seq_start;
1784 s->seq_ops.next = tcp_seq_next;
1785 s->seq_ops.show = afinfo->seq_show;
1786 s->seq_ops.stop = tcp_seq_stop;
1788 rc = seq_open(file, &s->seq_ops);
1791 seq = file->private_data;
1800 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
1803 struct proc_dir_entry *p;
1807 afinfo->seq_fops->owner = afinfo->owner;
1808 afinfo->seq_fops->open = tcp_seq_open;
1809 afinfo->seq_fops->read = seq_read;
1810 afinfo->seq_fops->llseek = seq_lseek;
1811 afinfo->seq_fops->release = seq_release_private;
1813 p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
1821 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
1825 proc_net_remove(afinfo->name);
1826 memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
1829 static void get_openreq4(struct sock *sk, struct request_sock *req,
1830 char *tmpbuf, int i, int uid)
1832 const struct inet_request_sock *ireq = inet_rsk(req);
1833 int ttd = req->expires - jiffies;
1835 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
1836 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
1839 ntohs(inet_sk(sk)->sport),
1841 ntohs(ireq->rmt_port),
1843 0, 0, /* could print option size, but that is af dependent. */
1844 1, /* timers active (only the expire timer) */
1845 jiffies_to_clock_t(ttd),
1848 0, /* non standard timer */
1849 0, /* open_requests have no inode */
1850 atomic_read(&sk->sk_refcnt),
1854 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
1857 unsigned long timer_expires;
1858 struct tcp_sock *tp = tcp_sk(sp);
1859 const struct inet_connection_sock *icsk = inet_csk(sp);
1860 struct inet_sock *inet = inet_sk(sp);
1861 unsigned int dest = inet->daddr;
1862 unsigned int src = inet->rcv_saddr;
1863 __u16 destp = ntohs(inet->dport);
1864 __u16 srcp = ntohs(inet->sport);
1866 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1868 timer_expires = icsk->icsk_timeout;
1869 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1871 timer_expires = icsk->icsk_timeout;
1872 } else if (timer_pending(&sp->sk_timer)) {
1874 timer_expires = sp->sk_timer.expires;
1877 timer_expires = jiffies;
1880 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
1881 "%08X %5d %8d %lu %d %p %u %u %u %u %d",
1882 i, src, srcp, dest, destp, sp->sk_state,
1883 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
1885 jiffies_to_clock_t(timer_expires - jiffies),
1886 icsk->icsk_retransmits,
1888 icsk->icsk_probes_out,
1890 atomic_read(&sp->sk_refcnt), sp,
1893 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1895 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
1898 static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
1900 unsigned int dest, src;
1902 int ttd = tw->tw_ttd - jiffies;
1907 dest = tw->tw_daddr;
1908 src = tw->tw_rcv_saddr;
1909 destp = ntohs(tw->tw_dport);
1910 srcp = ntohs(tw->tw_sport);
1912 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
1913 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
1914 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
1915 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1916 atomic_read(&tw->tw_refcnt), tw);
1921 static int tcp4_seq_show(struct seq_file *seq, void *v)
1923 struct tcp_iter_state* st;
1924 char tmpbuf[TMPSZ + 1];
1926 if (v == SEQ_START_TOKEN) {
1927 seq_printf(seq, "%-*s\n", TMPSZ - 1,
1928 " sl local_address rem_address st tx_queue "
1929 "rx_queue tr tm->when retrnsmt uid timeout "
1935 switch (st->state) {
1936 case TCP_SEQ_STATE_LISTENING:
1937 case TCP_SEQ_STATE_ESTABLISHED:
1938 get_tcp4_sock(v, tmpbuf, st->num);
1940 case TCP_SEQ_STATE_OPENREQ:
1941 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
1943 case TCP_SEQ_STATE_TIME_WAIT:
1944 get_timewait4_sock(v, tmpbuf, st->num);
1947 seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
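/* Addresses in these lines are printed as raw hex of the 32-bit value, so
 * on a little-endian machine 127.0.0.1 shows up as 0100007F; "st" is the
 * numeric TCP state, e.g. 0A for TCP_LISTEN.
 */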
1952 static struct file_operations tcp4_seq_fops;
1953 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1954 .owner = THIS_MODULE,
1957 .seq_show = tcp4_seq_show,
1958 .seq_fops = &tcp4_seq_fops,
1961 int __init tcp4_proc_init(void)
1963 return tcp_proc_register(&tcp4_seq_afinfo);
1966 void tcp4_proc_exit(void)
1968 tcp_proc_unregister(&tcp4_seq_afinfo);
1970 #endif /* CONFIG_PROC_FS */
1972 struct proto tcp_prot = {
1974 .owner = THIS_MODULE,
1976 .connect = tcp_v4_connect,
1977 .disconnect = tcp_disconnect,
1978 .accept = inet_csk_accept,
1980 .init = tcp_v4_init_sock,
1981 .destroy = tcp_v4_destroy_sock,
1982 .shutdown = tcp_shutdown,
1983 .setsockopt = tcp_setsockopt,
1984 .getsockopt = tcp_getsockopt,
1985 .sendmsg = tcp_sendmsg,
1986 .recvmsg = tcp_recvmsg,
1987 .backlog_rcv = tcp_v4_do_rcv,
1988 .hash = tcp_v4_hash,
1989 .unhash = tcp_unhash,
1990 .get_port = tcp_v4_get_port,
1991 .enter_memory_pressure = tcp_enter_memory_pressure,
1992 .sockets_allocated = &tcp_sockets_allocated,
1993 .orphan_count = &tcp_orphan_count,
1994 .memory_allocated = &tcp_memory_allocated,
1995 .memory_pressure = &tcp_memory_pressure,
1996 .sysctl_mem = sysctl_tcp_mem,
1997 .sysctl_wmem = sysctl_tcp_wmem,
1998 .sysctl_rmem = sysctl_tcp_rmem,
1999 .max_header = MAX_TCP_HEADER,
2000 .obj_size = sizeof(struct tcp_sock),
2001 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2002 .rsk_prot = &tcp_request_sock_ops,
2007 void __init tcp_v4_init(struct net_proto_family *ops)
2009 int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2011 panic("Failed to create the TCP control socket.\n");
2012 tcp_socket->sk->sk_allocation = GFP_ATOMIC;
2013 inet_sk(tcp_socket->sk)->uc_ttl = -1;
2015 /* Unhash it so that IP input processing does not even
2016 * see it; we do not wish this socket to see incoming packets.
2019 tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2022 EXPORT_SYMBOL(ipv4_specific);
2023 EXPORT_SYMBOL(inet_bind_bucket_create);
2024 EXPORT_SYMBOL(tcp_hashinfo);
2025 EXPORT_SYMBOL(tcp_prot);
2026 EXPORT_SYMBOL(tcp_unhash);
2027 EXPORT_SYMBOL(tcp_v4_conn_request);
2028 EXPORT_SYMBOL(tcp_v4_connect);
2029 EXPORT_SYMBOL(tcp_v4_do_rcv);
2030 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2031 EXPORT_SYMBOL(tcp_v4_send_check);
2032 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2034 #ifdef CONFIG_PROC_FS
2035 EXPORT_SYMBOL(tcp_proc_register);
2036 EXPORT_SYMBOL(tcp_proc_unregister);
2038 EXPORT_SYMBOL(sysctl_local_port_range);
2039 EXPORT_SYMBOL(sysctl_tcp_low_latency);
2040 EXPORT_SYMBOL(sysctl_tcp_tw_reuse);