/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov	:	allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;

static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
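
/*
 * Editorial note (not part of the original file): the initial sequence
 * number is derived from the connection 4-tuple plus a boot-time secret
 * and a clock component, in the spirit of RFC 1948, so distinct
 * connections get unrelated, hard-to-predict ISNs. A hypothetical call
 * for an incoming SYN would look like:
 *
 *	isn = secure_tcp_sequence_number(daddr, saddr,
 *					 th->dest, th->source);
 */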

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);
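
/*
 * Worked example (editorial, not in the original source): if the
 * TIME-WAIT socket last sent snd_nxt = 1000, the reusing connection
 * starts at write_seq = 1000 + 65535 + 2 = 66537. That places the new
 * ISN beyond anything the old peer could still consider in-window (a
 * maximal unscaled window is 65535), so stray segments from the
 * previous incarnation cannot be confused with the new connection.
 */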

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->saddr)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer != NULL &&
		    peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->sport, inet->dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
							   inet->daddr,
							   inet->sport,
							   usin->sin_port);

	inet->id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	return err;
}

/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
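
/*
 * Worked example (editorial): with no IP or TCP options in play,
 * tcp_sync_mss() effectively clamps the MSS to the new path MTU minus
 * the 40 bytes of IPv4 + TCP headers; a reported MTU of 1400 therefore
 * yields an MSS of roughly 1360, and queued segments are resent at
 * that size by tcp_simple_retransmit().
 */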

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
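
/*
 * Decoding example (editorial): for ICMP_DEST_UNREACH (type 3) with
 * code ICMP_PORT_UNREACH (3), the combined value is
 * (3 << 8) | 3 = 0x0303, and it splits back out as:
 *
 *	int icmp_type = err >> 8;
 *	int icmp_code = err & 0xff;
 */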

void tcp_v4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	__u32 seq;
	int err;
	struct net *net = dev_net(skb->dev);

	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, f.e., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);
			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(len, inet->saddr,
					  inet->daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}
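
/*
 * Editorial note on the CHECKSUM_PARTIAL contract (a sketch, not from
 * this file): only the inverted pseudo-header sum is written into
 * th->check above; csum_start/csum_offset then tell the NIC, or the
 * software fallback, where to sum the TCP header and payload and fold
 * the result back in. Roughly, the completing side does:
 *
 *	__wsum csum = csum_partial(th, len, 0);   (seed already in check)
 *	th->check   = csum_fold(csum);
 */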

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP. So we build the reply based only on the
 *		parameters that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb->rtable->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb->dst->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}
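
/*
 * Byte-level example (editorial): the MD5 option word built above is
 * NOP, NOP, kind, length, followed by the 16-byte digest:
 *
 *	01 01 13 12 <16 digest bytes>
 *
 * where 0x13 = 19 (TCPOPT_MD5SIG) and 0x12 = 18 (TCPOLEN_MD5SIG); the
 * two NOPs pad the 18-byte option out to the 20-byte, 32-bit aligned
 * TCPOLEN_MD5SIG_ALIGNED block.
 */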

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb->dst->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
				struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v4_check(skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial(th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
{
	return __tcp_v4_send_synack(sk, req, NULL);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(tcp_hdr(skb)->dest));
	}
}
#endif

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address. */
static struct tcp_md5sig_key *
tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
}

EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool() == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}

EXPORT_SYMBOL(tcp_v4_md5_do_add);

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4 = 0;
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}
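
/*
 * Userspace usage sketch (editorial, not from this file): a peer key is
 * installed through the TCP_MD5SIG socket option; the address and key
 * bytes below are illustrative only.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *a = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	a->sin_family = AF_INET;
 *	a->sin_addr.s_addr = inet_addr("192.0.2.1");
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */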

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
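
/*
 * Layout sketch (editorial): the block hashed above matches the
 * RFC 2385 pseudo-header, 12 bytes on the wire:
 *
 *	struct tcp4_pseudohdr {
 *		__be32	saddr;		(source IP address)
 *		__be32	daddr;		(destination IP address)
 *		__u8	pad;		(always zero)
 *		__u8	protocol;	(IPPROTO_TCP == 6)
 *		__be16	len;		(TCP segment length)
 *	};
 */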

static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->saddr;
		daddr = inet_sk(sk)->daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
};
#endif

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = 536;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
		req->cookie_ts = tmp_opt.tstamp_ok;
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}

/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->daddr	      = ireq->rmt_addr;
	newinet->rcv_saddr    = ireq->loc_addr;
	newinet->saddr	      = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
					  newkey, key->keylen);
		newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}
#endif

	__inet_hash_nolisten(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

/* VJ's idea. Save last timestamp seen from this destination
 * and hold it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */

int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->daddr) {
		peer = inet_getpeer(inet->daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
		     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}

struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_add		= tcp_v4_md5_add_func,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;	/* Infinity */
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	percpu_counter_dec(&tcp_sockets_allocated);
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		st->bucket = 0;
		ilb = &tcp_hashinfo.listening_hash[0];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	void *rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		while (++st->bucket < tcp_hashinfo.ehash_size &&
				empty_bucket(st))
			;
		if (st->bucket >= tcp_hashinfo.ehash_size)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc = NULL;
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	return 0;
}

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_fops.open		= tcp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}

static void get_openreq4(struct sock *sk, struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->daddr;
	__be32 src = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
					     (tp->rcv_nxt - tp->copied_seq),
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
		len);
}

static void get_timewait4_sock(struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
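
/*
 * Sample output (editorial; the field values are illustrative only): a
 * socket listening on 0.0.0.0:22 would show up in /proc/net/tcp
 * roughly as
 *
 *	0: 00000000:0016 00000000:0000 0A 00000000:00000000 00:00000000
 *	   00000000     0        0 1234 1 c0ffee00 ...
 *
 * where 0016 is port 22 in hex and state 0A is TCP_LISTEN.
 */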

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
EXPORT_SYMBOL(tcp4_gro_receive);

int tcp4_gro_complete(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
EXPORT_SYMBOL(tcp4_gro_complete);

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init = tcp_sk_init,
	.exit = tcp_sk_exit,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}

EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_tcp_low_latency);