2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
10 * IPv4 specific functions
15 * linux/ipv4/tcp_input.c
16 * linux/ipv4/tcp_output.c
18 * See tcp.c for author information
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
28 * David S. Miller : New socket lookup architecture.
29 * This code is dedicated to John Dyson.
30 * David S. Miller : Change semantics of established hash,
31 * half is devoted to TIME_WAIT sockets
32 * and the rest go in the other half.
33 * Andi Kleen : Add support for syncookies and fixed
34 * some bugs: ip options weren't passed to
35 * the TCP layer, missed a check for an
37 * Andi Kleen : Implemented fast path mtu discovery.
38 * Fixed many serious bugs in the
39 * request_sock handling and moved
40 * most of it into the af independent code.
41 * Added tail drop and some other bugfixes.
42 * Added new listen semantics.
43 * Mike McLagan : Routing by source
44 * Juan Jose Ciarlante: ip_dynaddr bits
45 * Andi Kleen: various fixes.
46 * Vitaly E. Lavrov : Transparent proxy revived after year
48 * Andi Kleen : Fix new listen.
49 * Andi Kleen : Fix accept error reporting.
50 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
51 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
52 * a single port at the same time.
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
65 #include <net/net_namespace.h>
67 #include <net/inet_hashtables.h>
69 #include <net/transp_v6.h>
71 #include <net/inet_common.h>
72 #include <net/timewait_sock.h>
74 #include <net/netdma.h>
76 #include <linux/inet.h>
77 #include <linux/ipv6.h>
78 #include <linux/stddef.h>
79 #include <linux/proc_fs.h>
80 #include <linux/seq_file.h>
82 #include <linux/crypto.h>
83 #include <linux/scatterlist.h>
85 int sysctl_tcp_tw_reuse __read_mostly;
86 int sysctl_tcp_low_latency __read_mostly;
89 #ifdef CONFIG_TCP_MD5SIG
90 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
92 static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
93 __be32 saddr, __be32 daddr,
94 struct tcphdr *th, int protocol,
98 struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
99 .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
100 .lhash_users = ATOMIC_INIT(0),
101 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
104 static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
106 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
109 tcp_hdr(skb)->source);
112 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
114 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
115 struct tcp_sock *tp = tcp_sk(sk);
117 /* With PAWS, it is safe from the viewpoint
118 of data integrity. Even without PAWS it is safe, provided sequence
119 spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
121 Actually, the idea is close to VJ's: the timestamp cache is
122 held not per host but per port pair, and the TW bucket is used as state
125 If the TW bucket has already been destroyed, we fall back to VJ's scheme
126 and use the initial timestamp retrieved from the peer table.
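/* Editor's note (not in the original file): a worked example of the rule
 * above, assuming net.ipv4.tcp_tw_reuse is enabled.  If the old
 * connection's TIME-WAIT bucket recorded its last timestamp at second T
 * and we try to reconnect at T+2, the "more than one second old" check
 * below passes; write_seq is then seeded past tw_snd_nxt by more than a
 * full 64K window (+ 65535 + 2) so old segments cannot land in the new
 * sequence space, and ts_recent/ts_recent_stamp are inherited so PAWS
 * keeps rejecting stragglers from the previous incarnation.
 */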
128 if (tcptw->tw_ts_recent_stamp &&
129 (twp == NULL || (sysctl_tcp_tw_reuse &&
130 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
131 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
132 if (tp->write_seq == 0)
134 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
135 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
143 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
145 /* This will initiate an outgoing connection. */
146 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
148 struct inet_sock *inet = inet_sk(sk);
149 struct tcp_sock *tp = tcp_sk(sk);
150 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
152 __be32 daddr, nexthop;
156 if (addr_len < sizeof(struct sockaddr_in))
159 if (usin->sin_family != AF_INET)
160 return -EAFNOSUPPORT;
162 nexthop = daddr = usin->sin_addr.s_addr;
163 if (inet->opt && inet->opt->srr) {
166 nexthop = inet->opt->faddr;
169 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
170 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
172 inet->sport, usin->sin_port, sk, 1);
174 if (tmp == -ENETUNREACH)
175 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
179 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
184 if (!inet->opt || !inet->opt->srr)
188 inet->saddr = rt->rt_src;
189 inet->rcv_saddr = inet->saddr;
191 if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
192 /* Reset inherited state */
193 tp->rx_opt.ts_recent = 0;
194 tp->rx_opt.ts_recent_stamp = 0;
198 if (tcp_death_row.sysctl_tw_recycle &&
199 !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
200 struct inet_peer *peer = rt_get_peer(rt);
202 * VJ's idea. We save the last timestamp seen from
203 * the destination in the peer table when entering state
204 * TIME-WAIT, and initialize rx_opt.ts_recent from it
205 * when trying a new connection.
208 peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
209 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
210 tp->rx_opt.ts_recent = peer->tcp_ts;
214 inet->dport = usin->sin_port;
217 inet_csk(sk)->icsk_ext_hdr_len = 0;
219 inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
221 tp->rx_opt.mss_clamp = 536;
223 /* Socket identity is still unknown (sport may be zero).
224 * However, we set the state to SYN-SENT and, without releasing the socket
225 * lock, select a source port, enter ourselves into the hash tables and
226 * complete initialization after this.
228 tcp_set_state(sk, TCP_SYN_SENT);
229 err = inet_hash_connect(&tcp_death_row, sk);
233 err = ip_route_newports(&rt, IPPROTO_TCP,
234 inet->sport, inet->dport, sk);
238 /* OK, now commit destination to socket. */
239 sk->sk_gso_type = SKB_GSO_TCPV4;
240 sk_setup_caps(sk, &rt->u.dst);
243 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
248 inet->id = tp->write_seq ^ jiffies;
250 err = tcp_connect(sk);
259 * This unhashes the socket and releases the local port,
262 tcp_set_state(sk, TCP_CLOSE);
264 sk->sk_route_caps = 0;
270 * This routine does path MTU discovery as defined in RFC 1191.
272 static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
274 struct dst_entry *dst;
275 struct inet_sock *inet = inet_sk(sk);
277 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
278 * sent out by Linux are always < 576 bytes, so they should go through
281 if (sk->sk_state == TCP_LISTEN)
284 /* We don't check in the dst entry whether PMTU discovery is forbidden
285 * on this route. We just assume that no packet-too-big packets
286 * are sent back when PMTU discovery is not active.
287 * There is a small race when the user changes this flag in the
288 * route, but I think that's acceptable.
290 if ((dst = __sk_dst_check(sk, 0)) == NULL)
293 dst->ops->update_pmtu(dst, mtu);
295 /* Something is about to go wrong... Remember the soft error
296 * in case this connection is not able to recover.
298 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
299 sk->sk_err_soft = EMSGSIZE;
303 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
304 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
305 tcp_sync_mss(sk, mtu);
307 /* Resend the TCP packet because it's
308 * clear that the old packet has been
309 * dropped. This is the new "fast" path mtu
312 tcp_simple_retransmit(sk);
313 } /* else let the usual retransmit timer handle it */
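/* Editor's illustration (not in the original file): from user space the
 * behaviour above is normally exercised through the IP_MTU_DISCOVER
 * socket option.  A minimal sketch, where fd is assumed to be a connected
 * TCP socket:
 *
 *	int val = IP_PMTUDISC_DO;		// set DF and react to FRAG_NEEDED
 *	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
 *
 *	int mtu;
 *	socklen_t len = sizeof(mtu);
 *	getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len);	// learned path MTU
 */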
317 * This routine is called by the ICMP module when it gets some
318 * sort of error condition. If err < 0 then the socket should
319 * be closed and the error returned to the user. If err > 0
320 * it's just the icmp type << 8 | icmp code. After adjustment,
321 * header points to the first 8 bytes of the TCP header. We need
322 * to find the appropriate port.
324 * The locking strategy used here is very "optimistic". When
325 * someone else accesses the socket, the ICMP is just dropped,
326 * and for some paths there is no check at all.
327 * A more general error queue to queue errors for later handling
328 * is probably better.
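/* Editor's note (not in the original file): concretely, a hard error such
 * as ICMP_DEST_UNREACH/ICMP_PORT_UNREACH is translated through
 * icmp_err_convert[] into ECONNREFUSED, which tcp_v4_err() below either
 * reports immediately via sk_error_report() or parks in sk_err_soft,
 * depending on socket state and ownership.
 */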
332 void tcp_v4_err(struct sk_buff *skb, u32 info)
334 struct iphdr *iph = (struct iphdr *)skb->data;
335 struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
337 struct inet_sock *inet;
338 const int type = icmp_hdr(skb)->type;
339 const int code = icmp_hdr(skb)->code;
344 if (skb->len < (iph->ihl << 2) + 8) {
345 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
349 sk = inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->daddr, th->dest,
350 iph->saddr, th->source, inet_iif(skb));
352 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
355 if (sk->sk_state == TCP_TIME_WAIT) {
356 inet_twsk_put(inet_twsk(sk));
361 /* If too many ICMPs get dropped on busy
362 * servers this needs to be solved differently.
364 if (sock_owned_by_user(sk))
365 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
367 if (sk->sk_state == TCP_CLOSE)
371 seq = ntohl(th->seq);
372 if (sk->sk_state != TCP_LISTEN &&
373 !between(seq, tp->snd_una, tp->snd_nxt)) {
374 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
379 case ICMP_SOURCE_QUENCH:
380 /* Just silently ignore these. */
382 case ICMP_PARAMETERPROB:
385 case ICMP_DEST_UNREACH:
386 if (code > NR_ICMP_UNREACH)
389 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
390 if (!sock_owned_by_user(sk))
391 do_pmtu_discovery(sk, iph, info);
395 err = icmp_err_convert[code].errno;
397 case ICMP_TIME_EXCEEDED:
404 switch (sk->sk_state) {
405 struct request_sock *req, **prev;
407 if (sock_owned_by_user(sk))
410 req = inet_csk_search_req(sk, &prev, th->dest,
411 iph->daddr, iph->saddr);
415 /* ICMPs are not backlogged, hence we cannot get
416 an established socket here.
420 if (seq != tcp_rsk(req)->snt_isn) {
421 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
426 * Still in SYN_RECV, just remove it silently.
427 * There is no good way to pass the error to the newly
428 * created socket, and POSIX does not want network
429 * errors returned from accept().
431 inet_csk_reqsk_queue_drop(sk, req, prev);
435 case TCP_SYN_RECV: /* Cannot happen.
436 It can, e.g., if SYNs crossed.
438 if (!sock_owned_by_user(sk)) {
441 sk->sk_error_report(sk);
445 sk->sk_err_soft = err;
450 /* If we've already connected we will keep trying
451 * until we time out, or the user gives up.
453 * RFC 1122 4.2.3.9 allows us to consider as hard errors
454 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
455 * but it is obsoleted by PMTU discovery).
457 * Note that in the modern Internet, where routing is unreliable
458 * and broken firewalls sit in each dark corner sending random
459 * errors as ordered by their masters, even these two messages finally lose
460 * their original sense (even Linux sends invalid PORT_UNREACHs).
462 * Now we are in compliance with the RFCs.
467 if (!sock_owned_by_user(sk) && inet->recverr) {
469 sk->sk_error_report(sk);
470 } else { /* Only an error on timeout */
471 sk->sk_err_soft = err;
479 /* This routine computes an IPv4 TCP checksum. */
480 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
482 struct inet_sock *inet = inet_sk(sk);
483 struct tcphdr *th = tcp_hdr(skb);
485 if (skb->ip_summed == CHECKSUM_PARTIAL) {
486 th->check = ~tcp_v4_check(len, inet->saddr,
488 skb->csum_start = skb_transport_header(skb) - skb->head;
489 skb->csum_offset = offsetof(struct tcphdr, check);
491 th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
492 csum_partial((char *)th,
498 int tcp_v4_gso_send_check(struct sk_buff *skb)
500 const struct iphdr *iph;
503 if (!pskb_may_pull(skb, sizeof(*th)))
510 th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
511 skb->csum_start = skb_transport_header(skb) - skb->head;
512 skb->csum_offset = offsetof(struct tcphdr, check);
513 skb->ip_summed = CHECKSUM_PARTIAL;
518 * This routine will send an RST to the other tcp.
520 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
522 * Answer: if a packet caused the RST, it is not for a socket
523 * existing in our system; if it is matched to a socket,
524 * it is just a duplicate segment or a bug in the other side's TCP.
525 * So we build the reply based only on the parameters
526 * that arrived with the segment.
527 * Exception: precedence violation. We do not implement it in any case.
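/* Editor's note (not in the original file): the reply below is built
 * purely from the offending segment, per the comment above.  If the
 * incoming segment carried an ACK, the RST's seq is taken from that
 * ack_seq; otherwise seq stays 0 and ack_seq is derived from the incoming
 * seq plus the SYN/FIN flags and payload length, as RFC 793 prescribes
 * for reset generation.
 */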
530 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
532 struct tcphdr *th = tcp_hdr(skb);
535 #ifdef CONFIG_TCP_MD5SIG
536 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
539 struct ip_reply_arg arg;
540 #ifdef CONFIG_TCP_MD5SIG
541 struct tcp_md5sig_key *key;
544 /* Never send a reset in response to a reset. */
548 if (skb->rtable->rt_type != RTN_LOCAL)
551 /* Swap the send and the receive. */
552 memset(&rep, 0, sizeof(rep));
553 rep.th.dest = th->source;
554 rep.th.source = th->dest;
555 rep.th.doff = sizeof(struct tcphdr) / 4;
559 rep.th.seq = th->ack_seq;
562 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
563 skb->len - (th->doff << 2));
566 memset(&arg, 0, sizeof(arg));
567 arg.iov[0].iov_base = (unsigned char *)&rep;
568 arg.iov[0].iov_len = sizeof(rep.th);
570 #ifdef CONFIG_TCP_MD5SIG
571 key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
573 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
575 (TCPOPT_MD5SIG << 8) |
577 /* Update length and the length the header thinks exists */
578 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
579 rep.th.doff = arg.iov[0].iov_len / 4;
581 tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1],
585 &rep.th, IPPROTO_TCP,
589 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
590 ip_hdr(skb)->saddr, /* XXX */
591 sizeof(struct tcphdr), IPPROTO_TCP, 0);
592 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
594 ip_send_reply(dev_net(skb->dst->dev)->ipv4.tcp_sock, skb,
595 &arg, arg.iov[0].iov_len);
597 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
598 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
601 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
602 outside socket context, is certainly ugly. What can I do?
605 static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
606 struct sk_buff *skb, u32 seq, u32 ack,
609 struct tcphdr *th = tcp_hdr(skb);
612 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
613 #ifdef CONFIG_TCP_MD5SIG
614 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
618 struct ip_reply_arg arg;
619 #ifdef CONFIG_TCP_MD5SIG
620 struct tcp_md5sig_key *key;
621 struct tcp_md5sig_key tw_key;
624 memset(&rep.th, 0, sizeof(struct tcphdr));
625 memset(&arg, 0, sizeof(arg));
627 arg.iov[0].iov_base = (unsigned char *)&rep;
628 arg.iov[0].iov_len = sizeof(rep.th);
630 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
631 (TCPOPT_TIMESTAMP << 8) |
633 rep.opt[1] = htonl(tcp_time_stamp);
634 rep.opt[2] = htonl(ts);
635 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
638 /* Swap the send and the receive. */
639 rep.th.dest = th->source;
640 rep.th.source = th->dest;
641 rep.th.doff = arg.iov[0].iov_len / 4;
642 rep.th.seq = htonl(seq);
643 rep.th.ack_seq = htonl(ack);
645 rep.th.window = htons(win);
647 #ifdef CONFIG_TCP_MD5SIG
649 * The SKB holds an incoming packet, but may not have a valid ->sk
650 * pointer. This is especially the case when we're dealing with a
651 * TIME_WAIT ack, because the sk structure is long gone, and only
652 * the tcp_timewait_sock remains. So the md5 key is stashed in that
653 * structure, and we use it in preference. I believe that (twsk ||
654 * skb->sk) holds true, but we program defensively.
656 if (!twsk && skb->sk) {
657 key = tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr);
658 } else if (twsk && twsk->tw_md5_keylen) {
659 tw_key.key = twsk->tw_md5_key;
660 tw_key.keylen = twsk->tw_md5_keylen;
666 int offset = (ts) ? 3 : 0;
668 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
670 (TCPOPT_MD5SIG << 8) |
672 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
673 rep.th.doff = arg.iov[0].iov_len/4;
675 tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset],
679 &rep.th, IPPROTO_TCP,
683 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
684 ip_hdr(skb)->saddr, /* XXX */
685 arg.iov[0].iov_len, IPPROTO_TCP, 0);
686 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
688 arg.bound_dev_if = twsk->tw_sk.tw_bound_dev_if;
690 ip_send_reply(dev_net(skb->dev)->ipv4.tcp_sock, skb,
691 &arg, arg.iov[0].iov_len);
693 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
696 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
698 struct inet_timewait_sock *tw = inet_twsk(sk);
699 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
701 tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
702 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
703 tcptw->tw_ts_recent);
708 static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
709 struct request_sock *req)
711 tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1,
712 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
717 * Send a SYN-ACK after having received a SYN.
718 * This still operates on a request_sock only, not on a big
721 static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
722 struct dst_entry *dst)
724 const struct inet_request_sock *ireq = inet_rsk(req);
726 struct sk_buff * skb;
728 /* First, grab a route. */
729 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
732 skb = tcp_make_synack(sk, dst, req);
735 struct tcphdr *th = tcp_hdr(skb);
737 th->check = tcp_v4_check(skb->len,
740 csum_partial((char *)th, skb->len,
743 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
746 err = net_xmit_eval(err);
753 static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
755 return __tcp_v4_send_synack(sk, req, NULL);
759 * IPv4 request_sock destructor.
761 static void tcp_v4_reqsk_destructor(struct request_sock *req)
763 kfree(inet_rsk(req)->opt);
766 #ifdef CONFIG_SYN_COOKIES
767 static void syn_flood_warning(struct sk_buff *skb)
769 static unsigned long warntime;
771 if (time_after(jiffies, (warntime + HZ * 60))) {
774 "possible SYN flooding on port %d. Sending cookies.\n",
775 ntohs(tcp_hdr(skb)->dest));
781 * Save and compile IPv4 options into the request_sock if needed.
783 static struct ip_options *tcp_v4_save_options(struct sock *sk,
786 struct ip_options *opt = &(IPCB(skb)->opt);
787 struct ip_options *dopt = NULL;
789 if (opt && opt->optlen) {
790 int opt_size = optlength(opt);
791 dopt = kmalloc(opt_size, GFP_ATOMIC);
793 if (ip_options_echo(dopt, skb)) {
802 #ifdef CONFIG_TCP_MD5SIG
804 * RFC2385 MD5 checksumming requires a mapping of
805 * IP address->MD5 Key.
806 * We need to maintain these in the sk structure.
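/* Editor's illustration (not in the original file): an application installs
 * such a key with the TCP_MD5SIG socket option, which is handled by
 * tcp_v4_parse_md5_keys() below.  A minimal user-space sketch, where fd
 * and peer (a struct sockaddr_in for the remote host) are assumed:
 *
 *	struct tcp_md5sig md5;
 *	memset(&md5, 0, sizeof(md5));
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
 *	md5.tcpm_keylen = strlen("secret");
 *	memcpy(md5.tcpm_key, "secret", md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */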
809 /* Find the Key structure for an address. */
810 static struct tcp_md5sig_key *
811 tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
813 struct tcp_sock *tp = tcp_sk(sk);
816 if (!tp->md5sig_info || !tp->md5sig_info->entries4)
818 for (i = 0; i < tp->md5sig_info->entries4; i++) {
819 if (tp->md5sig_info->keys4[i].addr == addr)
820 return &tp->md5sig_info->keys4[i].base;
825 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
826 struct sock *addr_sk)
828 return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
831 EXPORT_SYMBOL(tcp_v4_md5_lookup);
833 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
834 struct request_sock *req)
836 return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
839 /* This can be called on a newly created socket, from other files */
840 int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
841 u8 *newkey, u8 newkeylen)
843 /* Add Key to the list */
844 struct tcp_md5sig_key *key;
845 struct tcp_sock *tp = tcp_sk(sk);
846 struct tcp4_md5sig_key *keys;
848 key = tcp_v4_md5_do_lookup(sk, addr);
850 /* Pre-existing entry - just update that one. */
853 key->keylen = newkeylen;
855 struct tcp_md5sig_info *md5sig;
857 if (!tp->md5sig_info) {
858 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
860 if (!tp->md5sig_info) {
864 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
866 if (tcp_alloc_md5sig_pool() == NULL) {
870 md5sig = tp->md5sig_info;
872 if (md5sig->alloced4 == md5sig->entries4) {
873 keys = kmalloc((sizeof(*keys) *
874 (md5sig->entries4 + 1)), GFP_ATOMIC);
877 tcp_free_md5sig_pool();
881 if (md5sig->entries4)
882 memcpy(keys, md5sig->keys4,
883 sizeof(*keys) * md5sig->entries4);
885 /* Free old key list, and reference new one */
886 kfree(md5sig->keys4);
887 md5sig->keys4 = keys;
891 md5sig->keys4[md5sig->entries4 - 1].addr = addr;
892 md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
893 md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
898 EXPORT_SYMBOL(tcp_v4_md5_do_add);
900 static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
901 u8 *newkey, u8 newkeylen)
903 return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
907 int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
909 struct tcp_sock *tp = tcp_sk(sk);
912 for (i = 0; i < tp->md5sig_info->entries4; i++) {
913 if (tp->md5sig_info->keys4[i].addr == addr) {
915 kfree(tp->md5sig_info->keys4[i].base.key);
916 tp->md5sig_info->entries4--;
918 if (tp->md5sig_info->entries4 == 0) {
919 kfree(tp->md5sig_info->keys4);
920 tp->md5sig_info->keys4 = NULL;
921 tp->md5sig_info->alloced4 = 0;
922 } else if (tp->md5sig_info->entries4 != i) {
923 /* Need to do some manipulation */
924 memmove(&tp->md5sig_info->keys4[i],
925 &tp->md5sig_info->keys4[i+1],
926 (tp->md5sig_info->entries4 - i) *
927 sizeof(struct tcp4_md5sig_key));
929 tcp_free_md5sig_pool();
936 EXPORT_SYMBOL(tcp_v4_md5_do_del);
938 static void tcp_v4_clear_md5_list(struct sock *sk)
940 struct tcp_sock *tp = tcp_sk(sk);
942 /* Free each key, then the set of keys,
943 * the crypto element, and then decrement our
944 * hold on the last-resort crypto.
946 if (tp->md5sig_info->entries4) {
948 for (i = 0; i < tp->md5sig_info->entries4; i++)
949 kfree(tp->md5sig_info->keys4[i].base.key);
950 tp->md5sig_info->entries4 = 0;
951 tcp_free_md5sig_pool();
953 if (tp->md5sig_info->keys4) {
954 kfree(tp->md5sig_info->keys4);
955 tp->md5sig_info->keys4 = NULL;
956 tp->md5sig_info->alloced4 = 0;
960 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
963 struct tcp_md5sig cmd;
964 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
967 if (optlen < sizeof(cmd))
970 if (copy_from_user(&cmd, optval, sizeof(cmd)))
973 if (sin->sin_family != AF_INET)
976 if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
977 if (!tcp_sk(sk)->md5sig_info)
979 return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
982 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
985 if (!tcp_sk(sk)->md5sig_info) {
986 struct tcp_sock *tp = tcp_sk(sk);
987 struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
993 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
996 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
999 return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
1000 newkey, cmd.tcpm_keylen);
1003 static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1004 __be32 saddr, __be32 daddr,
1005 struct tcphdr *th, int protocol,
1006 unsigned int tcplen)
1008 struct scatterlist sg[4];
1011 __sum16 old_checksum;
1012 struct tcp_md5sig_pool *hp;
1013 struct tcp4_pseudohdr *bp;
1014 struct hash_desc *desc;
1016 unsigned int nbytes = 0;
1019 * Okay, so RFC2385 is turned on for this connection,
1020 * so we need to generate the MD5 hash for the packet now.
1023 hp = tcp_get_md5sig_pool();
1025 goto clear_hash_noput;
1027 bp = &hp->md5_blk.ip4;
1028 desc = &hp->md5_desc;
1031 * 1. the TCP pseudo-header (in the order: source IP address,
1032 * destination IP address, zero-padded protocol number, and
1038 bp->protocol = protocol;
1039 bp->len = htons(tcplen);
1041 sg_init_table(sg, 4);
1043 sg_set_buf(&sg[block++], bp, sizeof(*bp));
1044 nbytes += sizeof(*bp);
1046 /* 2. the TCP header, excluding options, and assuming a
1049 old_checksum = th->check;
1051 sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
1052 nbytes += sizeof(struct tcphdr);
1054 /* 3. the TCP segment data (if any) */
1055 data_len = tcplen - (th->doff << 2);
1057 unsigned char *data = (unsigned char *)th + (th->doff << 2);
1058 sg_set_buf(&sg[block++], data, data_len);
1062 /* 4. an independently-specified key or password, known to both
1063 * TCPs and presumably connection-specific
1065 sg_set_buf(&sg[block++], key->key, key->keylen);
1066 nbytes += key->keylen;
1068 sg_mark_end(&sg[block - 1]);
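/* Editor's note (not in the original file): for a segment with no payload
 * the digest above therefore covers sizeof(struct tcp4_pseudohdr) (12
 * bytes) + sizeof(struct tcphdr) (20 bytes) + key->keylen bytes, with the
 * checksum field zeroed while hashing, as RFC 2385 requires.
 */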
1070 /* Now store the Hash into the packet */
1071 err = crypto_hash_init(desc);
1074 err = crypto_hash_update(desc, sg, nbytes);
1077 err = crypto_hash_final(desc, md5_hash);
1081 /* Reset header, and free up the crypto */
1082 tcp_put_md5sig_pool();
1083 th->check = old_checksum;
1088 tcp_put_md5sig_pool();
1090 memset(md5_hash, 0, 16);
1094 int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1096 struct dst_entry *dst,
1097 struct request_sock *req,
1098 struct tcphdr *th, int protocol,
1099 unsigned int tcplen)
1101 __be32 saddr, daddr;
1104 saddr = inet_sk(sk)->saddr;
1105 daddr = inet_sk(sk)->daddr;
1107 struct rtable *rt = (struct rtable *)dst;
1112 return tcp_v4_do_calc_md5_hash(md5_hash, key,
1114 th, protocol, tcplen);
1117 EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
1119 static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
1122 * This gets called for each TCP segment that arrives
1123 * so we want to be efficient.
1124 * We have 3 drop cases:
1125 * o No MD5 hash and one expected.
1126 * o MD5 hash and we're not expecting one.
1127 * o MD5 hash and it's wrong.
1129 __u8 *hash_location = NULL;
1130 struct tcp_md5sig_key *hash_expected;
1131 const struct iphdr *iph = ip_hdr(skb);
1132 struct tcphdr *th = tcp_hdr(skb);
1133 int length = (th->doff << 2) - sizeof(struct tcphdr);
1136 unsigned char newhash[16];
1138 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
1141 * If the TCP option length is less than the TCP_MD5SIG
1142 * option length, then we can shortcut
1144 if (length < TCPOLEN_MD5SIG) {
1151 /* Okay, we can't shortcut - we have to grub through the options */
1152 ptr = (unsigned char *)(th + 1);
1153 while (length > 0) {
1154 int opcode = *ptr++;
1167 if (opsize > length)
1170 if (opcode == TCPOPT_MD5SIG) {
1171 hash_location = ptr;
1179 /* We've parsed the options - do we have a hash? */
1180 if (!hash_expected && !hash_location)
1183 if (hash_expected && !hash_location) {
1184 LIMIT_NETDEBUG(KERN_INFO "MD5 Hash expected but NOT found "
1185 "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
1186 NIPQUAD(iph->saddr), ntohs(th->source),
1187 NIPQUAD(iph->daddr), ntohs(th->dest));
1191 if (!hash_expected && hash_location) {
1192 LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found "
1193 "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
1194 NIPQUAD(iph->saddr), ntohs(th->source),
1195 NIPQUAD(iph->daddr), ntohs(th->dest));
1199 /* Okay, so we have both hash_expected and hash_location -
1200 * so we need to calculate the MD5 hash.
1202 genhash = tcp_v4_do_calc_md5_hash(newhash,
1204 iph->saddr, iph->daddr,
1205 th, sk->sk_protocol,
1208 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1209 if (net_ratelimit()) {
1210 printk(KERN_INFO "MD5 Hash failed for "
1211 "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
1212 NIPQUAD(iph->saddr), ntohs(th->source),
1213 NIPQUAD(iph->daddr), ntohs(th->dest),
1214 genhash ? " tcp_v4_calc_md5_hash failed" : "");
1223 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1225 .obj_size = sizeof(struct tcp_request_sock),
1226 .rtx_syn_ack = tcp_v4_send_synack,
1227 .send_ack = tcp_v4_reqsk_send_ack,
1228 .destructor = tcp_v4_reqsk_destructor,
1229 .send_reset = tcp_v4_send_reset,
1232 #ifdef CONFIG_TCP_MD5SIG
1233 static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1234 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1238 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1239 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1240 .twsk_unique = tcp_twsk_unique,
1241 .twsk_destructor= tcp_twsk_destructor,
1244 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1246 struct inet_request_sock *ireq;
1247 struct tcp_options_received tmp_opt;
1248 struct request_sock *req;
1249 __be32 saddr = ip_hdr(skb)->saddr;
1250 __be32 daddr = ip_hdr(skb)->daddr;
1251 __u32 isn = TCP_SKB_CB(skb)->when;
1252 struct dst_entry *dst = NULL;
1253 #ifdef CONFIG_SYN_COOKIES
1254 int want_cookie = 0;
1256 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1259 /* Never answer SYNs sent to broadcast or multicast */
1260 if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1263 /* TW buckets are converted to open requests without
1264 * limitation; they conserve resources and the peer is
1265 * evidently a real one.
1267 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1268 #ifdef CONFIG_SYN_COOKIES
1269 if (sysctl_tcp_syncookies) {
1276 /* Accept backlog is full. If we have already queued enough
1277 * warm entries in the SYN queue, drop the request. It is better than
1278 * clogging the SYN queue with openreqs with exponentially increasing
1281 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1284 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1288 #ifdef CONFIG_TCP_MD5SIG
1289 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1292 tcp_clear_options(&tmp_opt);
1293 tmp_opt.mss_clamp = 536;
1294 tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
1296 tcp_parse_options(skb, &tmp_opt, 0);
1298 if (want_cookie && !tmp_opt.saw_tstamp)
1299 tcp_clear_options(&tmp_opt);
1301 if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
1302 /* Some OSes (unknown ones, but I see them on a web server that
1303 * contains information interesting only to Windows
1304 * users) do not send their timestamp in the SYN. It is an easy case:
1305 * we simply do not advertise TS support.
1307 tmp_opt.saw_tstamp = 0;
1308 tmp_opt.tstamp_ok = 0;
1310 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1312 tcp_openreq_init(req, &tmp_opt, skb);
1314 if (security_inet_conn_request(sk, skb, req))
1317 ireq = inet_rsk(req);
1318 ireq->loc_addr = daddr;
1319 ireq->rmt_addr = saddr;
1320 ireq->opt = tcp_v4_save_options(sk, skb);
1322 TCP_ECN_create_request(req, tcp_hdr(skb));
1325 #ifdef CONFIG_SYN_COOKIES
1326 syn_flood_warning(skb);
1327 req->cookie_ts = tmp_opt.tstamp_ok;
1329 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1331 struct inet_peer *peer = NULL;
1333 /* VJ's idea. We save the last timestamp seen
1334 * from the destination in the peer table when entering
1335 * state TIME-WAIT, and check against it before
1336 * accepting a new connection request.
1338 * If "isn" is not zero, this request hit an alive
1339 * timewait bucket, so all the necessary checks
1340 * are made in the function processing the timewait state.
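/* Editor's note (not in the original file): with tw_recycle, the SYN is
 * rejected as a PAWS failure when the peer's cached timestamp was
 * recorded within the last TCP_PAWS_MSL seconds and is newer than the
 * timestamp offered in this SYN, since that strongly suggests an old
 * duplicate rather than a fresh connection attempt.
 */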
1342 if (tmp_opt.saw_tstamp &&
1343 tcp_death_row.sysctl_tw_recycle &&
1344 (dst = inet_csk_route_req(sk, req)) != NULL &&
1345 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1346 peer->v4daddr == saddr) {
1347 if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1348 (s32)(peer->tcp_ts - req->ts_recent) >
1350 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
1351 goto drop_and_release;
1354 /* Kill the following clause if you dislike this approach. */
1355 else if (!sysctl_tcp_syncookies &&
1356 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1357 (sysctl_max_syn_backlog >> 2)) &&
1358 (!peer || !peer->tcp_ts_stamp) &&
1359 (!dst || !dst_metric(dst, RTAX_RTT))) {
1360 /* Without syncookies, the last quarter of
1361 * the backlog is filled with destinations
1362 * proven to be alive.
1363 * It means that we continue to communicate
1364 * with destinations already remembered
1365 * at the moment of the synflood.
1367 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
1368 "request from " NIPQUAD_FMT "/%u\n",
1370 ntohs(tcp_hdr(skb)->source));
1371 goto drop_and_release;
1374 isn = tcp_v4_init_sequence(skb);
1376 tcp_rsk(req)->snt_isn = isn;
1378 if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
1381 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1394 * The three way handshake has completed - we got a valid synack -
1395 * now create the new socket.
1397 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1398 struct request_sock *req,
1399 struct dst_entry *dst)
1401 struct inet_request_sock *ireq;
1402 struct inet_sock *newinet;
1403 struct tcp_sock *newtp;
1405 #ifdef CONFIG_TCP_MD5SIG
1406 struct tcp_md5sig_key *key;
1409 if (sk_acceptq_is_full(sk))
1412 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
1415 newsk = tcp_create_openreq_child(sk, req, skb);
1419 newsk->sk_gso_type = SKB_GSO_TCPV4;
1420 sk_setup_caps(newsk, dst);
1422 newtp = tcp_sk(newsk);
1423 newinet = inet_sk(newsk);
1424 ireq = inet_rsk(req);
1425 newinet->daddr = ireq->rmt_addr;
1426 newinet->rcv_saddr = ireq->loc_addr;
1427 newinet->saddr = ireq->loc_addr;
1428 newinet->opt = ireq->opt;
1430 newinet->mc_index = inet_iif(skb);
1431 newinet->mc_ttl = ip_hdr(skb)->ttl;
1432 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1434 inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
1435 newinet->id = newtp->write_seq ^ jiffies;
1437 tcp_mtup_init(newsk);
1438 tcp_sync_mss(newsk, dst_mtu(dst));
1439 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1440 tcp_initialize_rcv_mss(newsk);
1442 #ifdef CONFIG_TCP_MD5SIG
1443 /* Copy over the MD5 key from the original socket */
1444 if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
1446 * We're using one, so create a matching key
1447 * on the newsk structure. If we fail to get
1448 * memory, then we end up not copying the key
1451 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1453 tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
1454 newkey, key->keylen);
1458 __inet_hash_nolisten(newsk);
1459 __inet_inherit_port(sk, newsk);
1464 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1466 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1471 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1473 struct tcphdr *th = tcp_hdr(skb);
1474 const struct iphdr *iph = ip_hdr(skb);
1476 struct request_sock **prev;
1477 /* Find possible connection requests. */
1478 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1479 iph->saddr, iph->daddr);
1481 return tcp_check_req(sk, skb, req, prev);
1483 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1484 th->source, iph->daddr, th->dest, inet_iif(skb));
1487 if (nsk->sk_state != TCP_TIME_WAIT) {
1491 inet_twsk_put(inet_twsk(nsk));
1495 #ifdef CONFIG_SYN_COOKIES
1496 if (!th->rst && !th->syn && th->ack)
1497 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1502 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1504 const struct iphdr *iph = ip_hdr(skb);
1506 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1507 if (!tcp_v4_check(skb->len, iph->saddr,
1508 iph->daddr, skb->csum)) {
1509 skb->ip_summed = CHECKSUM_UNNECESSARY;
1514 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1515 skb->len, IPPROTO_TCP, 0);
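/* Editor's note (not in the original file): short segments are verified
 * immediately below rather than deferred, presumably because there is
 * little to gain from folding the checksum into a later copy for so few
 * bytes.
 */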
1517 if (skb->len <= 76) {
1518 return __skb_checksum_complete(skb);
1524 /* The socket must have its spinlock held when we get
1527 * We have a potential double-lock case here, so even when
1528 * doing backlog processing we use the BH locking scheme.
1529 * This is because we cannot sleep with the original spinlock
1532 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1535 #ifdef CONFIG_TCP_MD5SIG
1537 * We really want to reject the packet as early as possible
1539 * o We're expecting an MD5'd packet and there is no MD5 TCP option
1540 * o There is an MD5 option and we're not expecting one
1542 if (tcp_v4_inbound_md5_hash(sk, skb))
1546 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1547 TCP_CHECK_TIMER(sk);
1548 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1552 TCP_CHECK_TIMER(sk);
1556 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1559 if (sk->sk_state == TCP_LISTEN) {
1560 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1565 if (tcp_child_process(sk, nsk, skb)) {
1573 TCP_CHECK_TIMER(sk);
1574 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1578 TCP_CHECK_TIMER(sk);
1582 tcp_v4_send_reset(rsk, skb);
1585 /* Be careful here. If this function gets more complicated and
1586 * gcc suffers from register pressure on the x86, sk (in %ebx)
1587 * might be destroyed here. This current version compiles correctly,
1588 * but you have been warned.
1593 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1601 int tcp_v4_rcv(struct sk_buff *skb)
1603 const struct iphdr *iph;
1608 if (skb->pkt_type != PACKET_HOST)
1611 /* Count it even if it's bad */
1612 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1614 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1619 if (th->doff < sizeof(struct tcphdr) / 4)
1621 if (!pskb_may_pull(skb, th->doff * 4))
1624 /* An explanation is required here, I think.
1625 * Packet length and doff are validated by header prediction,
1626 * provided the case of th->doff==0 is eliminated.
1627 * So, we defer the checks. */
1628 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1633 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1634 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1635 skb->len - th->doff * 4);
1636 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1637 TCP_SKB_CB(skb)->when = 0;
1638 TCP_SKB_CB(skb)->flags = iph->tos;
1639 TCP_SKB_CB(skb)->sacked = 0;
1641 sk = __inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->saddr,
1642 th->source, iph->daddr, th->dest, inet_iif(skb));
1647 if (sk->sk_state == TCP_TIME_WAIT)
1650 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1651 goto discard_and_relse;
1654 if (sk_filter(sk, skb))
1655 goto discard_and_relse;
1659 bh_lock_sock_nested(sk);
1661 if (!sock_owned_by_user(sk)) {
1662 #ifdef CONFIG_NET_DMA
1663 struct tcp_sock *tp = tcp_sk(sk);
1664 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1665 tp->ucopy.dma_chan = get_softnet_dma();
1666 if (tp->ucopy.dma_chan)
1667 ret = tcp_v4_do_rcv(sk, skb);
1671 if (!tcp_prequeue(sk, skb))
1672 ret = tcp_v4_do_rcv(sk, skb);
1675 sk_add_backlog(sk, skb);
1683 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1686 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1688 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1690 tcp_v4_send_reset(NULL, skb);
1694 /* Discard frame. */
1703 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1704 inet_twsk_put(inet_twsk(sk));
1708 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1709 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1710 inet_twsk_put(inet_twsk(sk));
1713 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1715 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1717 iph->daddr, th->dest,
1720 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1721 inet_twsk_put(inet_twsk(sk));
1725 /* Fall through to ACK */
1728 tcp_v4_timewait_ack(sk, skb);
1732 case TCP_TW_SUCCESS:;
1737 /* VJ's idea. Save the last timestamp seen from this destination
1738 * and hold it at least for the normal timewait interval, to use for duplicate
1739 * segment detection in subsequent connections before they enter the synchronized
1743 int tcp_v4_remember_stamp(struct sock *sk)
1745 struct inet_sock *inet = inet_sk(sk);
1746 struct tcp_sock *tp = tcp_sk(sk);
1747 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1748 struct inet_peer *peer = NULL;
1751 if (!rt || rt->rt_dst != inet->daddr) {
1752 peer = inet_getpeer(inet->daddr, 1);
1756 rt_bind_peer(rt, 1);
1761 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
1762 (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
1763 peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
1764 peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
1765 peer->tcp_ts = tp->rx_opt.ts_recent;
1775 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1777 struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
1780 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1782 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
1783 (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
1784 peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
1785 peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
1786 peer->tcp_ts = tcptw->tw_ts_recent;
1795 struct inet_connection_sock_af_ops ipv4_specific = {
1796 .queue_xmit = ip_queue_xmit,
1797 .send_check = tcp_v4_send_check,
1798 .rebuild_header = inet_sk_rebuild_header,
1799 .conn_request = tcp_v4_conn_request,
1800 .syn_recv_sock = tcp_v4_syn_recv_sock,
1801 .remember_stamp = tcp_v4_remember_stamp,
1802 .net_header_len = sizeof(struct iphdr),
1803 .setsockopt = ip_setsockopt,
1804 .getsockopt = ip_getsockopt,
1805 .addr2sockaddr = inet_csk_addr2sockaddr,
1806 .sockaddr_len = sizeof(struct sockaddr_in),
1807 .bind_conflict = inet_csk_bind_conflict,
1808 #ifdef CONFIG_COMPAT
1809 .compat_setsockopt = compat_ip_setsockopt,
1810 .compat_getsockopt = compat_ip_getsockopt,
1814 #ifdef CONFIG_TCP_MD5SIG
1815 static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1816 .md5_lookup = tcp_v4_md5_lookup,
1817 .calc_md5_hash = tcp_v4_calc_md5_hash,
1818 .md5_add = tcp_v4_md5_add_func,
1819 .md5_parse = tcp_v4_parse_md5_keys,
1823 /* NOTE: A lot of things are set to zero explicitly by the call to
1824 * sk_alloc(), so they need not be done here.
1826 static int tcp_v4_init_sock(struct sock *sk)
1828 struct inet_connection_sock *icsk = inet_csk(sk);
1829 struct tcp_sock *tp = tcp_sk(sk);
1831 skb_queue_head_init(&tp->out_of_order_queue);
1832 tcp_init_xmit_timers(sk);
1833 tcp_prequeue_init(tp);
1835 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1836 tp->mdev = TCP_TIMEOUT_INIT;
1838 /* So many TCP implementations out there (incorrectly) count the
1839 * initial SYN frame in their delayed-ACK and congestion control
1840 * algorithms that we must have the following bandaid to talk
1841 * efficiently to them. -DaveM
1845 /* See draft-stevens-tcpca-spec-01 for discussion of the
1846 * initialization of these values.
1848 tp->snd_ssthresh = 0x7fffffff; /* Infinity */
1849 tp->snd_cwnd_clamp = ~0;
1850 tp->mss_cache = 536;
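/* Editor's note (not in the original file): 536 is the classic default
 * IPv4 MSS (the 576-byte minimum reassembly datagram minus 40 bytes of
 * IP + TCP header), used until the real MSS is learned from the peer and
 * from path MTU discovery.
 */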
1852 tp->reordering = sysctl_tcp_reordering;
1853 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1855 sk->sk_state = TCP_CLOSE;
1857 sk->sk_write_space = sk_stream_write_space;
1858 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1860 icsk->icsk_af_ops = &ipv4_specific;
1861 icsk->icsk_sync_mss = tcp_sync_mss;
1862 #ifdef CONFIG_TCP_MD5SIG
1863 tp->af_specific = &tcp_sock_ipv4_specific;
1866 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1867 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1869 atomic_inc(&tcp_sockets_allocated);
1874 int tcp_v4_destroy_sock(struct sock *sk)
1876 struct tcp_sock *tp = tcp_sk(sk);
1878 tcp_clear_xmit_timers(sk);
1880 tcp_cleanup_congestion_control(sk);
1882 /* Clean up the write buffer. */
1883 tcp_write_queue_purge(sk);
1885 /* Cleans up our, hopefully empty, out_of_order_queue. */
1886 __skb_queue_purge(&tp->out_of_order_queue);
1888 #ifdef CONFIG_TCP_MD5SIG
1889 /* Clean up the MD5 key list, if any */
1890 if (tp->md5sig_info) {
1891 tcp_v4_clear_md5_list(sk);
1892 kfree(tp->md5sig_info);
1893 tp->md5sig_info = NULL;
1897 #ifdef CONFIG_NET_DMA
1898 /* Cleans up our sk_async_wait_queue */
1899 __skb_queue_purge(&sk->sk_async_wait_queue);
1902 /* Clean the prequeue; it must really be empty */
1903 __skb_queue_purge(&tp->ucopy.prequeue);
1905 /* Clean up a referenced TCP bind bucket. */
1906 if (inet_csk(sk)->icsk_bind_hash)
1910 * If a sendmsg cached page exists, toss it.
1912 if (sk->sk_sndmsg_page) {
1913 __free_page(sk->sk_sndmsg_page);
1914 sk->sk_sndmsg_page = NULL;
1917 atomic_dec(&tcp_sockets_allocated);
1922 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1924 #ifdef CONFIG_PROC_FS
1925 /* Proc filesystem TCP sock list dumping. */
1927 static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
1929 return hlist_empty(head) ? NULL :
1930 list_entry(head->first, struct inet_timewait_sock, tw_node);
1933 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1935 return tw->tw_node.next ?
1936 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1939 static void *listening_get_next(struct seq_file *seq, void *cur)
1941 struct inet_connection_sock *icsk;
1942 struct hlist_node *node;
1943 struct sock *sk = cur;
1944 struct tcp_iter_state* st = seq->private;
1945 struct net *net = seq_file_net(seq);
1949 sk = sk_head(&tcp_hashinfo.listening_hash[0]);
1955 if (st->state == TCP_SEQ_STATE_OPENREQ) {
1956 struct request_sock *req = cur;
1958 icsk = inet_csk(st->syn_wait_sk);
1962 if (req->rsk_ops->family == st->family &&
1963 net_eq(sock_net(req->sk), net)) {
1969 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1972 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1974 sk = sk_next(st->syn_wait_sk);
1975 st->state = TCP_SEQ_STATE_LISTENING;
1976 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1978 icsk = inet_csk(sk);
1979 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1980 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1982 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1986 sk_for_each_from(sk, node) {
1987 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
1991 icsk = inet_csk(sk);
1992 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1993 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1995 st->uid = sock_i_uid(sk);
1996 st->syn_wait_sk = sk;
1997 st->state = TCP_SEQ_STATE_OPENREQ;
2001 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2003 if (++st->bucket < INET_LHTABLE_SIZE) {
2004 sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
2012 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2014 void *rc = listening_get_next(seq, NULL);
2016 while (rc && *pos) {
2017 rc = listening_get_next(seq, rc);
2023 static void *established_get_first(struct seq_file *seq)
2025 struct tcp_iter_state* st = seq->private;
2026 struct net *net = seq_file_net(seq);
2029 for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
2031 struct hlist_node *node;
2032 struct inet_timewait_sock *tw;
2033 rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2036 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2037 if (sk->sk_family != st->family ||
2038 !net_eq(sock_net(sk), net)) {
2044 st->state = TCP_SEQ_STATE_TIME_WAIT;
2045 inet_twsk_for_each(tw, node,
2046 &tcp_hashinfo.ehash[st->bucket].twchain) {
2047 if (tw->tw_family != st->family ||
2048 !net_eq(twsk_net(tw), net)) {
2054 read_unlock_bh(lock);
2055 st->state = TCP_SEQ_STATE_ESTABLISHED;
2061 static void *established_get_next(struct seq_file *seq, void *cur)
2063 struct sock *sk = cur;
2064 struct inet_timewait_sock *tw;
2065 struct hlist_node *node;
2066 struct tcp_iter_state* st = seq->private;
2067 struct net *net = seq_file_net(seq);
2071 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2075 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2082 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2083 st->state = TCP_SEQ_STATE_ESTABLISHED;
2085 if (++st->bucket < tcp_hashinfo.ehash_size) {
2086 read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2087 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
2095 sk_for_each_from(sk, node) {
2096 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2100 st->state = TCP_SEQ_STATE_TIME_WAIT;
2101 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2109 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2111 void *rc = established_get_first(seq);
2114 rc = established_get_next(seq, rc);
2120 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2123 struct tcp_iter_state* st = seq->private;
2125 inet_listen_lock(&tcp_hashinfo);
2126 st->state = TCP_SEQ_STATE_LISTENING;
2127 rc = listening_get_idx(seq, &pos);
2130 inet_listen_unlock(&tcp_hashinfo);
2131 st->state = TCP_SEQ_STATE_ESTABLISHED;
2132 rc = established_get_idx(seq, pos);
2138 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2140 struct tcp_iter_state* st = seq->private;
2141 st->state = TCP_SEQ_STATE_LISTENING;
2143 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2146 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2149 struct tcp_iter_state* st;
2151 if (v == SEQ_START_TOKEN) {
2152 rc = tcp_get_idx(seq, 0);
2157 switch (st->state) {
2158 case TCP_SEQ_STATE_OPENREQ:
2159 case TCP_SEQ_STATE_LISTENING:
2160 rc = listening_get_next(seq, v);
2162 inet_listen_unlock(&tcp_hashinfo);
2163 st->state = TCP_SEQ_STATE_ESTABLISHED;
2164 rc = established_get_first(seq);
2167 case TCP_SEQ_STATE_ESTABLISHED:
2168 case TCP_SEQ_STATE_TIME_WAIT:
2169 rc = established_get_next(seq, v);
2177 static void tcp_seq_stop(struct seq_file *seq, void *v)
2179 struct tcp_iter_state* st = seq->private;
2181 switch (st->state) {
2182 case TCP_SEQ_STATE_OPENREQ:
2184 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2185 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2187 case TCP_SEQ_STATE_LISTENING:
2188 if (v != SEQ_START_TOKEN)
2189 inet_listen_unlock(&tcp_hashinfo);
2191 case TCP_SEQ_STATE_TIME_WAIT:
2192 case TCP_SEQ_STATE_ESTABLISHED:
2194 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2199 static int tcp_seq_open(struct inode *inode, struct file *file)
2201 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2202 struct tcp_iter_state *s;
2205 err = seq_open_net(inode, file, &afinfo->seq_ops,
2206 sizeof(struct tcp_iter_state));
2210 s = ((struct seq_file *)file->private_data)->private;
2211 s->family = afinfo->family;
2215 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2218 struct proc_dir_entry *p;
2220 afinfo->seq_fops.open = tcp_seq_open;
2221 afinfo->seq_fops.read = seq_read;
2222 afinfo->seq_fops.llseek = seq_lseek;
2223 afinfo->seq_fops.release = seq_release_net;
2225 afinfo->seq_ops.start = tcp_seq_start;
2226 afinfo->seq_ops.next = tcp_seq_next;
2227 afinfo->seq_ops.stop = tcp_seq_stop;
2229 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2230 &afinfo->seq_fops, afinfo);
2236 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2238 proc_net_remove(net, afinfo->name);
2241 static void get_openreq4(struct sock *sk, struct request_sock *req,
2242 struct seq_file *f, int i, int uid, int *len)
2244 const struct inet_request_sock *ireq = inet_rsk(req);
2245 int ttd = req->expires - jiffies;
2247 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2248 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
2251 ntohs(inet_sk(sk)->sport),
2253 ntohs(ireq->rmt_port),
2255 0, 0, /* could print option size, but that is af dependent. */
2256 1, /* timers active (only the expire timer) */
2257 jiffies_to_clock_t(ttd),
2260 0, /* non standard timer */
2261 0, /* open_requests have no inode */
2262 atomic_read(&sk->sk_refcnt),
2267 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2270 unsigned long timer_expires;
2271 struct tcp_sock *tp = tcp_sk(sk);
2272 const struct inet_connection_sock *icsk = inet_csk(sk);
2273 struct inet_sock *inet = inet_sk(sk);
2274 __be32 dest = inet->daddr;
2275 __be32 src = inet->rcv_saddr;
2276 __u16 destp = ntohs(inet->dport);
2277 __u16 srcp = ntohs(inet->sport);
2279 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2281 timer_expires = icsk->icsk_timeout;
2282 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2284 timer_expires = icsk->icsk_timeout;
2285 } else if (timer_pending(&sk->sk_timer)) {
2287 timer_expires = sk->sk_timer.expires;
2290 timer_expires = jiffies;
2293 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2294 "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
2295 i, src, srcp, dest, destp, sk->sk_state,
2296 tp->write_seq - tp->snd_una,
2297 sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
2298 (tp->rcv_nxt - tp->copied_seq),
2300 jiffies_to_clock_t(timer_expires - jiffies),
2301 icsk->icsk_retransmits,
2303 icsk->icsk_probes_out,
2305 atomic_read(&sk->sk_refcnt), sk,
2306 jiffies_to_clock_t(icsk->icsk_rto),
2307 jiffies_to_clock_t(icsk->icsk_ack.ato),
2308 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2310 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
2314 static void get_timewait4_sock(struct inet_timewait_sock *tw,
2315 struct seq_file *f, int i, int *len)
2319 int ttd = tw->tw_ttd - jiffies;
2324 dest = tw->tw_daddr;
2325 src = tw->tw_rcv_saddr;
2326 destp = ntohs(tw->tw_dport);
2327 srcp = ntohs(tw->tw_sport);
2329 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2330 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
2331 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2332 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2333 atomic_read(&tw->tw_refcnt), tw, len);
2338 static int tcp4_seq_show(struct seq_file *seq, void *v)
2340 struct tcp_iter_state* st;
2343 if (v == SEQ_START_TOKEN) {
2344 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2345 " sl local_address rem_address st tx_queue "
2346 "rx_queue tr tm->when retrnsmt uid timeout "
2352 switch (st->state) {
2353 case TCP_SEQ_STATE_LISTENING:
2354 case TCP_SEQ_STATE_ESTABLISHED:
2355 get_tcp4_sock(v, seq, st->num, &len);
2357 case TCP_SEQ_STATE_OPENREQ:
2358 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2360 case TCP_SEQ_STATE_TIME_WAIT:
2361 get_timewait4_sock(v, seq, st->num, &len);
2364 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
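/* Editor's note (not in the original file): addresses and ports in the
 * lines above are printed as raw hex (%08X/%04X), so on a little-endian
 * machine 127.0.0.1:22 appears as "0100007F:0016" in /proc/net/tcp.
 */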
2369 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2373 .owner = THIS_MODULE,
2376 .show = tcp4_seq_show,
2380 static int tcp4_proc_init_net(struct net *net)
2382 return tcp_proc_register(net, &tcp4_seq_afinfo);
2385 static void tcp4_proc_exit_net(struct net *net)
2387 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2390 static struct pernet_operations tcp4_net_ops = {
2391 .init = tcp4_proc_init_net,
2392 .exit = tcp4_proc_exit_net,
2395 int __init tcp4_proc_init(void)
2397 return register_pernet_subsys(&tcp4_net_ops);
2400 void tcp4_proc_exit(void)
2402 unregister_pernet_subsys(&tcp4_net_ops);
2404 #endif /* CONFIG_PROC_FS */
2406 struct proto tcp_prot = {
2408 .owner = THIS_MODULE,
2410 .connect = tcp_v4_connect,
2411 .disconnect = tcp_disconnect,
2412 .accept = inet_csk_accept,
2414 .init = tcp_v4_init_sock,
2415 .destroy = tcp_v4_destroy_sock,
2416 .shutdown = tcp_shutdown,
2417 .setsockopt = tcp_setsockopt,
2418 .getsockopt = tcp_getsockopt,
2419 .recvmsg = tcp_recvmsg,
2420 .backlog_rcv = tcp_v4_do_rcv,
2422 .unhash = inet_unhash,
2423 .get_port = inet_csk_get_port,
2424 .enter_memory_pressure = tcp_enter_memory_pressure,
2425 .sockets_allocated = &tcp_sockets_allocated,
2426 .orphan_count = &tcp_orphan_count,
2427 .memory_allocated = &tcp_memory_allocated,
2428 .memory_pressure = &tcp_memory_pressure,
2429 .sysctl_mem = sysctl_tcp_mem,
2430 .sysctl_wmem = sysctl_tcp_wmem,
2431 .sysctl_rmem = sysctl_tcp_rmem,
2432 .max_header = MAX_TCP_HEADER,
2433 .obj_size = sizeof(struct tcp_sock),
2434 .twsk_prot = &tcp_timewait_sock_ops,
2435 .rsk_prot = &tcp_request_sock_ops,
2436 .h.hashinfo = &tcp_hashinfo,
2437 #ifdef CONFIG_COMPAT
2438 .compat_setsockopt = compat_tcp_setsockopt,
2439 .compat_getsockopt = compat_tcp_getsockopt,
2444 static int __net_init tcp_sk_init(struct net *net)
2446 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2447 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2450 static void __net_exit tcp_sk_exit(struct net *net)
2452 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2455 static struct pernet_operations __net_initdata tcp_sk_ops = {
2456 .init = tcp_sk_init,
2457 .exit = tcp_sk_exit,
2460 void __init tcp_v4_init(void)
2462 if (register_pernet_device(&tcp_sk_ops))
2463 panic("Failed to create the TCP control socket.\n");
2466 EXPORT_SYMBOL(ipv4_specific);
2467 EXPORT_SYMBOL(tcp_hashinfo);
2468 EXPORT_SYMBOL(tcp_prot);
2469 EXPORT_SYMBOL(tcp_v4_conn_request);
2470 EXPORT_SYMBOL(tcp_v4_connect);
2471 EXPORT_SYMBOL(tcp_v4_do_rcv);
2472 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2473 EXPORT_SYMBOL(tcp_v4_send_check);
2474 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2476 #ifdef CONFIG_PROC_FS
2477 EXPORT_SYMBOL(tcp_proc_register);
2478 EXPORT_SYMBOL(tcp_proc_unregister);
2480 EXPORT_SYMBOL(sysctl_tcp_low_latency);