/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
static void	tcp_v6_send_reset(struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void	tcp_v6_send_check(struct sock *sk, int len,
				  struct sk_buff *skb);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		/* IPv4-mapped sockets hash into the IPv4 table. */
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(&tcp_hashinfo, sk);
		local_bh_enable();
	}
}
static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   unsigned long base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
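
/*
 * Editor's sketch (an addition, not part of the original file): how a
 * received segment would be verified with the helper above, mirroring
 * what tcp_v6_checksum_init() does further down.  A zero result means
 * the ones-complement sum over the IPv6 pseudo-header plus the whole
 * TCP segment checks out:
 *
 *	if (tcp_v6_check(th, skb->len, &skb->nh.ipv6h->saddr,
 *			 &skb->nh.ipv6h->daddr,
 *			 csum_partial((char *)th, skb->len, 0)) != 0)
 *		the segment is corrupted and must be dropped;
 */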
static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IPV6)) {
		return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
						    skb->nh.ipv6h->saddr.s6_addr32,
						    skb->h.th->dest,
						    skb->h.th->source);
	} else {
		return secure_tcp_sequence_number(skb->nh.iph->daddr,
						  skb->nh.iph->saddr,
						  skb->h.th->dest,
						  skb->h.th->source);
	}
}
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto failure;

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	ip6_dst_store(sk, dst, NULL);
	sk->sk_route_caps = dst->dev->features &
			    ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
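
/*
 * Editor's sketch (an addition, with names taken from this file): what
 * the IPV6_ADDR_MAPPED branch above produces.  An IPv4 peer a.b.c.d is
 * represented as the IPv4-mapped address ::ffff:a.b.c.d, so the low 32
 * bits (s6_addr32[3]) carry the IPv4 address verbatim and can be handed
 * to tcp_v4_connect() unchanged:
 *
 *	struct in6_addr mapped;
 *
 *	ipv6_addr_set(&mapped, 0, 0, htonl(0x0000FFFF), inet->daddr);
 *	now mapped.s6_addr32[3] == inet->daddr, i.e. the raw IPv4 address
 */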
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		int type, int code, int offset, __u32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;

	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
			  th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
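
/*
 * Editor's worked example (an addition, not from the original file):
 * for ICMPV6_PKT_TOOBIG the usable MSS follows from the reported path
 * MTU exactly as mss_clamp is derived in tcp_v6_connect() above.  For
 * the IPv6 minimum MTU:
 *
 *	1280 (IPV6_MIN_MTU) - 40 (struct ipv6hdr) - 20 (struct tcphdr)
 *	    = 1220 bytes of TCP payload per segment
 *
 * tcp_sync_mss() performs the equivalent computation from dst_mtu(dst),
 * additionally subtracting icsk_ext_hdr_len for extension headers.
 */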
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;

	if (dst == NULL) {
		opt = np->opt;
		if (opt == NULL &&
		    np->rxopt.bits.osrcrt == 2 &&
		    treq->pktopts) {
			struct sk_buff *pktopts = treq->pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);
			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(pktopts->nh.raw + rxopt->srcrt));
		}

		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);
		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		if (err == NET_XMIT_CN)
			err = 0;
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)
		kfree_skb(inet6_rsk(req)->pktopts);
}

static struct request_sock_ops tcp6_request_sock_ops = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
};
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff<<2,
							 skb->csum));
	}
}
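
/*
 * Editor's note (an addition): the CHECKSUM_HW branch above is the
 * checksum-offload path.  Software seeds th->check with the inverted
 * pseudo-header sum and records where the device must write the final
 * checksum, as an offset from the transport header:
 *
 *	skb->csum = offsetof(struct tcphdr, check);	byte offset 16
 *
 * The NIC then sums the TCP header and payload and folds the result in.
 * The else branch is the software fallback: the header is summed here
 * and the payload sum was already accumulated in skb->csum.
 */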
static void tcp_v6_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

	t1 = (struct tcphdr *) skb_push(buff, sizeof(struct tcphdr));

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1)/4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff<<2));
	}

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(NULL, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}
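
/*
 * Editor's note (an addition): the sequence numbers above follow the
 * RFC 793 reset rules.  If the offending segment carried an ACK, the
 * RST is sent with SEQ = SEG.ACK and no ACK bit; otherwise the RST
 * acknowledges exactly the sequence space the segment occupied:
 *
 *	ACK = SEG.SEQ + SEG.LEN  (+1 for SYN, +1 for FIN)
 *
 * which is what the htonl(ntohl(th->seq) + th->syn + th->fin +
 * skb->len - (th->doff<<2)) expression computes.
 */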
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(struct tcphdr);

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len/4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	if (ts) {
		u32 *ptr = (u32*)(t1 + 1);
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tcp_time_stamp);
		*ptr = htonl(ts);
	}

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(NULL, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}
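
/*
 * Editor's sketch (an addition): wire layout of the 12 bytes appended
 * when ts != 0 above.  TCP options must pad the header to a multiple of
 * 4 bytes, hence the two leading NOPs around the 10-byte timestamp
 * option (TCPOLEN_TIMESTAMP == 10, TCPOLEN_TSTAMP_ALIGNED == 12):
 *
 *	u32 opts[3] = {
 *		htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 *		      (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP),
 *		htonl(tcp_time_stamp),	TSval: our current timestamp
 *		htonl(ts),		TSecr: peer timestamp being echoed
 *	};
 *
 * and t1->doff grows from 5 to 8 32-bit words (tot_len/4).
 */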
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
}


static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = skb->h.th;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &skb->nh.ipv6h->saddr,
				   &skb->nh.ipv6h->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
					 th->source, &skb->nh.ipv6h->daddr,
					 ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

#if 0 /*def CONFIG_SYN_COOKIES*/
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/*
	 *	There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
	TCP_ECN_create_request(req, skb->h.th);
	treq->pktopts = NULL;
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		treq->pktopts = skb;
	}
	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (isn == 0)
		isn = tcp_v6_init_sequence(sk, skb);

	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v6_send_synack(sk, req, NULL))
		goto drop;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop:
	if (req)
		reqsk_free(req);

	TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
	return 0; /* don't send reset */
}
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (np->rxopt.bits.osrcrt == 2 &&
	    opt == NULL && treq->pktopts) {
		struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
	}

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	ip6_dst_store(newsk, dst, NULL);
	newsk->sk_route_caps = dst->dev->features &
		~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping a reference count would be much cleverer,
	   but we do one more thing here: reattach the optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

	__inet6_hash(&tcp_hashinfo, newsk);
	inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}
static int tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		if (!tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb, 0))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}
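
/*
 * Editor's sketch (an addition, user-space view under the assumption of
 * a hypothetical connected socket fd): what the pktoptions latch above
 * feeds.  An application that enabled e.g. IPV6_RECVPKTINFO can fetch
 * the latched ancillary data of the most recent in-order segment via
 * the Linux-specific IPV6_PKTOPTIONS getsockopt:
 *
 *	unsigned char cbuf[256];
 *	socklen_t len = sizeof(cbuf);
 *
 *	if (getsockopt(fd, IPPROTO_IPV6, IPV6_PKTOPTIONS, cbuf, &len) == 0)
 *		parse cbuf as cmsg (ancillary) data;
 *
 * The kernel fills the buffer from np->pktoptions, i.e. from the skb
 * exchanged into place by the code above.
 */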
static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
{
	struct sk_buff *skb = *pskb;
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v6_checksum_init(skb)))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
			    &skb->nh.ipv6h->daddr, ntohs(th->dest),
			    inet6_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}

	switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
					   skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(&tcp_hashinfo,
					    &skb->nh.ipv6h->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
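
/*
 * Editor's worked example (an addition): the control-block setup in
 * tcp_v6_rcv() above gives, for a segment with SEQ=1000 carrying 100
 * payload bytes plus a FIN:
 *
 *	end_seq = 1000 + 0 (syn) + 1 (fin) + 100 = 1101
 *
 * SYN and FIN each consume one sequence number, which is why they are
 * added alongside the payload length (skb->len - th->doff*4).
 */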
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}

static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	=	inet6_csk_xmit,
	.send_check	=	tcp_v6_send_check,
	.rebuild_header	=	inet6_sk_rebuild_header,
	.conn_request	=	tcp_v6_conn_request,
	.syn_recv_sock	=	tcp_v6_syn_recv_sock,
	.remember_stamp	=	tcp_v6_remember_stamp,
	.net_header_len	=	sizeof(struct ipv6hdr),

	.setsockopt	=	ipv6_setsockopt,
	.getsockopt	=	ipv6_getsockopt,
	.addr2sockaddr	=	inet6_csk_addr2sockaddr,
	.sockaddr_len	=	sizeof(struct sockaddr_in6)
};

/*
 *	TCP over IPv4 via INET6 API
 */

static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	=	ip_queue_xmit,
	.send_check	=	tcp_v4_send_check,
	.rebuild_header	=	inet_sk_rebuild_header,
	.conn_request	=	tcp_v6_conn_request,
	.syn_recv_sock	=	tcp_v6_syn_recv_sock,
	.remember_stamp	=	tcp_v4_remember_stamp,
	.net_header_len	=	sizeof(struct iphdr),

	.setsockopt	=	ipv6_setsockopt,
	.getsockopt	=	ipv6_getsockopt,
	.addr2sockaddr	=	inet6_csk_addr2sockaddr,
	.sockaddr_len	=	sizeof(struct sockaddr_in6)
};
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
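
/*
 * Editor's worked example (an addition, assuming the defaults set
 * above): until the real MSS is learned from the peer and path MTU,
 * the first flight after the handshake is bounded by
 *
 *	tp->snd_cwnd * tp->mss_cache = 2 * 536 = 1072 bytes
 *
 * after which the congestion window grows per the usual slow-start
 * rules (see the draft-stevens reference in the comment above).
 */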
static int tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   icsk->icsk_rto,
		   icsk->icsk_ack.ato,
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
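
/*
 * Editor's illustration (hypothetical values, not captured from a real
 * system): for a loopback pair ::1:8080 <-> ::1:54000 in ESTABLISHED
 * state (01), the printers above emit a /proc/net/tcp6 line shaped like
 *
 *    0: 00000000000000000000000001000000:1F90 00000000000000000000000001000000:D2F0 01 00000000:00000000 00:00000000 00000000  1000        0 12345 1 c1a3c9a0 ...
 *
 * each IPv6 address dumped as four raw 32-bit words in hex, then the
 * port, state, queue sizes, timer info, uid, timeout, inode, refcount
 * and socket pointer fields in that order.
 */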
#ifdef CONFIG_PROC_FS
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}
static struct file_operations tcp6_seq_fops;
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
};

int __init tcp6_proc_init(void)
{
	return tcp_proc_register(&tcp6_seq_afinfo);
}

void tcp6_proc_exit(void)
{
	tcp_proc_unregister(&tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v6_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
};
static struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.capability	=	-1,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
void __init tcpv6_init(void)
{
	/* register inet6 protocol */
	if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
		printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
	inet6_register_protosw(&tcpv6_protosw);
}