/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
/* Socket used for sending RSTs and ACKs */
static struct socket *tcp6_socket;

static void	tcp_v6_send_reset(struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void	tcp_v6_send_check(struct sock *sk, int len,
				  struct sk_buff *skb);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;

static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(&tcp_hashinfo, sk);
		local_bh_enable();
	}
}
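/*
 * Note the v4-mapped split visible in tcp_v6_hash(): a socket whose
 * icsk_af_ops were switched to ipv6_mapped (see tcp_v6_connect() below)
 * is really carrying IPv4 traffic, so it is hashed through
 * tcp_prot.hash() into the IPv4 tables; every other socket lands in the
 * shared tcp_hashinfo via __inet6_hash() with bottom halves disabled.
 */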
static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   unsigned long base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
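/*
 * csum_ipv6_magic() folds the RFC 2460 pseudo-header into the partial
 * sum passed in 'base': the 16-byte source and destination addresses,
 * a 4-byte upper-layer length, three zero bytes and a one-byte
 * next-header value (IPPROTO_TCP here).  The same helper therefore
 * serves both transmit (compute) and receive (verify) directions.
 */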
static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IPV6)) {
		return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
						    skb->nh.ipv6h->saddr.s6_addr32,
						    skb->h.th->dest,
						    skb->h.th->source);
	} else {
		return secure_tcp_sequence_number(skb->nh.iph->daddr,
						  skb->nh.iph->saddr,
						  skb->h.th->dest,
						  skb->h.th->source);
	}
}
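/*
 * Both secure_*_sequence_number() helpers derive the initial sequence
 * number from the connection 4-tuple mixed with a boot-time secret, in
 * the spirit of RFC 1948, so ISNs are hard to predict off-path.  Note
 * the protocol test: a v6 listener may be handed an IPv4 skb for a
 * v4-mapped connection, hence the ETH_P_IPV6 check instead of assuming
 * an IPv6 header.
 */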
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto failure;

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
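/*
 * ICMPv6 error handler.  'skb' is the ICMPv6 message; the offending TCP
 * header begins 'offset' bytes into the quoted payload.  PKT_TOOBIG is
 * handled by refreshing the route and shrinking the MSS via
 * tcp_sync_mss(); all other errors are translated by
 * icmpv6_err_convert() and reported to the matching socket or
 * request_sock.
 */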
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		int type, int code, int offset, __u32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;

	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
			  th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle the rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;

	if (dst == NULL) {
		opt = np->opt;
		if (opt == NULL &&
		    np->rxopt.bits.osrcrt == 2 &&
		    treq->pktopts) {
			struct sk_buff *pktopts = treq->pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);
			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(pktopts->nh.raw + rxopt->srcrt));
		}

		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);
		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		if (err == NET_XMIT_CN)
			err = 0;
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)
		kfree_skb(inet6_rsk(req)->pktopts);
}

static struct request_sock_ops tcp6_request_sock_ops = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
};
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_HW) {
		/* Hardware offload: store the pseudo-header sum and the
		 * offset of the check field; the device does the rest.
		 */
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff<<2,
							 skb->csum));
	}
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = skb->nh.ipv6h;
	th = skb->h.th;

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_HW;
	return 0;
}
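/*
 * tcp_v6_send_reset() below replies to an arbitrary incoming segment, so
 * it cannot rely on any socket state: it builds a bare TCP header on a
 * fresh skb, swaps the address and port pairs taken from the offending
 * packet, and routes the RST independently (hence the NULL socket in the
 * dst lookup, with tcp6_socket->sk only lending its identity to
 * ip6_xmit()).
 */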
static void tcp_v6_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */
	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

	t1 = (struct tcphdr *) skb_push(buff, sizeof(struct tcphdr));

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1)/4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff<<2));
	}

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}
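/*
 * tcp_v6_send_ack() is the stateless companion to the RST path above: it
 * sends ACKs on behalf of TIME_WAIT sockets and of pending request_socks
 * (see the two callers below), optionally carrying a TCP timestamp
 * option when 'ts' is non-zero.
 */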
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(struct tcphdr);

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len/4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	if (ts) {
		u32 *ptr = (u32*)(t1 + 1);
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tcp_time_stamp);
		*ptr = htonl(ts);
	}

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
}
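/*
 * For a segment arriving on a listening socket, tcp_v6_hnd_req() decides
 * whom it belongs to: a pending request_sock (handed to tcp_check_req()),
 * an already established child (returned locked), a TIME_WAIT socket
 * (dropped here), or the listener itself.
 */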
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = skb->h.th;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &skb->nh.ipv6h->saddr,
				   &skb->nh.ipv6h->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
					 th->source, &skb->nh.ipv6h->daddr,
					 ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

#if 0 /*def CONFIG_SYN_COOKIES*/
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/*
	 *	There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
	TCP_ECN_create_request(req, skb->h.th);
	treq->pktopts = NULL;
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		treq->pktopts = skb;
	}
	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (isn == 0)
		isn = tcp_v6_init_sequence(sk, skb);

	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v6_send_synack(sk, req, NULL))
		goto drop;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop:
	if (req)
		reqsk_free(req);

	return 0; /* don't send reset */
}
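/*
 * tcp_v6_syn_recv_sock() creates the child socket once the three-way
 * handshake completes.  The ETH_P_IP branch covers v4-mapped connections:
 * it delegates to tcp_v4_syn_recv_sock() and then rewrites the child's
 * addresses into ::ffff:a.b.c.d form so the IPv6 API still sees
 * consistent endpoints.
 */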
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);
		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (np->rxopt.bits.osrcrt == 2 &&
	    opt == NULL && treq->pktopts) {
		struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
	}

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

	/* Clone native IPv6 options from the listening socket (if any)

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

	__inet6_hash(&tcp_hashinfo, newsk);
	inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}
static int tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		if (!tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
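/*
 * A note on the 76-byte cutoff above: when hardware could not verify the
 * checksum, skb->csum is primed with the complemented pseudo-header sum
 * so that the payload sum can be folded in later.  Short packets
 * (<= 76 bytes) are verified immediately via __skb_checksum_complete();
 * for larger ones the work is deferred, typically to be combined with
 * the copy to user space.  The exact threshold appears to be a
 * heuristic inherited from the IPv4 receive path.
 */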
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb, 0))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}
static int tcp_v6_rcv(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v6_checksum_init(skb)))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
			    &skb->nh.ipv6h->daddr, ntohs(th->dest),
			    inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(skb);
	}

discard_it:
	/*
	 *	Discard frame
	 */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}

	switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
					   skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(&tcp_hashinfo,
					    &skb->nh.ipv6h->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}
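/*
 * Per-family operation tables.  ipv6_specific is the normal case;
 * ipv6_mapped is installed on a socket once it connects to a v4-mapped
 * address (::ffff:a.b.c.d), routing transmit, checksum and header-size
 * decisions through the IPv4 code while the socket keeps its IPv6 API.
 */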
static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 *	TCP over IPv4 via INET6 API
 */
static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc() so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
static int tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,    /* non standard timer */
		   0,    /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   icsk->icsk_rto,
		   icsk->icsk_ack.ato,
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
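/*
 * The three helpers above emit one row of /proc/net/tcp6 each, for an
 * open request, a full socket and a TIME_WAIT socket respectively, all
 * sharing the column layout announced by tcp6_seq_show() below.
 */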
#ifdef CONFIG_PROC_FS
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct file_operations tcp6_seq_fops;
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
};

int __init tcp6_proc_init(void)
{
	return tcp_proc_register(&tcp6_seq_afinfo);
}

void tcp6_proc_exit(void)
{
	tcp_proc_unregister(&tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v6_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
static struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.capability	=	-1,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
void __init tcpv6_init(void)
{
	/* register inet6 protocol */
	if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
		printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
	inet6_register_protosw(&tcpv6_protosw);

	if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
				     IPPROTO_TCP) < 0)
		panic("Failed to create the TCPv6 control socket.\n");
}