Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
[linux-2.6] / net / ipv6 / tcp_ipv6.c
1 /*
2  *      TCP over IPv6
3  *      Linux INET6 implementation 
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>     
7  *
8  *      $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9  *
10  *      Based on: 
11  *      linux/net/ipv4/tcp.c
12  *      linux/net/ipv4/tcp_input.c
13  *      linux/net/ipv4/tcp_output.c
14  *
15  *      Fixes:
16  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
17  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
18  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
19  *                                      a single port at the same time.
20  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
21  *
22  *      This program is free software; you can redistribute it and/or
23  *      modify it under the terms of the GNU General Public License
24  *      as published by the Free Software Foundation; either version
25  *      2 of the License, or (at your option) any later version.
26  */
27
28 #include <linux/module.h>
29 #include <linux/config.h>
30 #include <linux/errno.h>
31 #include <linux/types.h>
32 #include <linux/socket.h>
33 #include <linux/sockios.h>
34 #include <linux/net.h>
35 #include <linux/jiffies.h>
36 #include <linux/in.h>
37 #include <linux/in6.h>
38 #include <linux/netdevice.h>
39 #include <linux/init.h>
40 #include <linux/jhash.h>
41 #include <linux/ipsec.h>
42 #include <linux/times.h>
43
44 #include <linux/ipv6.h>
45 #include <linux/icmpv6.h>
46 #include <linux/random.h>
47
48 #include <net/tcp.h>
49 #include <net/ndisc.h>
50 #include <net/inet6_hashtables.h>
51 #include <net/inet6_connection_sock.h>
52 #include <net/ipv6.h>
53 #include <net/transp_v6.h>
54 #include <net/addrconf.h>
55 #include <net/ip6_route.h>
56 #include <net/ip6_checksum.h>
57 #include <net/inet_ecn.h>
58 #include <net/protocol.h>
59 #include <net/xfrm.h>
60 #include <net/addrconf.h>
61 #include <net/snmp.h>
62 #include <net/dsfield.h>
63 #include <net/timewait_sock.h>
64
65 #include <asm/uaccess.h>
66
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69
/* Forward declarations for routines used before their definitions below. */
70 static void     tcp_v6_send_reset(struct sk_buff *skb);
71 static void     tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
72 static void     tcp_v6_send_check(struct sock *sk, int len, 
73                                   struct sk_buff *skb);
74
75 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
76
/* Address-family operation tables: ipv6_mapped serves sockets talking to
 * v4-mapped (::ffff:a.b.c.d) peers, ipv6_specific serves native IPv6. */
77 static struct inet_connection_sock_af_ops ipv6_mapped;
78 static struct inet_connection_sock_af_ops ipv6_specific;
79
80 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
81 {
82         return inet_csk_get_port(&tcp_hashinfo, sk, snum,
83                                  inet6_csk_bind_conflict);
84 }
85
86 static void tcp_v6_hash(struct sock *sk)
87 {
88         if (sk->sk_state != TCP_CLOSE) {
89                 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
90                         tcp_prot.hash(sk);
91                         return;
92                 }
93                 local_bh_disable();
94                 __inet6_hash(&tcp_hashinfo, sk);
95                 local_bh_enable();
96         }
97 }
98
/*
 * Fold the IPv6 pseudo-header for @saddr/@daddr/@len into the partial
 * checksum @base and return the final TCP checksum.  @th is unused;
 * it is kept so the signature mirrors the IPv4 helper.
 */
99 static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
100                                    struct in6_addr *saddr, 
101                                    struct in6_addr *daddr, 
102                                    unsigned long base)
103 {
104         return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
105 }
106
107 static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
108 {
109         if (skb->protocol == htons(ETH_P_IPV6)) {
110                 return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
111                                                     skb->nh.ipv6h->saddr.s6_addr32,
112                                                     skb->h.th->dest,
113                                                     skb->h.th->source);
114         } else {
115                 return secure_tcp_sequence_number(skb->nh.iph->daddr,
116                                                   skb->nh.iph->saddr,
117                                                   skb->h.th->dest,
118                                                   skb->h.th->source);
119         }
120 }
121
/*
 * Active open (connect()) for a TCP socket over IPv6.
 *
 * Validates @uaddr, resolves flow labels and link-local scope ids,
 * routes the flow, and kicks off the SYN via tcp_connect().  A
 * v4-mapped destination is handed off to tcp_v4_connect() after
 * swapping in the v4-mapped ops table.  Returns 0 or a negative errno.
 */
122 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, 
123                           int addr_len)
124 {
125         struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
126         struct inet_sock *inet = inet_sk(sk);
127         struct inet_connection_sock *icsk = inet_csk(sk);
128         struct ipv6_pinfo *np = inet6_sk(sk);
129         struct tcp_sock *tp = tcp_sk(sk);
130         struct in6_addr *saddr = NULL, *final_p = NULL, final;
131         struct flowi fl;
132         struct dst_entry *dst;
133         int addr_type;
134         int err;
135
136         if (addr_len < SIN6_LEN_RFC2133) 
137                 return -EINVAL;
138
139         if (usin->sin6_family != AF_INET6) 
140                 return(-EAFNOSUPPORT);
141
142         memset(&fl, 0, sizeof(fl));
143
        /* If the socket sends flow info, honour a non-zero flow label by
         * looking it up and taking the destination address from it. */
144         if (np->sndflow) {
145                 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
146                 IP6_ECN_flow_init(fl.fl6_flowlabel);
147                 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
148                         struct ip6_flowlabel *flowlabel;
149                         flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
150                         if (flowlabel == NULL)
151                                 return -EINVAL;
152                         ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
153                         fl6_sock_release(flowlabel);
154                 }
155         }
156
157         /*
158          *      connect() to INADDR_ANY means loopback (BSD'ism).
159          */
160         
161         if(ipv6_addr_any(&usin->sin6_addr))
162                 usin->sin6_addr.s6_addr[15] = 0x1; 
163
164         addr_type = ipv6_addr_type(&usin->sin6_addr);
165
166         if(addr_type & IPV6_ADDR_MULTICAST)
167                 return -ENETUNREACH;
168
169         if (addr_type&IPV6_ADDR_LINKLOCAL) {
170                 if (addr_len >= sizeof(struct sockaddr_in6) &&
171                     usin->sin6_scope_id) {
172                         /* If interface is set while binding, indices
173                          * must coincide.
174                          */
175                         if (sk->sk_bound_dev_if &&
176                             sk->sk_bound_dev_if != usin->sin6_scope_id)
177                                 return -EINVAL;
178
179                         sk->sk_bound_dev_if = usin->sin6_scope_id;
180                 }
181
182                 /* Connect to link-local address requires an interface */
183                 if (!sk->sk_bound_dev_if)
184                         return -EINVAL;
185         }
186
        /* Reconnecting to a different peer invalidates cached timestamp
         * state and the old write sequence. */
187         if (tp->rx_opt.ts_recent_stamp &&
188             !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
189                 tp->rx_opt.ts_recent = 0;
190                 tp->rx_opt.ts_recent_stamp = 0;
191                 tp->write_seq = 0;
192         }
193
194         ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
195         np->flow_label = fl.fl6_flowlabel;
196
197         /*
198          *      TCP over IPv4
199          */
200
201         if (addr_type == IPV6_ADDR_MAPPED) {
202                 u32 exthdrlen = icsk->icsk_ext_hdr_len;
203                 struct sockaddr_in sin;
204
205                 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
206
207                 if (__ipv6_only_sock(sk))
208                         return -ENETUNREACH;
209
210                 sin.sin_family = AF_INET;
211                 sin.sin_port = usin->sin6_port;
212                 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
213
                /* Switch to the v4-mapped ops before delegating; undo on
                 * failure so the socket is usable for IPv6 again. */
214                 icsk->icsk_af_ops = &ipv6_mapped;
215                 sk->sk_backlog_rcv = tcp_v4_do_rcv;
216
217                 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
218
219                 if (err) {
220                         icsk->icsk_ext_hdr_len = exthdrlen;
221                         icsk->icsk_af_ops = &ipv6_specific;
222                         sk->sk_backlog_rcv = tcp_v6_do_rcv;
223                         goto failure;
224                 } else {
225                         ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
226                                       inet->saddr);
227                         ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
228                                       inet->rcv_saddr);
229                 }
230
231                 return err;
232         }
233
234         if (!ipv6_addr_any(&np->rcv_saddr))
235                 saddr = &np->rcv_saddr;
236
237         fl.proto = IPPROTO_TCP;
238         ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
239         ipv6_addr_copy(&fl.fl6_src,
240                        (saddr ? saddr : &np->saddr));
241         fl.oif = sk->sk_bound_dev_if;
242         fl.fl_ip_dport = usin->sin6_port;
243         fl.fl_ip_sport = inet->sport;
244
        /* With a type-0 routing header, route to the first hop but keep the
         * real destination in 'final' to restore after the dst lookup. */
245         if (np->opt && np->opt->srcrt) {
246                 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
247                 ipv6_addr_copy(&final, &fl.fl6_dst);
248                 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
249                 final_p = &final;
250         }
251
252         err = ip6_dst_lookup(sk, &dst, &fl);
253         if (err)
254                 goto failure;
255         if (final_p)
256                 ipv6_addr_copy(&fl.fl6_dst, final_p);
257
258         if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
259                 goto failure;
260
        /* No bound source address yet: adopt the one routing picked. */
261         if (saddr == NULL) {
262                 saddr = &fl.fl6_src;
263                 ipv6_addr_copy(&np->rcv_saddr, saddr);
264         }
265
266         /* set the source address */
267         ipv6_addr_copy(&np->saddr, saddr);
268         inet->rcv_saddr = LOOPBACK4_IPV6;
269
270         ip6_dst_store(sk, dst, NULL);
271         sk->sk_route_caps = dst->dev->features &
272                 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
273
274         icsk->icsk_ext_hdr_len = 0;
275         if (np->opt)
276                 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
277                                           np->opt->opt_nflen);
278
279         tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
280
281         inet->dport = usin->sin6_port;
282
283         tcp_set_state(sk, TCP_SYN_SENT);
284         err = inet6_hash_connect(&tcp_death_row, sk);
285         if (err)
286                 goto late_failure;
287
288         if (!tp->write_seq)
289                 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
290                                                              np->daddr.s6_addr32,
291                                                              inet->sport,
292                                                              inet->dport);
293
294         err = tcp_connect(sk);
295         if (err)
296                 goto late_failure;
297
298         return 0;
299
/* late_failure: connect failed after we entered SYN_SENT; roll the state
 * back to CLOSE and drop the cached route before the common cleanup. */
300 late_failure:
301         tcp_set_state(sk, TCP_CLOSE);
302         __sk_dst_reset(sk);
303 failure:
304         inet->dport = 0;
305         sk->sk_route_caps = 0;
306         return err;
307 }
308
/*
 * ICMPv6 error handler for TCP.  Called when an ICMPv6 error arrives
 * for a segment we sent; @skb carries the offending packet (its IPv6
 * header at skb->data, the embedded TCP header at @offset), and
 * @type/@code/@info are the ICMPv6 fields.  Runs in BH context.
 */
309 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
310                 int type, int code, int offset, __u32 info)
311 {
312         struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
313         const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
314         struct ipv6_pinfo *np;
315         struct sock *sk;
316         int err;
317         struct tcp_sock *tp; 
318         __u32 seq;
319
320         sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
321                           th->source, skb->dev->ifindex);
322
323         if (sk == NULL) {
324                 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
325                 return;
326         }
327
        /* Time-wait sockets keep no error state; just drop the reference. */
328         if (sk->sk_state == TCP_TIME_WAIT) {
329                 inet_twsk_put((struct inet_timewait_sock *)sk);
330                 return;
331         }
332
333         bh_lock_sock(sk);
        /* Note: processing continues even when user-owned; only counted here. */
334         if (sock_owned_by_user(sk))
335                 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
336
337         if (sk->sk_state == TCP_CLOSE)
338                 goto out;
339
        /* Ignore errors whose quoted sequence falls outside our send window. */
340         tp = tcp_sk(sk);
341         seq = ntohl(th->seq); 
342         if (sk->sk_state != TCP_LISTEN &&
343             !between(seq, tp->snd_una, tp->snd_nxt)) {
344                 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
345                 goto out;
346         }
347
348         np = inet6_sk(sk);
349
        /* Path MTU discovery: resync MSS to the (possibly refreshed) route
         * MTU and retransmit what no longer fits. */
350         if (type == ICMPV6_PKT_TOOBIG) {
351                 struct dst_entry *dst = NULL;
352
353                 if (sock_owned_by_user(sk))
354                         goto out;
355                 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
356                         goto out;
357
358                 /* icmp should have updated the destination cache entry */
359                 dst = __sk_dst_check(sk, np->dst_cookie);
360
361                 if (dst == NULL) {
362                         struct inet_sock *inet = inet_sk(sk);
363                         struct flowi fl;
364
365                         /* BUGGG_FUTURE: Again, it is not clear how
366                            to handle rthdr case. Ignore this complexity
367                            for now.
368                          */
369                         memset(&fl, 0, sizeof(fl));
370                         fl.proto = IPPROTO_TCP;
371                         ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
372                         ipv6_addr_copy(&fl.fl6_src, &np->saddr);
373                         fl.oif = sk->sk_bound_dev_if;
374                         fl.fl_ip_dport = inet->dport;
375                         fl.fl_ip_sport = inet->sport;
376
377                         if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
378                                 sk->sk_err_soft = -err;
379                                 goto out;
380                         }
381
382                         if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
383                                 sk->sk_err_soft = -err;
384                                 goto out;
385                         }
386
387                 } else
388                         dst_hold(dst);
389
390                 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
391                         tcp_sync_mss(sk, dst_mtu(dst));
392                         tcp_simple_retransmit(sk);
393                 } /* else let the usual retransmit timer handle it */
394                 dst_release(dst);
395                 goto out;
396         }
397
398         icmpv6_err_convert(type, code, &err);
399
400         /* Might be for an request_sock */
401         switch (sk->sk_state) {
402                 struct request_sock *req, **prev;
403         case TCP_LISTEN:
404                 if (sock_owned_by_user(sk))
405                         goto out;
406
407                 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
408                                            &hdr->saddr, inet6_iif(skb));
409                 if (!req)
410                         goto out;
411
412                 /* ICMPs are not backlogged, hence we cannot get
413                  * an established socket here.
414                  */
415                 BUG_TRAP(req->sk == NULL);
416
417                 if (seq != tcp_rsk(req)->snt_isn) {
418                         NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
419                         goto out;
420                 }
421
                /* Fatal error for the embryonic connection: drop it. */
422                 inet_csk_reqsk_queue_drop(sk, req, prev);
423                 goto out;
424
425         case TCP_SYN_SENT:
426         case TCP_SYN_RECV:  /* Cannot happen.
427                                It can, it SYNs are crossed. --ANK */ 
428                 if (!sock_owned_by_user(sk)) {
429                         TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
430                         sk->sk_err = err;
431                         sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */
432
433                         tcp_done(sk);
434                 } else
435                         sk->sk_err_soft = err;
436                 goto out;
437         }
438
        /* Established states: hard error only if the app opted in (recverr)
         * and the socket is not currently owned by user context. */
439         if (!sock_owned_by_user(sk) && np->recverr) {
440                 sk->sk_err = err;
441                 sk->sk_error_report(sk);
442         } else
443                 sk->sk_err_soft = err;
444
445 out:
446         bh_unlock_sock(sk);
447         sock_put(sk);
448 }
449
450
/*
 * Build and transmit a SYN|ACK for the embryonic connection @req.
 * If @dst is NULL the route is looked up here, honouring a source
 * routing header taken from the peer's saved options when
 * np->rxopt.bits.osrcrt == 2.  Returns 0 on success, negative on
 * failure.  An 'opt' allocated by ipv6_invert_rthdr() (i.e. one that
 * is not np->opt) is freed before returning.
 */
451 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
452                               struct dst_entry *dst)
453 {
454         struct inet6_request_sock *treq = inet6_rsk(req);
455         struct ipv6_pinfo *np = inet6_sk(sk);
456         struct sk_buff * skb;
457         struct ipv6_txoptions *opt = NULL;
458         struct in6_addr * final_p = NULL, final;
459         struct flowi fl;
460         int err = -1;
461
462         memset(&fl, 0, sizeof(fl));
463         fl.proto = IPPROTO_TCP;
464         ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
465         ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
466         fl.fl6_flowlabel = 0;
467         fl.oif = treq->iif;
468         fl.fl_ip_dport = inet_rsk(req)->rmt_port;
469         fl.fl_ip_sport = inet_sk(sk)->sport;
470
471         if (dst == NULL) {
472                 opt = np->opt;
                /* No socket options: possibly derive a reply routing header
                 * by inverting the one the peer sent us. */
473                 if (opt == NULL &&
474                     np->rxopt.bits.osrcrt == 2 &&
475                     treq->pktopts) {
476                         struct sk_buff *pktopts = treq->pktopts;
477                         struct inet6_skb_parm *rxopt = IP6CB(pktopts);
478                         if (rxopt->srcrt)
479                                 opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt));
480                 }
481
                /* Route via the first hop of the routing header, preserving
                 * the true destination in 'final' for after the lookup. */
482                 if (opt && opt->srcrt) {
483                         struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
484                         ipv6_addr_copy(&final, &fl.fl6_dst);
485                         ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
486                         final_p = &final;
487                 }
488
489                 err = ip6_dst_lookup(sk, &dst, &fl);
490                 if (err)
491                         goto done;
492                 if (final_p)
493                         ipv6_addr_copy(&fl.fl6_dst, final_p);
494                 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
495                         goto done;
496         }
497
498         skb = tcp_make_synack(sk, dst, req);
499         if (skb) {
500                 struct tcphdr *th = skb->h.th;
501
                /* Full software checksum over the SYN|ACK before transmit. */
502                 th->check = tcp_v6_check(th, skb->len,
503                                          &treq->loc_addr, &treq->rmt_addr,
504                                          csum_partial((char *)th, skb->len, skb->csum));
505
506                 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
507                 err = ip6_xmit(sk, skb, &fl, opt, 0);
508                 if (err == NET_XMIT_CN)
509                         err = 0;
510         }
511
512 done:
513         if (opt && opt != np->opt)
514                 sock_kfree_s(sk, opt, opt->tot_len);
515         return err;
516 }
517
518 static void tcp_v6_reqsk_destructor(struct request_sock *req)
519 {
520         if (inet6_rsk(req)->pktopts)
521                 kfree_skb(inet6_rsk(req)->pktopts);
522 }
523
/* Operations invoked on TCP/IPv6 request socks (embryonic connections):
 * SYN|ACK (re)transmission, ACK/RST emission, and teardown. */
524 static struct request_sock_ops tcp6_request_sock_ops = {
525         .family         =       AF_INET6,
526         .obj_size       =       sizeof(struct tcp6_request_sock),
527         .rtx_syn_ack    =       tcp_v6_send_synack,
528         .send_ack       =       tcp_v6_reqsk_send_ack,
529         .destructor     =       tcp_v6_reqsk_destructor,
530         .send_reset     =       tcp_v6_send_reset
531 };
532
/* Time-wait socket hooks: object size for the slab and the uniqueness
 * check that allows reusing a TIME_WAIT port for a new connection. */
533 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
534         .twsk_obj_size  = sizeof(struct tcp6_timewait_sock),
535         .twsk_unique    = tcp_twsk_unique,
536 };
537
/*
 * Fill in the TCP checksum field for an outgoing segment of @len bytes.
 * With CHECKSUM_HW the NIC finishes the job: we store the inverted
 * pseudo-header sum in th->check and point skb->csum at the checksum
 * field's offset; otherwise compute the complete checksum in software.
 */
538 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
539 {
540         struct ipv6_pinfo *np = inet6_sk(sk);
541         struct tcphdr *th = skb->h.th;
542
543         if (skb->ip_summed == CHECKSUM_HW) {
544                 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,  0);
545                 skb->csum = offsetof(struct tcphdr, check);
546         } else {
547                 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 
548                                             csum_partial((char *)th, th->doff<<2, 
549                                                          skb->csum));
550         }
551 }
552
553
/*
 * Send a RST in response to the segment in @skb (no local socket state
 * is used - this handles segments for which no connection exists).
 * Never RST a RST, and only reply to unicast destinations.  The reply
 * mirrors the incoming ports/addresses per RFC 793: seq comes from the
 * peer's ack field when present, otherwise we ACK everything received.
 */
554 static void tcp_v6_send_reset(struct sk_buff *skb)
555 {
556         struct tcphdr *th = skb->h.th, *t1; 
557         struct sk_buff *buff;
558         struct flowi fl;
559
560         if (th->rst)
561                 return;
562
563         if (!ipv6_unicast_destination(skb))
564                 return; 
565
566         /*
567          * We need to grab some memory, and put together an RST,
568          * and then put it into the queue to be sent.
569          */
570
571         buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
572                          GFP_ATOMIC);
573         if (buff == NULL) 
574                 return;
575
576         skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
577
578         t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr));
579
580         /* Swap the send and the receive. */
581         memset(t1, 0, sizeof(*t1));
582         t1->dest = th->source;
583         t1->source = th->dest;
584         t1->doff = sizeof(*t1)/4;
585         t1->rst = 1;
586   
587         if(th->ack) {
588                 t1->seq = th->ack_seq;
589         } else {
590                 t1->ack = 1;
591                 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
592                                     + skb->len - (th->doff<<2));
593         }
594
        /* Partial sum over the header now; checksum is finalized below once
         * the pseudo-header addresses are known. */
595         buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
596
597         memset(&fl, 0, sizeof(fl));
598         ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
599         ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
600
601         t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
602                                     sizeof(*t1), IPPROTO_TCP,
603                                     buff->csum);
604
605         fl.proto = IPPROTO_TCP;
606         fl.oif = inet6_iif(skb);
607         fl.fl_ip_dport = t1->dest;
608         fl.fl_ip_sport = t1->source;
609
610         /* sk = NULL, but it is safe for now. RST socket required. */
611         if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
612
613                 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
614                         ip6_xmit(NULL, buff, &fl, NULL, 0);
615                         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
616                         TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
617                         return;
618                 }
619         }
620
        /* Route or policy lookup failed: drop the reply. */
621         kfree_skb(buff);
622 }
623
/*
 * Send a bare ACK in reply to @skb with the given @seq/@ack/@win, and
 * a timestamp option echoing @ts when non-zero.  Used for time-wait
 * and request-sock ACKs; like tcp_v6_send_reset() it builds the reply
 * from the incoming packet alone, with no socket.
 */
624 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
625 {
626         struct tcphdr *th = skb->h.th, *t1;
627         struct sk_buff *buff;
628         struct flowi fl;
629         int tot_len = sizeof(struct tcphdr);
630
        /* Timestamp option: 2 NOPs + kind/len + 2 x 32-bit values = 12 bytes. */
631         if (ts)
632                 tot_len += 3*4;
633
634         buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
635                          GFP_ATOMIC);
636         if (buff == NULL)
637                 return;
638
639         skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
640
641         t1 = (struct tcphdr *) skb_push(buff,tot_len);
642
643         /* Swap the send and the receive. */
644         memset(t1, 0, sizeof(*t1));
645         t1->dest = th->source;
646         t1->source = th->dest;
647         t1->doff = tot_len/4;
648         t1->seq = htonl(seq);
649         t1->ack_seq = htonl(ack);
650         t1->ack = 1;
651         t1->window = htons(win);
652         
653         if (ts) {
654                 u32 *ptr = (u32*)(t1 + 1);
655                 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
656                                (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
657                 *ptr++ = htonl(tcp_time_stamp);
658                 *ptr = htonl(ts);
659         }
660
        /* Partial sum now; finalized with the pseudo-header below. */
661         buff->csum = csum_partial((char *)t1, tot_len, 0);
662
663         memset(&fl, 0, sizeof(fl));
664         ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
665         ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
666
667         t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
668                                     tot_len, IPPROTO_TCP,
669                                     buff->csum);
670
671         fl.proto = IPPROTO_TCP;
672         fl.oif = inet6_iif(skb);
673         fl.fl_ip_dport = t1->dest;
674         fl.fl_ip_sport = t1->source;
675
676         if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
677                 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
678                         ip6_xmit(NULL, buff, &fl, NULL, 0);
679                         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
680                         return;
681                 }
682         }
683
        /* Route or policy lookup failed: drop the reply. */
684         kfree_skb(buff);
685 }
686
687 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
688 {
689         struct inet_timewait_sock *tw = inet_twsk(sk);
690         const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
691
692         tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
693                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
694                         tcptw->tw_ts_recent);
695
696         inet_twsk_put(tw);
697 }
698
699 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
700 {
701         tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
702 }
703
704
/*
 * For a segment arriving on listening socket @sk, find the socket that
 * should process it: a matching request_sock (handed to
 * tcp_check_req()), an established socket (returned locked), or @sk
 * itself.  Returns NULL when the match is in TIME_WAIT, after dropping
 * the timewait reference.
 */
705 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
706 {
707         struct request_sock *req, **prev;
708         const struct tcphdr *th = skb->h.th;
709         struct sock *nsk;
710
711         /* Find possible connection requests. */
712         req = inet6_csk_search_req(sk, &prev, th->source,
713                                    &skb->nh.ipv6h->saddr,
714                                    &skb->nh.ipv6h->daddr, inet6_iif(skb));
715         if (req)
716                 return tcp_check_req(sk, skb, req, prev);
717
718         nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
719                                          th->source, &skb->nh.ipv6h->daddr,
720                                          ntohs(th->dest), inet6_iif(skb));
721
722         if (nsk) {
                /* Established: return it locked for the caller to process. */
723                 if (nsk->sk_state != TCP_TIME_WAIT) {
724                         bh_lock_sock(nsk);
725                         return nsk;
726                 }
                /* TIME_WAIT: release the lookup reference and signal drop. */
727                 inet_twsk_put((struct inet_timewait_sock *)nsk);
728                 return NULL;
729         }
730
731 #if 0 /*def CONFIG_SYN_COOKIES*/
732         if (!th->rst && !th->syn && th->ack)
733                 sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
734 #endif
735         return sk;
736 }
737
738 /* FIXME: this is substantially similar to the ipv4 code.
739  * Can some kind of merge be done? -- erics
740  */
/*
 * Handle an incoming SYN on listening socket @sk: allocate a
 * request_sock, record the peer's addresses/options, pick an ISN and
 * send the SYN|ACK.  v4-mapped frames are delegated to
 * tcp_v4_conn_request().  Always returns 0 (no RST from here).
 */
741 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
742 {
743         struct inet6_request_sock *treq;
744         struct ipv6_pinfo *np = inet6_sk(sk);
745         struct tcp_options_received tmp_opt;
746         struct tcp_sock *tp = tcp_sk(sk);
747         struct request_sock *req = NULL;
748         __u32 isn = TCP_SKB_CB(skb)->when;
749
750         if (skb->protocol == htons(ETH_P_IP))
751                 return tcp_v4_conn_request(sk, skb);
752
753         if (!ipv6_unicast_destination(skb))
754                 goto drop; 
755
756         /*
757          *      There are no SYN attacks on IPv6, yet...        
758          */
        /* Request queue full with no cookie ISN: treat as possible synflood. */
759         if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
760                 if (net_ratelimit())
761                         printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
762                 goto drop;              
763         }
764
765         if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
766                 goto drop;
767
768         req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
769         if (req == NULL)
770                 goto drop;
771
772         tcp_clear_options(&tmp_opt);
773         tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
774         tmp_opt.user_mss = tp->rx_opt.user_mss;
775
776         tcp_parse_options(skb, &tmp_opt, 0);
777
778         tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
779         tcp_openreq_init(req, &tmp_opt, skb);
780
781         treq = inet6_rsk(req);
782         ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
783         ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
784         TCP_ECN_create_request(req, skb->h.th);
785         treq->pktopts = NULL;
        /* Pin the SYN skb on the request when its IPv6 options/ancillary data
         * will be needed later (released by tcp_v6_reqsk_destructor()). */
786         if (ipv6_opt_accepted(sk, skb) ||
787             np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
788             np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
789                 atomic_inc(&skb->users);
790                 treq->pktopts = skb;
791         }
792         treq->iif = sk->sk_bound_dev_if;
793
794         /* So that link locals have meaning */
795         if (!sk->sk_bound_dev_if &&
796             ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
797                 treq->iif = inet6_iif(skb);
798
799         if (isn == 0) 
800                 isn = tcp_v6_init_sequence(sk,skb);
801
802         tcp_rsk(req)->snt_isn = isn;
803
804         if (tcp_v6_send_synack(sk, req, NULL))
805                 goto drop;
806
807         inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
808         return 0;
809
810 drop:
811         if (req)
812                 reqsk_free(req);
813
814         TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
815         return 0; /* don't send reset */
816 }
817
/* Build the child socket for a completed three-way handshake.
 *
 * Two cases:
 *  - An IPv4 packet arriving on this IPv6 listener: delegate to the IPv4
 *    code and then graft v6-mapped state (::ffff:a.b.c.d addresses, the
 *    ipv6_mapped ops vector) onto the resulting socket.
 *  - A native IPv6 packet: create the child ourselves, routing it and
 *    cloning listener options as needed.
 *
 * Returns the new socket, or NULL on failure (accept-queue overflow,
 * route lookup failure, or allocation failure); statistics are bumped
 * on the error paths.  Ownership note: a locally built "opt" (from
 * ipv6_invert_rthdr) that is not np->opt is freed on every exit path.
 */
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                          struct request_sock *req,
                                          struct dst_entry *dst)
{
        struct inet6_request_sock *treq = inet6_rsk(req);
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct tcp6_sock *newtcp6sk;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
        struct ipv6_txoptions *opt;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 *      v6 mapped: let the IPv4 code do the real work,
                 *      then convert the child into a v6-mapped socket.
                 */

                newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

                if (newsk == NULL) 
                        return NULL;

                newtcp6sk = (struct tcp6_sock *)newsk;
                inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

                newinet = inet_sk(newsk);
                newnp = inet6_sk(newsk);
                newtp = tcp_sk(newsk);

                /* Start from the listener's IPv6 state, then override. */
                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                /* ::ffff:<IPv4 destination> */
                ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
                              newinet->daddr);

                /* ::ffff:<IPv4 source> */
                ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
                              newinet->saddr);

                ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

                /* From here on this child behaves as a mapped socket. */
                inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = inet6_iif(skb);
                newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;

                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
                 * here, tcp_create_openreq_child now does this for us, see the comment in
                 * that function for the gory details. -acme
                 */

                /* It is tricky place. Until this moment IPv4 tcp
                   worked with IPv6 icsk.icsk_af_ops.
                   Sync it now.
                 */
                tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

                return newsk;
        }

        opt = np->opt;

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        /* If the listener wants source routing echoed and has no explicit
         * options, derive a reversed routing header from the saved SYN.
         * The result (when != np->opt) is owned by us and freed below.
         */
        if (np->rxopt.bits.osrcrt == 2 &&
            opt == NULL && treq->pktopts) {
                struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
                if (rxopt->srcrt)
                        opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
        }

        if (dst == NULL) {
                struct in6_addr *final_p = NULL, final;
                struct flowi fl;

                memset(&fl, 0, sizeof(fl));
                fl.proto = IPPROTO_TCP;
                ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
                /* With a source route, look up via the first hop and keep
                 * the real destination for after the route is resolved.
                 */
                if (opt && opt->srcrt) {
                        struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
                        ipv6_addr_copy(&final, &fl.fl6_dst);
                        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                        final_p = &final;
                }
                ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
                fl.oif = sk->sk_bound_dev_if;
                fl.fl_ip_dport = inet_rsk(req)->rmt_port;
                fl.fl_ip_sport = inet_sk(sk)->sport;

                if (ip6_dst_lookup(sk, &dst, &fl))
                        goto out;

                if (final_p)
                        ipv6_addr_copy(&fl.fl6_dst, final_p);

                if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
                        goto out;
        } 

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
                goto out;

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
         * count here, tcp_create_openreq_child now does this for us, see the
         * comment in that function for the gory details. -acme
         */

        /* The child takes over the dst reference obtained above. */
        ip6_dst_store(newsk, dst, NULL);
        newsk->sk_route_caps = dst->dev->features &
                ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

        newtcp6sk = (struct tcp6_sock *)newsk;
        inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

        newtp = tcp_sk(newsk);
        newinet = inet_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
        ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
        ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
        newsk->sk_bound_dev_if = treq->iif;

        /* Now IPv6 options... 

           First: no IPv4 options.
         */
        newinet->opt = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        /* Clone pktoptions received with SYN; the request's reference to
         * the saved skb is dropped here regardless of clone success.
         */
        newnp->pktoptions = NULL;
        if (treq->pktopts != NULL) {
                newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
                kfree_skb(treq->pktopts);
                treq->pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
        newnp->opt        = NULL;
        newnp->mcast_oif  = inet6_iif(skb);
        newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

        /* Clone native IPv6 options from listening socket (if any)

           Yes, keeping reference count would be much more clever,
           but we make one more one thing there: reattach optmem
           to newsk.
         */
        if (opt) {
                newnp->opt = ipv6_dup_options(newsk, opt);
                if (opt != np->opt)
                        sock_kfree_s(sk, opt, opt->tot_len);
        }

        /* Extension-header overhead shrinks the effective MSS below. */
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (newnp->opt)
                inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
                                                     newnp->opt->opt_flen);

        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
        tcp_initialize_rcv_mss(newsk);

        /* Mark the IPv4 fields with the IPv6-loopback sentinel; this
         * child is a pure IPv6 socket.
         */
        newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

        __inet6_hash(&tcp_hashinfo, newsk);
        inet_inherit_port(&tcp_hashinfo, sk, newsk);

        return newsk;

out_overflow:
        NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
        NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
        if (opt && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return NULL;
}
1006
1007 static int tcp_v6_checksum_init(struct sk_buff *skb)
1008 {
1009         if (skb->ip_summed == CHECKSUM_HW) {
1010                 if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1011                                   &skb->nh.ipv6h->daddr,skb->csum)) {
1012                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1013                         return 0;
1014                 }
1015         }
1016
1017         skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1018                                   &skb->nh.ipv6h->daddr, 0);
1019
1020         if (skb->len <= 76) {
1021                 return __skb_checksum_complete(skb);
1022         }
1023         return 0;
1024 }
1025
/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp;
        struct sk_buff *opt_skb = NULL;

        /* Imagine: socket is IPv6. IPv4 packet arrives,
           goes to IPv4 receive handler and backlogged.
           From backlog it always goes here. Kerboom...
           Fortunately, tcp_rcv_established and rcv_established
           handle them correctly, but it is not case with
           tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
         */

        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_do_rcv(sk, skb);

        if (sk_filter(sk, skb, 0))
                goto discard;

        /*
         *      socket locking is here for SMP purposes as backlog rcv
         *      is currently called with bh processing disabled.
         */

        /* Do Stevens' IPV6_PKTOPTIONS.

           Yes, guys, it is the only place in our code, where we
           may make it not affecting IPv4.
           The rest of code is protocol independent,
           and I do not like idea to uglify IPv4.

           Actually, all the idea behind IPV6_PKTOPTIONS
           looks not very well thought. For now we latch
           options, received in the last packet, enqueued
           by tcp. Feel free to propose better solution.
                                               --ANK (980728)
         */
        /* Keep a clone around in case the packet's ancillary data must
         * be latched for the user (handled at ipv6_pktoptions below).
         */
        if (np->rxopt.all)
                opt_skb = skb_clone(skb, GFP_ATOMIC);

        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                TCP_CHECK_TIMER(sk);
                if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
                        goto reset;
                TCP_CHECK_TIMER(sk);
                if (opt_skb)
                        goto ipv6_pktoptions;
                return 0;
        }

        /* Slow path: verify length and checksum before state handling. */
        if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) { 
                struct sock *nsk = tcp_v6_hnd_req(sk, skb);
                if (!nsk)
                        goto discard;

                /*
                 * Queue it on the new socket if the new socket is active,
                 * otherwise we just shortcircuit this and continue with
                 * the new socket..
                 */
                if(nsk != sk) {
                        if (tcp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb)
                                __kfree_skb(opt_skb);
                        return 0;
                }
        }

        TCP_CHECK_TIMER(sk);
        if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
                goto reset;
        TCP_CHECK_TIMER(sk);
        if (opt_skb)
                goto ipv6_pktoptions;
        return 0;

reset:
        tcp_v6_send_reset(skb);
discard:
        if (opt_skb)
                __kfree_skb(opt_skb);
        kfree_skb(skb);
        return 0;
csum_err:
        TCP_INC_STATS_BH(TCP_MIB_INERRS);
        goto discard;


ipv6_pktoptions:
        /* Do you ask, what is it?

           1. skb was enqueued by tcp.
           2. skb is added to tail of read queue, rather than out of order.
           3. socket is not in passive state.
           4. Finally, it really contains options, which user wants to receive.
         */
        tp = tcp_sk(sk);
        if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
            !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
                if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
                        np->mcast_oif = inet6_iif(opt_skb);
                if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
                        np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
                /* Swap the latched pktoptions; whichever skb comes out of
                 * the xchg is the stale one and is freed below.
                 */
                if (ipv6_opt_accepted(sk, opt_skb)) {
                        skb_set_owner_r(opt_skb, sk);
                        opt_skb = xchg(&np->pktoptions, opt_skb);
                } else {
                        __kfree_skb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, NULL);
                }
        }

        if (opt_skb)
                kfree_skb(opt_skb);
        return 0;
}
1155
/* Entry point for TCP segments delivered by the IPv6 stack.
 *
 * Validates the header, fills in the TCP control block, looks up the
 * owning socket and dispatches: directly via tcp_v6_do_rcv, via the
 * prequeue, or onto the socket backlog when the socket is owned by a
 * user context.  Also handles packets that match no socket and sockets
 * in TIME-WAIT.
 */
static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
{
        struct sk_buff *skb = *pskb;
        struct tcphdr *th;      
        struct sock *sk;
        int ret;

        if (skb->pkt_type != PACKET_HOST)
                goto discard_it;

        /*
         *      Count it even if it's bad.
         */
        TCP_INC_STATS_BH(TCP_MIB_INSEGS);

        /* Make sure the basic header is in the linear area. */
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;

        th = skb->h.th;

        /* Data offset smaller than the minimum header is malformed. */
        if (th->doff < sizeof(struct tcphdr)/4)
                goto bad_packet;
        if (!pskb_may_pull(skb, th->doff*4))
                goto discard_it;

        if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
             tcp_v6_checksum_init(skb)))
                goto bad_packet;

        /* Re-read th: pskb_may_pull may have reallocated the header. */
        th = skb->h.th;
        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        /* SYN and FIN each consume one sequence number. */
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                    skb->len - th->doff*4);
        TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
        TCP_SKB_CB(skb)->when = 0;
        TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
        TCP_SKB_CB(skb)->sacked = 0;

        sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
                            &skb->nh.ipv6h->daddr, ntohs(th->dest),
                            inet6_iif(skb));

        if (!sk)
                goto no_tcp_socket;

process:
        if (sk->sk_state == TCP_TIME_WAIT)
                goto do_time_wait;

        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;

        if (sk_filter(sk, skb, 0))
                goto discard_and_relse;

        skb->dev = NULL;

        /* BH lock: see the double-lock comment above tcp_v6_do_rcv. */
        bh_lock_sock(sk);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
                if (!tcp_prequeue(sk, skb))
                        ret = tcp_v6_do_rcv(sk, skb);
        } else
                sk_add_backlog(sk, skb);
        bh_unlock_sock(sk);

        sock_put(sk);
        return ret ? -1 : 0;

no_tcp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;

        /* Reset unknown-socket packets, but only if they checksum OK. */
        if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
                TCP_INC_STATS_BH(TCP_MIB_INERRS);
        } else {
                tcp_v6_send_reset(skb);
        }

discard_it:

        /*
         *      Discard frame
         */

        kfree_skb(skb);
        return 0;

discard_and_relse:
        sock_put(sk);
        goto discard_it;

do_time_wait:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                inet_twsk_put((struct inet_timewait_sock *)sk);
                goto discard_it;
        }

        if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
                TCP_INC_STATS_BH(TCP_MIB_INERRS);
                inet_twsk_put((struct inet_timewait_sock *)sk);
                goto discard_it;
        }

        switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
                                           skb, th)) {
        case TCP_TW_SYN:
        {
                /* Acceptable new SYN: hand the connection to a current
                 * listener and retire the TIME-WAIT socket.
                 */
                struct sock *sk2;

                sk2 = inet6_lookup_listener(&tcp_hashinfo,
                                            &skb->nh.ipv6h->daddr,
                                            ntohs(th->dest), inet6_iif(skb));
                if (sk2 != NULL) {
                        struct inet_timewait_sock *tw = inet_twsk(sk);
                        inet_twsk_deschedule(tw, &tcp_death_row);
                        inet_twsk_put(tw);
                        sk = sk2;
                        goto process;
                }
                /* Fall through to ACK */
        }
        case TCP_TW_ACK:
                tcp_v6_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
                goto no_tcp_socket;
        case TCP_TW_SUCCESS:;
        }
        goto discard_it;
}
1288
/* Peer timestamp recycling is not implemented on the IPv6 side yet,
 * so never record a stamp.  ("Alas, not yet..." --original author)
 */
static int tcp_v6_remember_stamp(struct sock *sk)
{
        return 0;
}
1294
/* Address-family ops for native IPv6 TCP sockets (icsk_af_ops). */
static struct inet_connection_sock_af_ops ipv6_specific = {
        .queue_xmit     =       inet6_csk_xmit,
        .send_check     =       tcp_v6_send_check,
        .rebuild_header =       inet6_sk_rebuild_header,
        .conn_request   =       tcp_v6_conn_request,
        .syn_recv_sock  =       tcp_v6_syn_recv_sock,
        .remember_stamp =       tcp_v6_remember_stamp,
        .net_header_len =       sizeof(struct ipv6hdr),

        .setsockopt     =       ipv6_setsockopt,
        .getsockopt     =       ipv6_getsockopt,
        .addr2sockaddr  =       inet6_csk_addr2sockaddr,
        .sockaddr_len   =       sizeof(struct sockaddr_in6)
};
1309
1310 /*
1311  *      TCP over IPv4 via INET6 API
1312  */
1313
1314 static struct inet_connection_sock_af_ops ipv6_mapped = {
1315         .queue_xmit     =       ip_queue_xmit,
1316         .send_check     =       tcp_v4_send_check,
1317         .rebuild_header =       inet_sk_rebuild_header,
1318         .conn_request   =       tcp_v6_conn_request,
1319         .syn_recv_sock  =       tcp_v6_syn_recv_sock,
1320         .remember_stamp =       tcp_v4_remember_stamp,
1321         .net_header_len =       sizeof(struct iphdr),
1322
1323         .setsockopt     =       ipv6_setsockopt,
1324         .getsockopt     =       ipv6_getsockopt,
1325         .addr2sockaddr  =       inet6_csk_addr2sockaddr,
1326         .sockaddr_len   =       sizeof(struct sockaddr_in6)
1327 };
1328
1329
1330
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
/* Per-socket initializer installed as tcpv6_prot.init; called when an
 * AF_INET6 SOCK_STREAM socket is created.  Always returns 0.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        /* Receive-side machinery: reorder queue, timers, prequeue. */
        skb_queue_head_init(&tp->out_of_order_queue);
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);

        /* RTO and its smoothed deviation both start at the initial value. */
        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev = TCP_TIMEOUT_INIT;

        /* So many TCP implementations out there (incorrectly) count the
         * initial SYN frame in their delayed-ACK and congestion control
         * algorithms that we must have the following bandaid to talk
         * efficiently to them.  -DaveM
         */
        tp->snd_cwnd = 2;

        /* See draft-stevens-tcpca-spec-01 for discussion of the
         * initialization of these values.
         */
        tp->snd_ssthresh = 0x7fffffff;
        tp->snd_cwnd_clamp = ~0;
        tp->mss_cache = 536;

        tp->reordering = sysctl_tcp_reordering;

        sk->sk_state = TCP_CLOSE;

        /* Native IPv6 ops; tcp_v6_syn_recv_sock switches children of
         * v4-mapped connections to ipv6_mapped instead.
         */
        icsk->icsk_af_ops = &ipv6_specific;
        icsk->icsk_ca_ops = &tcp_init_congestion_ops;
        icsk->icsk_sync_mss = tcp_sync_mss;
        sk->sk_write_space = sk_stream_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

        /* Default buffer sizes come from the TCP sysctls. */
        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];

        atomic_inc(&tcp_sockets_allocated);

        return 0;
}
1377
/* Tear down an IPv6 TCP socket: release the shared TCP state first,
 * then the IPv6-specific state.
 */
static int tcp_v6_destroy_sock(struct sock *sk)
{
        int rc;

        tcp_v4_destroy_sock(sk);
        rc = inet6_destroy_sock(sk);

        return rc;
}
1383
1384 /* Proc filesystem TCPv6 sock list dumping. */
1385 static void get_openreq6(struct seq_file *seq, 
1386                          struct sock *sk, struct request_sock *req, int i, int uid)
1387 {
1388         int ttd = req->expires - jiffies;
1389         struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1390         struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1391
1392         if (ttd < 0)
1393                 ttd = 0;
1394
1395         seq_printf(seq,
1396                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1397                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1398                    i,
1399                    src->s6_addr32[0], src->s6_addr32[1],
1400                    src->s6_addr32[2], src->s6_addr32[3],
1401                    ntohs(inet_sk(sk)->sport),
1402                    dest->s6_addr32[0], dest->s6_addr32[1],
1403                    dest->s6_addr32[2], dest->s6_addr32[3],
1404                    ntohs(inet_rsk(req)->rmt_port),
1405                    TCP_SYN_RECV,
1406                    0,0, /* could print option size, but that is af dependent. */
1407                    1,   /* timers active (only the expire timer) */  
1408                    jiffies_to_clock_t(ttd), 
1409                    req->retrans,
1410                    uid,
1411                    0,  /* non standard timer */  
1412                    0, /* open_requests have no inode */
1413                    0, req);
1414 }
1415
/* Emit one /proc/net/tcp6 row for a full (listening/established) socket. */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
        struct in6_addr *dest, *src;
        __u16 destp, srcp;
        int timer_active;
        unsigned long timer_expires;
        struct inet_sock *inet = inet_sk(sp);
        struct tcp_sock *tp = tcp_sk(sp);
        const struct inet_connection_sock *icsk = inet_csk(sp);
        struct ipv6_pinfo *np = inet6_sk(sp);

        dest  = &np->daddr;
        src   = &np->rcv_saddr;
        destp = ntohs(inet->dport);
        srcp  = ntohs(inet->sport);

        /* Encode which timer is pending: 1 = retransmit, 4 = zero-window
         * probe, 2 = sk_timer (presumably keepalive — matches the IPv4
         * proc format), 0 = none.
         */
        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                timer_active    = 1;
                timer_expires   = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active    = 4;
                timer_expires   = icsk->icsk_timeout;
        } else if (timer_pending(&sp->sk_timer)) {
                timer_active    = 2;
                timer_expires   = sp->sk_timer.expires;
        } else {
                timer_active    = 0;
                timer_expires = jiffies;
        }

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   sp->sk_state, 
                   tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
                   timer_active,
                   jiffies_to_clock_t(timer_expires - jiffies),
                   icsk->icsk_retransmits,
                   sock_i_uid(sp),
                   icsk->icsk_probes_out,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
                   icsk->icsk_rto,
                   icsk->icsk_ack.ato,
                   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
                   tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
                   );
}
1469
1470 static void get_timewait6_sock(struct seq_file *seq, 
1471                                struct inet_timewait_sock *tw, int i)
1472 {
1473         struct in6_addr *dest, *src;
1474         __u16 destp, srcp;
1475         struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1476         int ttd = tw->tw_ttd - jiffies;
1477
1478         if (ttd < 0)
1479                 ttd = 0;
1480
1481         dest = &tw6->tw_v6_daddr;
1482         src  = &tw6->tw_v6_rcv_saddr;
1483         destp = ntohs(tw->tw_dport);
1484         srcp  = ntohs(tw->tw_sport);
1485
1486         seq_printf(seq,
1487                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1488                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1489                    i,
1490                    src->s6_addr32[0], src->s6_addr32[1],
1491                    src->s6_addr32[2], src->s6_addr32[3], srcp,
1492                    dest->s6_addr32[0], dest->s6_addr32[1],
1493                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
1494                    tw->tw_substate, 0, 0,
1495                    3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1496                    atomic_read(&tw->tw_refcnt), tw);
1497 }
1498
1499 #ifdef CONFIG_PROC_FS
1500 static int tcp6_seq_show(struct seq_file *seq, void *v)
1501 {
1502         struct tcp_iter_state *st;
1503
1504         if (v == SEQ_START_TOKEN) {
1505                 seq_puts(seq,
1506                          "  sl  "
1507                          "local_address                         "
1508                          "remote_address                        "
1509                          "st tx_queue rx_queue tr tm->when retrnsmt"
1510                          "   uid  timeout inode\n");
1511                 goto out;
1512         }
1513         st = seq->private;
1514
1515         switch (st->state) {
1516         case TCP_SEQ_STATE_LISTENING:
1517         case TCP_SEQ_STATE_ESTABLISHED:
1518                 get_tcp6_sock(seq, v, st->num);
1519                 break;
1520         case TCP_SEQ_STATE_OPENREQ:
1521                 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1522                 break;
1523         case TCP_SEQ_STATE_TIME_WAIT:
1524                 get_timewait6_sock(seq, v, st->num);
1525                 break;
1526         }
1527 out:
1528         return 0;
1529 }
1530
/* fops filled in by tcp_proc_register; declared here so the afinfo
 * table can point at it.
 */
static struct file_operations tcp6_seq_fops;
/* Registration record for the /proc/net/tcp6 seq_file. */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
        .owner          = THIS_MODULE,
        .name           = "tcp6",
        .family         = AF_INET6,
        .seq_show       = tcp6_seq_show,
        .seq_fops       = &tcp6_seq_fops,
};
1539
/* Create /proc/net/tcp6; returns whatever tcp_proc_register returns. */
int __init tcp6_proc_init(void)
{
        return tcp_proc_register(&tcp6_seq_afinfo);
}
1544
/* Remove /proc/net/tcp6. */
void tcp6_proc_exit(void)
{
        tcp_proc_unregister(&tcp6_seq_afinfo);
}
1549 #endif
1550
/* struct proto for AF_INET6/SOCK_STREAM: mostly the shared TCP entry
 * points, with IPv6-specific init/destroy/connect/hash/backlog hooks.
 */
struct proto tcpv6_prot = {
        .name                   = "TCPv6",
        .owner                  = THIS_MODULE,
        .close                  = tcp_close,
        .connect                = tcp_v6_connect,
        .disconnect             = tcp_disconnect,
        .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v6_init_sock,
        .destroy                = tcp_v6_destroy_sock,
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
        .sendmsg                = tcp_sendmsg,
        .recvmsg                = tcp_recvmsg,
        .backlog_rcv            = tcp_v6_do_rcv,
        .hash                   = tcp_v6_hash,
        .unhash                 = tcp_unhash,
        .get_port               = tcp_v6_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
        .sockets_allocated      = &tcp_sockets_allocated,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .orphan_count           = &tcp_orphan_count,
        .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp6_sock),
        .twsk_prot              = &tcp6_timewait_sock_ops,
        .rsk_prot               = &tcp6_request_sock_ops,
};
1583
/* IPv6 protocol handler entry for IPPROTO_TCP (receive + ICMPv6 errors). */
static struct inet6_protocol tcpv6_protocol = {
        .handler        =       tcp_v6_rcv,
        .err_handler    =       tcp_v6_err,
        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1589
/* Socket-layer registration: SOCK_STREAM/IPPROTO_TCP over AF_INET6. */
static struct inet_protosw tcpv6_protosw = {
        .type           =       SOCK_STREAM,
        .protocol       =       IPPROTO_TCP,
        .prot           =       &tcpv6_prot,
        .ops            =       &inet6_stream_ops,
        .capability     =       -1,
        .no_check       =       0,
        .flags          =       INET_PROTOSW_PERMANENT |
                                INET_PROTOSW_ICSK,
};
1600
1601 void __init tcpv6_init(void)
1602 {
1603         /* register inet6 protocol */
1604         if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
1605                 printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
1606         inet6_register_protosw(&tcpv6_protosw);
1607 }