/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetics in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>

#include <net/checksum.h>

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
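
/*
 * Helper used by the fragmentation paths below: it picks the 32-bit
 * fragment identification written into each Fragment header.  A single
 * global counter protected by ip6_id_lock is used; it wraps around and
 * skips zero, so the id carried on the wire is never 0.
 */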
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}
static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
		struct inet6_dev *idev = ip6_dst_idev(skb->dst);

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					&ipv6_hdr(skb)->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip6_dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
{
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;

	return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
	       skb->dst->dev->mtu : dst_mtu(skb->dst);
}
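
/*
 * ip6_output() decides between the plain transmit path and the
 * fragmentation path: the packet goes through ip6_fragment() when it is
 * larger than the destination MTU and is not a GSO packet, or when the
 * route requires fragmentation of every packet (dst_allfrag).
 */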
int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}
/*
 *	xmit an sk_buff (used by TCP)
 */
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);
/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is performance critical for us)
 */
int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	*(__be32 *)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}
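
/*
 * Deliver an RA (Router Alert) marked packet to every raw socket that
 * registered interest in this alert value on ip6_ra_chain, honouring any
 * device binding.  Returns 1 when the packet was consumed by a listener,
 * 0 when normal processing should continue.
 */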
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
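
/*
 * Called for packets whose destination matches a proxied neighbour entry.
 * Returns 1 when the packet is a neighbour discovery message that must be
 * handled locally, -1 when it has to be dropped (link-local destination),
 * and 0 when normal forwarding may proceed.
 */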
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * the input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}
static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that the application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation would also be a mistake; RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb_network_header(skb) + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement hop limit
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (ipv6_devconf.proxy_ndp &&
	    pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */
		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0, skb->dev);
			goto error;
		}
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */
	hdr->hop_limit--;

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
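
/*
 * Copy the per-packet metadata (packet type, priority, protocol, dst
 * reference, mark, ...) from the original skb to a freshly built
 * fragment, so the fragment is transmitted and accounted like the
 * packet it was carved from.
 */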
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	skb_copy_secmark(to, from);
}
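
/*
 * Walk the extension-header chain and return the offset at which the
 * Fragment header has to be inserted, i.e. the length of the
 * unfragmentable part.  *nexthdr is left pointing at the nexthdr field
 * that must later be rewritten to NEXTHDR_FRAGMENT.
 */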
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
			(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	unsigned int packet_len = skb->tail - skb->network_header;

	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset + 1 <= packet_len) {
		case NEXTHDR_ROUTING:
#ifdef CONFIG_IPV6_MIP6
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
#endif

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
	}

EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);
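
/*
 * Fragment an oversized packet.  There are two paths: a fast path that
 * reuses the skbs already chained on frag_list when every piece has the
 * right geometry, and a slow path that allocates a new skb per fragment
 * and copies the data into it.  Each fragment is handed to the
 * caller-supplied output function.
 */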
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.  (This last
	 * check should be redundant, but it's free.)
	 */
	if (!np || np->pmtudisc >= IPV6_PMTUDISC_DO) {
		skb->dev = skb->dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(ip6_dst_idev(&rt->u.dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(ip6_dst_idev(&rt->u.dst),
				      IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(ip6_dst_idev(&rt->u.dst),
			      IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/* Fragment the datagram. */
	*prevhdr = NEXTHDR_FRAGMENT;

	/* Keep copying data until we run out. */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/* Allocate buffer. */
		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(ip6_dst_idev(skb->dst),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/* Set up data on packet. */
		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/* Charge the memory for the fragment to any owner
		 * it might possess.
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/* Copy the packet header into the new buffer. */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/* Build fragment header. */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/* Copy a block of the IP datagram. */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/* Put this fragment into the sending queue. */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
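
/*
 * Validate a dst cached on the socket against the current flow.
 * ip6_rt_check() returns non-zero when neither the (host) route's own
 * address nor the socket's cached address matches the flow address;
 * ip6_sk_dst_check() also compares the outgoing interface and releases
 * the cached dst on any mismatch.
 */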
static inline int ip6_rt_check(struct rt6key *rt_key,
			       struct in6_addr *fl_addr,
			       struct in6_addr *addr_cache)
{
	return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  struct flowi *fl)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in the not-connected
	 * case is not very simple. Take into account
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was a host route,
	 *    check that the cached destination is current.
	 *    If it is a network route, we still may
	 *    check its validity using the saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save the whole address now,
	 *    (because the main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so the last trick works only on connected
	 *    sockets.
	 * 2. oif should also be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
	    (fl->oif && fl->oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}
static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi *fl)
{
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	if (!((*dst)->neighbour->nud_state & NUD_VALID)) {
		struct inet6_ifaddr *ifp;
		struct flowi fl_gw;
		int redirect;

		ifp = ipv6_get_ifaddr(&fl->fl6_src, (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw, fl, sizeof(struct flowi));
			memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(sk, &fl_gw);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}
/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl);
}

EXPORT_SYMBOL_GPL(ip6_dst_lookup);
/**
 *	ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	if (sk) {
		*dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
		*dst = ip6_sk_dst_check(sk, *dst, fl);
	}

	return ip6_dst_lookup_tail(sk, dst, fl);
}

EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);
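
/*
 * UFO path: instead of fragmenting in software, queue one large skb and
 * let a NETIF_F_UFO capable device segment it.  gso_size below is the
 * payload carried by each fragment the device will emit, and the
 * fragment id is chosen here so that all pieces share it.
 */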
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing the complete
	 * UDP datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
					    sizeof(struct frag_hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path.
	 */
	kfree_skb(skb);

	return err;
}
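
/*
 * ip6_append_data() queues data on sk->sk_write_queue without sending it.
 * The first call per cork sets up the cork state (options, route, hop
 * limit, MTU); later calls keep appending.  Packets are grown up to
 * mtu/maxfraglen bytes and are only transmitted when
 * ip6_push_pending_frames() is called.
 */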
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa */
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
		      rt->u.dst.dev->mtu : dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->u.dst.nfheader_len + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail of
	 * the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;
	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
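
/*
 * Take everything queued by ip6_append_data(), chain the later skbs onto
 * the first one's frag_list, push the extension headers and the IPv6
 * header, and hand the resulting packet to netfilter/dst_output().
 */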
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	*(__be32 *)hdr = fl->fl6_flowlabel |
			 htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}
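
/*
 * Error-path counterpart of ip6_push_pending_frames(): drop everything
 * still queued on the socket and reset the cork state.
 */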
void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}