/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in network byte order (NBO)
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}
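/*
 * Note on the generator above: the counter is global (shared by all
 * flows) and deliberately skips zero on wrap, so an all-zero
 * identification field is never emitted.  htonl() keeps the value in
 * network byte order, per the "frag id should be in NBO" fix noted in
 * the changelog.
 */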
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
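/*
 * Two transmit paths above: when a cached hardware header (hh_cache)
 * exists, it is copied in front of the packet under the read lock and
 * the frame goes straight to hh_output(); otherwise the neighbour
 * entry resolves (or queues behind) the link-layer address.  With
 * neither available there is no usable route, so the packet is
 * counted as OUTNOROUTES and dropped.
 */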
/*
 *	dev_loopback_xmit for use with netfilter.
 */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}
static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb,
					NULL, newskb->dev,
					ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}
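/*
 * Multicast loopback above: if the sending host has itself joined the
 * destination group (and mc_loop is not disabled on the socket), a
 * clone of the packet is fed back through POST_ROUTING to the
 * loopback xmit helper while the original still goes out on the wire.
 * A hop limit of 0 suppresses the on-wire copy entirely.
 */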
int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}
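/*
 * Fragmentation is needed in two cases: the packet exceeds the path
 * MTU and is not a GSO packet (GSO packets are segmented later, by
 * the device or the software GSO code), or the route carries the
 * "allfrag" feature, typically set when a Packet Too Big message
 * reported an MTU below the IPv6 minimum of 1280 bytes, in which case
 * every packet on the route gets a fragment header.
 */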
/*
 *	xmit an sk_buff (used by TCP)
 */
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now).
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			kfree_skb(skb);
			skb = skb2;
			if (skb == NULL) {
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
				return -ENOBUFS;
			}
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));

	/* Fill in the IPv6 header */
	hlimit = np ? np->hop_limit : -1;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = np ? np->tclass : 0;
	if (tclass < 0)
		tclass = 0;

	*(u32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) {
		IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
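/*
 * Hop limit selection order above: the per-socket value wins when
 * set, then the per-route RTAX_HOPLIMIT metric, and finally the
 * interface default from ipv6_get_hoplimit().  The first header word
 * packs version 6, the traffic class, and the flow label into a
 * single 32-bit write.
 */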
/*
 *	To avoid extra problems ND packets are sent through this
 *	routine.  It is code duplication, but we really want to avoid
 *	extra checks, since ipv6_build_header is used by TCP (which
 *	is performance critical for us).
 */
int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	*(u32 *)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
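/*
 * Delivery pattern above: every matching router-alert socket except
 * the last gets a clone, and the final one consumes the original skb,
 * which saves one clone in the common single-listener case.  The
 * return value tells the caller whether anyone took ownership of the
 * packet.
 */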
static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT do any processing on RA packets; we push them
	 *	to user level AS IS, without any warranty that an
	 *	application will be able to interpret them.  The reason
	 *	is that we cannot do anything clever here:
	 *
	 *	We are not the end node, so if the packet contains
	 *	AH/ESP we cannot do anything.  Defragmentation would
	 *	also be a mistake; RA packets must not be fragmented,
	 *	because there is no guarantee that different fragments
	 *	will follow the same path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* The IPv6 specs say nothing about it, but it is clear that we
	   cannot send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same:
		 *	send a redirect.
		 */
		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
						|IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling hops number delayed to point after skb COW */
	hdr->hop_limit--;

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
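/*
 * Redirect policy above: a redirect is only sent when the packet
 * leaves on the interface it arrived on, the next hop is resolved,
 * and the packet is not source routed; the advertised target is the
 * gateway for indirect routes, otherwise the final destination.  The
 * source-address type check is the security-critical counterpart:
 * packets sourced from multicast, loopback, or link-local addresses
 * must never be forwarded.
 */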
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is the same as for the pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(to->nfct_reasm);
	to->nfct_reasm = from->nfct_reasm;
	nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
	skb_copy_secmark(to, from);
}
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(skb->nh.ipv6h + 1);
	unsigned int packet_len = skb->tail - skb->nh.raw;
	int found_rhdr = 0;

	*nexthdr = &skb->nh.ipv6h->nexthdr;

	while (offset + 1 <= packet_len) {
		switch (**nexthdr) {
		case NEXTHDR_HOP:
		case NEXTHDR_ROUTING:
		case NEXTHDR_DEST:
			if (**nexthdr == NEXTHDR_ROUTING)
				found_rhdr = 1;
			if (**nexthdr == NEXTHDR_DEST && found_rhdr)
				return offset;
			offset += ipv6_optlen(exthdr);
			*nexthdr = &exthdr->nexthdr;
			exthdr = (struct ipv6_opt_hdr *)(skb->nh.raw + offset);
			break;
		default:
			return offset;
		}
	}

	return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);
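/*
 * The walk above isolates the "unfragmentable part" of the packet:
 * hop-by-hop, routing, and any destination options header that may
 * still be followed by a routing header are skipped; a destination
 * options header seen after a routing header, or any other header
 * type, ends the walk.  On return, the offset is where the fragment
 * header must be inserted and *nexthdr points at the preceding
 * next-header field that will be overwritten with NEXTHDR_FRAGMENT.
 */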
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	u32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst);
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);
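	/*
	 * The usable payload per fragment is the link MTU minus the
	 * unfragmentable header part (hlen) and the 8-byte fragment
	 * header.  For example, on a standard 1500-byte Ethernet MTU
	 * with no extension headers, hlen is the 40-byte IPv6 header,
	 * leaving 1500 - 40 - 8 = 1452 bytes of fragmentable payload
	 * per fragment.
	 */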
	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;

		/* BUILD HEADER */
		tmp_hdr = kmalloc(hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		*prevhdr = NEXTHDR_FRAGMENT;
		memcpy(tmp_hdr, skb->nh.raw, hlen);
		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, tmp_hdr, hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}
slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */
	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/* Allocate buffer. */
		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/* Set up data on packet */
		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		frag->nh.raw = frag->data;
		fh = (struct frag_hdr *)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/* Charge the memory for the fragment to any owner
		 * it might possess.
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/* Copy the packet header into the new buffer. */
		memcpy(frag->nh.raw, skb->data, hlen);

		/* Build fragment header. */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/* Copy a block of the IP datagram. */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/* Put this fragment into the sending queue. */
		IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);

		err = output(frag);
		if (err)
			goto fail;
	}
	kfree_skb(skb);
	IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}
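/*
 * ip6_fragment() thus has two strategies: the fast path reuses an
 * already-built frag_list (geometry permitting) and only splices a
 * fragment header into each piece, while the slow path above
 * allocates fresh skbs and copies the payload out block by block.
 * Both paths share one identification value across all fragments of
 * the datagram, as reassembly requires.
 */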
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	int err = 0;

	*dst = NULL;
	if (sk) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		*dst = sk_dst_check(sk, np->dst_cookie);
		if (*dst) {
			struct rt6_info *rt = (struct rt6_info *)*dst;

			/* Checking route validity in the not-connected
			 * case is not very simple.  Take into account
			 * that we do not support routing by source, TOS,
			 * or MSG_DONTROUTE.		--ANK (980726)
			 *
			 * 1. If the route was a host route, check that
			 *    the cached destination is current.
			 *    If it is a network route, we may still
			 *    check its validity using the saved pointer
			 *    to the last used address: daddr_cache.
			 *    We do not want to save the whole address now
			 *    (because the main consumer of this service
			 *    is TCP, which does not have this problem),
			 *    so this last trick works only on connected
			 *    sockets.
			 * 2. oif must also be the same.
			 */
			if (((rt->rt6i_dst.plen != 128 ||
			      !ipv6_addr_equal(&fl->fl6_dst,
					       &rt->rt6i_dst.addr))
			     && (np->daddr_cache == NULL ||
				 !ipv6_addr_equal(&fl->fl6_dst,
						  np->daddr_cache)))
			    || (fl->oif && fl->oif != (*dst)->dev->ifindex)) {
				dst_release(*dst);
				*dst = NULL;
			}
		}
	}

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
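/*
 * The cached-route validation above is deliberately conservative: a
 * host route (plen == 128) must match the flow's destination exactly,
 * a network route is accepted only when daddr_cache proves the same
 * destination was used last time (effectively connected sockets
 * only), and the output interface must agree.  Anything else forces a
 * fresh ip6_route_output() lookup.
 */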
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by the network
	 * device, so create one single skb packet containing the complete
	 * udp datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for the hardware header */
		skb_reserve(skb, hh_len);

		/* create space for the UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize the network header pointer */
		skb->nh.raw = skb->data;

		/* initialize the protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_HW;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
					    sizeof(struct frag_hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path.
	 */
	kfree_skb(skb);

	return err;
}
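/*
 * UFO path summary: instead of fragmenting in software, the whole
 * datagram is accumulated into one skb whose gso_size tells the
 * device (or the software GSO fallback) how large each on-wire
 * fragment may be; the fragment identification is chosen once here so
 * every emitted fragment shares it.
 */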
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags & MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa */
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
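	/*
	 * Example of the geometry above, assuming mtu 1500 and no
	 * destination options (fragheaderlen = 40): the fragmentable
	 * part is rounded down to a multiple of 8, (1500 - 40) & ~7 =
	 * 1456, and after the 8-byte fragment header a non-final
	 * fragment may carry 1456 - 8 = 1448 payload bytes; maxfraglen
	 * = 1488 is that limit expressed as a total packet length
	 * (40 + 1448).
	 */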
	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu - exthdrlen);
			return -EMSGSIZE;
		}
	}
	/*
	 * Let's try using as much space as possible.
	 * Use MTU if the total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;
	while (length > 0) {
		/* Check if the remaining data fits into the current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features & NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at the tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for the fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

			/*
			 * Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}
		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features & NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
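/*
 * Append strategy above: without scatter-gather support the data is
 * copied into the skb's linear area; with NETIF_F_SG it is placed in
 * per-socket pages (sk_sndmsg_page) referenced as paged fragments,
 * sharing a partially-filled page across calls to avoid waste.
 */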
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to the ip header from the ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb->nh.ipv6h = hdr = (struct ipv6hdr *) skb_push(skb, sizeof(struct ipv6hdr));

	*(u32 *)hdr = fl->fl6_flowlabel |
		      htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev,
		      dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}
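/*
 * Note the payload_len handling above: when the corked data exceeds
 * IPV6_MAXPLEN the 16-bit payload length is set to 0, which is only
 * valid on the wire together with a Jumbo Payload hop-by-hop option
 * (RFC 2675), presumably supplied via the cork's txoptions.
 */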
void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}