/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
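/*
 * ipv6_select_ident() below hands out the 32-bit Identification values
 * used in IPv6 Fragment headers: one global counter shared by all flows,
 * serialized by a spinlock, stored in network byte order (see the
 * "frag id should be in NBO" changelog entry above), and wrapping from
 * 0xffffffff back to 1 so that zero is never used.
 */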
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}
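/*
 * ip6_output_finish() is the last step before the device: if the dst has
 * a cached hardware header (hh_cache), it is copied in front of the
 * packet under hh_lock and the cached output method is called; otherwise
 * the neighbour's output routine resolves the link-layer address. With
 * neither present the packet cannot be sent and is counted under
 * OUTNOROUTES.
 */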
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}
static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev, ip6_output_finish);
}
int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}
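/*
 * Fragmentation policy: ip6_output() hands the packet to ip6_fragment()
 * when it exceeds the route MTU (unless UFO hardware segmentation is in
 * play, i.e. ufo_size is set) or when dst_allfrag() is true, which marks
 * routes whose path MTU fell below the IPv6 minimum of 1280 bytes so a
 * Fragment header must accompany every packet (per RFC 2460).
 */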
/*
 *	xmit an sk_buff (used by TCP)
 */
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = sk ? inet6_sk(sk) : NULL;
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now);
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			kfree_skb(skb);
			skb = skb2;
			if (skb == NULL) {
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
				return -ENOBUFS;
			}
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */
	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;
	*(u32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;
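	/*
	 * The first 32 bits of the IPv6 header, built in one store:
	 * version (4 bits, always 6), traffic class (8 bits, shifted
	 * into bits 20-27 before htonl), and the 20-bit flow label,
	 * which fl6_flowlabel already holds in network byte order.
	 * With tclass 0 and no flow label this is plain 0x60000000.
	 */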
	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok) {
		IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = skb->dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is performance critical for us).
 */
int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	*(u32*)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}
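/*
 * ip6_call_ra_chain() delivers a copy of the packet to every raw socket
 * that registered for Router Alert packets with a matching option value
 * (sel) and interface binding; it returns 1 when the last matching
 * socket consumed the original skb.
 */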
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be a mistake; RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
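		/*
		 * opt->ra is the offset of the Router Alert option within
		 * the hop-by-hop header; ptr[2] and ptr[3] are the two
		 * bytes of its value field, combined below from network
		 * byte order into a host-order selector.
		 */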
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */
		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr*)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr) & (IPV6_ADDR_MULTICAST |
						  IPV6_ADDR_LOOPBACK |
						  IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling hops number delayed to point after skb COW */
	hdr->hop_limit--;

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
	goto drop;

drop:
	kfree_skb(skb);
	return -EINVAL;
}
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is same as pre-frag packet */
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
}
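/*
 * ip6_find_1stfragopt() walks the extension headers that must precede a
 * Fragment header (hop-by-hop, routing, and any destination options
 * header seen before a routing header) and returns the offset at which
 * the Fragment header has to be inserted; *nexthdr is left pointing at
 * the nexthdr byte that will be patched to NEXTHDR_FRAGMENT.
 */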
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1);
	unsigned int packet_len = skb->tail - skb->nh.raw;
	int found_rhdr = 0;
	*nexthdr = &skb->nh.ipv6h->nexthdr;

	while (offset + 1 <= packet_len) {
		switch (**nexthdr) {
		case NEXTHDR_HOP:
		case NEXTHDR_ROUTING:
		case NEXTHDR_DEST:
			if (**nexthdr == NEXTHDR_ROUTING) found_rhdr = 1;
			if (**nexthdr == NEXTHDR_DEST && found_rhdr) return offset;
			offset += ipv6_optlen(exthdr);
			*nexthdr = &exthdr->nexthdr;
			exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
			break;
		default:
			return offset;
		}
	}

	return offset;
}
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb->dst;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	u32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst) - hlen - sizeof(struct frag_hdr);
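	/*
	 * Illustrative example: with a 1500-byte route MTU and a bare
	 * 40-byte IPv6 header (hlen == 40), mtu ends up as
	 * 1500 - 40 - 8 = 1452 payload bytes per fragment; the slow path
	 * below additionally rounds that down to a multiple of 8 (1448)
	 * for every fragment except the last.
	 */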
	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */
		tmp_hdr = kmalloc(hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		*prevhdr = NEXTHDR_FRAGMENT;
		memcpy(tmp_hdr, skb->nh.raw, hlen);
		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));
		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, tmp_hdr, hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}
slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}
		/*
		 *	Set up data on packet
		 */
		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		frag->nh.raw = frag->data;
		fh = (struct frag_hdr*)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/* Charge the memory for the fragment to any owner
		   it might possess */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/* Copy the packet header into the new buffer. */
		memcpy(frag->nh.raw, skb->data, hlen);

		/* Build fragment header. */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/* Copy a block of the IP datagram. */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/* Put this fragment into the sending queue. */
		IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);

		err = output(frag);
		if (err)
			goto fail;
	}
	kfree_skb(skb);
	IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	int err = 0;

	*dst = NULL;
	if (sk) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		*dst = sk_dst_check(sk, np->dst_cookie);
		if (*dst) {
			struct rt6_info *rt = (struct rt6_info*)*dst;

			/* Yes, checking route validity in the non-connected
			   case is not very simple. Take into account that
			   we do not support routing by source, TOS, and
			   MSG_DONTROUTE		--ANK (980726)

			   1. If route was host route, check that
			      cached destination is current.
			      If it is network route, we still may
			      check its validity using saved pointer
			      to the last used address: daddr_cache.
			      We do not want to save whole address now,
			      (because main consumer of this service
			       is tcp, which does not have this problem),
			      so that the last trick works only on connected
			      sockets.
			   2. oif also should be the same.
			 */
			if (((rt->rt6i_dst.plen != 128 ||
			      !ipv6_addr_equal(&fl->fl6_dst, &rt->rt6i_dst.addr))
			     && (np->daddr_cache == NULL ||
				 !ipv6_addr_equal(&fl->fl6_dst, np->daddr_cache)))
			    || (fl->oif && fl->oif != (*dst)->dev->ifindex)) {
				dst_release(*dst);
				*dst = NULL;
			}
		}
	}

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}
inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb->nh.raw = skb->data;

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_HW;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) -
					    sizeof(struct frag_hdr);
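		/*
		 * Illustrative note: ufo_size is the payload each
		 * on-the-wire fragment may carry, i.e. the MTU minus the
		 * IPv6 + extension headers (fragheaderlen) and minus the
		 * 8-byte Fragment header; e.g. 1500 - 40 - 8 = 1452.
		 */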
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path
	 */
	kfree_skb(skb);

	return err;
}
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above -- miyazawa */
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
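	/*
	 * Illustrative example: with mtu 1500 and no destination options,
	 * fragheaderlen = 40, so maxfraglen = ((1500 - 40) & ~7) + 40 - 8
	 * = 1488: the largest packet length whose payload stays 8-byte
	 * aligned and still fits in the MTU once the 8-byte Fragment
	 * header is inserted (1488 + 8 = 1496 <= 1500).
	 */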
	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		if (ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					fragheaderlen, transhdrlen, mtu, flags))
			goto error;

		return 0;
	}
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/* Fill in the control structures */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

			/* Find where to start putting bytes */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/* Put the packet on the pending queue */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}
		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;
				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}
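	/*
	 * Every queued skb is now chained onto the head skb's frag_list
	 * and stripped down to payload; the IPv6 header and any extension
	 * headers are pushed once onto the head skb below and the whole
	 * train goes to dst_output() as one packet.
	 */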
	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr));

	*(u32*)hdr = fl->fl6_flowlabel |
		     htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}
void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}