/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/checksum.h>

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
static int ip6_output2(struct sk_buff *skb)
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);

		IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev, ip6_output_finish);
int ip6_output(struct sk_buff *skb)
	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);

	return ip6_output2(skb);
/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
	struct ipv6_pinfo *np = sk ? inet6_sk(sk) : NULL;
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	u8 proto = fl->proto;
	int seg_len = skb->len;

		/* First: extension headers may take a lot of space (~8K for now);
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
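		/* Editorial note (illustrative figures, not from the original
		 * source): on a typical Ethernet device LL_RESERVED_SPACE()
		 * rounds the 14-byte link-layer header up to 16, so head_room
		 * comes to opt->opt_nflen + opt->opt_flen + 40 + 16 bytes;
		 * exact values depend on the device and the options supplied.
		 */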
	if (skb_headroom(skb) < head_room) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
		skb_set_owner_w(skb, sk);

		ipv6_push_frag_opts(skb, opt, &proto);
		ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

		hlimit = np->hop_limit;
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
		hlimit = ipv6_get_hoplimit(dst->dev);

	*(u32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;
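	/* Editorial note: the first 32-bit word of the IPv6 header packs the
	 * version (4 bits), traffic class (8 bits) and flow label (20 bits).
	 * 0x60000000 sets version 6, tclass lands in bits 20-27, and
	 * fl->fl6_flowlabel (kept in a form that can be OR'ed directly onto
	 * the big-endian word) supplies the low 20 bits.
	 */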
	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	if ((skb->len <= mtu) || ipfragok) {
		IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,

		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is performance critical for us).
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	struct ipv6_pinfo *np = inet6_sk(sk);

	skb->protocol = htons(ETH_P_IPV6);

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));

	*(u32*)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
					rawv6_rcv(last, skb2);

		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);

	read_unlock(&ip6_ra_lock);
static inline int ip6_forward_finish(struct sk_buff *skb)
	return dst_output(skb);

int ip6_forward(struct sk_buff *skb)
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);

	skb->ip_summed = CHECKSUM_NONE;
	/*
	 *	We do not do any processing on RA packets; we push them to user
	 *	level AS IS, without any warranty that the application will be
	 *	able to interpret them. The reason is that we cannot do anything
	 *	clever here.
	 *
	 *	We are not the end node, so if the packet contains AH/ESP we
	 *	cannot do anything. Defragmentation would also be a mistake:
	 *	RA packets cannot be fragmented, because there is no warranty
	 *	that different fragments will travel along one path. --ANK
	 */
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
	/*
	 *	check and decrement hop limit
	 */
	if (hdr->hop_limit <= 1) {
		/* Force the OUTPUT device to be used as the source address */
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr*)&n->primary_key;
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
						|IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */

	if (skb->len > dst_mtu(dst)) {
		/* Again, force the OUTPUT device to be used as the source address */
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);

	/* Mangling the hop count is delayed until after the skb COW */

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish);

	IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is the same as the pre-fragmentation packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(to->nfct_reasm);
	to->nfct_reasm = from->nfct_reasm;
	nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
}
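/*
 * Editorial note on the helper below: ip6_find_1stfragopt() walks the
 * extension-header chain and returns the length of the "unfragmentable"
 * part of the packet (the IPv6 header plus the extension headers that must
 * precede a fragment header), i.e. the offset at which a fragment header
 * should be inserted.  On return, *nexthdr points at the nexthdr byte that
 * the caller rewrites to NEXTHDR_FRAGMENT.
 */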
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1);
	unsigned int packet_len = skb->tail - skb->nh.raw;

	*nexthdr = &skb->nh.ipv6h->nexthdr;

	while (offset + 1 <= packet_len) {

		case NEXTHDR_ROUTING:
			if (**nexthdr == NEXTHDR_ROUTING) found_rhdr = 1;
			if (**nexthdr == NEXTHDR_DEST && found_rhdr) return offset;
			offset += ipv6_optlen(exthdr);
			*nexthdr = &exthdr->nexthdr;
			exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb->dst;
	struct ipv6hdr *tmp_hdr;
	unsigned int mtu, hlen, left, len;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	hlen = ip6_find_1stfragopt(skb, &prevhdr);

	mtu = dst_mtu(&rt->u.dst) - hlen - sizeof(struct frag_hdr);
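	/* Editorial note (illustrative figures, not from the original source):
	 * 'mtu' here is the per-fragment payload budget.  With a 1500-byte
	 * path MTU and only the basic 40-byte IPv6 header (hlen == 40), each
	 * fragment may carry at most 1500 - 40 - 8 = 1452 bytes once the
	 * 8-byte fragment header is accounted for.
	 */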
	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)

			/* Partially cloned skb? */
			if (skb_shared(frag))

				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;

		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;

		tmp_hdr = kmalloc(hlen, GFP_ATOMIC);
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);

		*prevhdr = NEXTHDR_FRAGMENT;
		memcpy(tmp_hdr, skb->nh.raw, hlen);
		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));
			/* Prepare the header of the next frame
			 * before the previous one goes down. */
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, tmp_hdr, hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
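				/* Editorial note: in the IPv6 fragment header the
				 * offset field counts 8-octet units in its upper 13
				 * bits, with the lowest bit used as the M (more
				 * fragments) flag.  Because 'offset' is always a
				 * multiple of 8 bytes here, storing the byte offset
				 * directly is equivalent to (offset / 8) << 3, so
				 * the low three bits stay clear and IP6_MF can
				 * simply be OR'ed in for every fragment but the last.
				 */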
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);

		IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);

		IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		frag->nh.raw = frag->data;
		fh = (struct frag_hdr*)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/*
		 *	Charge the memory for the fragment to any owner
		 */
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(frag->nh.raw, skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))

		fh->frag_off = htons(offset);
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

		/*
		 *	Put this fragment into the sending queue.
		 */

		IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);

	IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);

	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
		struct ipv6_pinfo *np = inet6_sk(sk);
			*dst = sk_dst_check(sk, np->dst_cookie);
				struct rt6_info *rt = (struct rt6_info*)*dst;

				/* Yes, checking route validity in the unconnected
				   case is not very simple. Take into account that
				   we do not support routing by source, TOS, and
				   MSG_DONTROUTE		--ANK (980726)

				   1. If the route was a host route, check that the
				      cached destination is still current.
				      If it is a network route, we can still check
				      its validity using the saved pointer to the
				      last used address: daddr_cache.
				      We do not want to save the whole address now
				      (because the main consumer of this service
				      is TCP, which does not have this problem),
				      so the last trick works only on connected
				      sockets.
				   2. oif must also be the same.
				 */

				if (((rt->rt6i_dst.plen != 128 ||
				      !ipv6_addr_equal(&fl->fl6_dst, &rt->rt6i_dst.addr))
				     && (np->daddr_cache == NULL ||
					 !ipv6_addr_equal(&fl->fl6_dst, np->daddr_cache)))
				    || (fl->oif && fl->oif != (*dst)->dev->ifindex)) {

		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
			goto out_err_release;

EXPORT_SYMBOL_GPL(ip6_dst_lookup);
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)

	/* There is support for UDP large send offload by the network
	 * device, so create one single skb packet containing the complete
	 * UDP datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		/* reserve space for the hardware header */
		skb_reserve(skb, hh_len);

		/* create space for the UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize the network header pointer */
		skb->nh.raw = skb->data;

		/* initialize the protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_HW;

		sk->sk_sndmsg_off = 0;

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) -
					    sizeof(struct frag_hdr);
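		/* Editorial note (illustrative, assuming a 1500-byte MTU and no
		 * extension headers): ufo_size is the payload the device should
		 * place in each fragment it generates, e.g. 1500 - 40 - 8 = 1452
		 * bytes, leaving room for the IPv6 header and the per-fragment
		 * fragment header.
		 */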
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

	/* There is not enough support to do UDP LSO,
	 * so follow the normal path.
	 */
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (skb_queue_empty(&sk->sk_write_queue)) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
				if (unlikely(np->cork.opt == NULL))
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above -- miyazawa */
		dst_hold(&rt->u.dst);
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		transhdrlen += exthdrlen;
		if (inet->cork.flags & IPCORK_OPT)
		mtu = inet->cork.fragsize;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
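	/* Editorial note (illustrative figures, not from the original source):
	 * maxfraglen is the largest header-plus-payload size a queued skb may
	 * reach so that, once the 8-byte fragment header is inserted, every
	 * fragment still fits the MTU with an 8-byte aligned payload.  With
	 * mtu = 1500 and fragheaderlen = 40 (no destination options):
	 * ((1500 - 40) & ~7) + 40 - 8 = 1456 + 32 = 1488.
	 */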
	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu - exthdrlen);

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if the total length of the message fits into the MTU.
	 * Otherwise, we need to reserve the fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail of
	 * the buffer to the new fragment when we split the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 */
	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		if (ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					fragheaderlen, transhdrlen, mtu, flags))

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)

		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
			copy = maxfraglen - skb->len;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;

			/* There's no room in the current skb */
				fraggap = skb_prev->len - maxfraglen;
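			/* Editorial note: fraggap is the tail of the previous skb
			 * that sticks out past the 8-byte aligned maxfraglen
			 * boundary; it is moved (and its checksum transferred)
			 * into the new fragment below, so every non-final
			 * fragment carries a multiple of 8 bytes of payload.
			 */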
			/*
			 * If the remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features & NETIF_F_SG))
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at the tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for the fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);
				skb = sock_alloc_send_skb(sk,
						(flags & MSG_DONTWAIT), &err);
				if (atomic_read(&sk->sk_wmem_alloc) <=
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
				if (unlikely(skb == NULL))

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;

			/* reserve for fragmentation */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
				skb_trim(skb_prev, maxfraglen);

			copy = datalen - transhdrlen - fraggap;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {

			length -= datalen - fraggap;
			csummode = CHECKSUM_NONE;

			/*
			 *	Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);

		if (!(rt->u.dst.dev->features & NETIF_F_SG)) {
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);

			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
				page = alloc_pages(sk->sk_allocation, 0);
					sk->sk_sndmsg_page = page;
					sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

			if (getfrag(from, page_address(frag->page) + frag->page_offset + frag->size, offset, copy, skb->len, skb) < 0) {

			sk->sk_sndmsg_off += copy;

			skb->data_len += copy;

	inet->cork.length -= length;
	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
int ip6_push_pending_frames(struct sock *sk)
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr));

	*(u32*)hdr = fl->fl6_flowlabel |
		     htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
		err = np->recverr ? net_xmit_errno(err) : 0;

	inet->cork.flags &= ~IPCORK_OPT;
		kfree(np->cork.opt);
		np->cork.opt = NULL;
		dst_release(&np->cork.rt->u.dst);
	inet->cork.flags &= ~IPCORK_ALLFRAG;
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
void ip6_flush_pending_frames(struct sock *sk)
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);

	inet->cork.flags &= ~IPCORK_OPT;
		kfree(np->cork.opt);
		np->cork.opt = NULL;
		dst_release(&np->cork.rt->u.dst);
	inet->cork.flags &= ~IPCORK_ALLFRAG;
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));