 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *	Mike Kilburn	:	htons() missing in ip_build_xmit.
 *	Bradford Johnson:	Fix faulty handling of some frames when
 *				no route is found.
 *	Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *				(in case the packet is not accepted by
 *				output firewall rules)
 *	Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov:	use new route cache
 *	Andi Kleen	:	Fix broken PMTU recovery and remove
 *				some redundant tests.
 *	Vitaly E. Lavrov:	Transparent proxy revived after a year-long coma.
 *	Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *	Andi Kleen	:	Split fast and slow ip_build_xmit path
 *				for decreased register pressure on x86
 *				and more readability.
 *	Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *				silently drop skb instead of failing with -EPERM.
 *	Detlev Wengorz	:	Copy protocol for fragments.
 *	Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *				datagrams.
 *	Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/config.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl = IPDEFTTL;

static int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
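/*
 * Illustrative sketch (not built): any code that edits an already
 * checksummed header must recompute iph->check.  In the full source,
 * ip_send_check() zeroes iph->check before calling ip_fast_csum(); the
 * checksum must be computed over ihl 32-bit words with the check field
 * cleared, or the result is wrong.  The helper name below is
 * hypothetical.
 */
#if 0
static void example_refresh_check(struct iphdr *iph)
{
	iph->ttl--;		/* some in-place edit of the header */
	ip_send_check(iph);	/* recompute over iph->ihl 32-bit words */
}
#endif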
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
	int ttl = inet->uc_ttl;

		ttl = dst_metric(dst, RTAX_HOPLIMIT);

/*
 *		Add an IP header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  u32 saddr, u32 daddr, struct ip_options *opt)
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;

	/* Build the IP header. */
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

	iph->tos = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	iph->ttl = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, daddr, rt, 0);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
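/*
 * Illustrative sketch (not built): ip_build_and_send_pkt() is meant for
 * callers that already hold a routed skb with the transport header
 * filled in (in this kernel, e.g. TCP's SYN-ACK path).  The caller only
 * supplies addresses and options; the function prepends the IP header
 * and hands the packet to the NF_IP_LOCAL_OUT hook.  All names below
 * except ip_build_and_send_pkt() itself are hypothetical.
 */
#if 0
static int example_send_prebuilt(struct sock *sk, struct sk_buff *skb,
				 struct rtable *rt, struct ip_options *opt)
{
	/* skb->data points at the transport header; the route attached
	 * via skb->dst supplies the addresses used in the IP header. */
	skb->dst = dst_clone(&rt->u.dst);
	return ip_build_and_send_pkt(skb, sk, rt->rt_src, rt->rt_dst, opt);
}
#endif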
static inline int ip_finish_output2(struct sk_buff *skb)
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
			skb_set_owner_w(skb2, skb->sk);

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");

static inline int ip_finish_output(struct sk_buff *skb)
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb->dst->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);

	if (skb->len > dst_mtu(skb->dst) &&
	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
		return ip_fragment(skb, ip_finish_output2);

	return ip_finish_output2(skb);

int ip_mc_output(struct sk_buff *skb)
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable *)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					ip_dev_loopback_xmit);

		/* Multicasts with ttl 0 must not go beyond the host */

		if (skb->nh.iph->ttl == 0) {

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));

int ip_output(struct sk_buff *skb)
	struct net_device *dev = skb->dst->dev;

	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));

int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rt = (struct rtable *)skb->dst;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);

		/* Use the correct destination address if we have options. */

		struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .saddr = inet->saddr,
					    .tos = RT_CONN_FLAGS(sk) } },
				    .proto = sk->sk_protocol,
					{ .sport = inet->sport,
					  .dport = inet->dport } } };

		/* If this fails, the retransmit mechanism of the transport
		 * layer will keep trying until the route appears or the
		 * connection times out.
		 */
		if (ip_route_output_flow(&rt, &fl, sk, 0))

		sk_setup_caps(sk, &rt->u.dst);

	skb->dst = dst_clone(&rt->u.dst);

	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)

	/* OK, we know where to send it; allocate and build the IP header. */
	iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	*((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	iph->ttl = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;

	/* The transport layer already set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);

	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->tso_segs ?: 1) - 1);

	/* Add an IP checksum. */

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,

	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);

	return -EHOSTUNREACH;
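/*
 * Illustrative sketch (not built): ip_queue_xmit() is the normal entry
 * point for connected transport protocols (TCP uses it for every
 * segment in this kernel).  The skb needs a valid skb->sk and the
 * transport header already in place; routing, IP header construction
 * and the DF decision happen here.  example_xmit_segment() is a
 * hypothetical caller.
 */
#if 0
static int example_xmit_segment(struct sock *sk, struct sk_buff *skb)
{
	skb->sk = sk;			/* ip_queue_xmit() routes via skb->sk */
	return ip_queue_xmit(skb, 0);	/* ipfragok == 0: let
					 * ip_dont_fragment() decide whether
					 * to set DF on the header */
}
#endif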
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is the same as for the pre-fragmentation packet. */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each consisting of an IP header plus a block of
 *	the data of the original IP datagram) that will still fit in a
 *	single device frame, and queue such a frame for sending.
 */
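/*
 * Worked example (illustrative, not built): fragment offsets travel in
 * iph->frag_off in units of 8 bytes, which is why the data carried by
 * every non-final fragment must be a multiple of 8.  With a 1500-byte
 * MTU and a 20-byte header, each fragment carries at most
 * (1500 - 20) & ~7 = 1480 data bytes, so a 3980-byte payload splits at
 * byte offsets 0, 1480 and 2960, encoded as 0, 185 and 370, the first
 * two with IP_MF set.  The numbers are assumptions chosen for the
 * example.
 */
#if 0
static void example_frag_offsets(void)
{
	unsigned int mtu = 1500, hlen = 20, payload = 3980;
	unsigned int step = (mtu - hlen) & ~7;	/* 1480 */
	unsigned int off;

	for (off = 0; off < payload; off += step)
		printk(KERN_DEBUG "frag at byte %u: frag_off=%u MF=%d\n",
		       off, off >> 3, (off + step) < payload);
}
#endif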
static int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	__be16 not_last_frag;
	struct rtable *rt = (struct rtable *)skb->dst;

	/*
	 *	Point into the IP datagram header.
	 */

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));

	/*
	 *	Setup starting values.
	 */

	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it.  First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited.  In that case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first
	 * bad fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)

			/* Partially cloned skb? */
			if (skb_shared(frag))

				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;

		/* Everything is OK.  Generate! */

		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);

			/* Prepare the header of the next frame,
			 * before the previous one goes down. */
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, iph, hlen);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset >> 3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */

		IP_INC_STATS(IPSTATS_MIB_FRAGOKS);

		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);

	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

#ifdef CONFIG_BRIDGE_NETFILTER
	/* For bridged IP traffic encapsulated inside e.g. a VLAN header,
	 * we need to make room for the encapsulating header. */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
	mtu -= nf_bridge_pad(skb);
	ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

		/* IF: it doesn't fit, use 'mtu' - the data space left */
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */

		if ((skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb2->nh.raw = skb2->data;
		skb2->h.raw = skb2->data + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 */
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb2->nh.raw, skb->data, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))

		/*
		 *	Fill in the new header fields.
		 */
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick.  Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and do it ONCE on the initial
		 * skb, so that all the following fragments will inherit the
		 * fixed options.
		 */
			ip_options_fragment(skb);

		/*
		 *	Added AC: If we are fragmenting a fragment that's not
		 *	the last fragment then keep the MF bit set on each
		 *	produced fragment.
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);

		/*
		 *	Put this fragment into the sending queue.
		 */
		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);

		iph->tot_len = htons(len + hlen);

	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);

	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);

ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_HW) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
		unsigned int csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
		skb->csum = csum_block_add(skb->csum, csum, odd);
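/*
 * Illustrative sketch (not built): the getfrag callback contract used
 * by ip_append_data().  A callback copies "len" bytes starting at
 * "offset" of its private "from" cookie into "to" and, when the skb is
 * not checksummed in hardware, folds the partial checksum of the
 * copied block into skb->csum at block offset "odd".  A minimal
 * variant for data already in kernel memory (hypothetical helper,
 * mirroring ip_reply_glue_bits() further below):
 */
#if 0
static int example_getfrag(void *from, char *to, int offset, int len,
			   int odd, struct sk_buff *skb)
{
	unsigned int csum;

	csum = csum_partial_copy_nocheck((char *)from + offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
#endif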
static inline unsigned int
csum_page(struct page *page, int offset, int copy)
	csum = csum_partial(kaddr + offset, copy, 0);

static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)

	/* There is support for UDP fragmentation offload by the network
	 * device, so create one single skb packet containing the complete
	 * UDP datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
					  hh_len + fragheaderlen + transhdrlen + 20,
					  (flags & MSG_DONTWAIT), &err);

		/* Reserve space for the hardware header. */
		skb_reserve(skb, hh_len);

		/* Create space for the UDP/IP header. */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* Initialize the network header pointer. */
		skb->nh.raw = skb->data;

		/* Initialize the protocol header pointer. */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_HW;

		sk->sk_sndmsg_off = 0;

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));

		/* Specify the length of each IP datagram fragment. */
		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
		__skb_queue_tail(&sk->sk_write_queue, skb);

	/* There is not enough support to do UFO,
	 * so follow the normal path.
	 */

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP; other transport protocols, e.g. raw sockets, can
 *	potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		if (inet->cork.opt == NULL) {
			inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40,
						 sk->sk_allocation);
			if (unlikely(inet->cork.opt == NULL))
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options) + opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			transhdrlen += exthdrlen;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;
		mtu = inet->cork.fragsize;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu - exthdrlen);

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented later.
	 */
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
		csummode = CHECKSUM_HW;

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		if (ip_ufo_append_data(sk, getfrag, from, length, hh_len,
				       fragheaderlen, transhdrlen, mtu, flags))

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chain of skbs;
	 * each segment is an IP fragment ready for sending to the network
	 * once the appropriate IP header has been added.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)

		/* Check if the remaining data fits into the current packet. */
		copy = mtu - skb->len;
			copy = maxfraglen - skb->len;

			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
				fraggap = skb_prev->len - maxfraglen;

			/*
			 * If the remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features & NETIF_F_SG))
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at the tail.
			 * Note: with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last one.
			 */
			if (datalen == length)
				alloclen += rt->u.dst.trailer_len;

				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
				if (atomic_read(&sk->sk_wmem_alloc) <=
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
				if (unlikely(skb == NULL))

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;

			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
				skb_trim(skb_prev, maxfraglen);

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen,
						offset, copy, fraggap, skb) < 0) {

			length -= datalen - fraggap;

			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);

		if (!(rt->u.dst.dev->features & NETIF_F_SG)) {

			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);

			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;

			if (page && (left = PAGE_SIZE - off) > 0) {

				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {

					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];

			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
				page = alloc_pages(sk->sk_allocation, 0);

				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

			if (getfrag(from,
				    page_address(frag->page) + frag->page_offset + frag->size,
				    offset, copy, skb->len, skb) < 0) {

			sk->sk_sndmsg_off += copy;

			skb->data_len += copy;

	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
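/*
 * Illustrative sketch (not built): the intended calling pattern for the
 * corked-output interface above, roughly what the UDP sendmsg path does
 * in this kernel.  Each ip_append_data() call queues another piece on
 * sk->sk_write_queue; ip_push_pending_frames() later glues the queue
 * into one datagram and sends it.  example_udp_style_send() and its
 * parameters are hypothetical; a real UDP caller would also pass a
 * non-zero transhdrlen for its transport header.
 */
#if 0
static int example_udp_style_send(struct sock *sk, struct iovec *iov,
				  int len, struct ipcm_cookie *ipc,
				  struct rtable *rt)
{
	int err;

	lock_sock(sk);
	err = ip_append_data(sk, ip_generic_getfrag, iov, len,
			     0 /* transhdrlen: no transport header here */,
			     ipc, rt, MSG_DONTWAIT);
	if (err)
		ip_flush_pending_frames(sk);	/* drop the partial datagram */
	else
		err = ip_push_pending_frames(sk);
	release_sock(sk);
	return err;
}
#endif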
ssize_t ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct ip_options *opt = NULL;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (flags & MSG_PROBE)

	if (skb_queue_empty(&sk->sk_write_queue))

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features & NETIF_F_SG))

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO))
		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);

		if (skb_shinfo(skb)->ufo_size)

		/* Check if the remaining data fits into the current packet. */
		len = mtu - skb->len;
			len = maxfraglen - skb->len;

			struct sk_buff *skb_prev;
				fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;

			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fragheaderlen + fraggap);
			skb->nh.iph = iph = (struct iphdr *)data;
			data += fragheaderlen;

				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
				skb_prev->csum = csum_sub(skb_prev->csum,
				skb_trim(skb_prev, maxfraglen);

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);

		i = skb_shinfo(skb)->nr_frags;

		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i - 1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			skb_fill_page_desc(skb, i, page, offset, len);

		if (skb->ip_summed == CHECKSUM_NONE) {
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);

		skb->data_len += len;

	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* Move skb->data from the ext header to the IP header. */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;

	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
	 * allow fragmenting the frame generated here.  No matter how the
	 * transforms change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)

	/* The DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow fragmenting this frame
	 * locally.
	 */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)

		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
		__ip_select_ident(iph, &rt->u.dst, 0);
		iph->id = htons(inet->id++);
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);

		err = inet->recverr ? net_xmit_errno(err) : 0;

	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;

	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);

/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)

	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;

/*
 *	Fetch data from kernel space and fill in the checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
	csum = csum_partial_copy_nocheck(dptr + offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far.  ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
	struct inet_sock *inet = inet_sk(sk);
		struct ip_options	opt;
	struct ipcm_cookie ipc;
	struct rtable *rt = (struct rtable *)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))

	daddr = ipc.addr = rt->rt_src;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

			daddr = replyopts.opt.faddr;

		struct flowi fl = { .nl_u = { .ip4_u =
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(skb->nh.iph->tos) } },
				    /* Not quite clean, but right. */
					{ .sport = skb->h.th->dest,
					  .dport = skb->h.th->source } },
				    .proto = sk->sk_protocol };
		if (ip_route_output_key(&rt, &fl))

	/* And let IP do all the hard work.
	 *
	 * This chunk is not reentrant, hence the spinlock.  Note that it
	 * relies on the fact that this function is called with BH disabled
	 * locally and that sk cannot already be spinlocked.
	 */
	inet->tos = skb->nh.iph->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = skb->nh.iph->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((u16 *)skb->h.raw + arg->csumoffset) =
				csum_fold(csum_add(skb->csum, arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
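/*
 * Illustrative sketch (not built): how the TCP reset path drives
 * ip_send_reply() in this kernel.  The caller points arg->iov at a
 * fully built transport header, pre-computes the pseudo-header
 * checksum in arg->csum, and gives the offset (in 16-bit words) of the
 * checksum field in arg->csumoffset.  The reply socket and variable
 * names below are hypothetical.
 */
#if 0
static void example_send_rst_like_reply(struct sock *reply_sk,
					struct sk_buff *in_skb,
					struct tcphdr *rep, int replen)
{
	struct ip_reply_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = rep;
	arg.iov[0].iov_len = replen;
	/* arg.csum = pseudo-header checksum of the reply payload */
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	ip_send_reply(reply_sk, in_skb, &arg, replen);
}
#endif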
void __init ip_init(void)
#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);