/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
int sysctl_ip_default_ttl = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
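
/*
 * Usage sketch (illustrative): the header must be complete before the
 * checksum is computed; ip_send_check() zeroes iph->check itself, so
 * a caller that has just filled in a struct iphdr only needs
 *
 *	iph->tot_len = htons(skb->len);
 *	ip_send_check(iph);
 */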
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	netif_rx(newskb);
	return 0;
}
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}
/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  u32 saddr, u32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	if (opt)
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
	else
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);
	skb->nh.iph   = iph;

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
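
/*
 * Illustrative use (a sketch, not code from this file): connection
 * handshake replies are built this way; e.g. a SYN-ACK path does,
 * roughly,
 *
 *	skb = <build transport header into a fresh skb>;
 *	skb->dst = <routed destination entry>;
 *	ip_build_and_send_pkt(skb, sk, saddr, daddr, opt);
 *
 * The caller supplies an already-routed skb; this function only
 * prepends the IP header and hands the result to NF_IP_LOCAL_OUT.
 */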
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}
static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb->dst->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}
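
/*
 * GSO packets may legitimately exceed the route MTU at this point:
 * the device (or the software GSO fallback) segments them later, so
 * only oversized non-GSO packets take the ip_fragment() path here.
 */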
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable*)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that came back after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (skb->nh.iph->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rt = (struct rtable *) skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		u32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, retransmit mechanism of transport layer will
			 * keep trying until route appears or the connection times
			 * itself out.
			 */
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	*((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
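	/* The single 16-bit store above fills version (4), ihl (5) and
	 * TOS in one go: the first two header bytes are version/ihl and
	 * tos, so htons((4 << 12) | (5 << 8) | tos) writes all three
	 * fields at once, regardless of host endianness.
	 */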
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	skb->nh.iph   = iph;
	/* Transport layer set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
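	/* For GSO, more than one segment leaves with this header, so
	 * reserve (gso_segs - 1) extra IP IDs; "x ?: 1" is the GCC
	 * shorthand for "x ? x : 1" and covers the non-GSO case.
	 */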
	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
	skb_copy_secmark(to, from);
}
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */
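
/*
 * Fragment offsets are carried in 8-byte units: a fragment covering
 * payload bytes 1480..2959 gets iph->frag_off = htons(1480 >> 3),
 * with IP_MF set on every fragment except the last.  That is why the
 * data length of each non-final fragment below must be a multiple of 8.
 */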
int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = (struct rtable*)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = skb->nh.iph;

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when see the first bad fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, iph, hlen);
				iph = frag->nh.iph;
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

#ifdef CONFIG_BRIDGE_NETFILTER
	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
	mtu -= nf_bridge_pad(skb);
#else
	ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
#endif

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);
	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/* Allocate buffer. */
		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/* Set up data on packet */
		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb2->nh.raw = skb2->data;
		skb2->h.raw = skb2->data + hlen;

		/* Charge the memory for the fragment to any owner it might possess */
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/* Copy the packet header into the new buffer. */
		memcpy(skb2->nh.raw, skb->data, hlen);

		/* Copy a block of the IP datagram. */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
			BUG();
		left -= len;

		/* Fill in the new header fields. */
		iph = skb2->nh.iph;
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/* Added AC: if we are fragmenting a fragment that's not the
		 * last fragment then keep the MF bit set on each fragment.
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/* Put this fragment into the sending queue. */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}

EXPORT_SYMBOL(ip_fragment);
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_HW) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		unsigned int csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}

static inline unsigned int
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	unsigned int csum;

	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
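
/*
 * csum_page() has to kmap() the page because page frags may live in
 * highmem; the result is later folded into skb->csum with
 * csum_block_add(), whose "odd" argument compensates for data that
 * starts on an odd byte offset.
 */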
static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by the network
	 * device, so create one single skb packet containing the complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb->nh.raw = skb->data;

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_HW;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UFO,
	 * so follow the normal path.
	 */
	kfree_skb(skb);
	return err;
}
/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
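
/*
 * A minimal usage sketch (illustrative, not code from this file): a
 * UDP-style sendmsg() path corks data and then flushes it as one
 * datagram:
 *
 *	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov,
 *			     len, sizeof(struct udphdr), &ipc, rt,
 *			     flags);
 *	if (!err)
 *		err = ip_push_pending_frames(sk);
 *	else
 *		ip_flush_pending_frames(sk);
 */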
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_HW;

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of segments is IP fragment ready for sending to network after
	 * adding appropriate IP header.
	 */
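	/* Every fragment but the last must carry a multiple of 8 bytes
	 * of payload, so maxfraglen rounds (mtu - fragheaderlen) down
	 * to a multiple of 8 and adds the header back; skbs are grown
	 * to maxfraglen, and only the final fragment may end short.
	 */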
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;
		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {
			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			char *data;
			struct iphdr *iph;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fragheaderlen + fraggap);
			skb->nh.iph = iph = (struct iphdr *)data;
			data += fragheaderlen;
			skb->h.raw = data;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				skb_trim(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			unsigned int csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
/*
 *	Combine all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	unsigned int csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
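
/*
 * Illustrative caller (a sketch, not code from this file): a TCP
 * reset path fills an ip_reply_arg with a prebuilt header and its
 * partial checksum, then lets this function route and emit it;
 * "rep" below is a hypothetical reply-header structure:
 *
 *	struct ip_reply_arg arg;
 *	arg.iov[0].iov_base = &rep.th;
 *	arg.iov[0].iov_len  = sizeof(rep.th);
 *	arg.csum = csum_partial((char *)&rep, sizeof(rep), 0);
 *	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 *	ip_send_reply(sk, skb, &arg, arg.iov[0].iov_len);
 */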
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	u32 daddr;
	struct rtable *rt = (struct rtable*)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(skb->nh.iph->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = skb->h.th->dest,
						 .dport = skb->h.th->source } },
				    .proto = sk->sk_protocol };
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence spinlock.
	   Note that it uses the fact that this function is called
	   with locally disabled BH and that sk cannot be already spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = skb->nh.iph->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = skb->nh.iph->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);