net/ipv4/ip_output.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              The Internet Protocol (IP) output module.
7  *
8  * Authors:     Ross Biro
9  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *              Donald Becker, <becker@super.org>
11  *              Alan Cox, <Alan.Cox@linux.org>
12  *              Richard Underwood
13  *              Stefan Becker, <stefanb@yello.ping.de>
14  *              Jorge Cwik, <jorge@laser.satlink.net>
15  *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
16  *              Hirokazu Takahashi, <taka@valinux.co.jp>
17  *
18  *      See ip_input.c for original log
19  *
20  *      Fixes:
21  *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
22  *              Mike Kilburn    :       htons() missing in ip_build_xmit.
23  *              Bradford Johnson:       Fix faulty handling of some frames when
24  *                                      no route is found.
25  *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
26  *                                      (in case the packet is not accepted
27  *                                      by output firewall rules)
28  *              Mike McLagan    :       Routing by source
29  *              Alexey Kuznetsov:       use new route cache
30  *              Andi Kleen:             Fix broken PMTU recovery and remove
31  *                                      some redundant tests.
32  *              Vitaly E. Lavrov:       Transparent proxy revived after a year-long coma.
33  *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
34  *              Andi Kleen      :       Split fast and slow ip_build_xmit path
35  *                                      for decreased register pressure on x86
36  *                                      and more readability.
37  *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
38  *                                      silently drop skb instead of failing with -EPERM.
39  *              Detlev Wengorz  :       Copy protocol for fragments.
40  *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
41  *                                      datagrams.
42  *              Hirokazu Takahashi:     sendfile() on UDP works now.
43  */
44
45 #include <asm/uaccess.h>
46 #include <asm/system.h>
47 #include <linux/module.h>
48 #include <linux/types.h>
49 #include <linux/kernel.h>
50 #include <linux/mm.h>
51 #include <linux/string.h>
52 #include <linux/errno.h>
53 #include <linux/highmem.h>
54
55 #include <linux/socket.h>
56 #include <linux/sockios.h>
57 #include <linux/in.h>
58 #include <linux/inet.h>
59 #include <linux/netdevice.h>
60 #include <linux/etherdevice.h>
61 #include <linux/proc_fs.h>
62 #include <linux/stat.h>
63 #include <linux/init.h>
64
65 #include <net/snmp.h>
66 #include <net/ip.h>
67 #include <net/protocol.h>
68 #include <net/route.h>
69 #include <net/xfrm.h>
70 #include <linux/skbuff.h>
71 #include <net/sock.h>
72 #include <net/arp.h>
73 #include <net/icmp.h>
74 #include <net/checksum.h>
75 #include <net/inetpeer.h>
76 #include <linux/igmp.h>
77 #include <linux/netfilter_ipv4.h>
78 #include <linux/netfilter_bridge.h>
79 #include <linux/mroute.h>
80 #include <linux/netlink.h>
81 #include <linux/tcp.h>
82
83 int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
84
85 /* Generate a checksum for an outgoing IP datagram. */
86 __inline__ void ip_send_check(struct iphdr *iph)
87 {
88         iph->check = 0;
89         iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
90 }
91
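/*
 * __ip_local_out() fills in the remaining IP header fields (total length
 * and checksum) and runs the packet through the NF_INET_LOCAL_OUT
 * netfilter hook.  ip_local_out() below is the wrapper that continues
 * with dst_output() when the hook verdict lets the packet pass.
 */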
92 int __ip_local_out(struct sk_buff *skb)
93 {
94         struct iphdr *iph = ip_hdr(skb);
95
96         iph->tot_len = htons(skb->len);
97         ip_send_check(iph);
98         return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb->dst->dev,
99                        dst_output);
100 }
101
102 int ip_local_out(struct sk_buff *skb)
103 {
104         int err;
105
106         err = __ip_local_out(skb);
107         if (likely(err == 1))
108                 err = dst_output(skb);
109
110         return err;
111 }
112 EXPORT_SYMBOL_GPL(ip_local_out);
113
114 /* dev_loopback_xmit for use with netfilter. */
115 static int ip_dev_loopback_xmit(struct sk_buff *newskb)
116 {
117         skb_reset_mac_header(newskb);
118         __skb_pull(newskb, skb_network_offset(newskb));
119         newskb->pkt_type = PACKET_LOOPBACK;
120         newskb->ip_summed = CHECKSUM_UNNECESSARY;
121         WARN_ON(!newskb->dst);
122         netif_rx(newskb);
123         return 0;
124 }
125
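/* Pick the TTL for this socket: the per-socket unicast TTL if one has
 * been set, otherwise the hop-limit metric of the route.
 */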
126 static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
127 {
128         int ttl = inet->uc_ttl;
129
130         if (ttl < 0)
131                 ttl = dst_metric(dst, RTAX_HOPLIMIT);
132         return ttl;
133 }
134
135 /*
136  *              Add an ip header to a skbuff and send it out.
137  *
138  */
139 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
140                           __be32 saddr, __be32 daddr, struct ip_options *opt)
141 {
142         struct inet_sock *inet = inet_sk(sk);
143         struct rtable *rt = skb->rtable;
144         struct iphdr *iph;
145
146         /* Build the IP header. */
147         skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
148         skb_reset_network_header(skb);
149         iph = ip_hdr(skb);
150         iph->version  = 4;
151         iph->ihl      = 5;
152         iph->tos      = inet->tos;
153         if (ip_dont_fragment(sk, &rt->u.dst))
154                 iph->frag_off = htons(IP_DF);
155         else
156                 iph->frag_off = 0;
157         iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
158         iph->daddr    = rt->rt_dst;
159         iph->saddr    = rt->rt_src;
160         iph->protocol = sk->sk_protocol;
161         ip_select_ident(iph, &rt->u.dst, sk);
162
163         if (opt && opt->optlen) {
164                 iph->ihl += opt->optlen>>2;
165                 ip_options_build(skb, opt, daddr, rt, 0);
166         }
167
168         skb->priority = sk->sk_priority;
169         skb->mark = sk->sk_mark;
170
171         /* Send it out. */
172         return ip_local_out(skb);
173 }
174
175 EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
176
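/*
 * Final step of transmission: make sure the skb has enough headroom for
 * the link-layer header, then hand it to the neighbour layer (cached
 * hardware header if available, otherwise the neighbour output routine).
 */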
177 static inline int ip_finish_output2(struct sk_buff *skb)
178 {
179         struct dst_entry *dst = skb->dst;
180         struct rtable *rt = (struct rtable *)dst;
181         struct net_device *dev = dst->dev;
182         unsigned int hh_len = LL_RESERVED_SPACE(dev);
183
184         if (rt->rt_type == RTN_MULTICAST)
185                 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTMCASTPKTS);
186         else if (rt->rt_type == RTN_BROADCAST)
187                 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTBCASTPKTS);
188
189         /* Be paranoid, rather than too clever. */
190         if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
191                 struct sk_buff *skb2;
192
193                 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
194                 if (skb2 == NULL) {
195                         kfree_skb(skb);
196                         return -ENOMEM;
197                 }
198                 if (skb->sk)
199                         skb_set_owner_w(skb2, skb->sk);
200                 kfree_skb(skb);
201                 skb = skb2;
202         }
203
204         if (dst->hh)
205                 return neigh_hh_output(dst->hh, skb);
206         else if (dst->neighbour)
207                 return dst->neighbour->output(skb);
208
209         if (net_ratelimit())
210                 printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
211         kfree_skb(skb);
212         return -EINVAL;
213 }
214
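/* MTU to check outgoing packets against: the device MTU when the socket
 * does its own probing (IP_PMTUDISC_PROBE), otherwise the path MTU of
 * the destination entry.
 */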
215 static inline int ip_skb_dst_mtu(struct sk_buff *skb)
216 {
217         struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
218
219         return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
220                skb->dst->dev->mtu : dst_mtu(skb->dst);
221 }
222
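/*
 * Runs as the okfn of the NF_INET_POST_ROUTING hook: re-route via
 * dst_output() when an XFRM policy was attached after SNAT, fragment
 * over-MTU packets that are not GSO, then call ip_finish_output2().
 */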
223 static int ip_finish_output(struct sk_buff *skb)
224 {
225 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
226         /* Policy lookup after SNAT yielded a new policy */
227         if (skb->dst->xfrm != NULL) {
228                 IPCB(skb)->flags |= IPSKB_REROUTED;
229                 return dst_output(skb);
230         }
231 #endif
232         if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
233                 return ip_fragment(skb, ip_finish_output2);
234         else
235                 return ip_finish_output2(skb);
236 }
237
238 int ip_mc_output(struct sk_buff *skb)
239 {
240         struct sock *sk = skb->sk;
241         struct rtable *rt = skb->rtable;
242         struct net_device *dev = rt->u.dst.dev;
243
244         /*
245          *      If the indicated interface is up and running, send the packet.
246          */
247         IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS);
248
249         skb->dev = dev;
250         skb->protocol = htons(ETH_P_IP);
251
252         /*
253          *      Multicasts are looped back for other local users
254          */
255
256         if (rt->rt_flags&RTCF_MULTICAST) {
257                 if ((!sk || inet_sk(sk)->mc_loop)
258 #ifdef CONFIG_IP_MROUTE
259                 /* Small optimization: do not loop back non-local frames
260                    that have returned after forwarding; they will be dropped
261                    by ip_mr_input in any case.
262                    Note that local frames are looped back to be delivered
263                    to local recipients.
264
265                    This check is duplicated in ip_mr_input at the moment.
266                  */
267                     && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
268 #endif
269                 ) {
270                         struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
271                         if (newskb)
272                                 NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb,
273                                         NULL, newskb->dev,
274                                         ip_dev_loopback_xmit);
275                 }
276
277                 /* Multicasts with ttl 0 must not go beyond the host */
278
279                 if (ip_hdr(skb)->ttl == 0) {
280                         kfree_skb(skb);
281                         return 0;
282                 }
283         }
284
285         if (rt->rt_flags&RTCF_BROADCAST) {
286                 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
287                 if (newskb)
288                         NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb, NULL,
289                                 newskb->dev, ip_dev_loopback_xmit);
290         }
291
292         return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
293                             ip_finish_output,
294                             !(IPCB(skb)->flags & IPSKB_REROUTED));
295 }
296
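/*
 * dst_output() handler for ordinary unicast packets: account the packet,
 * set the outgoing device and protocol, and traverse the POST_ROUTING
 * hook unless the packet was already rerouted (IPSKB_REROUTED).
 */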
297 int ip_output(struct sk_buff *skb)
298 {
299         struct net_device *dev = skb->dst->dev;
300
301         IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS);
302
303         skb->dev = dev;
304         skb->protocol = htons(ETH_P_IP);
305
306         return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, dev,
307                             ip_finish_output,
308                             !(IPCB(skb)->flags & IPSKB_REROUTED));
309 }
310
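/*
 * Transmit routine used by connection-oriented transports such as TCP:
 * route the packet unless the caller already did so (e.g. SCTP), build
 * the IP header in front of the transport header and hand the result to
 * ip_local_out().  A non-zero ipfragok keeps the DF bit from being
 * forced on.
 */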
311 int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
312 {
313         struct sock *sk = skb->sk;
314         struct inet_sock *inet = inet_sk(sk);
315         struct ip_options *opt = inet->opt;
316         struct rtable *rt;
317         struct iphdr *iph;
318
319         /* Skip all of this if the packet is already routed,
320          * e.g. by something like SCTP.
321          */
322         rt = skb->rtable;
323         if (rt != NULL)
324                 goto packet_routed;
325
326         /* Make sure we can route this packet. */
327         rt = (struct rtable *)__sk_dst_check(sk, 0);
328         if (rt == NULL) {
329                 __be32 daddr;
330
331                 /* Use correct destination address if we have options. */
332                 daddr = inet->daddr;
333                 if(opt && opt->srr)
334                         daddr = opt->faddr;
335
336                 {
337                         struct flowi fl = { .oif = sk->sk_bound_dev_if,
338                                             .nl_u = { .ip4_u =
339                                                       { .daddr = daddr,
340                                                         .saddr = inet->saddr,
341                                                         .tos = RT_CONN_FLAGS(sk) } },
342                                             .proto = sk->sk_protocol,
343                                             .uli_u = { .ports =
344                                                        { .sport = inet->sport,
345                                                          .dport = inet->dport } } };
346
347                         /* If this fails, the retransmit mechanism of the transport
348                          * layer will keep trying until a route appears or the
349                          * connection times out.
350                          */
351                         security_sk_classify_flow(sk, &fl);
352                         if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
353                                 goto no_route;
354                 }
355                 sk_setup_caps(sk, &rt->u.dst);
356         }
357         skb->dst = dst_clone(&rt->u.dst);
358
359 packet_routed:
360         if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
361                 goto no_route;
362
363         /* OK, we know where to send it, allocate and build IP header. */
364         skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
365         skb_reset_network_header(skb);
366         iph = ip_hdr(skb);
367         *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
368         if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
369                 iph->frag_off = htons(IP_DF);
370         else
371                 iph->frag_off = 0;
372         iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
373         iph->protocol = sk->sk_protocol;
374         iph->saddr    = rt->rt_src;
375         iph->daddr    = rt->rt_dst;
376         /* The transport layer sets skb->h.foo itself. */
377
378         if (opt && opt->optlen) {
379                 iph->ihl += opt->optlen >> 2;
380                 ip_options_build(skb, opt, inet->daddr, rt, 0);
381         }
382
383         ip_select_ident_more(iph, &rt->u.dst, sk,
384                              (skb_shinfo(skb)->gso_segs ?: 1) - 1);
385
386         skb->priority = sk->sk_priority;
387         skb->mark = sk->sk_mark;
388
389         return ip_local_out(skb);
390
391 no_route:
392         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
393         kfree_skb(skb);
394         return -EHOSTUNREACH;
395 }
396
397
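/* Copy per-packet metadata (dst, priority, netfilter and traffic-control
 * state, ...) from the original skb to a newly created fragment.
 */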
398 static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
399 {
400         to->pkt_type = from->pkt_type;
401         to->priority = from->priority;
402         to->protocol = from->protocol;
403         dst_release(to->dst);
404         to->dst = dst_clone(from->dst);
405         to->dev = from->dev;
406         to->mark = from->mark;
407
408         /* Copy the flags to each fragment. */
409         IPCB(to)->flags = IPCB(from)->flags;
410
411 #ifdef CONFIG_NET_SCHED
412         to->tc_index = from->tc_index;
413 #endif
414         nf_copy(to, from);
415 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
416     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
417         to->nf_trace = from->nf_trace;
418 #endif
419 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
420         to->ipvs_property = from->ipvs_property;
421 #endif
422         skb_copy_secmark(to, from);
423 }
424
425 /*
426  *      This IP datagram is too large to be sent in one piece.  Break it up into
427  *      smaller pieces (each of a size equal to the IP header plus a block
428  *      of the data of the original IP data part) that will still fit in a
429  *      single device frame, and queue such frames for sending.
430  */
431
432 int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
433 {
434         struct iphdr *iph;
435         int raw = 0;
436         int ptr;
437         struct net_device *dev;
438         struct sk_buff *skb2;
439         unsigned int mtu, hlen, left, len, ll_rs, pad;
440         int offset;
441         __be16 not_last_frag;
442         struct rtable *rt = skb->rtable;
443         int err = 0;
444
445         dev = rt->u.dst.dev;
446
447         /*
448          *      Point into the IP datagram header.
449          */
450
451         iph = ip_hdr(skb);
452
453         if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
454                 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
455                 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
456                           htonl(ip_skb_dst_mtu(skb)));
457                 kfree_skb(skb);
458                 return -EMSGSIZE;
459         }
460
461         /*
462          *      Setup starting values.
463          */
464
465         hlen = iph->ihl * 4;
466         mtu = dst_mtu(&rt->u.dst) - hlen;       /* Size of data space */
467         IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
468
469         /* When frag_list is given, use it. First, check its validity:
470          * some transformers could create a wrong frag_list or break an
471          * existing one; that is not prohibited. In that case fall back to copying.
472          *
473          * LATER: this step can be merged into the real generation of fragments;
474          * we can switch to copying when we see the first bad fragment.
475          */
476         if (skb_shinfo(skb)->frag_list) {
477                 struct sk_buff *frag;
478                 int first_len = skb_pagelen(skb);
479                 int truesizes = 0;
480
481                 if (first_len - hlen > mtu ||
482                     ((first_len - hlen) & 7) ||
483                     (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
484                     skb_cloned(skb))
485                         goto slow_path;
486
487                 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
488                         /* Correct geometry. */
489                         if (frag->len > mtu ||
490                             ((frag->len & 7) && frag->next) ||
491                             skb_headroom(frag) < hlen)
492                             goto slow_path;
493
494                         /* Partially cloned skb? */
495                         if (skb_shared(frag))
496                                 goto slow_path;
497
498                         BUG_ON(frag->sk);
499                         if (skb->sk) {
500                                 sock_hold(skb->sk);
501                                 frag->sk = skb->sk;
502                                 frag->destructor = sock_wfree;
503                                 truesizes += frag->truesize;
504                         }
505                 }
506
507                 /* Everything is OK. Generate! */
508
509                 err = 0;
510                 offset = 0;
511                 frag = skb_shinfo(skb)->frag_list;
512                 skb_shinfo(skb)->frag_list = NULL;
513                 skb->data_len = first_len - skb_headlen(skb);
514                 skb->truesize -= truesizes;
515                 skb->len = first_len;
516                 iph->tot_len = htons(first_len);
517                 iph->frag_off = htons(IP_MF);
518                 ip_send_check(iph);
519
520                 for (;;) {
521                         /* Prepare the header of the next frame
522                          * before the previous one goes down. */
523                         if (frag) {
524                                 frag->ip_summed = CHECKSUM_NONE;
525                                 skb_reset_transport_header(frag);
526                                 __skb_push(frag, hlen);
527                                 skb_reset_network_header(frag);
528                                 memcpy(skb_network_header(frag), iph, hlen);
529                                 iph = ip_hdr(frag);
530                                 iph->tot_len = htons(frag->len);
531                                 ip_copy_metadata(frag, skb);
532                                 if (offset == 0)
533                                         ip_options_fragment(frag);
534                                 offset += skb->len - hlen;
535                                 iph->frag_off = htons(offset>>3);
536                                 if (frag->next != NULL)
537                                         iph->frag_off |= htons(IP_MF);
538                                 /* Ready, complete checksum */
539                                 ip_send_check(iph);
540                         }
541
542                         err = output(skb);
543
544                         if (!err)
545                                 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
546                         if (err || !frag)
547                                 break;
548
549                         skb = frag;
550                         frag = skb->next;
551                         skb->next = NULL;
552                 }
553
554                 if (err == 0) {
555                         IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
556                         return 0;
557                 }
558
559                 while (frag) {
560                         skb = frag->next;
561                         kfree_skb(frag);
562                         frag = skb;
563                 }
564                 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
565                 return err;
566         }
567
568 slow_path:
569         left = skb->len - hlen;         /* Space per frame */
570         ptr = raw + hlen;               /* Where to start from */
571
572         /* for bridged IP traffic encapsulated inside e.g. a VLAN header,
573          * we need to make room for the encapsulating header
574          */
575         pad = nf_bridge_pad(skb);
576         ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
577         mtu -= pad;
578
579         /*
580          *      Fragment the datagram.
581          */
582
583         offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
584         not_last_frag = iph->frag_off & htons(IP_MF);
585
586         /*
587          *      Keep copying data until we run out.
588          */
589
590         while (left > 0) {
591                 len = left;
592                 /* IF: it doesn't fit, use 'mtu' - the data space left */
593                 if (len > mtu)
594                         len = mtu;
595                 /* IF: we are not sending up to and including the packet end
596                    then align the next start on an eight-byte boundary */
597                 if (len < left) {
598                         len &= ~7;
599                 }
600                 /*
601                  *      Allocate buffer.
602                  */
603
604                 if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
605                         NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
606                         err = -ENOMEM;
607                         goto fail;
608                 }
609
610                 /*
611                  *      Set up data on packet
612                  */
613
614                 ip_copy_metadata(skb2, skb);
615                 skb_reserve(skb2, ll_rs);
616                 skb_put(skb2, len + hlen);
617                 skb_reset_network_header(skb2);
618                 skb2->transport_header = skb2->network_header + hlen;
619
620                 /*
621                  *      Charge the memory for the fragment to any owner
622                  *      it might possess
623                  */
624
625                 if (skb->sk)
626                         skb_set_owner_w(skb2, skb->sk);
627
628                 /*
629                  *      Copy the packet header into the new buffer.
630                  */
631
632                 skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
633
634                 /*
635                  *      Copy a block of the IP datagram.
636                  */
637                 if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
638                         BUG();
639                 left -= len;
640
641                 /*
642                  *      Fill in the new header fields.
643                  */
644                 iph = ip_hdr(skb2);
645                 iph->frag_off = htons((offset >> 3));
646
647                 /* ANK: dirty, but effective trick. Upgrade options only if
648                  * the segment to be fragmented was THE FIRST (otherwise,
649                  * options are already fixed) and make it ONCE
650                  * on the initial skb, so that all the following fragments
651                  * will inherit fixed options.
652                  */
653                 if (offset == 0)
654                         ip_options_fragment(skb);
655
656                 /*
657                  *      Added AC : If we are fragmenting a fragment that's not the
658                  *                 last fragment then keep the MF bit set on each one
659                  */
660                 if (left > 0 || not_last_frag)
661                         iph->frag_off |= htons(IP_MF);
662                 ptr += len;
663                 offset += len;
664
665                 /*
666                  *      Put this fragment into the sending queue.
667                  */
668                 iph->tot_len = htons(len + hlen);
669
670                 ip_send_check(iph);
671
672                 err = output(skb2);
673                 if (err)
674                         goto fail;
675
676                 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
677         }
678         kfree_skb(skb);
679         IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
680         return err;
681
682 fail:
683         kfree_skb(skb);
684         IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
685         return err;
686 }
687
688 EXPORT_SYMBOL(ip_fragment);
689
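/*
 * getfrag() callback for ip_append_data() callers that copy from an
 * iovec: copy the data into the skb and accumulate a software checksum
 * unless the hardware will checksum the packet (CHECKSUM_PARTIAL).
 */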
690 int
691 ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
692 {
693         struct iovec *iov = from;
694
695         if (skb->ip_summed == CHECKSUM_PARTIAL) {
696                 if (memcpy_fromiovecend(to, iov, offset, len) < 0)
697                         return -EFAULT;
698         } else {
699                 __wsum csum = 0;
700                 if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
701                         return -EFAULT;
702                 skb->csum = csum_block_add(skb->csum, csum, odd);
703         }
704         return 0;
705 }
706
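/* Checksum 'copy' bytes of 'page' starting at 'offset'. */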
707 static inline __wsum
708 csum_page(struct page *page, int offset, int copy)
709 {
710         char *kaddr;
711         __wsum csum;
712         kaddr = kmap(page);
713         csum = csum_partial(kaddr + offset, copy, 0);
714         kunmap(page);
715         return csum;
716 }
717
718 static inline int ip_ufo_append_data(struct sock *sk,
719                         int getfrag(void *from, char *to, int offset, int len,
720                                int odd, struct sk_buff *skb),
721                         void *from, int length, int hh_len, int fragheaderlen,
722                         int transhdrlen, int mtu,unsigned int flags)
723 {
724         struct sk_buff *skb;
725         int err;
726
727         /* The network device supports UDP fragmentation offload, so
728          * create one single skb packet containing the complete UDP
729          * datagram.
730          */
731         if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
732                 skb = sock_alloc_send_skb(sk,
733                         hh_len + fragheaderlen + transhdrlen + 20,
734                         (flags & MSG_DONTWAIT), &err);
735
736                 if (skb == NULL)
737                         return err;
738
739                 /* reserve space for Hardware header */
740                 skb_reserve(skb, hh_len);
741
742                 /* create space for UDP/IP header */
743                 skb_put(skb,fragheaderlen + transhdrlen);
744
745                 /* initialize network header pointer */
746                 skb_reset_network_header(skb);
747
748                 /* initialize protocol header pointer */
749                 skb->transport_header = skb->network_header + fragheaderlen;
750
751                 skb->ip_summed = CHECKSUM_PARTIAL;
752                 skb->csum = 0;
753                 sk->sk_sndmsg_off = 0;
754
755                 /* specify the length of each IP datagram fragment */
756                 skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
757                 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
758                 __skb_queue_tail(&sk->sk_write_queue, skb);
759         }
760
761         return skb_append_datato_frags(sk, skb, getfrag, from,
762                                        (length - transhdrlen));
763 }
764
765 /*
766  *      ip_append_data() and ip_append_page() can make one large IP datagram
767  *      from many pieces of data. Each piece will be held on the socket
768  *      until ip_push_pending_frames() is called. Each piece can be a page
769  *      or non-page data.
770  *
771  *      Not only UDP but other transport protocols - e.g. raw sockets - can
772  *      potentially use this interface.
773  *
774  *      LATER: length must be adjusted by the tail pad, when it is required.
775  */
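/*
 * Rough usage sketch (illustrative only, not lifted from a real caller):
 * a datagram protocol that has already routed the packet and filled in
 * an ipcm_cookie typically does something like
 *
 *      err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len,
 *                           sizeof(struct udphdr), &ipc, rt,
 *                           msg->msg_flags);
 *      if (!err)
 *              err = ip_push_pending_frames(sk);
 *
 * UDP's sendmsg path works along these lines; exact details differ per
 * caller.
 */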
776 int ip_append_data(struct sock *sk,
777                    int getfrag(void *from, char *to, int offset, int len,
778                                int odd, struct sk_buff *skb),
779                    void *from, int length, int transhdrlen,
780                    struct ipcm_cookie *ipc, struct rtable *rt,
781                    unsigned int flags)
782 {
783         struct inet_sock *inet = inet_sk(sk);
784         struct sk_buff *skb;
785
786         struct ip_options *opt = NULL;
787         int hh_len;
788         int exthdrlen;
789         int mtu;
790         int copy;
791         int err;
792         int offset = 0;
793         unsigned int maxfraglen, fragheaderlen;
794         int csummode = CHECKSUM_NONE;
795
796         if (flags&MSG_PROBE)
797                 return 0;
798
799         if (skb_queue_empty(&sk->sk_write_queue)) {
800                 /*
801                  * setup for corking.
802                  */
803                 opt = ipc->opt;
804                 if (opt) {
805                         if (inet->cork.opt == NULL) {
806                                 inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
807                                 if (unlikely(inet->cork.opt == NULL))
808                                         return -ENOBUFS;
809                         }
810                         memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
811                         inet->cork.flags |= IPCORK_OPT;
812                         inet->cork.addr = ipc->addr;
813                 }
814                 dst_hold(&rt->u.dst);
815                 inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
816                                             rt->u.dst.dev->mtu :
817                                             dst_mtu(rt->u.dst.path);
818                 inet->cork.dst = &rt->u.dst;
819                 inet->cork.length = 0;
820                 sk->sk_sndmsg_page = NULL;
821                 sk->sk_sndmsg_off = 0;
822                 if ((exthdrlen = rt->u.dst.header_len) != 0) {
823                         length += exthdrlen;
824                         transhdrlen += exthdrlen;
825                 }
826         } else {
827                 rt = (struct rtable *)inet->cork.dst;
828                 if (inet->cork.flags & IPCORK_OPT)
829                         opt = inet->cork.opt;
830
831                 transhdrlen = 0;
832                 exthdrlen = 0;
833                 mtu = inet->cork.fragsize;
834         }
835         hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
836
837         fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
838         maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
839
840         if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
841                 ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
842                 return -EMSGSIZE;
843         }
844
845         /*
846          * transhdrlen > 0 means that this is the first fragment and we wish
847          * it not to be fragmented in the future.
848          */
849         if (transhdrlen &&
850             length + fragheaderlen <= mtu &&
851             rt->u.dst.dev->features & NETIF_F_V4_CSUM &&
852             !exthdrlen)
853                 csummode = CHECKSUM_PARTIAL;
854
855         inet->cork.length += length;
856         if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
857             (sk->sk_protocol == IPPROTO_UDP) &&
858             (rt->u.dst.dev->features & NETIF_F_UFO)) {
859                 err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
860                                          fragheaderlen, transhdrlen, mtu,
861                                          flags);
862                 if (err)
863                         goto error;
864                 return 0;
865         }
866
867         /* So, what's going on in the loop below?
868          *
869          * We use the calculated fragment length to generate a chained skb;
870          * each of its segments is an IP fragment ready to be sent to the
871          * network after adding the appropriate IP header.
872          */
873
874         if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
875                 goto alloc_new_skb;
876
877         while (length > 0) {
878                 /* Check if the remaining data fits into current packet. */
879                 copy = mtu - skb->len;
880                 if (copy < length)
881                         copy = maxfraglen - skb->len;
882                 if (copy <= 0) {
883                         char *data;
884                         unsigned int datalen;
885                         unsigned int fraglen;
886                         unsigned int fraggap;
887                         unsigned int alloclen;
888                         struct sk_buff *skb_prev;
889 alloc_new_skb:
890                         skb_prev = skb;
891                         if (skb_prev)
892                                 fraggap = skb_prev->len - maxfraglen;
893                         else
894                                 fraggap = 0;
895
896                         /*
897                          * If remaining data exceeds the mtu,
898                          * we know we need more fragment(s).
899                          */
900                         datalen = length + fraggap;
901                         if (datalen > mtu - fragheaderlen)
902                                 datalen = maxfraglen - fragheaderlen;
903                         fraglen = datalen + fragheaderlen;
904
905                         if ((flags & MSG_MORE) &&
906                             !(rt->u.dst.dev->features&NETIF_F_SG))
907                                 alloclen = mtu;
908                         else
909                                 alloclen = datalen + fragheaderlen;
910
911                         /* The last fragment gets additional space at tail.
912                          * Note, with MSG_MORE we overallocate on fragments,
913          * because we have no idea which fragment will be
914                          * the last.
915                          */
916                         if (datalen == length + fraggap)
917                                 alloclen += rt->u.dst.trailer_len;
918
919                         if (transhdrlen) {
920                                 skb = sock_alloc_send_skb(sk,
921                                                 alloclen + hh_len + 15,
922                                                 (flags & MSG_DONTWAIT), &err);
923                         } else {
924                                 skb = NULL;
925                                 if (atomic_read(&sk->sk_wmem_alloc) <=
926                                     2 * sk->sk_sndbuf)
927                                         skb = sock_wmalloc(sk,
928                                                            alloclen + hh_len + 15, 1,
929                                                            sk->sk_allocation);
930                                 if (unlikely(skb == NULL))
931                                         err = -ENOBUFS;
932                         }
933                         if (skb == NULL)
934                                 goto error;
935
936                         /*
937                          *      Fill in the control structures
938                          */
939                         skb->ip_summed = csummode;
940                         skb->csum = 0;
941                         skb_reserve(skb, hh_len);
942
943                         /*
944                          *      Find where to start putting bytes.
945                          */
946                         data = skb_put(skb, fraglen);
947                         skb_set_network_header(skb, exthdrlen);
948                         skb->transport_header = (skb->network_header +
949                                                  fragheaderlen);
950                         data += fragheaderlen;
951
952                         if (fraggap) {
953                                 skb->csum = skb_copy_and_csum_bits(
954                                         skb_prev, maxfraglen,
955                                         data + transhdrlen, fraggap, 0);
956                                 skb_prev->csum = csum_sub(skb_prev->csum,
957                                                           skb->csum);
958                                 data += fraggap;
959                                 pskb_trim_unique(skb_prev, maxfraglen);
960                         }
961
962                         copy = datalen - transhdrlen - fraggap;
963                         if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
964                                 err = -EFAULT;
965                                 kfree_skb(skb);
966                                 goto error;
967                         }
968
969                         offset += copy;
970                         length -= datalen - fraggap;
971                         transhdrlen = 0;
972                         exthdrlen = 0;
973                         csummode = CHECKSUM_NONE;
974
975                         /*
976                          * Put the packet on the pending queue.
977                          */
978                         __skb_queue_tail(&sk->sk_write_queue, skb);
979                         continue;
980                 }
981
982                 if (copy > length)
983                         copy = length;
984
985                 if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
986                         unsigned int off;
987
988                         off = skb->len;
989                         if (getfrag(from, skb_put(skb, copy),
990                                         offset, copy, off, skb) < 0) {
991                                 __skb_trim(skb, off);
992                                 err = -EFAULT;
993                                 goto error;
994                         }
995                 } else {
996                         int i = skb_shinfo(skb)->nr_frags;
997                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
998                         struct page *page = sk->sk_sndmsg_page;
999                         int off = sk->sk_sndmsg_off;
1000                         unsigned int left;
1001
1002                         if (page && (left = PAGE_SIZE - off) > 0) {
1003                                 if (copy >= left)
1004                                         copy = left;
1005                                 if (page != frag->page) {
1006                                         if (i == MAX_SKB_FRAGS) {
1007                                                 err = -EMSGSIZE;
1008                                                 goto error;
1009                                         }
1010                                         get_page(page);
1011                                         skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
1012                                         frag = &skb_shinfo(skb)->frags[i];
1013                                 }
1014                         } else if (i < MAX_SKB_FRAGS) {
1015                                 if (copy > PAGE_SIZE)
1016                                         copy = PAGE_SIZE;
1017                                 page = alloc_pages(sk->sk_allocation, 0);
1018                                 if (page == NULL)  {
1019                                         err = -ENOMEM;
1020                                         goto error;
1021                                 }
1022                                 sk->sk_sndmsg_page = page;
1023                                 sk->sk_sndmsg_off = 0;
1024
1025                                 skb_fill_page_desc(skb, i, page, 0, 0);
1026                                 frag = &skb_shinfo(skb)->frags[i];
1027                         } else {
1028                                 err = -EMSGSIZE;
1029                                 goto error;
1030                         }
1031                         if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
1032                                 err = -EFAULT;
1033                                 goto error;
1034                         }
1035                         sk->sk_sndmsg_off += copy;
1036                         frag->size += copy;
1037                         skb->len += copy;
1038                         skb->data_len += copy;
1039                         skb->truesize += copy;
1040                         atomic_add(copy, &sk->sk_wmem_alloc);
1041                 }
1042                 offset += copy;
1043                 length -= copy;
1044         }
1045
1046         return 0;
1047
1048 error:
1049         inet->cork.length -= length;
1050         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1051         return err;
1052 }
1053
1054 ssize_t ip_append_page(struct sock *sk, struct page *page,
1055                        int offset, size_t size, int flags)
1056 {
1057         struct inet_sock *inet = inet_sk(sk);
1058         struct sk_buff *skb;
1059         struct rtable *rt;
1060         struct ip_options *opt = NULL;
1061         int hh_len;
1062         int mtu;
1063         int len;
1064         int err;
1065         unsigned int maxfraglen, fragheaderlen, fraggap;
1066
1067         if (inet->hdrincl)
1068                 return -EPERM;
1069
1070         if (flags&MSG_PROBE)
1071                 return 0;
1072
1073         if (skb_queue_empty(&sk->sk_write_queue))
1074                 return -EINVAL;
1075
1076         rt = (struct rtable *)inet->cork.dst;
1077         if (inet->cork.flags & IPCORK_OPT)
1078                 opt = inet->cork.opt;
1079
1080         if (!(rt->u.dst.dev->features&NETIF_F_SG))
1081                 return -EOPNOTSUPP;
1082
1083         hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
1084         mtu = inet->cork.fragsize;
1085
1086         fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
1087         maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
1088
1089         if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
1090                 ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
1091                 return -EMSGSIZE;
1092         }
1093
1094         if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
1095                 return -EINVAL;
1096
1097         inet->cork.length += size;
1098         if ((sk->sk_protocol == IPPROTO_UDP) &&
1099             (rt->u.dst.dev->features & NETIF_F_UFO)) {
1100                 skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
1101                 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1102         }
1103
1104
1105         while (size > 0) {
1106                 int i;
1107
1108                 if (skb_is_gso(skb))
1109                         len = size;
1110                 else {
1111
1112                         /* Check if the remaining data fits into current packet. */
1113                         len = mtu - skb->len;
1114                         if (len < size)
1115                                 len = maxfraglen - skb->len;
1116                 }
1117                 if (len <= 0) {
1118                         struct sk_buff *skb_prev;
1119                         int alloclen;
1120
1121                         skb_prev = skb;
1122                         fraggap = skb_prev->len - maxfraglen;
1123
1124                         alloclen = fragheaderlen + hh_len + fraggap + 15;
1125                         skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
1126                         if (unlikely(!skb)) {
1127                                 err = -ENOBUFS;
1128                                 goto error;
1129                         }
1130
1131                         /*
1132                          *      Fill in the control structures
1133                          */
1134                         skb->ip_summed = CHECKSUM_NONE;
1135                         skb->csum = 0;
1136                         skb_reserve(skb, hh_len);
1137
1138                         /*
1139                          *      Find where to start putting bytes.
1140                          */
1141                         skb_put(skb, fragheaderlen + fraggap);
1142                         skb_reset_network_header(skb);
1143                         skb->transport_header = (skb->network_header +
1144                                                  fragheaderlen);
1145                         if (fraggap) {
1146                                 skb->csum = skb_copy_and_csum_bits(skb_prev,
1147                                                                    maxfraglen,
1148                                                     skb_transport_header(skb),
1149                                                                    fraggap, 0);
1150                                 skb_prev->csum = csum_sub(skb_prev->csum,
1151                                                           skb->csum);
1152                                 pskb_trim_unique(skb_prev, maxfraglen);
1153                         }
1154
1155                         /*
1156                          * Put the packet on the pending queue.
1157                          */
1158                         __skb_queue_tail(&sk->sk_write_queue, skb);
1159                         continue;
1160                 }
1161
1162                 i = skb_shinfo(skb)->nr_frags;
1163                 if (len > size)
1164                         len = size;
1165                 if (skb_can_coalesce(skb, i, page, offset)) {
1166                         skb_shinfo(skb)->frags[i-1].size += len;
1167                 } else if (i < MAX_SKB_FRAGS) {
1168                         get_page(page);
1169                         skb_fill_page_desc(skb, i, page, offset, len);
1170                 } else {
1171                         err = -EMSGSIZE;
1172                         goto error;
1173                 }
1174
1175                 if (skb->ip_summed == CHECKSUM_NONE) {
1176                         __wsum csum;
1177                         csum = csum_page(page, offset, len);
1178                         skb->csum = csum_block_add(skb->csum, csum, skb->len);
1179                 }
1180
1181                 skb->len += len;
1182                 skb->data_len += len;
1183                 skb->truesize += len;
1184                 atomic_add(len, &sk->sk_wmem_alloc);
1185                 offset += len;
1186                 size -= len;
1187         }
1188         return 0;
1189
1190 error:
1191         inet->cork.length -= size;
1192         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1193         return err;
1194 }
1195
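/* Release the options and route held while the socket output was corked. */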
1196 static void ip_cork_release(struct inet_sock *inet)
1197 {
1198         inet->cork.flags &= ~IPCORK_OPT;
1199         kfree(inet->cork.opt);
1200         inet->cork.opt = NULL;
1201         dst_release(inet->cork.dst);
1202         inet->cork.dst = NULL;
1203 }
1204
1205 /*
1206  *      Combine all pending IP fragments on the socket into one IP datagram
1207  *      and push them out.
1208  */
1209 int ip_push_pending_frames(struct sock *sk)
1210 {
1211         struct sk_buff *skb, *tmp_skb;
1212         struct sk_buff **tail_skb;
1213         struct inet_sock *inet = inet_sk(sk);
1214         struct net *net = sock_net(sk);
1215         struct ip_options *opt = NULL;
1216         struct rtable *rt = (struct rtable *)inet->cork.dst;
1217         struct iphdr *iph;
1218         __be16 df = 0;
1219         __u8 ttl;
1220         int err = 0;
1221
1222         if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
1223                 goto out;
1224         tail_skb = &(skb_shinfo(skb)->frag_list);
1225
1226         /* move skb->data from the ext header to the IP header */
1227         if (skb->data < skb_network_header(skb))
1228                 __skb_pull(skb, skb_network_offset(skb));
1229         while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1230                 __skb_pull(tmp_skb, skb_network_header_len(skb));
1231                 *tail_skb = tmp_skb;
1232                 tail_skb = &(tmp_skb->next);
1233                 skb->len += tmp_skb->len;
1234                 skb->data_len += tmp_skb->len;
1235                 skb->truesize += tmp_skb->truesize;
1236                 __sock_put(tmp_skb->sk);
1237                 tmp_skb->destructor = NULL;
1238                 tmp_skb->sk = NULL;
1239         }
1240
1241         /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
1242          * the frame generated here to be fragmented. No matter how transforms
1243          * change the size of the packet, it will come out.
1244          */
1245         if (inet->pmtudisc < IP_PMTUDISC_DO)
1246                 skb->local_df = 1;
1247
1248         /* DF bit is set when we want to see DF on outgoing frames.
1249          * If local_df is set too, we still allow this frame to be fragmented
1250          * locally. */
1251         if (inet->pmtudisc >= IP_PMTUDISC_DO ||
1252             (skb->len <= dst_mtu(&rt->u.dst) &&
1253              ip_dont_fragment(sk, &rt->u.dst)))
1254                 df = htons(IP_DF);
1255
1256         if (inet->cork.flags & IPCORK_OPT)
1257                 opt = inet->cork.opt;
1258
1259         if (rt->rt_type == RTN_MULTICAST)
1260                 ttl = inet->mc_ttl;
1261         else
1262                 ttl = ip_select_ttl(inet, &rt->u.dst);
1263
1264         iph = (struct iphdr *)skb->data;
1265         iph->version = 4;
1266         iph->ihl = 5;
1267         if (opt) {
1268                 iph->ihl += opt->optlen>>2;
1269                 ip_options_build(skb, opt, inet->cork.addr, rt, 0);
1270         }
1271         iph->tos = inet->tos;
1272         iph->frag_off = df;
1273         ip_select_ident(iph, &rt->u.dst, sk);
1274         iph->ttl = ttl;
1275         iph->protocol = sk->sk_protocol;
1276         iph->saddr = rt->rt_src;
1277         iph->daddr = rt->rt_dst;
1278
1279         skb->priority = sk->sk_priority;
1280         skb->mark = sk->sk_mark;
1281         skb->dst = dst_clone(&rt->u.dst);
1282
1283         if (iph->protocol == IPPROTO_ICMP)
1284                 icmp_out_count(net, ((struct icmphdr *)
1285                         skb_transport_header(skb))->type);
1286
1287         /* Netfilter gets the whole, not yet fragmented skb. */
1288         err = ip_local_out(skb);
1289         if (err) {
1290                 if (err > 0)
1291                         err = inet->recverr ? net_xmit_errno(err) : 0;
1292                 if (err)
1293                         goto error;
1294         }
1295
1296 out:
1297         ip_cork_release(inet);
1298         return err;
1299
1300 error:
1301         IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
1302         goto out;
1303 }
1304
1305 /*
1306  *      Throw away all pending data on the socket.
1307  */
1308 void ip_flush_pending_frames(struct sock *sk)
1309 {
1310         struct sk_buff *skb;
1311
1312         while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
1313                 kfree_skb(skb);
1314
1315         ip_cork_release(inet_sk(sk));
1316 }
1317
1318
1319 /*
1320  *      Fetch data from kernel space and fill in checksum if needed.
1321  */
1322 static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1323                               int len, int odd, struct sk_buff *skb)
1324 {
1325         __wsum csum;
1326
1327         csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1328         skb->csum = csum_block_add(skb->csum, csum, odd);
1329         return 0;
1330 }
1331
1332 /*
1333  *      Generic function to send a packet as a reply to another packet.
1334  *      So far it is used to send TCP resets; ICMP should use this function too.
1335  *
1336  *      Should run single-threaded per socket because it uses the sock
1337  *      structure to pass arguments.
1338  */
1339 void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
1340                    unsigned int len)
1341 {
1342         struct inet_sock *inet = inet_sk(sk);
1343         struct {
1344                 struct ip_options       opt;
1345                 char                    data[40];
1346         } replyopts;
1347         struct ipcm_cookie ipc;
1348         __be32 daddr;
1349         struct rtable *rt = skb->rtable;
1350
1351         if (ip_options_echo(&replyopts.opt, skb))
1352                 return;
1353
1354         daddr = ipc.addr = rt->rt_src;
1355         ipc.opt = NULL;
1356
1357         if (replyopts.opt.optlen) {
1358                 ipc.opt = &replyopts.opt;
1359
1360                 if (ipc.opt->srr)
1361                         daddr = replyopts.opt.faddr;
1362         }
1363
1364         {
1365                 struct flowi fl = { .oif = arg->bound_dev_if,
1366                                     .nl_u = { .ip4_u =
1367                                               { .daddr = daddr,
1368                                                 .saddr = rt->rt_spec_dst,
1369                                                 .tos = RT_TOS(ip_hdr(skb)->tos) } },
1370                                     /* Not quite clean, but right. */
1371                                     .uli_u = { .ports =
1372                                                { .sport = tcp_hdr(skb)->dest,
1373                                                  .dport = tcp_hdr(skb)->source } },
1374                                     .proto = sk->sk_protocol };
1375                 security_skb_classify_flow(skb, &fl);
1376                 if (ip_route_output_key(sock_net(sk), &rt, &fl))
1377                         return;
1378         }
1379
1380         /* And let IP do all the hard work.
1381
1382            This chunk is not reentrant, hence the spinlock.
1383            Note that it relies on the fact that this function is called
1384            with BHs locally disabled and that sk cannot already be spinlocked.
1385          */
1386         bh_lock_sock(sk);
1387         inet->tos = ip_hdr(skb)->tos;
1388         sk->sk_priority = skb->priority;
1389         sk->sk_protocol = ip_hdr(skb)->protocol;
1390         sk->sk_bound_dev_if = arg->bound_dev_if;
1391         ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
1392                        &ipc, rt, MSG_DONTWAIT);
1393         if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
1394                 if (arg->csumoffset >= 0)
1395                         *((__sum16 *)skb_transport_header(skb) +
1396                           arg->csumoffset) = csum_fold(csum_add(skb->csum,
1397                                                                 arg->csum));
1398                 skb->ip_summed = CHECKSUM_NONE;
1399                 ip_push_pending_frames(sk);
1400         }
1401
1402         bh_unlock_sock(sk);
1403
1404         ip_rt_put(rt);
1405 }
1406
1407 void __init ip_init(void)
1408 {
1409         ip_rt_init();
1410         inet_initpeers();
1411
1412 #if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
1413         igmp_mc_proc_init();
1414 #endif
1415 }
1416
1417 EXPORT_SYMBOL(ip_generic_getfrag);
1418 EXPORT_SYMBOL(ip_queue_xmit);
1419 EXPORT_SYMBOL(ip_send_check);