/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The Internet Protocol (IP) output module.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Donald Becker, <becker@super.org>
 *              Alan Cox, <Alan.Cox@linux.org>
 *              Richard Underwood
 *              Stefan Becker, <stefanb@yello.ping.de>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *      See ip_input.c for original log
 *
 *      Fixes:
 *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
 *              Mike Kilburn    :       htons() missing in ip_build_xmit.
 *              Bradford Johnson:       Fix faulty handling of some frames when
 *                                      no route is found.
 *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
 *                                      (in case the packet is not accepted by
 *                                      output firewall rules)
 *              Mike McLagan    :       Routing by source
 *              Alexey Kuznetsov:       use new route cache
 *              Andi Kleen:             Fix broken PMTU recovery and remove
 *                                      some redundant tests.
 *              Vitaly E. Lavrov:       Transparent proxy revived after year coma.
 *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
 *              Andi Kleen      :       Split fast and slow ip_build_xmit path
 *                                      for decreased register pressure on x86
 *                                      and more readability.
 *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
 *                                      silently drop skb instead of failing with -EPERM.
 *              Detlev Wengorz  :       Copy protocol for fragments.
 *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
 *                                      datagrams.
 *              Hirokazu Takahashi:     sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
        iph->check = 0;
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
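/*
 * Illustrative sketch (not part of this file): the checksum must be
 * regenerated whenever header fields are rewritten, e.g.
 *
 *      iph->ttl--;
 *      ip_send_check(iph);
 *
 * Note that ip_fast_csum() takes the header length in 32-bit words
 * (iph->ihl), not in bytes.
 */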

int __ip_local_out(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);

        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
        return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb->dst->dev,
                       dst_output);
}

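/*
 * A return value of 1 from __ip_local_out() means the NF_INET_LOCAL_OUT
 * hook accepted the packet without stealing or queueing it, so the caller
 * still has to invoke dst_output() itself; anything else is an error code
 * or means netfilter took ownership of the skb.
 */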
int ip_local_out(struct sk_buff *skb)
{
        int err;

        err = __ip_local_out(skb);
        if (likely(err == 1))
                err = dst_output(skb);

        return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
        skb_reset_mac_header(newskb);
        __skb_pull(newskb, skb_network_offset(newskb));
        newskb->pkt_type = PACKET_LOOPBACK;
        newskb->ip_summed = CHECKSUM_UNNECESSARY;
        WARN_ON(!newskb->dst);
        netif_rx(newskb);
        return 0;
}

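/*
 * inet->uc_ttl is -1 unless the application set an explicit unicast TTL
 * (via the IP_TTL socket option); when it is unset we fall back to the
 * route's RTAX_HOPLIMIT metric.
 */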
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
        int ttl = inet->uc_ttl;

        if (ttl < 0)
                ttl = dst_metric(dst, RTAX_HOPLIMIT);
        return ttl;
}

/*
 *              Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
                          __be32 saddr, __be32 daddr, struct ip_options *opt)
{
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = skb->rtable;
        struct iphdr *iph;

        /* Build the IP header. */
        skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        iph->version  = 4;
        iph->ihl      = 5;
        iph->tos      = inet->tos;
        if (ip_dont_fragment(sk, &rt->u.dst))
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
        iph->daddr    = rt->rt_dst;
        iph->saddr    = rt->rt_src;
        iph->protocol = sk->sk_protocol;
        ip_select_ident(iph, &rt->u.dst, sk);

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, daddr, rt, 0);
        }

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        /* Send it out. */
        return ip_local_out(skb);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

static inline int ip_finish_output2(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct rtable *rt = (struct rtable *)dst;
        struct net_device *dev = dst->dev;
        unsigned int hh_len = LL_RESERVED_SPACE(dev);

        if (rt->rt_type == RTN_MULTICAST)
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTMCASTPKTS);
        else if (rt->rt_type == RTN_BROADCAST)
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTBCASTPKTS);

        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
                struct sk_buff *skb2;

                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
                if (skb2 == NULL) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);
                kfree_skb(skb);
                skb = skb2;
        }

        if (dst->hh)
                return neigh_hh_output(dst->hh, skb);
        else if (dst->neighbour)
                return dst->neighbour->output(skb);

        if (net_ratelimit())
                printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
        kfree_skb(skb);
        return -EINVAL;
}

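/*
 * With IP_PMTUDISC_PROBE the socket has asked to ignore the cached path
 * MTU and size packets to the interface MTU instead, so that probes
 * larger than the current path MTU estimate can be sent.
 */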
static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
        struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

        return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
               skb->dst->dev->mtu : dst_mtu(skb->dst);
}

static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb->dst->xfrm != NULL) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
#endif
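        /* GSO packets may legitimately exceed the MTU here; they are
         * segmented later by the GSO code, so only non-GSO oversized
         * packets go through ip_fragment().
         */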
        if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
                return ip_fragment(skb, ip_finish_output2);
        else
                return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct rtable *rt = skb->rtable;
        struct net_device *dev = rt->u.dst.dev;

        /*
         *      If the indicated interface is up and running, send the packet.
         */
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        /*
         *      Multicasts are looped back for other local users
         */

        if (rt->rt_flags&RTCF_MULTICAST) {
                if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
                /* Small optimization: do not loop back non-local frames
                   that arrived here after forwarding; ip_mr_input would
                   drop them anyway.
                   Note that local frames are looped back so that they
                   are delivered to local recipients.

                   This check is duplicated in ip_mr_input at the moment.
                 */
                    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
                ) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                        if (newskb)
                                NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb,
                                        NULL, newskb->dev,
                                        ip_dev_loopback_xmit);
                }

                /* Multicasts with ttl 0 must not go beyond the host */

                if (ip_hdr(skb)->ttl == 0) {
                        kfree_skb(skb);
                        return 0;
                }
        }

        if (rt->rt_flags&RTCF_BROADCAST) {
                struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                if (newskb)
                        NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb, NULL,
                                newskb->dev, ip_dev_loopback_xmit);
        }

        return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
        struct net_device *dev = skb->dst->dev;

        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct ip_options *opt = inet->opt;
        struct rtable *rt;
        struct iphdr *iph;

        /* Skip all of this if the packet is already routed,
         * e.g. by something like SCTP.
         */
        rt = skb->rtable;
        if (rt != NULL)
                goto packet_routed;

        /* Make sure we can route this packet. */
        rt = (struct rtable *)__sk_dst_check(sk, 0);
        if (rt == NULL) {
                __be32 daddr;

                /* Use correct destination address if we have options. */
                daddr = inet->daddr;
                if (opt && opt->srr)
                        daddr = opt->faddr;

                {
                        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                                            .nl_u = { .ip4_u =
                                                      { .daddr = daddr,
                                                        .saddr = inet->saddr,
                                                        .tos = RT_CONN_FLAGS(sk) } },
                                            .proto = sk->sk_protocol,
                                            .flags = inet_sk_flowi_flags(sk),
                                            .uli_u = { .ports =
                                                       { .sport = inet->sport,
                                                         .dport = inet->dport } } };

                        /* If this fails, the retransmit mechanism of the
                         * transport layer will keep trying until the route
                         * appears or the connection times itself out.
                         */
                        security_sk_classify_flow(sk, &fl);
                        if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
                                goto no_route;
                }
                sk_setup_caps(sk, &rt->u.dst);
        }
        skb->dst = dst_clone(&rt->u.dst);

packet_routed:
        if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
                goto no_route;

        /* OK, we know where to send it, allocate and build IP header. */
        skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
        if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
        iph->protocol = sk->sk_protocol;
        iph->saddr    = rt->rt_src;
        iph->daddr    = rt->rt_dst;
        /* The transport layer has already set up skb->transport_header. */

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen >> 2;
                ip_options_build(skb, opt, inet->daddr, rt, 0);
        }

        ip_select_ident_more(iph, &rt->u.dst, sk,
                             (skb_shinfo(skb)->gso_segs ?: 1) - 1);

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        return ip_local_out(skb);

no_route:
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EHOSTUNREACH;
}


static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        dst_release(to->dst);
        to->dst = dst_clone(from->dst);
        to->dev = from->dev;
        to->mark = from->mark;

        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
        nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
        to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        to->ipvs_property = from->ipvs_property;
#endif
        skb_copy_secmark(to, from);
}

/*
 *      This IP datagram is too large to be sent in one piece.  Break it up
 *      into smaller pieces (each carrying the IP header plus a block of the
 *      data of the original IP datagram) that will fit in a single device
 *      frame, and queue such frames for sending.
 */
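/*
 * Worked example (illustrative): a 4020-byte datagram (20-byte header,
 * 4000 bytes of data) over a 1500-byte MTU leaves 1480 bytes of data
 * space per fragment.  Fragment offsets are carried in 8-byte units:
 *
 *      frag 0: data 0..1479     frag_off = 0    (IP_MF set)
 *      frag 1: data 1480..2959  frag_off = 185  (IP_MF set)
 *      frag 2: data 2960..3999  frag_off = 370  (IP_MF clear)
 *
 * which is why the slow path below rounds every non-final fragment
 * length down to a multiple of 8 ("len &= ~7").
 */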

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
        struct iphdr *iph;
        int raw = 0;
        int ptr;
        struct net_device *dev;
        struct sk_buff *skb2;
        unsigned int mtu, hlen, left, len, ll_rs, pad;
        int offset;
        __be16 not_last_frag;
        struct rtable *rt = skb->rtable;
        int err = 0;

        dev = rt->u.dst.dev;

        /*
         *      Point into the IP datagram header.
         */

        iph = ip_hdr(skb);

        if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(ip_skb_dst_mtu(skb)));
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        /*
         *      Setup starting values.
         */

        hlen = iph->ihl * 4;
        mtu = dst_mtu(&rt->u.dst) - hlen;       /* Size of data space */
        IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

        /* When a frag_list is given, use it.  First check its validity:
         * some transformers could create a wrong frag_list or break an
         * existing one; that is not prohibited.  In such a case fall back
         * to copying.
         *
         * LATER: this step can be merged into the real generation of
         * fragments; we can switch to copying when we see the first bad
         * fragment.
         */
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *frag;
                int first_len = skb_pagelen(skb);
                int truesizes = 0;

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
                    skb_cloned(skb))
                        goto slow_path;

                for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path;

                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                sock_hold(skb->sk);
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                                truesizes += frag->truesize;
                        }
                }

                /* Everything is OK. Generate! */

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_shinfo(skb)->frag_list = NULL;
                skb->data_len = first_len - skb_headlen(skb);
                skb->truesize -= truesizes;
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off = htons(IP_MF);
                ip_send_check(iph);

                for (;;) {
                        /* Prepare the header of the next frame
                         * before the previous one has gone down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
                                skb_reset_transport_header(frag);
                                __skb_push(frag, hlen);
                                skb_reset_network_header(frag);
                                memcpy(skb_network_header(frag), iph, hlen);
                                iph = ip_hdr(frag);
                                iph->tot_len = htons(frag->len);
                                ip_copy_metadata(frag, skb);
                                if (offset == 0)
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset>>3);
                                if (frag->next != NULL)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready, complete checksum */
                                ip_send_check(iph);
                        }

                        err = output(skb);

                        if (!err)
                                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                if (err == 0) {
                        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                return err;
        }

slow_path:
        left = skb->len - hlen;         /* Space per frame */
        ptr = raw + hlen;               /* Where to start from */

        /* For bridged IP traffic encapsulated inside e.g. a vlan header,
         * we need to make room for the encapsulating header.
         */
        pad = nf_bridge_pad(skb);
        ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
        mtu -= pad;

        /*
         *      Fragment the datagram.
         */

        offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
        not_last_frag = iph->frag_off & htons(IP_MF);

        /*
         *      Keep copying data until we run out.
         */

        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end
                   then align the next start on an eight byte boundary */
                if (len < left) {
                        len &= ~7;
                }
                /*
                 *      Allocate buffer.
                 */

                if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 *      Set up data on packet
                 */

                ip_copy_metadata(skb2, skb);
                skb_reserve(skb2, ll_rs);
                skb_put(skb2, len + hlen);
                skb_reset_network_header(skb2);
                skb2->transport_header = skb2->network_header + hlen;

                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess
                 */

                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);

                /*
                 *      Copy the packet header into the new buffer.
                 */

                skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

                /*
                 *      Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
                        BUG();
                left -= len;

                /*
                 *      Fill in the new header fields.
                 */
                iph = ip_hdr(skb2);
                iph->frag_off = htons((offset >> 3));

                /* ANK: dirty, but effective trick. Upgrade options only if
                 * the segment to be fragmented was THE FIRST (otherwise,
                 * options are already fixed) and make it ONCE
                 * on the initial skb, so that all the following fragments
                 * will inherit fixed options.
                 */
                if (offset == 0)
                        ip_options_fragment(skb);

                /*
                 *      Added AC : If we are fragmenting a fragment that's not the
                 *                 last fragment then keep the MF bit set on each
                 *                 fragment.
                 */
                if (left > 0 || not_last_frag)
                        iph->frag_off |= htons(IP_MF);
                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */
                iph->tot_len = htons(len + hlen);

                ip_send_check(iph);

                err = output(skb2);
                if (err)
                        goto fail;

                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
        }
        kfree_skb(skb);
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
        return err;

fail:
        kfree_skb(skb);
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
        return err;
}

EXPORT_SYMBOL(ip_fragment);

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
        struct iovec *iov = from;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (memcpy_fromiovecend(to, iov, offset, len) < 0)
                        return -EFAULT;
        } else {
                __wsum csum = 0;
                if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
                        return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, odd);
        }
        return 0;
}
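/*
 * Typical use (sketch, not a definitive call site): a datagram transport
 * hands this helper to ip_append_data() together with the user iovec, e.g.
 *
 *      err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len,
 *                           sizeof(struct udphdr), &ipc, &rt,
 *                           msg->msg_flags);
 *
 * so the copy from user space and, when the device cannot checksum, the
 * checksum itself happen in a single pass.
 */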

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
        char *kaddr;
        __wsum csum;
        kaddr = kmap(page);
        csum = csum_partial(kaddr + offset, copy, 0);
        kunmap(page);
        return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int mtu, unsigned int flags)
{
        struct sk_buff *skb;
        int err;

        /* The network device supports UDP fragmentation offload, so create
         * one single skb containing the complete UDP datagram.
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);

                if (skb == NULL)
                        return err;

                /* reserve space for Hardware header */
                skb_reserve(skb, hh_len);

                /* create space for UDP/IP header */
                skb_put(skb, fragheaderlen + transhdrlen);

                /* initialize network header pointer */
                skb_reset_network_header(skb);

                /* initialize protocol header pointer */
                skb->transport_header = skb->network_header + fragheaderlen;

                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
                sk->sk_sndmsg_off = 0;

                /* specify the length of each IP datagram fragment */
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                __skb_queue_tail(&sk->sk_write_queue, skb);
        }

        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
}

/*
 *      ip_append_data() and ip_append_page() can make one large IP datagram
 *      from many pieces of data.  Each piece will be held on the socket
 *      until ip_push_pending_frames() is called.  Each piece can be a page
 *      or non-page data.
 *
 *      Not only UDP but other transport protocols, e.g. raw sockets, can
 *      potentially use this interface.
 *
 *      LATER: length must be adjusted by pad at tail, when it is required.
 */
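/*
 * Corking sketch (illustrative, not a definitive call site): a caller
 * typically appends one or more pieces and then flushes them as a single
 * datagram:
 *
 *      err = ip_append_data(sk, getfrag, from, len, transhdrlen,
 *                           &ipc, &rt, flags);
 *      if (err)
 *              ip_flush_pending_frames(sk);
 *      else if (!(flags & MSG_MORE))
 *              err = ip_push_pending_frames(sk);
 */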
int ip_append_data(struct sock *sk,
                   int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                   void *from, int length, int transhdrlen,
                   struct ipcm_cookie *ipc, struct rtable **rtp,
                   unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;

        struct ip_options *opt = NULL;
        int hh_len;
        int exthdrlen;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        unsigned int maxfraglen, fragheaderlen;
        int csummode = CHECKSUM_NONE;
        struct rtable *rt;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue)) {
                /*
                 * setup for corking.
                 */
                opt = ipc->opt;
                if (opt) {
                        if (inet->cork.opt == NULL) {
                                inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
                                if (unlikely(inet->cork.opt == NULL))
                                        return -ENOBUFS;
                        }
                        memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
                        inet->cork.flags |= IPCORK_OPT;
                        inet->cork.addr = ipc->addr;
                }
                rt = *rtp;
                /*
                 * We steal the reference to this route; the caller should
                 * not release it.
                 */
                *rtp = NULL;
                inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
                                            rt->u.dst.dev->mtu :
                                            dst_mtu(rt->u.dst.path);
                inet->cork.dst = &rt->u.dst;
                inet->cork.length = 0;
                sk->sk_sndmsg_page = NULL;
                sk->sk_sndmsg_off = 0;
                if ((exthdrlen = rt->u.dst.header_len) != 0) {
                        length += exthdrlen;
                        transhdrlen += exthdrlen;
                }
        } else {
                rt = (struct rtable *)inet->cork.dst;
                if (inet->cork.flags & IPCORK_OPT)
                        opt = inet->cork.opt;

                transhdrlen = 0;
                exthdrlen = 0;
                mtu = inet->cork.fragsize;
        }
        hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
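        /*
         * Example: with a 1500-byte MTU and a 20-byte header,
         * mtu - fragheaderlen = 1480, already a multiple of 8, so
         * maxfraglen = 1500.  An MTU of 1006 would instead give
         * (986 & ~7) + 20 = 1004, so that every non-final fragment
         * carries a multiple of 8 data bytes, as fragment offsets
         * are expressed in 8-byte units.
         */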

        if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
                return -EMSGSIZE;
        }

        /*
         * transhdrlen > 0 means that this is the first fragment and we wish
         * it not to be fragmented later.
         */
        if (transhdrlen &&
            length + fragheaderlen <= mtu &&
            rt->u.dst.dev->features & NETIF_F_V4_CSUM &&
            !exthdrlen)
                csummode = CHECKSUM_PARTIAL;

        inet->cork.length += length;
        if (((length > mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->u.dst.dev->features & NETIF_F_UFO)) {
                err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
                                         fragheaderlen, transhdrlen, mtu,
                                         flags);
                if (err)
                        goto error;
                return 0;
        }

        /* So, what is going on in the loop below?
         *
         * We use the calculated fragment length to generate a chain of
         * skbs; each segment is an IP fragment ready for sending to the
         * network once an appropriate IP header is added.
         */

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into current packet. */
                copy = mtu - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;
                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > mtu - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;
                        fraglen = datalen + fragheaderlen;

                        if ((flags & MSG_MORE) &&
                            !(rt->u.dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else
                                alloclen = datalen + fragheaderlen;

                        /* The last fragment gets additional space at the
                         * tail.  Note that with MSG_MORE we overallocate
                         * on fragments, because we have no idea which
                         * fragment will be the last.
                         */
                        if (datalen == length + fraggap)
                                alloclen += rt->u.dst.trailer_len;

                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len + 15,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (atomic_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len + 15, 1,
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
                                else
                                        /* only the initial fragment is
                                           time stamped */
                                        ipc->shtx.flags = 0;
                        }
                        if (skb == NULL)
                                goto error;

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);
                        *skb_tx(skb) = ipc->shtx;

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen);
                        skb_set_network_header(skb, exthdrlen);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        data += fragheaderlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        copy = datalen - transhdrlen - fraggap;
                        if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        }

                        offset += copy;
                        length -= datalen - fraggap;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                        offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
                        struct page *page = sk->sk_sndmsg_page;
                        int off = sk->sk_sndmsg_off;
                        unsigned int left;

                        if (page && (left = PAGE_SIZE - off) > 0) {
                                if (copy >= left)
                                        copy = left;
                                if (page != frag->page) {
                                        if (i == MAX_SKB_FRAGS) {
                                                err = -EMSGSIZE;
                                                goto error;
                                        }
                                        get_page(page);
                                        skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }
                        } else if (i < MAX_SKB_FRAGS) {
                                if (copy > PAGE_SIZE)
                                        copy = PAGE_SIZE;
                                page = alloc_pages(sk->sk_allocation, 0);
                                if (page == NULL) {
                                        err = -ENOMEM;
                                        goto error;
                                }
                                sk->sk_sndmsg_page = page;
                                sk->sk_sndmsg_off = 0;

                                skb_fill_page_desc(skb, i, page, 0, 0);
                                frag = &skb_shinfo(skb)->frags[i];
                        } else {
                                err = -EMSGSIZE;
                                goto error;
                        }
                        if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
                                err = -EFAULT;
                                goto error;
                        }
                        sk->sk_sndmsg_off += copy;
                        frag->size += copy;
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
                        atomic_add(copy, &sk->sk_wmem_alloc);
                }
                offset += copy;
                length -= copy;
        }

        return 0;

error:
        inet->cork.length -= length;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}

ssize_t ip_append_page(struct sock *sk, struct page *page,
                       int offset, size_t size, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
        struct rtable *rt;
        struct ip_options *opt = NULL;
        int hh_len;
        int mtu;
        int len;
        int err;
        unsigned int maxfraglen, fragheaderlen, fraggap;

        if (inet->hdrincl)
                return -EPERM;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue))
                return -EINVAL;

        rt = (struct rtable *)inet->cork.dst;
        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (!(rt->u.dst.dev->features&NETIF_F_SG))
                return -EOPNOTSUPP;

        hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
        mtu = inet->cork.fragsize;

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

        if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
                return -EMSGSIZE;
        }

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                return -EINVAL;

        inet->cork.length += size;
        if ((sk->sk_protocol == IPPROTO_UDP) &&
            (rt->u.dst.dev->features & NETIF_F_UFO)) {
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
        }

        while (size > 0) {
                int i;

                if (skb_is_gso(skb))
                        len = size;
                else {
                        /* Check if the remaining data fits into current packet. */
                        len = mtu - skb->len;
                        if (len < size)
                                len = maxfraglen - skb->len;
                }
                if (len <= 0) {
                        struct sk_buff *skb_prev;
                        int alloclen;

                        skb_prev = skb;
                        fraggap = skb_prev->len - maxfraglen;

                        alloclen = fragheaderlen + hh_len + fraggap + 15;
                        skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
                        if (unlikely(!skb)) {
                                err = -ENOBUFS;
                                goto error;
                        }

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        skb_put(skb, fragheaderlen + fraggap);
                        skb_reset_network_header(skb);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(skb_prev,
                                                                   maxfraglen,
                                                    skb_transport_header(skb),
                                                                   fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                i = skb_shinfo(skb)->nr_frags;
                if (len > size)
                        len = size;
                if (skb_can_coalesce(skb, i, page, offset)) {
                        skb_shinfo(skb)->frags[i-1].size += len;
                } else if (i < MAX_SKB_FRAGS) {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, len);
                } else {
                        err = -EMSGSIZE;
                        goto error;
                }

                if (skb->ip_summed == CHECKSUM_NONE) {
                        __wsum csum;
                        csum = csum_page(page, offset, len);
                        skb->csum = csum_block_add(skb->csum, csum, skb->len);
                }

                skb->len += len;
                skb->data_len += len;
                skb->truesize += len;
                atomic_add(len, &sk->sk_wmem_alloc);
                offset += len;
                size -= len;
        }
        return 0;

error:
        inet->cork.length -= size;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}

static void ip_cork_release(struct inet_sock *inet)
{
        inet->cork.flags &= ~IPCORK_OPT;
        kfree(inet->cork.opt);
        inet->cork.opt = NULL;
        dst_release(inet->cork.dst);
        inet->cork.dst = NULL;
}

/*
 *      Combine all pending IP fragments on the socket into one IP datagram
 *      and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
        struct ip_options *opt = NULL;
        struct rtable *rt = (struct rtable *)inet->cork.dst;
        struct iphdr *iph;
        __be16 df = 0;
        __u8 ttl;
        int err = 0;

        if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);

        /* move skb->data to ip header from ext header */
        if (skb->data < skb_network_header(skb))
                __skb_pull(skb, skb_network_offset(skb));
        while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
                __skb_pull(tmp_skb, skb_network_header_len(skb));
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
                skb->data_len += tmp_skb->len;
                skb->truesize += tmp_skb->truesize;
                __sock_put(tmp_skb->sk);
                tmp_skb->destructor = NULL;
                tmp_skb->sk = NULL;
        }

        /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
         * allow the frame generated here to be fragmented.  No matter how
         * transforms change the size of the packet, it will go out.
         */
        if (inet->pmtudisc < IP_PMTUDISC_DO)
                skb->local_df = 1;

        /* The DF bit is set when we want to see DF on outgoing frames.
         * If local_df is set too, we still allow this frame to be
         * fragmented locally. */
        if (inet->pmtudisc >= IP_PMTUDISC_DO ||
            (skb->len <= dst_mtu(&rt->u.dst) &&
             ip_dont_fragment(sk, &rt->u.dst)))
                df = htons(IP_DF);

        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
        else
                ttl = ip_select_ttl(inet, &rt->u.dst);

        iph = (struct iphdr *)skb->data;
        iph->version = 4;
        iph->ihl = 5;
        if (opt) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, inet->cork.addr, rt, 0);
        }
        iph->tos = inet->tos;
        iph->frag_off = df;
        ip_select_ident(iph, &rt->u.dst, sk);
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
        iph->saddr = rt->rt_src;
        iph->daddr = rt->rt_dst;

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
        /*
         * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
         * on the dst refcount.
         */
        inet->cork.dst = NULL;
        skb->dst = &rt->u.dst;

        if (iph->protocol == IPPROTO_ICMP)
                icmp_out_count(net, ((struct icmphdr *)
                        skb_transport_header(skb))->type);

        /* Netfilter gets the whole, not yet fragmented skb. */
        err = ip_local_out(skb);
        if (err) {
                if (err > 0)
                        err = inet->recverr ? net_xmit_errno(err) : 0;
                if (err)
                        goto error;
        }

out:
        ip_cork_release(inet);
        return err;

error:
        IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
        goto out;
}

/*
 *      Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);

        ip_cork_release(inet_sk(sk));
}


/*
 *      Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
                              int len, int odd, struct sk_buff *skb)
{
        __wsum csum;

        csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
        skb->csum = csum_block_add(skb->csum, csum, odd);
        return 0;
}

/*
 *      Generic function to send a packet as reply to another packet.
 *      Used to send TCP resets so far. ICMP should use this function too.
 *
 *      Should run single threaded per socket because it uses the sock
 *      structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
                   unsigned int len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct {
                struct ip_options       opt;
                char                    data[40];
        } replyopts;
        struct ipcm_cookie ipc;
        __be32 daddr;
        struct rtable *rt = skb->rtable;

        if (ip_options_echo(&replyopts.opt, skb))
                return;

        daddr = ipc.addr = rt->rt_src;
        ipc.opt = NULL;
        ipc.shtx.flags = 0;

        if (replyopts.opt.optlen) {
                ipc.opt = &replyopts.opt;

                if (ipc.opt->srr)
                        daddr = replyopts.opt.faddr;
        }

        {
                struct flowi fl = { .oif = arg->bound_dev_if,
                                    .nl_u = { .ip4_u =
                                              { .daddr = daddr,
                                                .saddr = rt->rt_spec_dst,
                                                .tos = RT_TOS(ip_hdr(skb)->tos) } },
                                    /* Not quite clean, but right. */
                                    .uli_u = { .ports =
                                               { .sport = tcp_hdr(skb)->dest,
                                                 .dport = tcp_hdr(skb)->source } },
                                    .proto = sk->sk_protocol,
                                    .flags = ip_reply_arg_flowi_flags(arg) };
                security_skb_classify_flow(skb, &fl);
                if (ip_route_output_key(sock_net(sk), &rt, &fl))
                        return;
        }

        /* And let IP do all the hard work.

           This chunk is not reentrant, hence the spinlock.
           Note that it relies on the fact that this function is called
           with BHs locally disabled and that sk cannot already be locked.
         */
        bh_lock_sock(sk);
        inet->tos = ip_hdr(skb)->tos;
        sk->sk_priority = skb->priority;
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
        ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
                       &ipc, &rt, MSG_DONTWAIT);
        if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
                if (arg->csumoffset >= 0)
                        *((__sum16 *)skb_transport_header(skb) +
                          arg->csumoffset) = csum_fold(csum_add(skb->csum,
                                                                arg->csum));
                skb->ip_summed = CHECKSUM_NONE;
                ip_push_pending_frames(sk);
        }

        bh_unlock_sock(sk);

        ip_rt_put(rt);
}

void __init ip_init(void)
{
        ip_rt_init();
        inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
        igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);