/*
 *      IPv6 fragment reassembly
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      $Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
 *
 *      Based on: net/ipv4/ip_fragment.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      Fixes:
 *      Andi Kleen      Make it work with multiple hosts.
 *                      More RFC compliance.
 *
 *      Horst von Brand Add missing #include <linux/string.h>
 *      Alexey Kuznetsov        SMP races, threading, cleanup.
 *      Patrick McHardy         LRU queue of frag heads for evictor.
 *      Mitsuru KANDA @USAGI    Register inet6_protocol{}.
 *      David Stevens and
 *      YOSHIFUJI,H. @USAGI     Always remove fragment header to
 *                              calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>

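/*
 * Reassembly memory accounting: once total fragment memory exceeds the
 * high threshold, the evictor frees LRU queues until usage drops below
 * the low threshold (see ip6_evictor() and ipv6_frag_rcv() below).
 */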
int sysctl_ip6frag_high_thresh __read_mostly = 256*1024;
int sysctl_ip6frag_low_thresh __read_mostly = 192*1024;

int sysctl_ip6frag_time __read_mostly = IPV6_FRAG_TIMEOUT;

struct ip6frag_skb_cb
{
        struct inet6_skb_parm   h;
        int                     offset;
};

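/*
 * Per-fragment state lives in skb->cb, overlaying the generic IPv6
 * control block; "offset" is this fragment's position in the datagram.
 */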
#define FRAG6_CB(skb)   ((struct ip6frag_skb_cb*)((skb)->cb))

/*
 *      Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
        struct hlist_node       list;
        struct list_head        lru_list;       /* lru list member      */

        __be32                  id;             /* fragment id          */
        struct in6_addr         saddr;
        struct in6_addr         daddr;

        spinlock_t              lock;
        atomic_t                refcnt;
        struct timer_list       timer;          /* expire timer         */
        struct sk_buff          *fragments;
        int                     len;            /* total datagram length */
        int                     meat;           /* bytes received so far */
        int                     iif;
        struct timeval          stamp;
        unsigned int            csum;
        __u8                    last_in;        /* has first/last segment arrived? */
#define COMPLETE                4               /* queue reassembled or killed  */
#define FIRST_IN                2               /* fragment at offset 0 seen    */
#define LAST_IN                 1               /* fragment without MF seen     */
        __u16                   nhoffset;
};

/* Hash table, protected by ip6_frag_lock. */

#define IP6Q_HASHSZ     64

static struct hlist_head ip6_frag_hash[IP6Q_HASHSZ];
static DEFINE_RWLOCK(ip6_frag_lock);
static u32 ip6_frag_hash_rnd;
static LIST_HEAD(ip6_frag_lru_list);
int ip6_frag_nqueues = 0;

static __inline__ void __fq_unlink(struct frag_queue *fq)
{
        hlist_del(&fq->list);
        list_del(&fq->lru_list);
        ip6_frag_nqueues--;
}

static __inline__ void fq_unlink(struct frag_queue *fq)
{
        write_lock(&ip6_frag_lock);
        __fq_unlink(fq);
        write_unlock(&ip6_frag_lock);
}

/*
 * Callers should be careful not to use the hash value outside the
 * ip6_frag_lock, as doing so could race with ip6_frag_hash_rnd being
 * recalculated.
 */
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
                               struct in6_addr *daddr)
{
        u32 a, b, c;

        a = (__force u32)saddr->s6_addr32[0];
        b = (__force u32)saddr->s6_addr32[1];
        c = (__force u32)saddr->s6_addr32[2];

        a += JHASH_GOLDEN_RATIO;
        b += JHASH_GOLDEN_RATIO;
        c += ip6_frag_hash_rnd;
        __jhash_mix(a, b, c);

        a += (__force u32)saddr->s6_addr32[3];
        b += (__force u32)daddr->s6_addr32[0];
        c += (__force u32)daddr->s6_addr32[1];
        __jhash_mix(a, b, c);

        a += (__force u32)daddr->s6_addr32[2];
        b += (__force u32)daddr->s6_addr32[3];
        c += (__force u32)id;
        __jhash_mix(a, b, c);

        return c & (IP6Q_HASHSZ - 1);
}

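/*
 * Rekey the hash at a regular interval and rehash every queue with the
 * new secret, so remote senders cannot predict bucket placement and
 * pile fragments into a single chain.
 */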
static struct timer_list ip6_frag_secret_timer;
int sysctl_ip6frag_secret_interval __read_mostly = 10 * 60 * HZ;

static void ip6_frag_secret_rebuild(unsigned long dummy)
{
        unsigned long now = jiffies;
        int i;

        write_lock(&ip6_frag_lock);
        get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32));
        for (i = 0; i < IP6Q_HASHSZ; i++) {
                struct frag_queue *q;
                struct hlist_node *p, *n;

                hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], list) {
                        unsigned int hval = ip6qhashfn(q->id,
                                                       &q->saddr,
                                                       &q->daddr);

                        if (hval != i) {
                                hlist_del(&q->list);

                                /* Relink to new hash chain. */
                                hlist_add_head(&q->list,
                                               &ip6_frag_hash[hval]);
                        }
                }
        }
        write_unlock(&ip6_frag_lock);

        mod_timer(&ip6_frag_secret_timer, now + sysctl_ip6frag_secret_interval);
}

atomic_t ip6_frag_mem = ATOMIC_INIT(0);

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
{
        if (work)
                *work -= skb->truesize;
        atomic_sub(skb->truesize, &ip6_frag_mem);
        kfree_skb(skb);
}

static inline void frag_free_queue(struct frag_queue *fq, int *work)
{
        if (work)
                *work -= sizeof(struct frag_queue);
        atomic_sub(sizeof(struct frag_queue), &ip6_frag_mem);
        kfree(fq);
}

static inline struct frag_queue *frag_alloc_queue(void)
{
        struct frag_queue *fq = kzalloc(sizeof(struct frag_queue), GFP_ATOMIC);

        if (!fq)
                return NULL;
        atomic_add(sizeof(struct frag_queue), &ip6_frag_mem);
        return fq;
}

/* Destruction primitives. */

/* Complete destruction of fq. */
static void ip6_frag_destroy(struct frag_queue *fq, int *work)
{
        struct sk_buff *fp;

        BUG_TRAP(fq->last_in & COMPLETE);
        BUG_TRAP(del_timer(&fq->timer) == 0);

        /* Release all fragment data. */
        fp = fq->fragments;
        while (fp) {
                struct sk_buff *xp = fp->next;

                frag_kfree_skb(fp, work);
                fp = xp;
        }

        frag_free_queue(fq, work);
}

static __inline__ void fq_put(struct frag_queue *fq, int *work)
{
        if (atomic_dec_and_test(&fq->refcnt))
                ip6_frag_destroy(fq, work);
}

/* Kill fq entry. It is not destroyed immediately, because the caller
 * (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
        if (del_timer(&fq->timer))
                atomic_dec(&fq->refcnt);

        if (!(fq->last_in & COMPLETE)) {
                fq_unlink(fq);
                atomic_dec(&fq->refcnt);
                fq->last_in |= COMPLETE;
        }
}

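/*
 * Evict LRU queues until total fragment memory is back below the low
 * threshold; each evicted queue counts as a reassembly failure.
 */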
static void ip6_evictor(struct inet6_dev *idev)
{
        struct frag_queue *fq;
        struct list_head *tmp;
        int work;

        work = atomic_read(&ip6_frag_mem) - sysctl_ip6frag_low_thresh;
        if (work <= 0)
                return;

        while (work > 0) {
                read_lock(&ip6_frag_lock);
                if (list_empty(&ip6_frag_lru_list)) {
                        read_unlock(&ip6_frag_lock);
                        return;
                }
                tmp = ip6_frag_lru_list.next;
                fq = list_entry(tmp, struct frag_queue, lru_list);
                atomic_inc(&fq->refcnt);
                read_unlock(&ip6_frag_lock);

                spin_lock(&fq->lock);
                if (!(fq->last_in & COMPLETE))
                        fq_kill(fq);
                spin_unlock(&fq->lock);

                fq_put(fq, &work);
                IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
        }
}

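/*
 * Expire timer: the queue timed out before all fragments arrived.
 * Kill it and, if the first fragment did arrive, report a reassembly
 * timeout back to the sender.
 */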
static void ip6_frag_expire(unsigned long data)
{
        struct frag_queue *fq = (struct frag_queue *) data;
        struct net_device *dev = NULL;

        spin_lock(&fq->lock);

        if (fq->last_in & COMPLETE)
                goto out;

        fq_kill(fq);

        dev = dev_get_by_index(fq->iif);
        if (!dev)
                goto out;

        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();

        /* Don't send error if the first segment did not arrive. */
        if (!(fq->last_in & FIRST_IN) || !fq->fragments)
                goto out;

        /* Use the device on which the LAST segment arrived as the
         * source device. Do not dereference fq->dev directly; the
         * device might have disappeared already.
         */
        fq->fragments->dev = dev;
        icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
        if (dev)
                dev_put(dev);
        spin_unlock(&fq->lock);
        fq_put(fq, NULL);
}

/* Creation primitives. */

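/*
 * Insert a freshly allocated queue into the hash. On SMP another CPU
 * may have raced us and interned an identical queue already; in that
 * case drop ours and return the existing one.
 */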
static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
{
        struct frag_queue *fq;
        unsigned int hash;
#ifdef CONFIG_SMP
        struct hlist_node *n;
#endif

        write_lock(&ip6_frag_lock);
        hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr);
#ifdef CONFIG_SMP
        hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
                if (fq->id == fq_in->id &&
                    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
                    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
                        atomic_inc(&fq->refcnt);
                        write_unlock(&ip6_frag_lock);
                        fq_in->last_in |= COMPLETE;
                        fq_put(fq_in, NULL);
                        return fq;
                }
        }
#endif
        fq = fq_in;

        if (!mod_timer(&fq->timer, jiffies + sysctl_ip6frag_time))
                atomic_inc(&fq->refcnt);

        atomic_inc(&fq->refcnt);
        hlist_add_head(&fq->list, &ip6_frag_hash[hash]);
        INIT_LIST_HEAD(&fq->lru_list);
        list_add_tail(&fq->lru_list, &ip6_frag_lru_list);
        ip6_frag_nqueues++;
        write_unlock(&ip6_frag_lock);
        return fq;
}

static struct frag_queue *
ip6_frag_create(__be32 id, struct in6_addr *src, struct in6_addr *dst,
                struct inet6_dev *idev)
{
        struct frag_queue *fq;

        if ((fq = frag_alloc_queue()) == NULL)
                goto oom;

        fq->id = id;
        ipv6_addr_copy(&fq->saddr, src);
        ipv6_addr_copy(&fq->daddr, dst);

        init_timer(&fq->timer);
        fq->timer.function = ip6_frag_expire;
        fq->timer.data = (long) fq;
        spin_lock_init(&fq->lock);
        atomic_set(&fq->refcnt, 1);

        return ip6_frag_intern(fq);

oom:
        IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
        return NULL;
}

static __inline__ struct frag_queue *
fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
        struct inet6_dev *idev)
{
        struct frag_queue *fq;
        struct hlist_node *n;
        unsigned int hash;

        read_lock(&ip6_frag_lock);
        hash = ip6qhashfn(id, src, dst);
        hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
                if (fq->id == id &&
                    ipv6_addr_equal(src, &fq->saddr) &&
                    ipv6_addr_equal(dst, &fq->daddr)) {
                        atomic_inc(&fq->refcnt);
                        read_unlock(&ip6_frag_lock);
                        return fq;
                }
        }
        read_unlock(&ip6_frag_lock);

        return ip6_frag_create(id, src, dst, idev);
}

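/*
 * Queue one fragment: validate its offset and length, trim any overlap
 * with already-queued data, and link it into the sorted fragment list.
 */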
static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                           struct frag_hdr *fhdr, int nhoff)
{
        struct sk_buff *prev, *next;
        int offset, end;

        if (fq->last_in & COMPLETE)
                goto err;

        offset = ntohs(fhdr->frag_off) & ~0x7;
        end = offset + (ntohs(skb->nh.ipv6h->payload_len) -
                        ((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));

        if ((unsigned int)end > IPV6_MAXPLEN) {
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                 IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  (u8 *)&fhdr->frag_off - skb->nh.raw);
                return;
        }

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum,
                                     csum_partial(skb->nh.raw, (u8 *)(fhdr + 1) - skb->nh.raw, 0));

        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(IP6_MF))) {
                /* If we already have some bits beyond end
                 * or have a different end, the segment is corrupted.
                 */
                if (end < fq->len ||
                    ((fq->last_in & LAST_IN) && end != fq->len))
                        goto err;
                fq->last_in |= LAST_IN;
                fq->len = end;
        } else {
                /* Check that the fragment length is a multiple of
                 * 8 bytes, as required by the RFC.
                 */
                if (end & 0x7) {
                        /* RFC2460 says always send parameter problem in
                         * this case. -DaveM
                         */
                        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                         IPSTATS_MIB_INHDRERRORS);
                        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                          offsetof(struct ipv6hdr, payload_len));
                        return;
                }
                if (end > fq->len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->last_in & LAST_IN)
                                goto err;
                        fq->len = end;
                }
        }

        if (end == offset)
                goto err;

        /* Point into the IP datagram 'data' part. */
        if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
                goto err;

        if (pskb_trim_rcsum(skb, end - offset))
                goto err;

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = NULL;
        for (next = fq->fragments; next != NULL; next = next->next) {
                if (FRAG6_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

        /* We found where to put this one.  Check for overlap with
         * the preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
        if (prev) {
                int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        if (end <= offset)
                                goto err;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        /* Look for overlap with succeeding segments.
         * If we can merge fragments, do it.
         */
        while (next && FRAG6_CB(next)->offset < end) {
                int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat the head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG6_CB(next)->offset += i;    /* next fragment */
                        fq->meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* Old fragment is completely overridden by the
                         * new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                fq->fragments = next;

                        fq->meat -= free_it->len;
                        frag_kfree_skb(free_it, NULL);
                }
        }

        FRAG6_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (prev)
                prev->next = skb;
        else
                fq->fragments = skb;

        if (skb->dev)
                fq->iif = skb->dev->ifindex;
        skb->dev = NULL;
        skb_get_timestamp(skb, &fq->stamp);
        fq->meat += skb->len;
        atomic_add(skb->truesize, &ip6_frag_mem);

        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
                fq->last_in |= FIRST_IN;
        }
        write_lock(&ip6_frag_lock);
        list_move_tail(&fq->lru_list, &ip6_frag_lru_list);
        write_unlock(&ip6_frag_lock);
        return;

err:
        IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
}

/*
 *      Check if this packet is complete.
 *      Returns 1 on success (with *skb_in pointing to the reassembled
 *      frame) and -1 on failure for any reason.
 *
 *      It is called with a locked fq, and the caller must check that the
 *      queue is eligible for reassembly, i.e. it is not COMPLETE, the
 *      first and last fragments have arrived, and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
                          struct net_device *dev)
{
        struct sk_buff *fp, *head = fq->fragments;
        int    payload_len;
        unsigned int nhoff;

        fq_kill(fq);

        BUG_TRAP(head != NULL);
        BUG_TRAP(FRAG6_CB(head)->offset == 0);

        /* Unfragmented part is taken from the first segment. */
        payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) +
                      fq->len - sizeof(struct frag_hdr);
        if (payload_len > IPV6_MAXPLEN)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments. */
        if (skb_shinfo(head)->frag_list) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_shinfo(head)->frag_list = NULL;
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_shinfo(head)->frags[i].size;
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &ip6_frag_mem);
        }

        /* We have to remove the fragment header from the datagram and
         * to relocate the header in order to calculate the ICV correctly. */
        nhoff = fq->nhoffset;
        head->nh.raw[nhoff] = head->h.raw[0];
        memmove(head->head + sizeof(struct frag_hdr), head->head,
                (head->data - head->head) - sizeof(struct frag_hdr));
        head->mac.raw += sizeof(struct frag_hdr);
        head->nh.raw += sizeof(struct frag_hdr);

        skb_shinfo(head)->frag_list = head->next;
        head->h.raw = head->data;
        skb_push(head, head->data - head->nh.raw);
        atomic_sub(head->truesize, &ip6_frag_mem);

        for (fp = head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
                atomic_sub(fp->truesize, &ip6_frag_mem);
        }

        head->next = NULL;
        head->dev = dev;
        skb_set_timestamp(head, &fq->stamp);
        head->nh.ipv6h->payload_len = htons(payload_len);
        IP6CB(head)->nhoff = nhoff;

        *skb_in = head;

        /* Yes, and fold the redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
                head->csum = csum_partial(head->nh.raw, head->h.raw - head->nh.raw,
                                          head->csum);

        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
        rcu_read_unlock();
        fq->fragments = NULL;
        return 1;

out_oversize:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
        goto out_fail;
out_oom:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();
        return -1;
}

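/*
 * Fragment extension header input handler. Returns 1 if the (possibly
 * reassembled) packet should continue up the stack, -1 if it was
 * consumed (queued, dropped, or an error was signalled).
 */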
static int ipv6_frag_rcv(struct sk_buff **skbp)
{
        struct sk_buff *skb = *skbp;
        struct net_device *dev = skb->dev;
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
        struct ipv6hdr *hdr;

        hdr = skb->nh.ipv6h;

        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);

        /* Jumbo payload inhibits frag. header */
        if (hdr->payload_len == 0) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw - skb->nh.raw);
                return -1;
        }
        if (!pskb_may_pull(skb, (skb->h.raw - skb->data) + sizeof(struct frag_hdr))) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw - skb->nh.raw);
                return -1;
        }

        hdr = skb->nh.ipv6h;
        fhdr = (struct frag_hdr *)skb->h.raw;

        /* Offset and M bit both zero: not a fragmented frame */
        if (!(fhdr->frag_off & htons(0xFFF9))) {
                skb->h.raw += sizeof(struct frag_hdr);
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);

                IP6CB(skb)->nhoff = (u8 *)fhdr - skb->nh.raw;
                return 1;
        }

        if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh)
                ip6_evictor(ip6_dst_idev(skb->dst));

        if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr,
                          ip6_dst_idev(skb->dst))) != NULL) {
                int ret = -1;

                spin_lock(&fq->lock);

                ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

                if (fq->last_in == (FIRST_IN|LAST_IN) &&
                    fq->meat == fq->len)
                        ret = ip6_frag_reasm(fq, skbp, dev);

                spin_unlock(&fq->lock);
                fq_put(fq, NULL);
                return ret;
        }

        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
}

static struct inet6_protocol frag_protocol =
{
        .handler        =       ipv6_frag_rcv,
        .flags          =       INET6_PROTO_NOPOLICY,
};

void __init ipv6_frag_init(void)
{
        if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0)
                printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n");

        ip6_frag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
                                   (jiffies ^ (jiffies >> 6)));

        init_timer(&ip6_frag_secret_timer);
        ip6_frag_secret_timer.function = ip6_frag_secret_rebuild;
        ip6_frag_secret_timer.expires = jiffies + sysctl_ip6frag_secret_interval;
        add_timer(&ip6_frag_secret_timer);
}