/* linux-2.6: net/core/neighbour.c */
1 /*
2  *      Generic address resolution entity
3  *
4  *      Authors:
5  *      Pedro Roque             <roque@di.fc.ul.pt>
6  *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
7  *
8  *      This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *      Fixes:
14  *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
15  *      Harald Welte            Add neighbour cache statistics like rtstat
16  */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/sched.h>
23 #include <linux/netdevice.h>
24 #include <linux/proc_fs.h>
25 #ifdef CONFIG_SYSCTL
26 #include <linux/sysctl.h>
27 #endif
28 #include <linux/times.h>
29 #include <net/neighbour.h>
30 #include <net/dst.h>
31 #include <net/sock.h>
32 #include <net/netevent.h>
33 #include <linux/rtnetlink.h>
34 #include <linux/random.h>
35 #include <linux/string.h>
36
/*
 * Debug verbosity: 0 = level-0 messages only, 1 adds NEIGH_PRINTK1,
 * 2 adds NEIGH_PRINTK2.  Levels above NEIGH_DEBUG compile to no-ops.
 */
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

/* Proxy-neighbour hash: PNEIGH_HASHMASK + 1 (16) buckets. */
#define PNEIGH_HASHMASK         0xF

/* Forward declarations for routines used before their definitions. */
static void neigh_timer_handler(unsigned long arg);
#ifdef CONFIG_ARPD
static void neigh_app_notify(struct neighbour *n);
#endif
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

/* Head of the list of registered neighbour tables. */
static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static struct file_operations neigh_stat_seq_fops;
#endif
67
68 /*
69    Neighbour hash table buckets are protected with rwlock tbl->lock.
70
71    - All the scans/updates to hash buckets MUST be made under this lock.
72    - NOTHING clever should be made under this lock: no callbacks
73      to protocol backends, no attempts to send something to network.
74      It will result in deadlocks, if backend/driver wants to use neighbour
75      cache.
76    - If the entry requires some non-trivial actions, increase
77      its reference count and release table lock.
78
79    Neighbour entries are protected:
80    - with reference count.
81    - with rwlock neigh->lock
82
83    Reference count prevents destruction.
84
85    neigh->lock mainly serializes ll address data and its validity state.
86    However, the same lock is used to protect another entry fields:
87     - timer
88     - resolution queue
89
90    Again, nothing clever shall be made under neigh->lock,
91    the most complicated procedure, which we allow is dev->hard_header.
92    It is supposed, that dev->hard_header is simplistic and does
93    not make callbacks to neighbour tables.
94
95    The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
96    list of neighbour tables. This list is used only in process context,
97  */
98
99 static DEFINE_RWLOCK(neigh_tbl_lock);
100
/*
 * Output stub installed on dead or unresolvable entries: drop the
 * packet and report -ENETDOWN to the caller.
 */
static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
106
107 /*
108  * It is random distribution in the interval (1/2)*base...(3/2)*base.
109  * It corresponds to default IPv6 settings and is not overridable,
110  * because it is really reasonable choice.
111  */
112
/*
 * Pick a random reachability time uniformly in [base/2, 3*base/2).
 * A zero base yields zero (randomization disabled).
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	if (!base)
		return 0;
	return base / 2 + (net_random() % base);
}
117
118
/*
 * Synchronous shrink of the table: walk every hash chain and unlink
 * all entries that are unreferenced (refcnt == 1, i.e. only the
 * table's own reference remains) and not NUD_PERMANENT.
 * Returns 1 if at least one entry was reclaimed, 0 otherwise.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np     = n->next;	/* unlink from chain */
				n->dead = 1;
				shrunk  = 1;
				write_unlock(&n->lock);
				/* drop the table's reference; destruction
				 * happens when the refcnt reaches zero */
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	/* Record the flush time; neigh_alloc() uses it to rate-limit GC. */
	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
157
158 static int neigh_del_timer(struct neighbour *n)
159 {
160         if ((n->nud_state & NUD_IN_TIMER) &&
161             del_timer(&n->timer)) {
162                 neigh_release(n);
163                 return 1;
164         }
165         return 0;
166 }
167
168 static void pneigh_queue_purge(struct sk_buff_head *list)
169 {
170         struct sk_buff *skb;
171
172         while ((skb = skb_dequeue(list)) != NULL) {
173                 dev_put(skb->dev);
174                 kfree_skb(skb);
175         }
176 }
177
/*
 * Unlink every entry belonging to @dev (all entries when @dev is NULL)
 * from the hash table.  Callers (neigh_changeaddr, neigh_ifdown) hold
 * tbl->lock write-locked around this.
 */
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;	/* unlink before touching the entry */
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				/* Remaining users see only a blackhole. */
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_release(n);	/* drop the table's reference */
		}
	}
}
218
/*
 * Flush all cached entries for @dev, e.g. after its link-layer
 * address changed.  Takes tbl->lock around neigh_flush_dev().
 */
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
225
/*
 * Device is going down: flush its neighbour and proxy entries, then
 * stop the proxy timer and drop any queued proxy packets.
 * Always returns 0.
 */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	/* Timer and queue are only touched after the table is clean. */
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
237
/*
 * Allocate and minimally initialise a neighbour entry for @tbl.
 * May trigger a forced GC pass when the entry count crosses
 * gc_thresh2/gc_thresh3.  Returns NULL on allocation failure or when
 * the table is full and GC reclaimed nothing.
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	/* Hard limit at gc_thresh3; above gc_thresh2 force a GC at most
	 * once per 5 seconds (rate-limited via tbl->last_flush). */
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (!n)
		goto out_entries;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated        = n->used = now;
	n->nud_state      = NUD_NONE;
	n->output         = neigh_blackhole;
	n->parms          = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data     = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl            = tbl;
	atomic_set(&n->refcnt, 1);
	/* Marked dead until actually linked into the hash table
	 * (neigh_create() clears this on insert). */
	n->dead           = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);	/* undo the optimistic increment */
	goto out;
}
280
281 static struct neighbour **neigh_hash_alloc(unsigned int entries)
282 {
283         unsigned long size = entries * sizeof(struct neighbour *);
284         struct neighbour **ret;
285
286         if (size <= PAGE_SIZE) {
287                 ret = kzalloc(size, GFP_ATOMIC);
288         } else {
289                 ret = (struct neighbour **)
290                       __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
291         }
292         return ret;
293 }
294
295 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
296 {
297         unsigned long size = entries * sizeof(struct neighbour *);
298
299         if (size <= PAGE_SIZE)
300                 kfree(hash);
301         else
302                 free_pages((unsigned long)hash, get_order(size));
303 }
304
/*
 * Grow the hash table to @new_entries buckets (must be a power of two)
 * and rehash every entry.  Silently keeps the old table if allocation
 * fails.  Caller (neigh_create) holds tbl->lock write-locked.
 */
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(new_entries & (new_entries - 1));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	/* Re-seed the hash to spread entries over the new buckets. */
	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			/* push onto the head of the new chain */
			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
340
341 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
342                                struct net_device *dev)
343 {
344         struct neighbour *n;
345         int key_len = tbl->key_len;
346         u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
347         
348         NEIGH_CACHE_STAT_INC(tbl, lookups);
349
350         read_lock_bh(&tbl->lock);
351         for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
352                 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
353                         neigh_hold(n);
354                         NEIGH_CACHE_STAT_INC(tbl, hits);
355                         break;
356                 }
357         }
358         read_unlock_bh(&tbl->lock);
359         return n;
360 }
361
362 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
363 {
364         struct neighbour *n;
365         int key_len = tbl->key_len;
366         u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;
367
368         NEIGH_CACHE_STAT_INC(tbl, lookups);
369
370         read_lock_bh(&tbl->lock);
371         for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
372                 if (!memcmp(n->primary_key, pkey, key_len)) {
373                         neigh_hold(n);
374                         NEIGH_CACHE_STAT_INC(tbl, hits);
375                         break;
376                 }
377         }
378         read_unlock_bh(&tbl->lock);
379         return n;
380 }
381
/*
 * Create (or find) the entry for (@pkey, @dev).  Runs the protocol
 * constructor and the per-device neigh_setup hook, then inserts under
 * tbl->lock; if another CPU inserted the same key meanwhile, the
 * existing entry is returned instead and the new one released.
 * Returns a held entry or an ERR_PTR() on failure.
 */
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Backdate confirmation so the entry starts out unconfirmed. */
	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	/* Hash after a possible grow: hash_rnd/hash_mask may have changed. */
	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	/* Re-check under the write lock: someone may have beaten us. */
	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;		/* now visible in the table */
	neigh_hold(n);		/* reference returned to the caller */
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
449
/*
 * Look up a proxy-neighbour entry for (@pkey, @dev); when @creat is
 * non-zero and no entry exists, allocate, construct and insert one.
 * Returns the entry (not reference-counted) or NULL.
 *
 * NOTE(review): the read lock is dropped before the GFP_KERNEL
 * allocation and re-taken for insert without re-checking the chain —
 * presumably concurrent creators could insert duplicates; confirm
 * against the callers' serialization.
 */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	/* Fold the last 4 key bytes into a bucket index. */
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		/* A device-less entry acts as a wildcard match. */
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	/* Entry stores the key inline after the struct. */
	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
500
501
502 int pneigh_delete(struct neigh_table *tbl, const void *pkey,
503                   struct net_device *dev)
504 {
505         struct pneigh_entry *n, **np;
506         int key_len = tbl->key_len;
507         u32 hash_val = *(u32 *)(pkey + key_len - 4);
508
509         hash_val ^= (hash_val >> 16);
510         hash_val ^= hash_val >> 8;
511         hash_val ^= hash_val >> 4;
512         hash_val &= PNEIGH_HASHMASK;
513
514         write_lock_bh(&tbl->lock);
515         for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
516              np = &n->next) {
517                 if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
518                         *np = n->next;
519                         write_unlock_bh(&tbl->lock);
520                         if (tbl->pdestructor)
521                                 tbl->pdestructor(n);
522                         if (n->dev)
523                                 dev_put(n->dev);
524                         kfree(n);
525                         return 0;
526                 }
527         }
528         write_unlock_bh(&tbl->lock);
529         return -ENOENT;
530 }
531
/*
 * Drop every proxy entry for @dev (all entries when @dev is NULL).
 * The caller (neigh_ifdown) holds tbl->lock write-locked, so the
 * buckets are walked without additional locking here.
 * Always returns -ENOENT.
 */
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;	/* unlink, then destroy */
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
554
555
556 /*
557  *      neighbour must already be out of the table;
558  *
559  */
/*
 *	neighbour must already be out of the table;
 *
 * Final teardown once the last reference is gone: detach and disarm
 * cached hardware headers, run the parms destructor hook, purge the
 * pending-packet queue and free the entry.  Warns and bails out if
 * called on an entry still marked alive.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;
		/* Redirect remaining hh users to the blackhole output. */
		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	if (neigh->parms->neigh_destructor)
		(neigh->parms->neigh_destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
599
600 /* Neighbour state is suspicious;
601    disable fast path.
602
603    Called with write_locked neigh.
604  */
605 static void neigh_suspect(struct neighbour *neigh)
606 {
607         struct hh_cache *hh;
608
609         NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
610
611         neigh->output = neigh->ops->output;
612
613         for (hh = neigh->hh; hh; hh = hh->hh_next)
614                 hh->hh_output = neigh->ops->output;
615 }
616
617 /* Neighbour state is OK;
618    enable fast path.
619
620    Called with write_locked neigh.
621  */
622 static void neigh_connect(struct neighbour *neigh)
623 {
624         struct hh_cache *hh;
625
626         NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
627
628         neigh->output = neigh->ops->connected_output;
629
630         for (hh = neigh->hh; hh; hh = hh->hh_next)
631                 hh->hh_output = neigh->ops->hh_output;
632 }
633
/*
 * Periodic GC timer: refreshes the randomized reachable_time values
 * roughly every 5 minutes, then scans one hash chain per run,
 * reclaiming unreferenced entries that failed or went stale.  The
 * timer is re-armed so all buckets are cycled in base_reachable_time/2.
 */
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	/* Advance to the next chain; only this one is scanned this run. */
	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		/* Permanent entries and those with a pending timer are
		 * left alone. */
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		/* Reclaim unreferenced entries that failed resolution or
		 * have been idle past gc_staletime. */
		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
701
702 static __inline__ int neigh_max_probes(struct neighbour *n)
703 {
704         struct neigh_parms *p = n->parms;
705         return (n->nud_state & NUD_PROBE ?
706                 p->ucast_probes :
707                 p->ucast_probes + p->app_probes + p->mcast_probes);
708 }
709
710 static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
711 {
712         if (unlikely(mod_timer(&n->timer, when))) {
713                 printk("NEIGH: BUG, double timer add, state is %x\n",
714                        n->nud_state);
715                 dump_stack();
716         }
717 }
718
719 /* Called when a timer expires for a neighbour entry. */
720
/* Called when a timer expires for a neighbour entry. */

/*
 * Drives the NUD state machine:
 *   REACHABLE -> (confirmed recently: stay) -> DELAY -> STALE
 *   DELAY     -> (confirmed: back to REACHABLE) -> PROBE
 *   PROBE/INCOMPLETE -> retransmit, or FAILED after neigh_max_probes().
 * Re-arms the timer while the state remains in NUD_IN_TIMER and emits
 * a solicitation for INCOMPLETE/PROBE states.  Drops the reference the
 * timer held on exit.
 */
static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now, 
				   neigh->confirmed + neigh->parms->reachable_time)) {
			/* Still confirmed; check again when it expires. */
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			/* Recently used but unconfirmed: delay-probe it. */
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			/* Idle and unconfirmed: demote to STALE. */
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now, 
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			/* Confirmation arrived during the delay window. */
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			/* No confirmation: start active probing. */
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* It is very thin place. report_unreachable is very complicated
		   routine. Particularly, it can hit the same neighbour entry!

		   So that, we try to be accurate and avoid dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			/* Lock is dropped around the callback; the state
			 * re-check above guards against re-entry. */
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		/* Clamp the next firing to at least HZ/2 from now. */
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);	/* timer takes a reference */
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}
	if (notify)
		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);

#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);	/* drop the reference the timer held */
}
834
/*
 * Kick resolution for @neigh on a transmit attempt.  Starts probing
 * when the entry is NONE/FAILED, schedules a delay-probe when STALE,
 * and queues @skb (bounded by parms->queue_len, oldest dropped) while
 * resolution is INCOMPLETE.  Returns 0 when the caller may transmit
 * immediately, 1 when the packet was queued/failed.
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	/* Usable or already being verified: let the caller transmit. */
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;
	
	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			/* Begin resolution: arm the state timer ASAP. */
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_hold(neigh);	/* reference for the timer */
			neigh_add_timer(neigh, now + 1);
		} else {
			/* No probing configured: fail immediately. */
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);	/* reference for the timer */
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			/* Queue full: drop the oldest packet to make room. */
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
890
891 static __inline__ void neigh_update_hhs(struct neighbour *neigh)
892 {
893         struct hh_cache *hh;
894         void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
895                 neigh->dev->header_cache_update;
896
897         if (update) {
898                 for (hh = neigh->hh; hh; hh = hh->hh_next) {
899                         write_lock_bh(&hh->hh_lock);
900                         update(hh, neigh->dev, neigh->ha);
901                         write_unlock_bh(&hh->hh_lock);
902                 }
903         }
904 }
905
906
907
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will mark an existing "connected"
				lladdr as suspect instead of overriding it
				if it is different.
				It also allows retaining the current state
				if the lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN    means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER indicates that the neighbour is known to be
				a router.

   Caller MUST hold a reference count on the entry.
 */
928
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;	/* set when callers of netevent/ARPD must be told */
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	/* NOARP/PERMANENT entries may only be changed administratively. */
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	/* Transition to a non-valid state: stop the timer, mark the entry
	 * suspect if it was connected, and notify only if it was valid.
	 */
	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				/* Weak override: keep the old lladdr but
				 * demote the entry to STALE for re-check.
				 */
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			/* The running timer holds its own reference. */
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		/* Backdate 'confirmed' so a non-connected entry is not
		 * treated as recently verified.
		 */
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			/* Drop the lock across ->output(); it may sleep in
			 * lower layers and may retake neigh locks.
			 */
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	/* Notifications are sent after the lock is dropped. */
	if (notify)
		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}
1067
1068 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1069                                  u8 *lladdr, void *saddr,
1070                                  struct net_device *dev)
1071 {
1072         struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1073                                                  lladdr || !dev->addr_len);
1074         if (neigh)
1075                 neigh_update(neigh, lladdr, NUD_STALE, 
1076                              NEIGH_UPDATE_F_OVERRIDE);
1077         return neigh;
1078 }
1079
/* Attach a hardware-header cache entry for 'protocol' to both the
 * neighbour and the dst.  An existing entry of the same type is reused;
 * otherwise a new one is allocated and filled by the device.
 */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	/* Reuse an existing cache entry of the same protocol, if any. */
	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		rwlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			/* Device declined to fill the header cache. */
			kfree(hh);
			hh = NULL;
		} else {
			/* This reference is held by the neighbour's list. */
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh       = hh;
			/* Fast output only once the entry is connected. */
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		/* Second reference is held by the dst entry. */
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
1113
1114 /* This function can be used in contexts, where only old dev_queue_xmit
1115    worked, f.e. if you want to override normal output path (eql, shaper),
1116    but resolution is not made yet.
1117  */
1118
1119 int neigh_compat_output(struct sk_buff *skb)
1120 {
1121         struct net_device *dev = skb->dev;
1122
1123         __skb_pull(skb, skb->nh.raw - skb->data);
1124
1125         if (dev->hard_header &&
1126             dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1127                              skb->len) < 0 &&
1128             dev->rebuild_header(skb))
1129                 return 0;
1130
1131         return dev_queue_xmit(skb);
1132 }
1133
1134 /* Slow and careful. */
1135
/* Output path used while address resolution may still be in progress.
 * Returns 0 or the queue_xmit result; -EINVAL on a missing/broken dst,
 * in which case the skb is freed.
 */
int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	/* neigh_event_send() returns 0 when the entry is usable now;
	 * otherwise the skb was consumed by the resolution state machine.
	 */
	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			/* Write lock: we may install a new hh cache entry.
			 * Re-check dst->hh under the lock (it may have been
			 * set by a concurrent caller).
			 */
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			/* Read lock suffices to copy neigh->ha stably. */
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
1178
1179 /* As fast as possible without hh cache */
1180
1181 int neigh_connected_output(struct sk_buff *skb)
1182 {
1183         int err;
1184         struct dst_entry *dst = skb->dst;
1185         struct neighbour *neigh = dst->neighbour;
1186         struct net_device *dev = neigh->dev;
1187
1188         __skb_pull(skb, skb->nh.raw - skb->data);
1189
1190         read_lock_bh(&neigh->lock);
1191         err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1192                                neigh->ha, NULL, skb->len);
1193         read_unlock_bh(&neigh->lock);
1194         if (err >= 0)
1195                 err = neigh->ops->queue_xmit(skb);
1196         else {
1197                 err = -EINVAL;
1198                 kfree_skb(skb);
1199         }
1200         return err;
1201 }
1202
/* Proxy-queue timer handler: replay every queued skb whose scheduled time
 * has arrived (via tbl->proxy_redo), drop the rest of the due ones, and
 * re-arm the timer for the earliest still-pending skb.
 */
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;	/* smallest remaining delay, 0 = none left */
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	/* Walk the queue; the queue head itself terminates the list. */
	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		/* Advance before possibly unlinking 'back'. */
		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			/* Drop the ref taken in pneigh_enqueue(). */
			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1236
/* Queue an skb on the table's proxy queue with a random delay of up to
 * p->proxy_delay jiffies, (re)arming the proxy timer as needed.  The skb
 * is dropped if the queue already exceeds p->proxy_qlen.
 */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	/* NOTE(review): divides by p->proxy_delay — presumably callers only
	 * reach this with proxy_delay != 0; confirm against call sites.
	 */
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	/* If the timer was already pending, keep the earlier expiry. */
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	/* Ref dropped by neigh_proxy_process() when the skb is handled. */
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1263
1264
/* Allocate a per-device neigh_parms, cloned from the table defaults, give
 * the device a chance to adjust it via ->neigh_setup, and link it into the
 * table's parms list.  Returns NULL on allocation or setup failure.
 */
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		/* Start from the table-wide default parameters. */
		memcpy(p, &tbl->parms, sizeof(*p));
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		/* Randomize to avoid synchronized re-probing. */
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
		if (dev) {
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			/* Released in neigh_parms_release(). */
			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
1294
/* RCU callback: drop the parms reference once all RCU readers are done. */
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}
1302
/* Unlink 'parms' from the table's list and schedule its release via RCU.
 * The table's default parms (&tbl->parms) is never released here.
 */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			/* Drop the device ref taken in neigh_parms_alloc(). */
			if (parms->dev)
				dev_put(parms->dev);
			/* Defer the final put past any RCU readers. */
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
1324
/* Final teardown of a neigh_parms — presumably invoked when the last
 * reference is dropped (neigh_parms_put); confirm against the header.
 */
void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
1329
/* Initialize a neighbour table's caches, hashes, locks and timers without
 * registering it on the global table list (see neigh_table_init for that).
 * Panics on allocation failure — called only at init time.
 */
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	/* Randomize to avoid synchronized re-probing across entries. */
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     tbl->entry_size,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);

	if (!tbl->kmem_cachep)
		panic("cannot create neighbour cache");

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	/* /proc/net/stat/<tbl->id> exposes per-cpu statistics. */
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	/* Start with a minimal 2-bucket hash; it grows on demand. */
	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	/* Proxy timer is armed lazily by pneigh_enqueue(). */
	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data     = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}
1387
/* Fully initialize a neighbour table and register it on the global list,
 * warning (with a stack dump) if another table of the same address family
 * is already registered.
 */
void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	/* Scan for an already-registered table of this family. */
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}
1408
/* Tear down a neighbour table: stop timers, flush queues and entries,
 * unregister it from the global list and free its hashes and statistics.
 * Always returns 0.
 */
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	/* All entries should be gone by now; leaks indicate refcount bugs. */
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
1440
/* RTM_DELNEIGH handler: delete a neighbour (or proxy) entry described by
 * the netlink message.  A normal entry is "deleted" by forcing it into
 * NUD_FAILED via an administrative neigh_update().
 */
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		/* Lock dropped before touching tbl — tables are presumably
		 * only unregistered at module unload; confirm.
		 */
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
			goto out_dev_put;
		}

		/* Non-proxy deletion requires a device (dev is NULL here). */
		if (!dev)
			goto out;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			err = neigh_update(n, NULL, NUD_FAILED,
					   NEIGH_UPDATE_F_OVERRIDE|
					   NEIGH_UPDATE_F_ADMIN);
			neigh_release(n);
		}
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1491
/* RTM_NEWNEIGH handler: create or update a neighbour (or proxy) entry as
 * described by the netlink message, honouring NLM_F_EXCL / NLM_F_CREATE /
 * NLM_F_REPLACE semantics.
 */
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		int override = 1;	/* new entries always get the lladdr */
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		/* Lock dropped before touching tbl — tables are presumably
		 * only unregistered at module unload; confirm.
		 */
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			/* Proxy entries: lookup-with-create is the add. */
			err = -ENOBUFS;
			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
				err = 0;
			goto out_dev_put;
		}

		/* Non-proxy entries require a device (dev is NULL here). */
		err = -EINVAL;
		if (!dev)
			goto out;
		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
			goto out_dev_put;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(n);
				goto out_dev_put;
			}

			/* Existing entry: only override with REPLACE. */
			override = nlh->nlmsg_flags & NLM_F_REPLACE;
		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out_dev_put;
		} else {
			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
			if (IS_ERR(n)) {
				err = PTR_ERR(n);
				goto out_dev_put;
			}
		}

		err = neigh_update(n,
				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
				   ndm->ndm_state,
				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
				   NEIGH_UPDATE_F_ADMIN);

		neigh_release(n);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1570
/* Dump one neigh_parms as a nested NDTA_PARMS attribute.  On overflow the
 * RTA_PUT* macros jump to rtattr_failure, where the partly-built nest is
 * cancelled and a negative value returned.
 */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct rtattr *nest = NULL;

	nest = RTA_NEST(skb, NDTA_PARMS);

	/* Only per-device parms carry an ifindex. */
	if (parms->dev)
		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return RTA_NEST_END(skb, nest);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, nest);
}
1601
/* Build one RTM_NEWNEIGHTBL dump message for 'tbl': name, gc thresholds,
 * runtime configuration snapshot, summed per-cpu statistics and the
 * table's default parms.  Returns a negative value on message overflow.
 */
static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	/* Read lock keeps the table fields consistent while dumping. */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		/* Configuration snapshot (NDTA_CONFIG). */
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		/* Statistics summed over all possible cpus (NDTA_STATS). */
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	/* The table's default parms must not be device-bound. */
	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
1683
/* Build an RTM_NEWNEIGHTBL message carrying only the table name and one
 * specific (typically per-device) parms set.  Returns a negative value on
 * message overflow.
 */
static int neightbl_fill_param_info(struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	/* Read lock keeps parms consistent while dumping. */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;
	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);

	if (neightbl_fill_parms(skb, parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
1716  
1717 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1718                                                       int ifindex)
1719 {
1720         struct neigh_parms *p;
1721         
1722         for (p = &tbl->parms; p; p = p->next)
1723                 if ((p->dev && p->dev->ifindex == ifindex) ||
1724                     (!p->dev && !ifindex))
1725                         return p;
1726
1727         return NULL;
1728 }
1729
/*
 * RTM_SETNEIGHTBL handler: update a neighbour table's gc thresholds
 * and gc interval, and optionally the tunables of one neigh_parms
 * instance selected via NDTPA_IFINDEX inside the NDTA_PARMS nest.
 *
 * Returns 0 on success, -EINVAL on malformed requests and -ENOENT
 * when the named table or the parms instance does not exist.  The
 * RTA_GET_* macros jump to rtattr_failure on undersized attributes.
 */
int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
	struct rtattr **tb = arg;
	int err = -EINVAL;

	/* The target table is identified by name; NDTA_NAME is mandatory. */
	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
		return -EINVAL;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_THRESH1 - 1])
		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);

	if (tb[NDTA_THRESH2 - 1])
		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);

	if (tb[NDTA_THRESH3 - 1])
		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);

	if (tb[NDTA_GC_INTERVAL - 1])
		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);

	if (tb[NDTA_PARMS - 1]) {
		struct rtattr *tbp[NDTPA_MAX];
		struct neigh_parms *p;
		u32 ifindex = 0;	/* 0 selects the table default parms */

		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
			goto rtattr_failure;

		if (tbp[NDTPA_IFINDEX - 1])
			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto rtattr_failure;
		}

		if (tbp[NDTPA_QUEUE_LEN - 1])
			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);

		if (tbp[NDTPA_PROXY_QLEN - 1])
			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);

		if (tbp[NDTPA_APP_PROBES - 1])
			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);

		if (tbp[NDTPA_UCAST_PROBES - 1])
			p->ucast_probes =
			   RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);

		if (tbp[NDTPA_MCAST_PROBES - 1])
			p->mcast_probes =
			   RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);

		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
			p->base_reachable_time =
			   RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);

		if (tbp[NDTPA_GC_STALETIME - 1])
			p->gc_staletime =
			   RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);

		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
			p->delay_probe_time =
			   RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);

		if (tbp[NDTPA_RETRANS_TIME - 1])
			p->retrans_time =
			   RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);

		if (tbp[NDTPA_ANYCAST_DELAY - 1])
			p->anycast_delay =
			   RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);

		if (tbp[NDTPA_PROXY_DELAY - 1])
			p->proxy_delay =
			   RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);

		if (tbp[NDTPA_LOCKTIME - 1])
			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
	}

	err = 0;

rtattr_failure:
	write_unlock_bh(&tbl->lock);
errout:
	read_unlock(&neigh_tbl_lock);
	return err;
}
1842
/*
 * RTM_GETNEIGHTBL dump handler: emit every neighbour table matching
 * the requested family, followed by each table's per-device parms
 * entries.  cb->args[0] is the resume index and counts tables and
 * parms entries in one flat sequence.
 */
int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx, family;
	int s_idx = cb->args[0];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
		struct neigh_parms *p;

		if (idx < s_idx || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(tbl, skb, cb) <= 0)
			break;

		/* The table itself took one slot; its extra parms follow. */
		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
			if (idx < s_idx)
				continue;

			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
				goto out;
		}

	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = idx;

	return skb->len;
}
1876
/*
 * Fill one neighbour (RTM_*NEIGH) netlink message for entry @n.
 * Returns skb->len on success, or -1 when the message did not fit;
 * on failure any partially written data is trimmed off the skb.
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
			   u32 pid, u32 seq, int event, unsigned int flags)
{
	unsigned long now = jiffies;
	unsigned char *b = skb->tail;	/* rollback point for skb_trim() */
	struct nda_cacheinfo ci;
	int locked = 0;		/* whether n->lock is held at the failure labels */
	u32 probes;
	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
					 sizeof(struct ndmsg), flags);
	struct ndmsg *ndm = NLMSG_DATA(nlh);

	ndm->ndm_family  = n->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = n->flags;
	ndm->ndm_type    = n->type;
	ndm->ndm_ifindex = n->dev->ifindex;
	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
	/* State, lladdr and timestamps are read under n->lock for
	 * consistency; the RTA_PUTs below may jump out while locked. */
	read_lock_bh(&n->lock);
	locked           = 1;
	ndm->ndm_state   = n->nud_state;
	if (n->nud_state & NUD_VALID)
		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
	ci.ndm_used      = now - n->used;
	ci.ndm_confirmed = now - n->confirmed;
	ci.ndm_updated   = now - n->updated;
	ci.ndm_refcnt    = atomic_read(&n->refcnt) - 1;
	probes = atomic_read(&n->probes);
	read_unlock_bh(&n->lock);
	locked           = 0;
	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
	nlh->nlmsg_len   = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	/* Drop n->lock if a RTA_PUT failed while it was held. */
	if (locked)
		read_unlock_bh(&n->lock);
	skb_trim(skb, b - skb->data);
	return -1;
}
1920
1921
/*
 * Dump the entries of one neighbour table into @skb.  cb->args[1]
 * (hash bucket) and cb->args[2] (index inside the bucket) carry the
 * resume position across netlink dump callbacks.
 * Returns skb->len, or -1 when the skb filled up mid-dump.
 */
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;	/* fresh bucket: restart the index */
		read_lock_bh(&tbl->lock);
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
		read_unlock_bh(&tbl->lock);
	}
	rc = skb->len;
out:
	/* Record where to resume on the next callback. */
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
1955
/*
 * RTM_GETNEIGH dump handler: walk all neighbour tables matching the
 * requested family.  cb->args[0] is the table index; args[1..] belong
 * to neigh_dump_table() and are reset when moving to a new table.
 */
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			/* New table: clear the per-table resume state. */
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
1979
1980 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
1981 {
1982         int chain;
1983
1984         read_lock_bh(&tbl->lock);
1985         for (chain = 0; chain <= tbl->hash_mask; chain++) {
1986                 struct neighbour *n;
1987
1988                 for (n = tbl->hash_buckets[chain]; n; n = n->next)
1989                         cb(n, cookie);
1990         }
1991         read_unlock_bh(&tbl->lock);
1992 }
1993 EXPORT_SYMBOL(neigh_for_each);
1994
1995 /* The tbl->lock must be held as a writer and BH disabled. */
1996 void __neigh_for_each_release(struct neigh_table *tbl,
1997                               int (*cb)(struct neighbour *))
1998 {
1999         int chain;
2000
2001         for (chain = 0; chain <= tbl->hash_mask; chain++) {
2002                 struct neighbour *n, **np;
2003
2004                 np = &tbl->hash_buckets[chain];
2005                 while ((n = *np) != NULL) {
2006                         int release;
2007
2008                         write_lock(&n->lock);
2009                         release = cb(n);
2010                         if (release) {
2011                                 *np = n->next;
2012                                 n->dead = 1;
2013                         } else
2014                                 np = &n->next;
2015                         write_unlock(&n->lock);
2016                         if (release)
2017                                 neigh_release(n);
2018                 }
2019         }
2020 }
2021 EXPORT_SYMBOL(__neigh_for_each_release);
2022
2023 #ifdef CONFIG_PROC_FS
2024
2025 static struct neighbour *neigh_get_first(struct seq_file *seq)
2026 {
2027         struct neigh_seq_state *state = seq->private;
2028         struct neigh_table *tbl = state->tbl;
2029         struct neighbour *n = NULL;
2030         int bucket = state->bucket;
2031
2032         state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2033         for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2034                 n = tbl->hash_buckets[bucket];
2035
2036                 while (n) {
2037                         if (state->neigh_sub_iter) {
2038                                 loff_t fakep = 0;
2039                                 void *v;
2040
2041                                 v = state->neigh_sub_iter(state, n, &fakep);
2042                                 if (!v)
2043                                         goto next;
2044                         }
2045                         if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2046                                 break;
2047                         if (n->nud_state & ~NUD_NOARP)
2048                                 break;
2049                 next:
2050                         n = n->next;
2051                 }
2052
2053                 if (n)
2054                         break;
2055         }
2056         state->bucket = bucket;
2057
2058         return n;
2059 }
2060
/*
 * Advance the /proc iterator from @n to the next acceptable neighbour
 * entry, honouring the NEIGH_SEQ_SKIP_NOARP filter and the optional
 * protocol sub-iterator, crossing hash buckets as needed.  When @pos
 * is non-NULL it is decremented for each entry consumed.
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	/* Give the sub-iterator a chance to stay on the current entry. */
	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		/* Scan the rest of the current chain for a match. */
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		/* Chain exhausted: move on to the next hash bucket. */
		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}
2105
2106 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2107 {
2108         struct neighbour *n = neigh_get_first(seq);
2109
2110         if (n) {
2111                 while (*pos) {
2112                         n = neigh_get_next(seq, n, pos);
2113                         if (!n)
2114                                 break;
2115                 }
2116         }
2117         return *pos ? NULL : n;
2118 }
2119
2120 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2121 {
2122         struct neigh_seq_state *state = seq->private;
2123         struct neigh_table *tbl = state->tbl;
2124         struct pneigh_entry *pn = NULL;
2125         int bucket = state->bucket;
2126
2127         state->flags |= NEIGH_SEQ_IS_PNEIGH;
2128         for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2129                 pn = tbl->phash_buckets[bucket];
2130                 if (pn)
2131                         break;
2132         }
2133         state->bucket = bucket;
2134
2135         return pn;
2136 }
2137
2138 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2139                                             struct pneigh_entry *pn,
2140                                             loff_t *pos)
2141 {
2142         struct neigh_seq_state *state = seq->private;
2143         struct neigh_table *tbl = state->tbl;
2144
2145         pn = pn->next;
2146         while (!pn) {
2147                 if (++state->bucket > PNEIGH_HASHMASK)
2148                         break;
2149                 pn = tbl->phash_buckets[state->bucket];
2150                 if (pn)
2151                         break;
2152         }
2153
2154         if (pn && pos)
2155                 --(*pos);
2156
2157         return pn;
2158 }
2159
2160 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2161 {
2162         struct pneigh_entry *pn = pneigh_get_first(seq);
2163
2164         if (pn) {
2165                 while (*pos) {
2166                         pn = pneigh_get_next(seq, pn, pos);
2167                         if (!pn)
2168                                 break;
2169                 }
2170         }
2171         return *pos ? NULL : pn;
2172 }
2173
2174 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2175 {
2176         struct neigh_seq_state *state = seq->private;
2177         void *rc;
2178
2179         rc = neigh_get_idx(seq, pos);
2180         if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2181                 rc = pneigh_get_idx(seq, pos);
2182
2183         return rc;
2184 }
2185
2186 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2187 {
2188         struct neigh_seq_state *state = seq->private;
2189         loff_t pos_minus_one;
2190
2191         state->tbl = tbl;
2192         state->bucket = 0;
2193         state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2194
2195         read_lock_bh(&tbl->lock);
2196
2197         pos_minus_one = *pos - 1;
2198         return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2199 }
2200 EXPORT_SYMBOL(neigh_seq_start);
2201
/*
 * seq_file next callback: advance past @v.  Once the main neighbour
 * hash is exhausted, transparently continue with the proxy (pneigh)
 * hash unless NEIGH_SEQ_NEIGH_ONLY was requested.
 */
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		/* Main hash done: fall through to the proxy entries. */
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
2227 EXPORT_SYMBOL(neigh_seq_next);
2228
2229 void neigh_seq_stop(struct seq_file *seq, void *v)
2230 {
2231         struct neigh_seq_state *state = seq->private;
2232         struct neigh_table *tbl = state->tbl;
2233
2234         read_unlock_bh(&tbl->lock);
2235 }
2236 EXPORT_SYMBOL(neigh_seq_stop);
2237
2238 /* statistics via seq_file */
2239
2240 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2241 {
2242         struct proc_dir_entry *pde = seq->private;
2243         struct neigh_table *tbl = pde->data;
2244         int cpu;
2245
2246         if (*pos == 0)
2247                 return SEQ_START_TOKEN;
2248         
2249         for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2250                 if (!cpu_possible(cpu))
2251                         continue;
2252                 *pos = cpu+1;
2253                 return per_cpu_ptr(tbl->stats, cpu);
2254         }
2255         return NULL;
2256 }
2257
2258 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2259 {
2260         struct proc_dir_entry *pde = seq->private;
2261         struct neigh_table *tbl = pde->data;
2262         int cpu;
2263
2264         for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2265                 if (!cpu_possible(cpu))
2266                         continue;
2267                 *pos = cpu+1;
2268                 return per_cpu_ptr(tbl->stats, cpu);
2269         }
2270         return NULL;
2271 }
2272
/* seq_file stop: nothing to release — start() takes no locks. */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
2277
/*
 * Emit one line of the statistics file: the column header for
 * SEQ_START_TOKEN, otherwise the counters of one CPU plus the global
 * entry count.
 */
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
			"%08lx %08lx  %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );

	return 0;
}
2311
/* seq_file iterator over the per-CPU neighbour statistics. */
static struct seq_operations neigh_stat_seq_ops = {
	.start  = neigh_stat_seq_start,
	.next   = neigh_stat_seq_next,
	.stop   = neigh_stat_seq_stop,
	.show   = neigh_stat_seq_show,
};
2318
2319 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2320 {
2321         int ret = seq_open(file, &neigh_stat_seq_ops);
2322
2323         if (!ret) {
2324                 struct seq_file *sf = file->private_data;
2325                 sf->private = PDE(inode);
2326         }
2327         return ret;
2328 };
2329
/* File operations backing the neighbour statistics proc file. */
static struct file_operations neigh_stat_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = neigh_stat_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
2337
2338 #endif /* CONFIG_PROC_FS */
2339
2340 #ifdef CONFIG_ARPD
2341 void neigh_app_ns(struct neighbour *n)
2342 {
2343         struct nlmsghdr  *nlh;
2344         int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2345         struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2346
2347         if (!skb)
2348                 return;
2349
2350         if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
2351                 kfree_skb(skb);
2352                 return;
2353         }
2354         nlh                        = (struct nlmsghdr *)skb->data;
2355         nlh->nlmsg_flags           = NLM_F_REQUEST;
2356         NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
2357         netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2358 }
2359
2360 static void neigh_app_notify(struct neighbour *n)
2361 {
2362         struct nlmsghdr *nlh;
2363         int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2364         struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2365
2366         if (!skb)
2367                 return;
2368
2369         if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
2370                 kfree_skb(skb);
2371                 return;
2372         }
2373         nlh                        = (struct nlmsghdr *)skb->data;
2374         NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
2375         netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2376 }
2377
2378 #endif /* CONFIG_ARPD */
2379
2380 #ifdef CONFIG_SYSCTL
2381
/*
 * Template for the per-device neighbour sysctl tree
 * (net/<proto>/neigh/<dev>/...).  neigh_sysctl_register() copies this
 * structure, points the .data members at a neigh_parms instance and
 * fills in the device/protocol directory names before registering it.
 */
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table               neigh_vars[__NET_NEIGH_MAX];
	ctl_table               neigh_dev[2];
	ctl_table               neigh_neigh_dir[2];
	ctl_table               neigh_proto_dir[2];
	ctl_table               neigh_root_dir[2];
} neigh_sysctl_template = {
	.neigh_vars = {
		{
			.ctl_name       = NET_NEIGH_MCAST_SOLICIT,
			.procname       = "mcast_solicit",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec,
		},
		{
			.ctl_name       = NET_NEIGH_UCAST_SOLICIT,
			.procname       = "ucast_solicit",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec,
		},
		{
			.ctl_name       = NET_NEIGH_APP_SOLICIT,
			.procname       = "app_solicit",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec,
		},
		{
			/* Stored in jiffies, shown in user-HZ ticks. */
			.ctl_name       = NET_NEIGH_RETRANS_TIME,
			.procname       = "retrans_time",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name       = NET_NEIGH_REACHABLE_TIME,
			.procname       = "base_reachable_time",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec_jiffies,
			.strategy       = &sysctl_jiffies,
		},
		{
			.ctl_name       = NET_NEIGH_DELAY_PROBE_TIME,
			.procname       = "delay_first_probe_time",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec_jiffies,
			.strategy       = &sysctl_jiffies,
		},
		{
			.ctl_name       = NET_NEIGH_GC_STALE_TIME,
			.procname       = "gc_stale_time",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec_jiffies,
			.strategy       = &sysctl_jiffies,
		},
		{
			.ctl_name       = NET_NEIGH_UNRES_QLEN,
			.procname       = "unres_qlen",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec,
		},
		{
			.ctl_name       = NET_NEIGH_PROXY_QLEN,
			.procname       = "proxy_qlen",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec,
		},
		{
			.ctl_name       = NET_NEIGH_ANYCAST_DELAY,
			.procname       = "anycast_delay",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name       = NET_NEIGH_PROXY_DELAY,
			.procname       = "proxy_delay",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name       = NET_NEIGH_LOCKTIME,
			.procname       = "locktime",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name       = NET_NEIGH_GC_INTERVAL,
			.procname       = "gc_interval",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec_jiffies,
			.strategy       = &sysctl_jiffies,
		},
		{
			.ctl_name       = NET_NEIGH_GC_THRESH1,
			.procname       = "gc_thresh1",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec,
		},
		{
			.ctl_name       = NET_NEIGH_GC_THRESH2,
			.procname       = "gc_thresh2",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec,
		},
		{
			.ctl_name       = NET_NEIGH_GC_THRESH3,
			.procname       = "gc_thresh3",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec,
		},
		{
			/* Millisecond views of the jiffies values above. */
			.ctl_name       = NET_NEIGH_RETRANS_TIME_MS,
			.procname       = "retrans_time_ms",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec_ms_jiffies,
			.strategy       = &sysctl_ms_jiffies,
		},
		{
			.ctl_name       = NET_NEIGH_REACHABLE_TIME_MS,
			.procname       = "base_reachable_time_ms",
			.maxlen         = sizeof(int),
			.mode           = 0644,
			.proc_handler   = &proc_dointvec_ms_jiffies,
			.strategy       = &sysctl_ms_jiffies,
		},
	},
	.neigh_dev = {
		{
			.ctl_name       = NET_PROTO_CONF_DEFAULT,
			.procname       = "default",
			.mode           = 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname       = "neigh",
			.mode           = 0555,
		},
	},
	.neigh_proto_dir = {
		{
			/* procname/ctl_name filled in at registration time. */
			.mode           = 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name       = CTL_NET,
			.procname       = "net",
			.mode           = 0555,
		},
	},
};
2550
/*
 * neigh_sysctl_register - expose a neigh_parms instance under
 * /proc/sys/net/<p_name>/neigh/<dev>/.
 *
 * @dev:      device the parms belong to, or NULL to register the protocol
 *            "default" entry (directory name taken from the template).
 * @p:        neighbour parameters whose fields back the sysctl entries.
 * @p_id:     ctl_name for the protocol directory.
 * @pdev_id:  ctl_name for the "neigh" directory.
 * @p_name:   procname for the protocol directory.
 * @handler:  optional proc_handler override for the time-valued knobs.
 * @strategy: optional sysctl strategy override for the same knobs.
 *
 * Returns 0 on success, -ENOBUFS on allocation or registration failure.
 */
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name, 
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
	const char *dev_name_source = NULL;
	char *dev_name = NULL;
	int err = 0;

	if (!t)
		return -ENOBUFS;
	/*
	 * Start from the shared template, then point each entry's .data at
	 * the matching field of this neigh_parms instance.  The numeric
	 * indices below must stay in step with the order of entries in
	 * neigh_sysctl_template.neigh_vars[].
	 */
	memcpy(t, &neigh_sysctl_template, sizeof(*t));
	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;

	if (dev) {
		/*
		 * Per-device table: name the directory after the device and
		 * hide entries 12-15 (the table-wide gc knobs, e.g.
		 * gc_thresh1..3 in the template) by clearing procname - a
		 * NULL procname terminates/suppresses the sysctl entry, so
		 * those knobs only appear under the "default" directory.
		 */
		dev_name_source = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		t->neigh_vars[12].procname = NULL;
		t->neigh_vars[13].procname = NULL;
		t->neigh_vars[14].procname = NULL;
		t->neigh_vars[15].procname = NULL;
	} else {
		/*
		 * Protocol default: entries 12-15 are backed by four ints
		 * located immediately after the neigh_parms struct.  This
		 * relies on the caller embedding p at the head of a larger
		 * object whose next four int fields are the gc settings -
		 * NOTE(review): confirm that layout against the callers
		 * (the template tail above names gc_thresh1..3).
		 */
		dev_name_source = t->neigh_dev[0].procname;
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
		t->neigh_vars[15].data = (int *)(p + 1) + 3;
	}

	/* Millisecond-granularity aliases of the two jiffies knobs above. */
	t->neigh_vars[16].data  = &p->retrans_time;
	t->neigh_vars[17].data  = &p->base_reachable_time;

	if (handler || strategy) {
		/*
		 * Let the caller interpose on the four time-valued knobs
		 * (both the jiffies and the milliseconds variants); extra1
		 * carries the device so the handler can tell entries apart.
		 */
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[16].proc_handler = handler;
		t->neigh_vars[16].strategy = strategy;
		t->neigh_vars[16].extra1 = dev;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[17].proc_handler = handler;
		t->neigh_vars[17].strategy = strategy;
		t->neigh_vars[17].extra1 = dev;
	}

	/*
	 * The directory name must outlive this call (the template's copy is
	 * shared and dev->name can change), so take a private copy; it is
	 * freed again in neigh_sysctl_unregister().
	 */
	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!dev_name) {
		err = -ENOBUFS;
		goto free;
	}

 	t->neigh_dev[0].procname = dev_name;

	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	/* Chain the four directory levels: net/<proto>/neigh/<dev>/<vars>. */
	t->neigh_dev[0].child	       = t->neigh_vars;
	t->neigh_neigh_dir[0].child    = t->neigh_dev;
	t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child     = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
	if (!t->sysctl_header) {
		err = -ENOBUFS;
		goto free_procname;
	}
	p->sysctl_table = t;
	return 0;

	/* error path */
 free_procname:
	kfree(dev_name);
 free:
	kfree(t);

	return err;
}
2647
2648 void neigh_sysctl_unregister(struct neigh_parms *p)
2649 {
2650         if (p->sysctl_table) {
2651                 struct neigh_sysctl_table *t = p->sysctl_table;
2652                 p->sysctl_table = NULL;
2653                 unregister_sysctl_table(t->sysctl_header);
2654                 kfree(t->neigh_dev[0].procname);
2655                 kfree(t);
2656         }
2657 }
2658
2659 #endif  /* CONFIG_SYSCTL */
2660
/*
 * Export the generic neighbour cache primitives for use by modules that
 * build protocol front ends on top of this core.
 */
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_add);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_delete);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_table_init_no_netlink);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
EXPORT_SYMBOL(neightbl_dump_info);
EXPORT_SYMBOL(neightbl_set);

/* Only compiled in (and thus exported) with ARP daemon support. */
#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
/* The sysctl hooks exist only when sysctl support is configured. */
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif