Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6
[linux-2.6] / net / core / neighbour.c
1 /*
2  *      Generic address resolution entity
3  *
4  *      Authors:
5  *      Pedro Roque             <roque@di.fc.ul.pt>
6  *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
7  *
8  *      This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *      Fixes:
14  *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
15  *      Harald Welte            Add neighbour cache statistics like rtstat
16  */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/dst.h>
31 #include <net/sock.h>
32 #include <net/netevent.h>
33 #include <net/netlink.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/random.h>
36 #include <linux/string.h>
37 #include <linux/log2.h>
38
39 #define NEIGH_DEBUG 1
40
41 #define NEIGH_PRINTK(x...) printk(x)
42 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
43 #define NEIGH_PRINTK0 NEIGH_PRINTK
44 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
45 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
46
47 #if NEIGH_DEBUG >= 1
48 #undef NEIGH_PRINTK1
49 #define NEIGH_PRINTK1 NEIGH_PRINTK
50 #endif
51 #if NEIGH_DEBUG >= 2
52 #undef NEIGH_PRINTK2
53 #define NEIGH_PRINTK2 NEIGH_PRINTK
54 #endif
55
56 #define PNEIGH_HASHMASK         0xF
57
58 static void neigh_timer_handler(unsigned long arg);
59 static void __neigh_notify(struct neighbour *n, int type, int flags);
60 static void neigh_update_notify(struct neighbour *neigh);
61 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62
63 static struct neigh_table *neigh_tables;
64 #ifdef CONFIG_PROC_FS
65 static const struct file_operations neigh_stat_seq_fops;
66 #endif
67
68 /*
69    Neighbour hash table buckets are protected with rwlock tbl->lock.
70
71    - All the scans/updates to hash buckets MUST be made under this lock.
72    - NOTHING clever should be made under this lock: no callbacks
73      to protocol backends, no attempts to send something to network.
74      It will result in deadlocks, if backend/driver wants to use neighbour
75      cache.
76    - If the entry requires some non-trivial actions, increase
77      its reference count and release table lock.
78
79    Neighbour entries are protected:
80    - with reference count.
81    - with rwlock neigh->lock
82
83    Reference count prevents destruction.
84
85    neigh->lock mainly serializes ll address data and its validity state.
86    However, the same lock is used to protect another entry fields:
87     - timer
88     - resolution queue
89
90    Again, nothing clever shall be made under neigh->lock,
91    the most complicated procedure, which we allow is dev->hard_header.
92    It is supposed, that dev->hard_header is simplistic and does
93    not make callbacks to neighbour tables.
94
95    The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
 96    list of neighbour tables. This list is used only in process context.
97  */
98
99 static DEFINE_RWLOCK(neigh_tbl_lock);
100
/*
 * Dead-end output handler: installed as neigh->output when an entry is
 * unusable or being torn down.  Frees the skb and reports the link as down.
 */
101 static int neigh_blackhole(struct sk_buff *skb)
102 {
103         kfree_skb(skb);
104         return -ENETDOWN;
105 }
106
/*
 * Final teardown for an entry already unlinked from the hash table:
 * run the per-parms cleanup hook (if any), send an RTM_DELNEIGH netlink
 * notification, and drop the table's reference.
 */
107 static void neigh_cleanup_and_release(struct neighbour *neigh)
108 {
109         if (neigh->parms->neigh_cleanup)
110                 neigh->parms->neigh_cleanup(neigh);
111
112         __neigh_notify(neigh, RTM_DELNEIGH, 0);
113         neigh_release(neigh);
114 }
115
116 /*
117  * It is random distribution in the interval (1/2)*base...(3/2)*base.
118  * It corresponds to default IPv6 settings and is not overridable,
119  * because it is really reasonable choice.
120  */
121
/*
 * Pick a randomized reachability interval uniformly distributed over
 * [base/2, 3*base/2), matching the default IPv6 NUD behaviour described
 * in the comment above.  A base of zero yields zero.
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
        if (!base)
                return 0;

        return (base >> 1) + (net_random() % base);
}
EXPORT_SYMBOL(neigh_rand_reach_time);
127
128
/*
 * Synchronous garbage collection: walk every hash bucket under tbl->lock
 * and evict entries that are unreferenced (refcnt == 1, i.e. only the
 * table's own reference) and not NUD_PERMANENT.  Updates tbl->last_flush
 * and returns 1 if at least one entry was reclaimed, 0 otherwise.
 */
129 static int neigh_forced_gc(struct neigh_table *tbl)
130 {
131         int shrunk = 0;
132         int i;
133
134         NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
135
136         write_lock_bh(&tbl->lock);
137         for (i = 0; i <= tbl->hash_mask; i++) {
138                 struct neighbour *n, **np;
139
140                 np = &tbl->hash_buckets[i];
141                 while ((n = *np) != NULL) {
142                         /* Neighbour record may be discarded if:
143                          * - nobody refers to it.
144                          * - it is not permanent
145                          */
146                         write_lock(&n->lock);
147                         if (atomic_read(&n->refcnt) == 1 &&
148                             !(n->nud_state & NUD_PERMANENT)) {
149                                 *np     = n->next;
150                                 n->dead = 1;
151                                 shrunk  = 1;
152                                 write_unlock(&n->lock);
153                                 neigh_cleanup_and_release(n);
154                                 continue;
155                         }
156                         write_unlock(&n->lock);
157                         np = &n->next;
158                 }
159         }
160
161         tbl->last_flush = jiffies;
162
163         write_unlock_bh(&tbl->lock);
164
165         return shrunk;
166 }
167
/*
 * Arm the per-entry timer for @when and take a reference that the timer
 * handler releases.  mod_timer() returning nonzero means the timer was
 * already pending — that is a double-add bug here, so it is logged loudly.
 * NOTE(review): callers appear to hold neigh->lock when calling this —
 * confirm against all call sites.
 */
168 static void neigh_add_timer(struct neighbour *n, unsigned long when)
169 {
170         neigh_hold(n);
171         if (unlikely(mod_timer(&n->timer, when))) {
172                 printk("NEIGH: BUG, double timer add, state is %x\n",
173                        n->nud_state);
174                 dump_stack();
175         }
176 }
177
/*
 * Cancel a pending entry timer.  Only if the state says a timer may be
 * running (NUD_IN_TIMER) and del_timer() actually removed it do we drop
 * the reference neigh_add_timer() took.  Returns 1 when a timer was
 * cancelled, 0 otherwise.
 */
178 static int neigh_del_timer(struct neighbour *n)
179 {
180         if ((n->nud_state & NUD_IN_TIMER) &&
181             del_timer(&n->timer)) {
182                 neigh_release(n);
183                 return 1;
184         }
185         return 0;
186 }
187
/*
 * Drain the proxy-ARP queue: each queued skb holds a device reference
 * (skb->dev), so drop that reference before freeing the skb itself.
 */
188 static void pneigh_queue_purge(struct sk_buff_head *list)
189 {
190         struct sk_buff *skb;
191
192         while ((skb = skb_dequeue(list)) != NULL) {
193                 dev_put(skb->dev);
194                 kfree_skb(skb);
195         }
196 }
197
/*
 * Unlink every entry bound to @dev (or every entry at all when @dev is
 * NULL) from the hash table.  Called with tbl->lock write-held (see
 * neigh_changeaddr() and neigh_ifdown()).  Entries still referenced by
 * others cannot be freed yet, so they are neutered in place: timers are
 * killed, the arp_queue is purged, output is redirected to the blackhole,
 * and the state is forced to NUD_NOARP/NUD_NONE until the last user
 * releases them.
 */
198 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
199 {
200         int i;
201
202         for (i = 0; i <= tbl->hash_mask; i++) {
203                 struct neighbour *n, **np = &tbl->hash_buckets[i];
204
205                 while ((n = *np) != NULL) {
206                         if (dev && n->dev != dev) {
207                                 np = &n->next;
208                                 continue;
209                         }
210                         *np = n->next;
211                         write_lock(&n->lock);
212                         neigh_del_timer(n);
213                         n->dead = 1;
214
215                         if (atomic_read(&n->refcnt) != 1) {
216                                 /* The most unpleasant situation.
217                                    We must destroy neighbour entry,
218                                    but someone still uses it.
219
220                                    The destroy will be delayed until
221                                    the last user releases us, but
222                                    we must kill timers etc. and move
223                                    it to safe state.
224                                  */
225                                 skb_queue_purge(&n->arp_queue);
226                                 n->output = neigh_blackhole;
227                                 if (n->nud_state & NUD_VALID)
228                                         n->nud_state = NUD_NOARP;
229                                 else
230                                         n->nud_state = NUD_NONE;
231                                 NEIGH_PRINTK2("neigh %p is stray.\n", n);
232                         }
233                         write_unlock(&n->lock);
234                         neigh_cleanup_and_release(n);
235                 }
236         }
237 }
238
/*
 * Flush all cache entries for @dev after its hardware address changed;
 * takes tbl->lock around the flush.
 */
239 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
240 {
241         write_lock_bh(&tbl->lock);
242         neigh_flush_dev(tbl, dev);
243         write_unlock_bh(&tbl->lock);
244 }
245 EXPORT_SYMBOL(neigh_changeaddr);
246
/*
 * Device-down teardown: flush both the neighbour cache and the proxy
 * entries for @dev under tbl->lock, then (outside the lock) stop the
 * proxy timer and drain its queue.  Always returns 0.
 */
247 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
248 {
249         write_lock_bh(&tbl->lock);
250         neigh_flush_dev(tbl, dev);
251         pneigh_ifdown(tbl, dev);
252         write_unlock_bh(&tbl->lock);
253
254         del_timer_sync(&tbl->proxy_timer);
255         pneigh_queue_purge(&tbl->proxy_queue);
256         return 0;
257 }
258 EXPORT_SYMBOL(neigh_ifdown);
259
/*
 * Allocate and minimally initialize a new neighbour entry for @tbl.
 * Enforces the gc thresholds first: above gc_thresh3 (or above
 * gc_thresh2 with the last flush more than 5s ago) a forced GC is
 * attempted, and allocation is refused if it frees nothing and we are
 * still at/above gc_thresh3.  The entry is returned with refcnt 1,
 * state NUD_NONE, output set to the blackhole, and dead = 1 — it only
 * becomes live once neigh_create() links it into the hash table.
 * Returns NULL on failure (with tbl->entries rolled back).
 */
260 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
261 {
262         struct neighbour *n = NULL;
263         unsigned long now = jiffies;
264         int entries;
265
266         entries = atomic_inc_return(&tbl->entries) - 1;
267         if (entries >= tbl->gc_thresh3 ||
268             (entries >= tbl->gc_thresh2 &&
269              time_after(now, tbl->last_flush + 5 * HZ))) {
270                 if (!neigh_forced_gc(tbl) &&
271                     entries >= tbl->gc_thresh3)
272                         goto out_entries;
273         }
274
275         n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
276         if (!n)
277                 goto out_entries;
278
279         skb_queue_head_init(&n->arp_queue);
280         rwlock_init(&n->lock);
281         n->updated        = n->used = now;
282         n->nud_state      = NUD_NONE;
283         n->output         = neigh_blackhole;
284         n->parms          = neigh_parms_clone(&tbl->parms);
285         setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
286
287         NEIGH_CACHE_STAT_INC(tbl, allocs);
288         n->tbl            = tbl;
289         atomic_set(&n->refcnt, 1);
290         n->dead           = 1;
291 out:
292         return n;
293
294 out_entries:
295         atomic_dec(&tbl->entries);
296         goto out;
297 }
298
299 static struct neighbour **neigh_hash_alloc(unsigned int entries)
300 {
301         unsigned long size = entries * sizeof(struct neighbour *);
302         struct neighbour **ret;
303
304         if (size <= PAGE_SIZE) {
305                 ret = kzalloc(size, GFP_ATOMIC);
306         } else {
307                 ret = (struct neighbour **)
308                       __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
309         }
310         return ret;
311 }
312
313 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
314 {
315         unsigned long size = entries * sizeof(struct neighbour *);
316
317         if (size <= PAGE_SIZE)
318                 kfree(hash);
319         else
320                 free_pages((unsigned long)hash, get_order(size));
321 }
322
/*
 * Grow the hash table to @new_entries buckets (must be a power of two).
 * Called with tbl->lock write-held (see neigh_create()).  A fresh
 * hash_rnd is drawn before rehashing, so every entry is redistributed.
 * On allocation failure the old table is simply kept.
 */
323 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
324 {
325         struct neighbour **new_hash, **old_hash;
326         unsigned int i, new_hash_mask, old_entries;
327
328         NEIGH_CACHE_STAT_INC(tbl, hash_grows);
329
330         BUG_ON(!is_power_of_2(new_entries));
331         new_hash = neigh_hash_alloc(new_entries);
332         if (!new_hash)
333                 return;
334
335         old_entries = tbl->hash_mask + 1;
336         new_hash_mask = new_entries - 1;
337         old_hash = tbl->hash_buckets;
338
339         get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
340         for (i = 0; i < old_entries; i++) {
341                 struct neighbour *n, *next;
342
343                 for (n = old_hash[i]; n; n = next) {
344                         unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
345
346                         hash_val &= new_hash_mask;
347                         next = n->next;
348
349                         n->next = new_hash[hash_val];
350                         new_hash[hash_val] = n;
351                 }
352         }
353         tbl->hash_buckets = new_hash;
354         tbl->hash_mask = new_hash_mask;
355
356         neigh_hash_free(old_hash, old_entries);
357 }
358
/*
 * Look up the entry keyed by (@pkey, @dev) in @tbl.  Takes tbl->lock
 * for reading; on a hit the entry is returned with its refcount raised
 * (caller must neigh_release()).  Returns NULL if not found.
 */
359 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
360                                struct net_device *dev)
361 {
362         struct neighbour *n;
363         int key_len = tbl->key_len;
364         u32 hash_val;
365
366         NEIGH_CACHE_STAT_INC(tbl, lookups);
367
368         read_lock_bh(&tbl->lock);
369         hash_val = tbl->hash(pkey, dev);
370         for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
371                 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
372                         neigh_hold(n);
373                         NEIGH_CACHE_STAT_INC(tbl, hits);
374                         break;
375                 }
376         }
377         read_unlock_bh(&tbl->lock);
378         return n;
379 }
380 EXPORT_SYMBOL(neigh_lookup);
381
/*
 * Like neigh_lookup(), but matches on key and network namespace only,
 * ignoring the device (the hash is computed with dev == NULL).  On a hit
 * the entry is returned with its refcount raised; NULL otherwise.
 */
382 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
383                                      const void *pkey)
384 {
385         struct neighbour *n;
386         int key_len = tbl->key_len;
387         u32 hash_val;
388
389         NEIGH_CACHE_STAT_INC(tbl, lookups);
390
391         read_lock_bh(&tbl->lock);
392         hash_val = tbl->hash(pkey, NULL);
393         for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
394                 if (!memcmp(n->primary_key, pkey, key_len) &&
395                     net_eq(dev_net(n->dev), net)) {
396                         neigh_hold(n);
397                         NEIGH_CACHE_STAT_INC(tbl, hits);
398                         break;
399                 }
400         }
401         read_unlock_bh(&tbl->lock);
402         return n;
403 }
404 EXPORT_SYMBOL(neigh_lookup_nodev);
405
/*
 * Create (or find) the entry for (@pkey, @dev) in @tbl.  Allocates and
 * initializes a candidate outside tbl->lock — including the protocol
 * constructor and the device's neigh_setup hook — then takes the lock,
 * possibly grows the hash table, and re-checks for a concurrently
 * inserted duplicate.  If one exists, the duplicate wins and the
 * candidate is released; otherwise the candidate is linked in with
 * dead = 0 and an extra reference for the caller.
 *
 * Returns the entry (refcount raised) or an ERR_PTR: -ENOBUFS when
 * allocation fails, -EINVAL when the parms block died meanwhile, or the
 * constructor/setup error.
 */
406 struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
407                                struct net_device *dev)
408 {
409         u32 hash_val;
410         int key_len = tbl->key_len;
411         int error;
412         struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
413
414         if (!n) {
415                 rc = ERR_PTR(-ENOBUFS);
416                 goto out;
417         }
418
419         memcpy(n->primary_key, pkey, key_len);
420         n->dev = dev;
421         dev_hold(dev);
422
423         /* Protocol specific setup. */
424         if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
425                 rc = ERR_PTR(error);
426                 goto out_neigh_release;
427         }
428
429         /* Device specific setup. */
430         if (n->parms->neigh_setup &&
431             (error = n->parms->neigh_setup(n)) < 0) {
432                 rc = ERR_PTR(error);
433                 goto out_neigh_release;
434         }
435
436         n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
437
438         write_lock_bh(&tbl->lock);
439
440         if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
441                 neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
442
443         hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
444
445         if (n->parms->dead) {
446                 rc = ERR_PTR(-EINVAL);
447                 goto out_tbl_unlock;
448         }
449
450         for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
451                 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
452                         neigh_hold(n1);
453                         rc = n1;
454                         goto out_tbl_unlock;
455                 }
456         }
457
458         n->next = tbl->hash_buckets[hash_val];
459         tbl->hash_buckets[hash_val] = n;
460         n->dead = 0;
461         neigh_hold(n);
462         write_unlock_bh(&tbl->lock);
463         NEIGH_PRINTK2("neigh %p is created.\n", n);
464         rc = n;
465 out:
466         return rc;
467 out_tbl_unlock:
468         write_unlock_bh(&tbl->lock);
469 out_neigh_release:
470         neigh_release(n);
471         goto out;
472 }
473 EXPORT_SYMBOL(neigh_create);
474
/*
 * Hash a proxy-neighbour key: fold the last 4 bytes of the key down to
 * PNEIGH_HASHMASK bits via xor-shifts.  Note this reads pkey[key_len-4..],
 * so it assumes key_len >= 4 (true for IPv4/IPv6 addresses); the void
 * pointer arithmetic is a GCC extension.
 */
475 static u32 pneigh_hash(const void *pkey, int key_len)
476 {
477         u32 hash_val = *(u32 *)(pkey + key_len - 4);
478         hash_val ^= (hash_val >> 16);
479         hash_val ^= hash_val >> 8;
480         hash_val ^= hash_val >> 4;
481         hash_val &= PNEIGH_HASHMASK;
482         return hash_val;
483 }
484
485 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
486                                               struct net *net,
487                                               const void *pkey,
488                                               int key_len,
489                                               struct net_device *dev)
490 {
491         while (n) {
492                 if (!memcmp(n->key, pkey, key_len) &&
493                     net_eq(pneigh_net(n), net) &&
494                     (n->dev == dev || !n->dev))
495                         return n;
496                 n = n->next;
497         }
498         return NULL;
499 }
500
/*
 * Lockless proxy lookup in the bucket chosen by pneigh_hash().
 * NOTE(review): no locking is taken here, unlike pneigh_lookup() —
 * presumably the caller holds tbl->lock; confirm at call sites.
 */
501 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
502                 struct net *net, const void *pkey, struct net_device *dev)
503 {
504         int key_len = tbl->key_len;
505         u32 hash_val = pneigh_hash(pkey, key_len);
506
507         return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
508                                  net, pkey, key_len, dev);
509 }
510 EXPORT_SYMBOL_GPL(__pneigh_lookup);
511
/*
 * Find the proxy entry for (@pkey, @net, @dev); when @creat is nonzero
 * and no entry exists, allocate and insert one (GFP_KERNEL, so this
 * path requires RTNL and process context — hence the ASSERT_RTNL()).
 * The new entry takes references on @net and, when set, @dev; a failing
 * pconstructor unwinds both and returns NULL.  Returns the entry (not
 * refcounted) or NULL.
 */
512 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
513                                     struct net *net, const void *pkey,
514                                     struct net_device *dev, int creat)
515 {
516         struct pneigh_entry *n;
517         int key_len = tbl->key_len;
518         u32 hash_val = pneigh_hash(pkey, key_len);
519
520         read_lock_bh(&tbl->lock);
521         n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
522                               net, pkey, key_len, dev);
523         read_unlock_bh(&tbl->lock);
524
525         if (n || !creat)
526                 goto out;
527
528         ASSERT_RTNL();
529
530         n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
531         if (!n)
532                 goto out;
533
534         write_pnet(&n->net, hold_net(net));
535         memcpy(n->key, pkey, key_len);
536         n->dev = dev;
537         if (dev)
538                 dev_hold(dev);
539
540         if (tbl->pconstructor && tbl->pconstructor(n)) {
541                 if (dev)
542                         dev_put(dev);
543                 release_net(net);
544                 kfree(n);
545                 n = NULL;
546                 goto out;
547         }
548
549         write_lock_bh(&tbl->lock);
550         n->next = tbl->phash_buckets[hash_val];
551         tbl->phash_buckets[hash_val] = n;
552         write_unlock_bh(&tbl->lock);
553 out:
554         return n;
555 }
556 EXPORT_SYMBOL(pneigh_lookup);
557
558
/*
 * Remove and free the proxy entry matching (@pkey, @net, @dev) exactly
 * (no wildcard device match here, unlike lookup).  The entry is
 * unlinked under tbl->lock, but the destructor, dev_put() and kfree()
 * run after the lock is dropped.  Returns 0 on success, -ENOENT if no
 * such entry exists.
 */
559 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
560                   struct net_device *dev)
561 {
562         struct pneigh_entry *n, **np;
563         int key_len = tbl->key_len;
564         u32 hash_val = pneigh_hash(pkey, key_len);
565
566         write_lock_bh(&tbl->lock);
567         for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
568              np = &n->next) {
569                 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
570                     net_eq(pneigh_net(n), net)) {
571                         *np = n->next;
572                         write_unlock_bh(&tbl->lock);
573                         if (tbl->pdestructor)
574                                 tbl->pdestructor(n);
575                         if (n->dev)
576                                 dev_put(n->dev);
577                         release_net(pneigh_net(n));
578                         kfree(n);
579                         return 0;
580                 }
581         }
582         write_unlock_bh(&tbl->lock);
583         return -ENOENT;
584 }
585
/*
 * Drop every proxy entry bound to @dev (all entries when @dev is NULL).
 * Called with tbl->lock write-held (see neigh_ifdown()), which is why —
 * unlike pneigh_delete() — destructor/dev_put/kfree run under the lock.
 * Always returns -ENOENT; no caller shown here uses the return value.
 */
586 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
587 {
588         struct pneigh_entry *n, **np;
589         u32 h;
590
591         for (h = 0; h <= PNEIGH_HASHMASK; h++) {
592                 np = &tbl->phash_buckets[h];
593                 while ((n = *np) != NULL) {
594                         if (!dev || n->dev == dev) {
595                                 *np = n->next;
596                                 if (tbl->pdestructor)
597                                         tbl->pdestructor(n);
598                                 if (n->dev)
599                                         dev_put(n->dev);
600                                 release_net(pneigh_net(n));
601                                 kfree(n);
602                                 continue;
603                         }
604                         np = &n->next;
605                 }
606         }
607         return -ENOENT;
608 }
609
610 static void neigh_parms_destroy(struct neigh_parms *parms);
611
/*
 * Drop one reference on a parms block; destroy it when the count
 * reaches zero.
 */
612 static inline void neigh_parms_put(struct neigh_parms *parms)
613 {
614         if (atomic_dec_and_test(&parms->refcnt))
615                 neigh_parms_destroy(parms);
616 }
617
618 /*
619  *      neighbour must already be out of the table;
620  *
621  */
/*
 * Free a neighbour entry whose last reference is gone.  The entry must
 * already be unlinked (dead != 0) — a live entry is only warned about
 * and leaked rather than corrupting the table.  Detaches every cached
 * hh_cache header (pointing its output at the blackhole under its
 * seqlock before dropping the hh refcount), purges the arp_queue, and
 * releases the device, parms and table-entry accounting.
 */
622 void neigh_destroy(struct neighbour *neigh)
623 {
624         struct hh_cache *hh;
625
626         NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
627
628         if (!neigh->dead) {
629                 printk(KERN_WARNING
630                        "Destroying alive neighbour %p\n", neigh);
631                 dump_stack();
632                 return;
633         }
634
635         if (neigh_del_timer(neigh))
636                 printk(KERN_WARNING "Impossible event.\n");
637
638         while ((hh = neigh->hh) != NULL) {
639                 neigh->hh = hh->hh_next;
640                 hh->hh_next = NULL;
641
642                 write_seqlock_bh(&hh->hh_lock);
643                 hh->hh_output = neigh_blackhole;
644                 write_sequnlock_bh(&hh->hh_lock);
645                 if (atomic_dec_and_test(&hh->hh_refcnt))
646                         kfree(hh);
647         }
648
649         skb_queue_purge(&neigh->arp_queue);
650
651         dev_put(neigh->dev);
652         neigh_parms_put(neigh->parms);
653
654         NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
655
656         atomic_dec(&neigh->tbl->entries);
657         kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
658 }
659 EXPORT_SYMBOL(neigh_destroy);
660
661 /* Neighbour state is suspicious;
662    disable fast path.
663
664    Called with write_locked neigh.
665  */
/*
 * Route all output (including every cached hh header) through the slow
 * ops->output path while the entry's reachability is in doubt.
 */
666 static void neigh_suspect(struct neighbour *neigh)
667 {
668         struct hh_cache *hh;
669
670         NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
671
672         neigh->output = neigh->ops->output;
673
674         for (hh = neigh->hh; hh; hh = hh->hh_next)
675                 hh->hh_output = neigh->ops->output;
676 }
677
678 /* Neighbour state is OK;
679    enable fast path.
680
681    Called with write_locked neigh.
682  */
/*
 * Re-enable the fast path: switch the entry to connected_output and
 * every cached hh header to the precomputed hh_output.
 */
683 static void neigh_connect(struct neighbour *neigh)
684 {
685         struct hh_cache *hh;
686
687         NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
688
689         neigh->output = neigh->ops->connected_output;
690
691         for (hh = neigh->hh; hh; hh = hh->hh_next)
692                 hh->hh_output = neigh->ops->hh_output;
693 }
694
/*
 * Periodic GC timer for one table.  Each run processes a single hash
 * bucket (hash_chain_gc advances round-robin), evicting entries that
 * are unreferenced and either NUD_FAILED or idle past gc_staletime.
 * Every 300s it also re-randomizes reachable_time for all parms blocks.
 * The timer is rescheduled so that all buckets are covered about every
 * base_reachable_time/2 ticks.
 */
695 static void neigh_periodic_timer(unsigned long arg)
696 {
697         struct neigh_table *tbl = (struct neigh_table *)arg;
698         struct neighbour *n, **np;
699         unsigned long expire, now = jiffies;
700
701         NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
702
703         write_lock(&tbl->lock);
704
705         /*
706          *      periodically recompute ReachableTime from random function
707          */
708
709         if (time_after(now, tbl->last_rand + 300 * HZ)) {
710                 struct neigh_parms *p;
711                 tbl->last_rand = now;
712                 for (p = &tbl->parms; p; p = p->next)
713                         p->reachable_time =
714                                 neigh_rand_reach_time(p->base_reachable_time);
715         }
716
717         np = &tbl->hash_buckets[tbl->hash_chain_gc];
718         tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
719
720         while ((n = *np) != NULL) {
721                 unsigned int state;
722
723                 write_lock(&n->lock);
724
725                 state = n->nud_state;
726                 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
727                         write_unlock(&n->lock);
728                         goto next_elt;
729                 }
730
731                 if (time_before(n->used, n->confirmed))
732                         n->used = n->confirmed;
733
734                 if (atomic_read(&n->refcnt) == 1 &&
735                     (state == NUD_FAILED ||
736                      time_after(now, n->used + n->parms->gc_staletime))) {
737                         *np = n->next;
738                         n->dead = 1;
739                         write_unlock(&n->lock);
740                         neigh_cleanup_and_release(n);
741                         continue;
742                 }
743                 write_unlock(&n->lock);
744
745 next_elt:
746                 np = &n->next;
747         }
748
749         /* Cycle through all hash buckets every base_reachable_time/2 ticks.
750          * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
751          * base_reachable_time.
752          */
753         expire = tbl->parms.base_reachable_time >> 1;
754         expire /= (tbl->hash_mask + 1);
755         if (!expire)
756                 expire = 1;
757
758         if (expire>HZ)
759                 mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
760         else
761                 mod_timer(&tbl->gc_timer, now + expire);
762
763         write_unlock(&tbl->lock);
764 }
765
766 static __inline__ int neigh_max_probes(struct neighbour *n)
767 {
768         struct neigh_parms *p = n->parms;
769         return (n->nud_state & NUD_PROBE ?
770                 p->ucast_probes :
771                 p->ucast_probes + p->app_probes + p->mcast_probes);
772 }
773
/*
 * Called when resolution has failed (entry just moved to NUD_FAILED),
 * with neigh->lock write-held: report unreachability for each queued
 * skb via ops->error_report — dropping the lock around each callback,
 * and re-checking the state afterwards since error_report may touch
 * this very entry — then purge whatever remains queued.
 */
774 static void neigh_invalidate(struct neighbour *neigh)
775 {
776         struct sk_buff *skb;
777
778         NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
779         NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
780         neigh->updated = jiffies;
781
782         /* It is very thin place. report_unreachable is very complicated
783            routine. Particularly, it can hit the same neighbour entry!
784
785            So that, we try to be accurate and avoid dead loop. --ANK
786          */
787         while (neigh->nud_state == NUD_FAILED &&
788                (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
789                 write_unlock(&neigh->lock);
790                 neigh->ops->error_report(neigh, skb);
791                 write_lock(&neigh->lock);
792         }
793         skb_queue_purge(&neigh->arp_queue);
794 }
795
796 /* Called when a timer expires for a neighbour entry. */
797
/*
 * Per-entry NUD state machine, run on timer expiry:
 *   REACHABLE  -> stays (confirmed recently), DELAY (used recently),
 *                 or STALE (idle) — the latter two suspend the fast path.
 *   DELAY      -> REACHABLE if confirmed within delay_probe_time,
 *                 else PROBE with the probe counter reset.
 *   PROBE/INCOMPLETE -> retransmit after retrans_time; once the probe
 *                 budget (neigh_max_probes) is exhausted, FAILED +
 *                 neigh_invalidate().
 * If still in a timered state, the timer is re-armed (clamped to at
 * least HZ/2 ahead); for INCOMPLETE/PROBE a solicitation is sent with
 * the lock dropped (using a copy of the head skb so arp_queue overflow
 * can't free it underneath us).  Finally releases the reference taken
 * when the timer was armed.
 */
798 static void neigh_timer_handler(unsigned long arg)
799 {
800         unsigned long now, next;
801         struct neighbour *neigh = (struct neighbour *)arg;
802         unsigned state;
803         int notify = 0;
804
805         write_lock(&neigh->lock);
806
807         state = neigh->nud_state;
808         now = jiffies;
809         next = now + HZ;
810
811         if (!(state & NUD_IN_TIMER)) {
812 #ifndef CONFIG_SMP
813                 printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
814 #endif
815                 goto out;
816         }
817
818         if (state & NUD_REACHABLE) {
819                 if (time_before_eq(now,
820                                    neigh->confirmed + neigh->parms->reachable_time)) {
821                         NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
822                         next = neigh->confirmed + neigh->parms->reachable_time;
823                 } else if (time_before_eq(now,
824                                           neigh->used + neigh->parms->delay_probe_time)) {
825                         NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
826                         neigh->nud_state = NUD_DELAY;
827                         neigh->updated = jiffies;
828                         neigh_suspect(neigh);
829                         next = now + neigh->parms->delay_probe_time;
830                 } else {
831                         NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
832                         neigh->nud_state = NUD_STALE;
833                         neigh->updated = jiffies;
834                         neigh_suspect(neigh);
835                         notify = 1;
836                 }
837         } else if (state & NUD_DELAY) {
838                 if (time_before_eq(now,
839                                    neigh->confirmed + neigh->parms->delay_probe_time)) {
840                         NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
841                         neigh->nud_state = NUD_REACHABLE;
842                         neigh->updated = jiffies;
843                         neigh_connect(neigh);
844                         notify = 1;
845                         next = neigh->confirmed + neigh->parms->reachable_time;
846                 } else {
847                         NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
848                         neigh->nud_state = NUD_PROBE;
849                         neigh->updated = jiffies;
850                         atomic_set(&neigh->probes, 0);
851                         next = now + neigh->parms->retrans_time;
852                 }
853         } else {
854                 /* NUD_PROBE|NUD_INCOMPLETE */
855                 next = now + neigh->parms->retrans_time;
856         }
857
858         if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
859             atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
860                 neigh->nud_state = NUD_FAILED;
861                 notify = 1;
862                 neigh_invalidate(neigh);
863         }
864
865         if (neigh->nud_state & NUD_IN_TIMER) {
866                 if (time_before(next, jiffies + HZ/2))
867                         next = jiffies + HZ/2;
868                 if (!mod_timer(&neigh->timer, next))
869                         neigh_hold(neigh);
870         }
871         if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
872                 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
873                 /* keep skb alive even if arp_queue overflows */
874                 if (skb)
875                         skb = skb_copy(skb, GFP_ATOMIC);
876                 write_unlock(&neigh->lock);
877                 neigh->ops->solicit(neigh, skb);
878                 atomic_inc(&neigh->probes);
879                 kfree_skb(skb);
880         } else {
881 out:
882                 write_unlock(&neigh->lock);
883         }
884
885         if (notify)
886                 neigh_update_notify(neigh);
887
888         neigh_release(neigh);
889 }
890
/*
 * Kick off (or continue) address resolution for @neigh, optionally
 * queueing @skb until it completes.  Returns 0 when the entry is usable
 * now (CONNECTED/DELAY/PROBE — caller may transmit immediately) and 1
 * when the skb was queued or dropped pending resolution:
 *   - fresh entry with probes configured: -> INCOMPLETE, timer armed;
 *   - fresh entry with no probes configured: -> FAILED, skb freed;
 *   - STALE: -> DELAY with the delay-probe timer armed.
 * While INCOMPLETE, @skb is appended to arp_queue; if the queue is at
 * parms->queue_len, the oldest skb is dropped (unres_discards stat).
 */
891 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
892 {
893         int rc;
894         unsigned long now;
895
896         write_lock_bh(&neigh->lock);
897
898         rc = 0;
899         if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
900                 goto out_unlock_bh;
901
902         now = jiffies;
903
904         if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
905                 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
906                         atomic_set(&neigh->probes, neigh->parms->ucast_probes);
907                         neigh->nud_state     = NUD_INCOMPLETE;
908                         neigh->updated = jiffies;
909                         neigh_add_timer(neigh, now + 1);
910                 } else {
911                         neigh->nud_state = NUD_FAILED;
912                         neigh->updated = jiffies;
913                         write_unlock_bh(&neigh->lock);
914
915                         kfree_skb(skb);
916                         return 1;
917                 }
918         } else if (neigh->nud_state & NUD_STALE) {
919                 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
920                 neigh->nud_state = NUD_DELAY;
921                 neigh->updated = jiffies;
922                 neigh_add_timer(neigh,
923                                 jiffies + neigh->parms->delay_probe_time);
924         }
925
926         if (neigh->nud_state == NUD_INCOMPLETE) {
927                 if (skb) {
928                         if (skb_queue_len(&neigh->arp_queue) >=
929                             neigh->parms->queue_len) {
930                                 struct sk_buff *buff;
931                                 buff = __skb_dequeue(&neigh->arp_queue);
932                                 kfree_skb(buff);
933                                 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
934                         }
935                         __skb_queue_tail(&neigh->arp_queue, skb);
936                 }
937                 rc = 1;
938         }
939 out_unlock_bh:
940         write_unlock_bh(&neigh->lock);
941         return rc;
942 }
943 EXPORT_SYMBOL(__neigh_event_send);
944
945 static void neigh_update_hhs(struct neighbour *neigh)
946 {
947         struct hh_cache *hh;
948         void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
949                 = neigh->dev->header_ops->cache_update;
950
951         if (update) {
952                 for (hh = neigh->hh; hh; hh = hh->hh_next) {
953                         write_seqlock_bh(&hh->hh_lock);
954                         update(hh, neigh->dev, neigh->ha);
955                         write_sequnlock_bh(&hh->hh_lock);
956                 }
957         }
958 }
959
960
961
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if none is supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will mark an existing "connected"
				lladdr as suspect instead of overriding it
				if the addresses differ.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				to be a router.

   Caller MUST hold a reference count on the entry.
 */
982
/*
 * neigh_update - apply a state/address change to a neighbour entry
 * @neigh:  entry to update; caller must hold a reference on it
 * @lladdr: new link-layer address, or NULL if none was supplied
 * @new:    requested NUD state
 * @flags:  NEIGH_UPDATE_F_* control bits (see the comment above)
 *
 * Returns 0 on success or a negative errno (-EPERM for a
 * non-administrative change to a NOARP/PERMANENT entry, -EINVAL when
 * no address is supplied and none is cached).  Takes neigh->lock;
 * any netlink/netevent notification is sent after it is dropped.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	/* NOARP/PERMANENT entries may only be changed administratively. */
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		/* Transition to an invalid state: stop the state machine,
		 * demote the output path and, on resolution failure,
		 * flush everything still waiting on this entry. */
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				/* Weak override: keep the cached address
				 * but mark the entry suspect. */
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		/* (Re)arm the state timer for states that require one;
		 * REACHABLE expires after the randomized reachable_time. */
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		/* Address changed: propagate it into the cached hardware
		 * headers and backdate 'confirmed' so the entry gets
		 * re-verified soon unless it is already CONNECTED. */
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb_dst(skb) && skb_dst(skb)->neighbour)
				n1 = skb_dst(skb)->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
1121 EXPORT_SYMBOL(neigh_update);
1122
1123 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1124                                  u8 *lladdr, void *saddr,
1125                                  struct net_device *dev)
1126 {
1127         struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1128                                                  lladdr || !dev->addr_len);
1129         if (neigh)
1130                 neigh_update(neigh, lladdr, NUD_STALE,
1131                              NEIGH_UPDATE_F_OVERRIDE);
1132         return neigh;
1133 }
1134 EXPORT_SYMBOL(neigh_event_ns);
1135
/*
 * Attach a cached hardware header for @protocol to @dst, creating a
 * new hh_cache entry on @n if none exists for that protocol yet.
 * Called with n->lock held for writing (see neigh_resolve_output),
 * which serializes list manipulation and refcounting here.
 */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  __be16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	/* Reuse an existing cached header for this protocol if any. */
	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		seqlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;

		/* Ask the device to fill in the cached header bytes;
		 * non-zero means it could not. */
		if (dev->header_ops->cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			/* One reference for the neighbour's hh list. */
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh       = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		/* And one reference for the dst that now points at it. */
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
1170
1171 /* This function can be used in contexts, where only old dev_queue_xmit
1172    worked, f.e. if you want to override normal output path (eql, shaper),
1173    but resolution is not made yet.
1174  */
1175
1176 int neigh_compat_output(struct sk_buff *skb)
1177 {
1178         struct net_device *dev = skb->dev;
1179
1180         __skb_pull(skb, skb_network_offset(skb));
1181
1182         if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1183                             skb->len) < 0 &&
1184             dev->header_ops->rebuild(skb))
1185                 return 0;
1186
1187         return dev_queue_xmit(skb);
1188 }
1189 EXPORT_SYMBOL(neigh_compat_output);
1190
1191 /* Slow and careful. */
1192
/*
 * Output path used while the neighbour may still be unresolved.
 * neigh_event_send() kicks/continues resolution; only once it reports
 * the entry usable is the link-layer header built (and, when the
 * device supports it, a cached header attached to the dst).
 * Returns the transmit result, 0, or -EINVAL on error.
 */
int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb_network_offset(skb));

	/* 0 => usable now; non-zero => skb consumed by resolution. */
	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->header_ops->cache && !dst->hh) {
			/* May need to create the cached header, so take
			 * the write side and re-check dst->hh under it. */
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			/* Read lock suffices for a stable neigh->ha. */
			read_lock_bh(&neigh->lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
1235 EXPORT_SYMBOL(neigh_resolve_output);
1236
1237 /* As fast as possible without hh cache */
1238
1239 int neigh_connected_output(struct sk_buff *skb)
1240 {
1241         int err;
1242         struct dst_entry *dst = skb_dst(skb);
1243         struct neighbour *neigh = dst->neighbour;
1244         struct net_device *dev = neigh->dev;
1245
1246         __skb_pull(skb, skb_network_offset(skb));
1247
1248         read_lock_bh(&neigh->lock);
1249         err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1250                               neigh->ha, NULL, skb->len);
1251         read_unlock_bh(&neigh->lock);
1252         if (err >= 0)
1253                 err = neigh->ops->queue_xmit(skb);
1254         else {
1255                 err = -EINVAL;
1256                 kfree_skb(skb);
1257         }
1258         return err;
1259 }
1260 EXPORT_SYMBOL(neigh_connected_output);
1261
1262 static void neigh_proxy_process(unsigned long arg)
1263 {
1264         struct neigh_table *tbl = (struct neigh_table *)arg;
1265         long sched_next = 0;
1266         unsigned long now = jiffies;
1267         struct sk_buff *skb, *n;
1268
1269         spin_lock(&tbl->proxy_queue.lock);
1270
1271         skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1272                 long tdif = NEIGH_CB(skb)->sched_next - now;
1273
1274                 if (tdif <= 0) {
1275                         struct net_device *dev = skb->dev;
1276                         __skb_unlink(skb, &tbl->proxy_queue);
1277                         if (tbl->proxy_redo && netif_running(dev))
1278                                 tbl->proxy_redo(skb);
1279                         else
1280                                 kfree_skb(skb);
1281
1282                         dev_put(dev);
1283                 } else if (!sched_next || tdif < sched_next)
1284                         sched_next = tdif;
1285         }
1286         del_timer(&tbl->proxy_timer);
1287         if (sched_next)
1288                 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1289         spin_unlock(&tbl->proxy_queue.lock);
1290 }
1291
1292 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1293                     struct sk_buff *skb)
1294 {
1295         unsigned long now = jiffies;
1296         unsigned long sched_next = now + (net_random() % p->proxy_delay);
1297
1298         if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1299                 kfree_skb(skb);
1300                 return;
1301         }
1302
1303         NEIGH_CB(skb)->sched_next = sched_next;
1304         NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1305
1306         spin_lock(&tbl->proxy_queue.lock);
1307         if (del_timer(&tbl->proxy_timer)) {
1308                 if (time_before(tbl->proxy_timer.expires, sched_next))
1309                         sched_next = tbl->proxy_timer.expires;
1310         }
1311         skb_dst_drop(skb);
1312         dev_hold(skb->dev);
1313         __skb_queue_tail(&tbl->proxy_queue, skb);
1314         mod_timer(&tbl->proxy_timer, sched_next);
1315         spin_unlock(&tbl->proxy_queue.lock);
1316 }
1317 EXPORT_SYMBOL(pneigh_enqueue);
1318
1319 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1320                                                       struct net *net, int ifindex)
1321 {
1322         struct neigh_parms *p;
1323
1324         for (p = &tbl->parms; p; p = p->next) {
1325                 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1326                     (!p->dev && !ifindex))
1327                         return p;
1328         }
1329
1330         return NULL;
1331 }
1332
/*
 * Clone the table's netns-default parameters for @dev, give the
 * driver a chance to adjust (or veto) them via ndo_neigh_setup, and
 * link the copy into the table's parms list.  Holds references on
 * @dev and its netns for the lifetime of the parms.
 * Returns the new parms or NULL on failure.
 */
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p, *ref;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	/* Base the copy on the default parms for this namespace. */
	ref = lookup_neigh_params(tbl, net, 0);
	if (!ref)
		return NULL;

	p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl            = tbl;
		atomic_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);

		/* Driver veto: free the copy before any refs are taken. */
		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			kfree(p);
			return NULL;
		}

		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, hold_net(net));
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next         = tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
1367 EXPORT_SYMBOL(neigh_parms_alloc);
1368
/* RCU callback: drop the list's reference once all lockless readers
 * of the parms list have finished. */
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}
1376
/*
 * Unlink @parms from @tbl and release it.  The final put is deferred
 * via RCU (neigh_rcu_free_parms) so concurrent readers traversing the
 * parms list stay safe.  The table's built-in default parms are never
 * released this way.
 */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
1398 EXPORT_SYMBOL(neigh_parms_release);
1399
/* Final destructor: runs once the parms refcount reaches zero; drops
 * the netns reference taken in neigh_parms_alloc(). */
static void neigh_parms_destroy(struct neigh_parms *parms)
{
	release_net(neigh_parms_net(parms));
	kfree(parms);
}
1405
1406 static struct lock_class_key neigh_table_proxy_queue_class;
1407
/*
 * Initialise @tbl's entry cache, hash tables, per-cpu statistics,
 * proc entry and timers, without registering it on the global table
 * list (neigh_table_init() does that part).  Panics on allocation
 * failure, as tables are created at boot/module-init time.
 */
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	write_pnet(&tbl->parms.net, &init_net);
	atomic_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep =
			kmem_cache_create(tbl->id, tbl->entry_size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					  NULL);
	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_fops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	/* Start with a minimal two-bucket hash; it grows on demand. */
	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand  = now + tbl->parms.reachable_time * 20;
}
1456 EXPORT_SYMBOL(neigh_table_init_no_netlink);
1457
1458 void neigh_table_init(struct neigh_table *tbl)
1459 {
1460         struct neigh_table *tmp;
1461
1462         neigh_table_init_no_netlink(tbl);
1463         write_lock(&neigh_tbl_lock);
1464         for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1465                 if (tmp->family == tbl->family)
1466                         break;
1467         }
1468         tbl->next       = neigh_tables;
1469         neigh_tables    = tbl;
1470         write_unlock(&neigh_tbl_lock);
1471
1472         if (unlikely(tmp)) {
1473                 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1474                        "family %d\n", tbl->family);
1475                 dump_stack();
1476         }
1477 }
1478 EXPORT_SYMBOL(neigh_table_init);
1479
/*
 * Tear down @tbl: stop its timers, purge queued proxy skbs, flush all
 * entries, unlink it from the global table list and free its backing
 * storage.  Always returns 0.
 */
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	kmem_cache_destroy(tbl->kmem_cachep);
	tbl->kmem_cachep = NULL;

	return 0;
}
1516 EXPORT_SYMBOL(neigh_table_clear);
1517
/*
 * RTM_DELNEIGH handler: delete the (proxy) neighbour entry described
 * by the netlink request.  Ordinary entries are not freed directly;
 * they are forced to NUD_FAILED via an administrative neigh_update().
 */
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	/* Find the table matching the request's address family; the
	 * list lock is dropped once a match is found. */
	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *neigh;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
		if (neigh == NULL) {
			err = -ENOENT;
			goto out_dev_put;
		}

		/* Administratively invalidate the entry. */
		err = neigh_update(neigh, NULL, NUD_FAILED,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1583
/*
 * RTM_NEWNEIGH handler: create or update a (proxy) neighbour entry
 * from a netlink request.  NLM_F_CREATE / NLM_F_EXCL / NLM_F_REPLACE
 * carry their usual netlink semantics; NTF_PROXY targets the pneigh
 * table and NTF_USE merely kicks resolution of an existing entry.
 */
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out_dev_put;
	}

	/* Find the table matching the request's address family; the
	 * list lock is dropped once a match is found. */
	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
		struct neighbour *neigh;
		void *dst, *lladdr;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(tb[NDA_DST]) < tbl->key_len)
			goto out_dev_put;
		dst = nla_data(tb[NDA_DST]);
		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

		if (ndm->ndm_flags & NTF_PROXY) {
			struct pneigh_entry *pn;

			err = -ENOBUFS;
			pn = pneigh_lookup(tbl, net, dst, dev, 1);
			if (pn) {
				pn->flags = ndm->ndm_flags;
				err = 0;
			}
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, dst, dev);
		if (neigh == NULL) {
			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
				err = -ENOENT;
				goto out_dev_put;
			}

			neigh = __neigh_lookup_errno(tbl, dst, dev);
			if (IS_ERR(neigh)) {
				err = PTR_ERR(neigh);
				goto out_dev_put;
			}
		} else {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(neigh);
				goto out_dev_put;
			}

			/* Updating an existing entry only overrides the
			 * lladdr when REPLACE was requested. */
			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
		}

		if (ndm->ndm_flags & NTF_USE) {
			/* NTF_USE: just kick resolution, no state change. */
			neigh_event_send(neigh, NULL);
			err = 0;
		} else
			err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
		neigh_release(neigh);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1684
/*
 * Dump one neigh_parms block into @skb as a nested NDTA_PARMS
 * attribute.  The NLA_PUT* macros jump to nla_put_failure when the
 * skb runs out of tailroom, in which case the nest is cancelled.
 * Returns the total attribute length, -ENOBUFS if even the nest
 * header does not fit, or -EMSGSIZE on mid-dump overflow.
 */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if (parms->dev)
		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
1718
/*
 * Fill one RTM_NEWNEIGHTBL message for @tbl: family, gc thresholds and
 * interval, an NDTA_CONFIG snapshot, NDTA_STATS summed over all
 * possible cpus, and the table default parms as a nested NDTA_PARMS.
 *
 * tbl->lock is held for reading (BH off) across the copy-out so the
 * values reported are mutually consistent.
 *
 * Returns the value of nlmsg_end() on success or -EMSGSIZE if the
 * message did not fit in @skb.
 */
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		/* last_flush/last_rand are exported as "time since",
		 * converted from jiffies to milliseconds. */
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		/* Statistics are kept per-cpu; report the total across
		 * every possible cpu. */
		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	/* The table-wide default parms must never be device-bound. */
	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
1799
/*
 * Like neightbl_fill_info() but carries only the table name plus one
 * nested NDTA_PARMS block — used when dumping the per-device parms
 * clones attached to a table.
 *
 * Returns the value of nlmsg_end() on success or -EMSGSIZE if the
 * message did not fit in @skb.
 */
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
1831
/* Validation policy for top-level RTM_SETNEIGHTBL attributes; the
 * time value (NDTA_GC_INTERVAL) travels as a 64-bit msec quantity. */
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};
1840
/* Validation policy for attributes nested inside NDTA_PARMS.  Output-
 * only attributes emitted by neightbl_fill_parms() (NDTPA_REFCNT,
 * NDTPA_REACHABLE_TIME) have no entry here. */
static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
1856
/*
 * RTM_SETNEIGHTBL handler: locate the table by NDTA_NAME (optionally
 * filtered by the ndtmsg family), then apply the supplied per-parms
 * values and/or the table-wide gc thresholds and interval.
 *
 * Returns 0 on success, -EINVAL if NDTA_NAME is missing, -ENOENT if
 * no matching table or parms set exists, or an nlmsg_parse() /
 * nla_parse_nested() error.
 */
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
			  nl_neightbl_policy);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);
	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		/* Family 0 acts as a wildcard: match by name alone. */
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout_locked;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
				       nl_ntbl_parm_policy);
		if (err < 0)
			goto errout_tbl_lock;

		/* ifindex 0 presumably selects the table default parms
		 * — see lookup_neigh_params(). */
		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_params(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		/* Apply every supplied attribute; attributes without a
		 * case (e.g. NDTPA_IFINDEX, consumed above) are
		 * intentionally ignored here. */
		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				p->queue_len = nla_get_u32(tbp[i]);
				break;
			case NDTPA_PROXY_QLEN:
				p->proxy_qlen = nla_get_u32(tbp[i]);
				break;
			case NDTPA_APP_PROBES:
				p->app_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_UCAST_PROBES:
				p->ucast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_MCAST_PROBES:
				p->mcast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				p->base_reachable_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_GC_STALETIME:
				p->gc_staletime = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_DELAY_PROBE_TIME:
				p->delay_probe_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_RETRANS_TIME:
				p->retrans_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_ANYCAST_DELAY:
				p->anycast_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_PROXY_DELAY:
				p->proxy_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_LOCKTIME:
				p->locktime = nla_get_msecs(tbp[i]);
				break;
			}
		}
	}

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout_locked:
	read_unlock(&neigh_tbl_lock);
errout:
	return err;
}
1981
/*
 * RTM_GETNEIGHTBL dump handler: emit one table-level message per
 * matching table, followed by one message per device-specific parms
 * clone belonging to the requesting socket's netns.  Resume state for
 * multi-part dumps: cb->args[0] = table index, cb->args[1] = parms
 * index within that table.
 */
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	/* Family 0 means "all families". */
	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
		struct neigh_parms *p;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) <= 0)
			break;

		/* tbl->parms itself was covered by neightbl_fill_info();
		 * walk only the clones chained off it. */
		for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).pid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) <= 0)
				goto out;
		next:
			nidx++;
		}

		/* Skip offset applies only to the table we resumed in. */
		neigh_skip = 0;
	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
2030
/*
 * Fill one RTM_NEWNEIGH message for @neigh into @skb.
 *
 * The link-layer address (NDA_LLADDR) is emitted only while the entry
 * is in a NUD_VALID state; nud_state, ha and the cacheinfo timestamps
 * are sampled under neigh->lock so they are mutually consistent.
 *
 * Returns the value of nlmsg_end() on success, -EMSGSIZE on overflow.
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh->flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);

	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if ((neigh->nud_state & NUD_VALID) &&
	    nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
		read_unlock_bh(&neigh->lock);
		goto nla_put_failure;
	}

	/* Ages are reported relative to now, in clock_t units. */
	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	/* -1: exclude one reference, presumably the one held for this
	 * dump — see callers. */
	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2076
/*
 * Fan out a neighbour change: kernel-internal netevent listeners
 * first, then an RTM_NEWNEIGH to rtnetlink listeners.
 */
static void neigh_update_notify(struct neighbour *neigh)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
}
2082
2083 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2084                             struct netlink_callback *cb)
2085 {
2086         struct net * net = sock_net(skb->sk);
2087         struct neighbour *n;
2088         int rc, h, s_h = cb->args[1];
2089         int idx, s_idx = idx = cb->args[2];
2090
2091         read_lock_bh(&tbl->lock);
2092         for (h = 0; h <= tbl->hash_mask; h++) {
2093                 if (h < s_h)
2094                         continue;
2095                 if (h > s_h)
2096                         s_idx = 0;
2097                 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2098                         if (dev_net(n->dev) != net)
2099                                 continue;
2100                         if (idx < s_idx)
2101                                 goto next;
2102                         if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2103                                             cb->nlh->nlmsg_seq,
2104                                             RTM_NEWNEIGH,
2105                                             NLM_F_MULTI) <= 0) {
2106                                 read_unlock_bh(&tbl->lock);
2107                                 rc = -1;
2108                                 goto out;
2109                         }
2110                 next:
2111                         idx++;
2112                 }
2113         }
2114         read_unlock_bh(&tbl->lock);
2115         rc = skb->len;
2116 out:
2117         cb->args[1] = h;
2118         cb->args[2] = idx;
2119         return rc;
2120 }
2121
/*
 * RTM_GETNEIGH dump entry point: iterate every table matching the
 * requested address family (0 = all) and dump its entries.
 * cb->args[0] is the table resume index; the per-table resume state
 * in args[1..] is cleared whenever we advance past the resumed table.
 */
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
2145
2146 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2147 {
2148         int chain;
2149
2150         read_lock_bh(&tbl->lock);
2151         for (chain = 0; chain <= tbl->hash_mask; chain++) {
2152                 struct neighbour *n;
2153
2154                 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2155                         cb(n, cookie);
2156         }
2157         read_unlock_bh(&tbl->lock);
2158 }
2159 EXPORT_SYMBOL(neigh_for_each);
2160
/* The tbl->lock must be held as a writer and BH disabled. */
/*
 * Walk every hash chain and ask @cb whether each entry should be
 * released.  Entries for which @cb returns nonzero are unlinked from
 * the chain and marked dead under their own lock, then cleaned up and
 * released after that lock is dropped.
 */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				/* Unlink; np keeps pointing at the slot
				 * that now holds the successor. */
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				/* Done outside n->lock; may free n. */
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
2188
2189 #ifdef CONFIG_PROC_FS
2190
/*
 * Find the first neighbour visible to this seq_file walk: it must be
 * in the reader's netns, be accepted by the protocol's sub-iterator
 * when one is set, and pass the NEIGH_SEQ_SKIP_NOARP filter.  Clears
 * NEIGH_SEQ_IS_PNEIGH since we are (re)starting in the neighbour
 * hash.  Runs under tbl->lock taken in neigh_seq_start().
 */
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				/* Probe the sub-iterator at offset 0 to
				 * see whether it yields anything for n. */
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
2229
/*
 * Step to the next visible neighbour after @n, moving into later hash
 * buckets as chains run out.  Applies the same netns / sub-iterator /
 * NUD_NOARP filters as neigh_get_first().  When @pos is non-NULL it
 * is decremented once per entry returned, so callers can use it to
 * seek forward.  Returns NULL when the table is exhausted.
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	/* Let the sub-iterator advance within the current entry first. */
	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}
2277
2278 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2279 {
2280         struct neighbour *n = neigh_get_first(seq);
2281
2282         if (n) {
2283                 --(*pos);
2284                 while (*pos) {
2285                         n = neigh_get_next(seq, n, pos);
2286                         if (!n)
2287                                 break;
2288                 }
2289         }
2290         return *pos ? NULL : n;
2291 }
2292
/*
 * Position the iterator on the first proxy-neighbour entry in the
 * reader's netns and set NEIGH_SEQ_IS_PNEIGH so that neigh_seq_next()
 * keeps using the pneigh steppers from here on.
 */
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		/* Skip entries belonging to other namespaces. */
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}
2313
/*
 * Advance to the next proxy-neighbour entry in the reader's netns,
 * crossing hash buckets as needed.  When @pos is non-NULL it is
 * decremented once per entry returned.  Returns NULL after the last
 * bucket (PNEIGH_HASHMASK) is exhausted.
 */
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	pn = pn->next;
	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		/* Skip entries belonging to other namespaces. */
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
2338
2339 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2340 {
2341         struct pneigh_entry *pn = pneigh_get_first(seq);
2342
2343         if (pn) {
2344                 --(*pos);
2345                 while (*pos) {
2346                         pn = pneigh_get_next(seq, pn, pos);
2347                         if (!pn)
2348                                 break;
2349                 }
2350         }
2351         return *pos ? NULL : pn;
2352 }
2353
2354 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2355 {
2356         struct neigh_seq_state *state = seq->private;
2357         void *rc;
2358         loff_t idxpos = *pos;
2359
2360         rc = neigh_get_idx(seq, &idxpos);
2361         if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2362                 rc = pneigh_get_idx(seq, &idxpos);
2363
2364         return rc;
2365 }
2366
/*
 * Shared seq_file ->start for the per-protocol neighbour /proc files
 * (e.g. ARP, NDISC).  Initializes the iteration state, takes
 * tbl->lock for reading with BHs off — released in neigh_seq_stop() —
 * and returns SEQ_START_TOKEN for *pos == 0 or the entry at *pos
 * otherwise.
 */
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(tbl->lock)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	/* IS_PNEIGH is internal iterator state; callers may not set it. */
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
2381
/*
 * Shared seq_file ->next: after the header token comes the neighbour
 * hash; once that is exhausted (and unless NEIGH_SEQ_NEIGH_ONLY was
 * requested) iteration continues into the proxy-neighbour hash.  The
 * current phase is tracked via NEIGH_SEQ_IS_PNEIGH in state->flags.
 */
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		/* Neighbour hash done; fall over to the proxy hash. */
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		/* NEIGH_ONLY readers must never reach the pneigh phase. */
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
2408
/*
 * Shared seq_file ->stop: drop the table lock taken in
 * neigh_seq_start().
 */
void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(tbl->lock)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
2418
2419 /* statistics via seq_file */
2420
/*
 * ->start for the per-table statistics file: *pos == 0 yields the
 * header token; *pos == n (n >= 1) yields the counters of the first
 * possible cpu at or after n-1, with *pos rewritten to cpu + 1 so the
 * following ->next resumes after it.
 */
static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
2438
/*
 * ->next for the statistics file: return the counters of the next
 * possible cpu at or after *pos, storing cpu + 1 back into *pos.
 * Returns NULL once all possible cpus have been visited.
 */
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
2453
/* Nothing to release: ->start takes no locks for the stats file. */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
2458
/*
 * Emit one line of /proc/net/stat/<table>: the column header for the
 * start token, otherwise the per-cpu counters in @v.  Note that
 * "entries" is a table-wide count and therefore repeats identically
 * on every per-cpu row.
 */
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards\n");
		return 0;
	}

	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
			"%08lx %08lx  %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards
		   );

	return 0;
}
2493
/* seq_file iterator for the per-table statistics: header row, then
 * one row per possible cpu. */
static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
2500
2501 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2502 {
2503         int ret = seq_open(file, &neigh_stat_seq_ops);
2504
2505         if (!ret) {
2506                 struct seq_file *sf = file->private_data;
2507                 sf->private = PDE(inode);
2508         }
2509         return ret;
2510 };
2511
/* file_operations for /proc/net/stat/<table>: a plain seq_file. */
static const struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
2519
2520 #endif /* CONFIG_PROC_FS */
2521
2522 static inline size_t neigh_nlmsg_size(void)
2523 {
2524         return NLMSG_ALIGN(sizeof(struct ndmsg))
2525                + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2526                + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2527                + nla_total_size(sizeof(struct nda_cacheinfo))
2528                + nla_total_size(4); /* NDA_PROBES */
2529 }
2530
/*
 * Build a neighbour message of @type for @n and multicast it to
 * RTNLGRP_NEIGH listeners.  Runs in atomic context (GFP_ATOMIC); on
 * failure the error is recorded against the group via
 * rtnl_set_sk_err().
 */
static void __neigh_notify(struct neighbour *n, int type, int flags)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, 0, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
2554
2555 #ifdef CONFIG_ARPD
/*
 * Ask a user space ARP daemon to resolve/verify @n by emitting an
 * RTM_GETNEIGH request (NLM_F_REQUEST) to RTNLGRP_NEIGH listeners.
 */
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
EXPORT_SYMBOL(neigh_app_ns);
2561 #endif /* CONFIG_ARPD */
2562
2563 #ifdef CONFIG_SYSCTL
2564
/*
 * Template for the per-device/per-protocol neighbour sysctl tables.
 * neigh_sysctl_register() kmemdup()s this and patches .data pointers
 * (and handlers) BY POSITION, so the entry order below is part of the
 * contract with that function — each entry is tagged with its index.
 * Entries 14-17 (gc_*) exist only in the "default" table; they are
 * truncated away for per-device tables.
 */
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[__NET_NEIGH_MAX];
	char *dev_name;		/* kstrdup()ed copy owned by this table */
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		{ /* 0: mcast_probes */
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ /* 1: ucast_probes */
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ /* 2: app_probes */
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ /* 3: retrans_time (no ctl_name: proc-only entry) */
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		{ /* 4: base_reachable_time */
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
			.strategy	= sysctl_jiffies,
		},
		{ /* 5: delay_probe_time */
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
			.strategy	= sysctl_jiffies,
		},
		{ /* 6: gc_staletime */
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
			.strategy	= sysctl_jiffies,
		},
		{ /* 7: queue_len */
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ /* 8: proxy_qlen */
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ /* 9: anycast_delay (no ctl_name: proc-only entry) */
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		{ /* 10: proxy_delay (no ctl_name: proc-only entry) */
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		{ /* 11: locktime (no ctl_name: proc-only entry) */
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		{ /* 12: retrans_time, millisecond view of entry 3 */
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_ms_jiffies,
			.strategy	= sysctl_ms_jiffies,
		},
		{ /* 13: base_reachable_time, millisecond view of entry 4 */
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_ms_jiffies,
			.strategy	= sysctl_ms_jiffies,
		},
		{ /* 14: gc_interval (default table only) */
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
			.strategy	= sysctl_jiffies,
		},
		{ /* 15: gc_thresh1 (default table only) */
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ /* 16: gc_thresh2 (default table only) */
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ /* 17: gc_thresh3 (default table only) */
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{},	/* sentinel */
	},
};
2702
/*
 * Register the neighbour sysctl tree for one parms instance, under
 * net/<p_name>/neigh/<dev-or-"default">/.  The template's entries are
 * patched by index; their order must match neigh_sysctl_template.
 * Protocols may override the proc handler/strategy for the four time
 * entries (3, 4, 12, 13).  Returns 0 on success, -ENOBUFS on any
 * allocation or registration failure.  The duplicated table and the
 * dev_name string are owned by p->sysctl_table until
 * neigh_sysctl_unregister() frees them.
 */
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t;
	const char *dev_name_source = NULL;

/* Indices of the components of neigh_path[] below. */
#define NEIGH_CTL_PATH_ROOT	0
#define NEIGH_CTL_PATH_PROTO	1
#define NEIGH_CTL_PATH_NEIGH	2
#define NEIGH_CTL_PATH_DEV	3

	struct ctl_path neigh_path[] = {
		{ .procname = "net",	 .ctl_name = CTL_NET, },
		{ .procname = "proto",	 .ctl_name = 0, },
		{ .procname = "neigh",	 .ctl_name = 0, },
		{ .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
		{ },
	};

	/* Private writable copy of the template for this parms instance. */
	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	/* Point each template entry at the matching field of *p.
	 * Indices are positional — keep in sync with the template. */
	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;
	/* ms-granularity aliases of entries 3 and 4 */
	t->neigh_vars[12].data  = &p->retrans_time;
	t->neigh_vars[13].data  = &p->base_reachable_time;

	if (dev) {
		dev_name_source = dev->name;
		neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
		/* Terminate the table early: per-device tables have no
		 * gc_* entries (indices 14-17). */
		memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
	} else {
		/* Default table: wire the gc_* entries (14-17) to the
		 * four ints that follow the parms block.
		 * NOTE(review): assumes gc_interval/gc_thresh1-3 are laid
		 * out immediately after struct neigh_parms — confirm
		 * against the enclosing structure's definition. */
		dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
		t->neigh_vars[14].data = (int *)(p + 1);
		t->neigh_vars[15].data = (int *)(p + 1) + 1;
		t->neigh_vars[16].data = (int *)(p + 1) + 2;
		t->neigh_vars[17].data = (int *)(p + 1) + 3;
	}


	if (handler || strategy) {
		/* Protocol-specific override for the time-valued entries.
		 * Without a binary (sysctl(2)) strategy the entry must not
		 * keep its numeric ctl_name, hence CTL_UNNUMBERED. */
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		if (!strategy)
			t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		if (!strategy)
			t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[12].proc_handler = handler;
		t->neigh_vars[12].strategy = strategy;
		t->neigh_vars[12].extra1 = dev;
		if (!strategy)
			t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[13].proc_handler = handler;
		t->neigh_vars[13].strategy = strategy;
		t->neigh_vars[13].extra1 = dev;
		if (!strategy)
			t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
	}

	/* Own copy of the directory name: dev->name may change/go away. */
	t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!t->dev_name)
		goto free;

	neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
	neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
	neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
	neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;

	t->sysctl_header =
		register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free_procname;

	p->sysctl_table = t;
	return 0;

	/* Unwind in reverse order of acquisition. */
free_procname:
	kfree(t->dev_name);
free:
	kfree(t);
err:
	return -ENOBUFS;
}
2807 EXPORT_SYMBOL(neigh_sysctl_register);
2808
2809 void neigh_sysctl_unregister(struct neigh_parms *p)
2810 {
2811         if (p->sysctl_table) {
2812                 struct neigh_sysctl_table *t = p->sysctl_table;
2813                 p->sysctl_table = NULL;
2814                 unregister_sysctl_table(t->sysctl_header);
2815                 kfree(t->dev_name);
2816                 kfree(t);
2817         }
2818 }
2819 EXPORT_SYMBOL(neigh_sysctl_unregister);
2820
2821 #endif  /* CONFIG_SYSCTL */
2822
/*
 * Register the rtnetlink handlers for neighbour entries (add/delete/
 * dump) and neighbour tables (dump/set).  Runs once at boot via
 * subsys_initcall.
 */
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);

	return 0;
}

subsys_initcall(neigh_init);
2836