/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif
#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
#ifdef CONFIG_ARPD
static void neigh_app_notify(struct neighbour *n);
#endif
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static struct file_operations neigh_stat_seq_fops;
#endif
/*
   Neighbour hash table buckets are protected with the rwlock tbl->lock.

   - All scans/updates of hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the
     network. Doing so will result in deadlocks if the backend/driver
     wants to use the neighbour cache.
   - If an entry requires some non-trivial action, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity
   state. However, the same lock is also used to protect other entry
   fields: the timer and the resolution queue.

   Again, nothing clever shall be done under neigh->lock; the most
   complicated procedure we allow there is dev->hard_header. It is
   assumed that dev->hard_header is simple and does not make callbacks
   into the neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process
   context.
 */
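
/* A minimal sketch (not from this file) of the pattern the rules above
 * imply: take tbl->lock only long enough to find the entry and grab a
 * reference, then do any heavyweight work with the lock dropped.
 * find_in_bucket() and do_slow_work() are hypothetical helpers.
 *
 *	struct neighbour *n;
 *
 *	read_lock_bh(&tbl->lock);
 *	n = find_in_bucket(tbl, pkey, dev);
 *	if (n)
 *		neigh_hold(n);
 *	read_unlock_bh(&tbl->lock);
 *
 *	if (n) {
 *		do_slow_work(n);	// may call drivers, send packets
 *		neigh_release(n);
 *	}
 */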
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

/*
 * The result is a random distribution in the interval
 * (1/2)*base...(3/2)*base. It corresponds to the default IPv6 settings
 * and is not overridable, because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (net_random() % base) + (base >> 1) : 0;
}
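
/* Worked example (illustrative): net_random() % base is uniform in
 * [0, base), so adding base >> 1 yields a value uniform in
 * [base/2, 3*base/2). With base == 30 * HZ:
 *
 *	unsigned long t = neigh_rand_reach_time(30 * HZ);
 *	// 15 * HZ <= t < 45 * HZ, centred on the configured base
 */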
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	write_lock_bh(&tbl->lock);

	for (i=0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock_bh(&n->lock);
			n->dead = 1;
			neigh_del_timer(n);
			write_unlock_bh(&n->lock);
			neigh_release(n);
		}
	}

	write_unlock_bh(&tbl->lock);
}
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	write_lock_bh(&tbl->lock);

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation:
				   we must destroy the neighbour entry,
				   but someone still uses it.

				   Destruction will be delayed until
				   the last user releases it, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_release(n);
		}
	}

	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (!n)
		goto out_entries;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data	  = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
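
/* Illustrative summary (not from this file) of how the thresholds in
 * neigh_alloc() interact, assuming the usual gc_thresh semantics:
 *
 *	entries < gc_thresh2                    -> allocate immediately
 *	entries >= gc_thresh2 (flush > 5s ago)  -> run forced GC first
 *	entries >= gc_thresh3                   -> run forced GC; fail
 *	                                           if nothing was freed
 */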
static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kmalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC, get_order(size));
	}
	if (ret)
		memset(ret, 0, size);

	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}
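
/* Worked example (illustrative): with 8-byte pointers and 4096-byte
 * pages, up to 512 buckets fit in one kmalloc'ed allocation; a
 * 1024-bucket table needs 8192 bytes, so get_order(8192) == 1 and the
 * allocation falls back to __get_free_pages() with two pages.
 */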
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(new_entries & (new_entries - 1));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
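
/* Illustrative note: because new_entries is enforced to be a power of
 * two (the BUG_ON above), masking with new_hash_mask is equivalent to,
 * and cheaper than, a modulo:
 *
 *	hash_val & (n - 1) == hash_val % n	// when n is a power of 2
 *
 * e.g. hash_val = 0x1234, n = 16: 0x1234 & 0xF == 4 == 0x1234 % 16.
 */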
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
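
/* Usage sketch (not from this file): both lookup variants return the
 * entry with its reference count raised via neigh_hold(), so a caller
 * must balance it with neigh_release(). tbl, pkey and dev stand for a
 * caller's table, key and device.
 *
 *	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
 *	if (n) {
 *		// use n->ha, n->nud_state, ...
 *		neigh_release(n);
 *	}
 */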
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
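
/* Usage sketch (not from this file): neigh_create() never returns NULL;
 * it returns either a referenced entry (possibly an existing one that
 * won the race) or an ERR_PTR() value, so callers check with IS_ERR():
 *
 *	struct neighbour *n = neigh_create(tbl, pkey, dev);
 *	if (IS_ERR(n))
 *		return PTR_ERR(n);
 *	// ... use n ...
 *	neigh_release(n);
 */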
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
/*
 *	The neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;
		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	if (neigh->ops && neigh->ops->destructor)
		(neigh->ops->destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/* Neighbour state is suspicious:
   disable the fast path.

   Called with a write-locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK:
   enable the fast path.

   Called with a write-locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from the random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
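
/* Worked example (illustrative): with base_reachable_time == 30 * HZ
 * and a 128-bucket table (hash_mask == 127),
 *
 *	expire = (30 * HZ / 2) / 128;	// ~117 jiffies at HZ == 1000
 *
 * so one bucket is scanned roughly every 117ms and the whole table is
 * covered about every 15 seconds, i.e. every base_reachable_time/2.
 */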
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}

static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh_suspect(neigh);
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh_connect(neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a very thin place. report_unreachable is a very
		   complicated routine. Particularly, it can hit the same
		   neighbour entry!
		   So we try to be accurate and avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		neigh_hold(neigh);
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		neigh_add_timer(neigh, next);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);
}
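
/* Illustrative summary (not from this file) of the transitions the
 * handler above implements:
 *
 *	NUD_REACHABLE --(confirmation too old)--> NUD_DELAY or NUD_STALE
 *	NUD_DELAY ----(confirmed recently)------> NUD_REACHABLE
 *	NUD_DELAY ----(no confirmation)---------> NUD_PROBE
 *	NUD_PROBE/NUD_INCOMPLETE --(probes exhausted)--> NUD_FAILED
 *
 * While in NUD_INCOMPLETE or NUD_PROBE the handler also emits one
 * solicitation per timer expiry via neigh->ops->solicit().
 */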
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh_hold(neigh);
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_lock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_unlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if the new one is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows retaining the current state
				if the lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				to be a router.

   Caller MUST hold a reference count on the entry.
 */
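
/* Usage sketch (not from this file): a protocol receive path that has
 * just learned a link-layer address might call, for example,
 *
 *	neigh_update(n, lladdr, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_WEAK_OVERRIDE |
 *		     NEIGH_UPDATE_F_OVERRIDE);
 *
 * while an administrative change from netlink adds
 * NEIGH_UPDATE_F_ADMIN so that NUD_PERMANENT/NUD_NOARP entries may
 * also be modified (see the check at the top of the function below).
 */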
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
#ifdef CONFIG_ARPD
	int notify = 0;
#endif
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
#ifdef CONFIG_ARPD
		notify = old & NUD_VALID;
#endif
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address is unchanged,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
#ifdef CONFIG_ARPD
		notify = 1;
#endif
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
				(neigh->flags | NTF_ROUTER) :
				(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache	*hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		memset(hh, 0, sizeof(struct hh_cache));
		rwlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);

		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh)	{
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper),
   but resolution is not made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}
/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
/* As fast as possible without the hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
			       neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		memcpy(p, &tbl->parms, sizeof(*p));
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
		if (dev) {
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
void neigh_table_init(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     tbl->entry_size,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);

	if (!tbl->kmem_cachep)
		panic("cannot create neighbour cache");

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	memset(tbl->phash_buckets, 0, phsize);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data	  = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
	write_lock(&neigh_tbl_lock);
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);
}
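
/* Usage sketch (illustrative, with hypothetical values): a protocol
 * embeds its parameters in a static struct neigh_table and registers
 * it once at init time, roughly as the ARP and ND code do:
 *
 *	static struct neigh_table my_tbl = {
 *		.family		= AF_INET,
 *		.entry_size	= sizeof(struct neighbour) + 4,
 *		.key_len	= 4,
 *		.hash		= my_hash,		// hypothetical
 *		.constructor	= my_constructor,	// hypothetical
 *		.id		= "my_cache",
 *		// .parms filled with queue_len, reachable time, ...
 *	};
 *
 *	neigh_table_init(&my_tbl);
 */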
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	return 0;
}
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
			goto out_dev_put;
		}

		if (!dev)
			goto out;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			err = neigh_update(n, NULL, NUD_FAILED,
					   NEIGH_UPDATE_F_OVERRIDE|
					   NEIGH_UPDATE_F_ADMIN);
			neigh_release(n);
		}
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		int override = 1;
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = -ENOBUFS;
			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
				err = 0;
			goto out_dev_put;
		}

		err = -EINVAL;
		if (!dev)
			goto out;
		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
			goto out_dev_put;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(n);
				goto out_dev_put;
			}

			override = nlh->nlmsg_flags & NLM_F_REPLACE;
		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out_dev_put;
		} else {
			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
			if (IS_ERR(n)) {
				err = PTR_ERR(n);
				goto out_dev_put;
			}
		}

		err = neigh_update(n,
				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
				   ndm->ndm_state,
				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
				   NEIGH_UPDATE_F_ADMIN);

		neigh_release(n);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct rtattr *nest = NULL;

	nest = RTA_NEST(skb, NDTA_PARMS);

	if (parms->dev)
		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return RTA_NEST_END(skb, nest);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, nest);
}
static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			struct neigh_statistics	*st;

			if (!cpu_possible(cpu))
				continue;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
static int neightbl_fill_param_info(struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;
	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);

	if (neightbl_fill_parms(skb, parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next)
		if ((p->dev && p->dev->ifindex == ifindex) ||
		    (!p->dev && !ifindex))
			return p;

	return NULL;
}
int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
	struct rtattr **tb = arg;
	int err = -EINVAL;

	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
		return -EINVAL;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_THRESH1 - 1])
		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);

	if (tb[NDTA_THRESH2 - 1])
		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);

	if (tb[NDTA_THRESH3 - 1])
		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);

	if (tb[NDTA_GC_INTERVAL - 1])
		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);

	if (tb[NDTA_PARMS - 1]) {
		struct rtattr *tbp[NDTPA_MAX];
		struct neigh_parms *p;
		u32 ifindex = 0;

		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
			goto rtattr_failure;

		if (tbp[NDTPA_IFINDEX - 1])
			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto rtattr_failure;
		}

		if (tbp[NDTPA_QUEUE_LEN - 1])
			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);

		if (tbp[NDTPA_PROXY_QLEN - 1])
			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);

		if (tbp[NDTPA_APP_PROBES - 1])
			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);

		if (tbp[NDTPA_UCAST_PROBES - 1])
			p->ucast_probes =
				RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);

		if (tbp[NDTPA_MCAST_PROBES - 1])
			p->mcast_probes =
				RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);

		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
			p->base_reachable_time =
				RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);

		if (tbp[NDTPA_GC_STALETIME - 1])
			p->gc_staletime =
				RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);

		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
			p->delay_probe_time =
				RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);

		if (tbp[NDTPA_RETRANS_TIME - 1])
			p->retrans_time =
				RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);

		if (tbp[NDTPA_ANYCAST_DELAY - 1])
			p->anycast_delay =
				RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);

		if (tbp[NDTPA_PROXY_DELAY - 1])
			p->proxy_delay =
				RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);

		if (tbp[NDTPA_LOCKTIME - 1])
			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
	}

	err = 0;

rtattr_failure:
	write_unlock_bh(&tbl->lock);
errout:
	read_unlock(&neigh_tbl_lock);
	return err;
}
int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx, family;
	int s_idx = cb->args[0];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
		struct neigh_parms *p;

		if (idx < s_idx || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(tbl, skb, cb) <= 0)
			break;

		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
			if (idx < s_idx)
				continue;

			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
				goto out;
		}
	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = idx;

	return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
			   u32 pid, u32 seq, int event, unsigned int flags)
{
	unsigned long now = jiffies;
	unsigned char *b = skb->tail;
	struct nda_cacheinfo ci;
	int locked = 0;
	u32 probes;
	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
					 sizeof(struct ndmsg), flags);
	struct ndmsg *ndm = NLMSG_DATA(nlh);

	ndm->ndm_family	 = n->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = n->flags;
	ndm->ndm_type	 = n->type;
	ndm->ndm_ifindex = n->dev->ifindex;
	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
	read_lock_bh(&n->lock);
	locked		 = 1;
	ndm->ndm_state	 = n->nud_state;
	if (n->nud_state & NUD_VALID)
		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
	ci.ndm_used	 = now - n->used;
	ci.ndm_confirmed = now - n->confirmed;
	ci.ndm_updated	 = now - n->updated;
	ci.ndm_refcnt	 = atomic_read(&n->refcnt) - 1;
	probes = atomic_read(&n->probes);
	read_unlock_bh(&n->lock);
	locked		 = 0;
	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
	nlh->nlmsg_len	 = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	if (locked)
		read_unlock_bh(&n->lock);
	skb_trim(skb, b - skb->data);
	return -1;
}
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		read_lock_bh(&tbl->lock);
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
		read_unlock_bh(&tbl->lock);
	}
	rc = skb->len;
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;

	read_lock_bh(&tbl->lock);
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n;

		for (n = tbl->hash_buckets[chain]; n; n = n->next)
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);

/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}

static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	pn = pn->next;
	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;

	rc = neigh_get_idx(seq, pos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, pos);

	return rc;
}

void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
{
	struct neigh_seq_state *state = seq->private;
	loff_t pos_minus_one;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	pos_minus_one = *pos - 1;
	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);

void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),
		   st->allocs,
		   st->destroys,
		   st->hash_grows,
		   st->lookups,
		   st->hits,
		   st->res_failed,
		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,
		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );

	return 0;
}
static struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode);
	}
	return ret;
};

static struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			  = (struct nlmsghdr *)skb->data;
	nlh->nlmsg_flags	  = NLM_F_REQUEST;
	NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}

static void neigh_app_notify(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			  = (struct nlmsghdr *)skb->data;
	NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}

#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table		neigh_vars[__NET_NEIGH_MAX];
	ctl_table		neigh_dev[2];
	ctl_table		neigh_neigh_dir[2];
	ctl_table		neigh_proto_dir[2];
	ctl_table		neigh_root_dir[2];
} neigh_sysctl_template = {
	.neigh_vars = {
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME,
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_DELAY,
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_LOCKTIME,
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
	},
	.neigh_dev = {
		{
			.ctl_name	= NET_PROTO_CONF_DEFAULT,
			.procname	= "default",
			.mode		= 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname	= "neigh",
			.mode		= 0555,
		},
	},
	.neigh_proto_dir = {
		{
			.mode		= 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name	= CTL_NET,
			.procname	= "net",
			.mode		= 0555,
		},
	},
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
	const char *dev_name_source = NULL;
	char *dev_name = NULL;
	int err = 0;

	if (!t)
		return -ENOBUFS;
	memcpy(t, &neigh_sysctl_template, sizeof(*t));
	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;

	if (dev) {
		dev_name_source = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		t->neigh_vars[12].procname = NULL;
		t->neigh_vars[13].procname = NULL;
		t->neigh_vars[14].procname = NULL;
		t->neigh_vars[15].procname = NULL;
	} else {
		dev_name_source = t->neigh_dev[0].procname;
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
		t->neigh_vars[15].data = (int *)(p + 1) + 3;
	}

	t->neigh_vars[16].data = &p->retrans_time;
	t->neigh_vars[17].data = &p->base_reachable_time;

	if (handler || strategy) {
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[16].proc_handler = handler;
		t->neigh_vars[16].strategy = strategy;
		t->neigh_vars[16].extra1 = dev;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[17].proc_handler = handler;
		t->neigh_vars[17].strategy = strategy;
		t->neigh_vars[17].extra1 = dev;
	}

	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!dev_name) {
		err = -ENOBUFS;
		goto free;
	}

	t->neigh_dev[0].procname = dev_name;

	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	t->neigh_dev[0].child	    = t->neigh_vars;
	t->neigh_neigh_dir[0].child = t->neigh_dev;
	t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child  = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
	if (!t->sysctl_header) {
		err = -ENOBUFS;
		goto free_procname;
	}
	p->sysctl_table = t;
	return 0;

	/* error path */
free_procname:
	kfree(dev_name);
free:
	kfree(t);

	return err;
}
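
/* Usage sketch (not from this file): a protocol registers one sysctl
 * subtree per neigh_parms instance; for example the IPv4 ARP code does,
 * approximately,
 *
 *	neigh_sysctl_register(dev, p, NET_IPV4, NET_IPV4_NEIGH,
 *			      "ipv4", NULL, NULL);
 *
 * which creates /proc/sys/net/ipv4/neigh/<dev>/... entries backed by
 * the neigh_vars table above.
 */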
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t->neigh_dev[0].procname);
		kfree(t);
	}
}

#endif	/* CONFIG_SYSCTL */
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_add);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_delete);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
EXPORT_SYMBOL(neightbl_dump_info);
EXPORT_SYMBOL(neightbl_set);

#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif