/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif
#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void neigh_app_notify(struct neighbour *n);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
static struct file_operations neigh_stat_seq_fops;
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     to protocol backends, no attempts to send something to the network.
     That would result in deadlocks if a backend/driver wanted to use
     the neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
   - timer
   - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated operation we allow is dev->hard_header.
   It is assumed that dev->hard_header is simple and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock protecting the
   list of neighbour tables. This list is used only in process context.
 */
static DEFINE_RWLOCK(neigh_tbl_lock);
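/*
 * Editor's sketch (not part of the original file): the access pattern the
 * locking comment above prescribes. Non-trivial work on an entry found
 * during a bucket scan is done only after taking a reference and dropping
 * the table lock; matches() and do_something_with() are hypothetical
 * placeholders, while neigh_hold()/neigh_release() are the real refcount
 * helpers from <net/neighbour.h>.
 *
 *	struct neighbour *n, *found = NULL;
 *
 *	read_lock_bh(&tbl->lock);
 *	for (n = tbl->hash_buckets[h]; n; n = n->next) {
 *		if (matches(n)) {
 *			neigh_hold(n);		// pin entry under the lock
 *			found = n;
 *			break;
 *		}
 *	}
 *	read_unlock_bh(&tbl->lock);
 *	if (found) {
 *		do_something_with(found);	// may call into backends now
 *		neigh_release(found);		// drop our reference
 *	}
 */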
static int neigh_blackhole(struct sk_buff *skb)
{
    kfree_skb(skb);
    return -ENETDOWN;
}
/*
 * This is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
    return (base ? (net_random() % base) + (base >> 1) : 0);
}
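/*
 * Editor's note (worked example): with base = 30 * HZ,
 * net_random() % base is uniform over [0, 30*HZ) and base >> 1 adds
 * 15*HZ, so the result is uniform over [15*HZ, 45*HZ) -- exactly the
 * (1/2)*base ... (3/2)*base interval described above.
 */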
static int neigh_forced_gc(struct neigh_table *tbl)
{
    int shrunk = 0;
    int i;

    NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

    write_lock_bh(&tbl->lock);
    for (i = 0; i <= tbl->hash_mask; i++) {
        struct neighbour *n, **np;

        np = &tbl->hash_buckets[i];
        while ((n = *np) != NULL) {
            /* Neighbour record may be discarded if:
             * - nobody refers to it.
             * - it is not permanent
             */
            write_lock(&n->lock);
            if (atomic_read(&n->refcnt) == 1 &&
                !(n->nud_state & NUD_PERMANENT)) {
                *np = n->next;
                n->dead = 1;
                shrunk = 1;
                write_unlock(&n->lock);
                neigh_release(n);
                continue;
            }
            write_unlock(&n->lock);
            np = &n->next;
        }
    }

    tbl->last_flush = jiffies;
    write_unlock_bh(&tbl->lock);

    return shrunk;
}
static int neigh_del_timer(struct neighbour *n)
{
    if ((n->nud_state & NUD_IN_TIMER) &&
        del_timer(&n->timer)) {
        neigh_release(n);
        return 1;
    }
    return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
    struct sk_buff *skb;

    while ((skb = skb_dequeue(list)) != NULL) {
        dev_put(skb->dev);
        kfree_skb(skb);
    }
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
    int i;

    for (i = 0; i <= tbl->hash_mask; i++) {
        struct neighbour *n, **np = &tbl->hash_buckets[i];

        while ((n = *np) != NULL) {
            if (dev && n->dev != dev) {
                np = &n->next;
                continue;
            }
            *np = n->next;
            write_lock(&n->lock);
            neigh_del_timer(n);
            n->dead = 1;

            if (atomic_read(&n->refcnt) != 1) {
                /* The most unpleasant situation:
                   we must destroy the neighbour entry,
                   but someone still uses it.

                   The destroy will be delayed until
                   the last user releases us, but
                   we must kill timers etc. and move
                   it to a safe state.
                 */
                skb_queue_purge(&n->arp_queue);
                n->output = neigh_blackhole;
                if (n->nud_state & NUD_VALID)
                    n->nud_state = NUD_NOARP;
                else
                    n->nud_state = NUD_NONE;
                NEIGH_PRINTK2("neigh %p is stray.\n", n);
            }
            write_unlock(&n->lock);
            neigh_release(n);
        }
    }
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
    write_lock_bh(&tbl->lock);
    neigh_flush_dev(tbl, dev);
    write_unlock_bh(&tbl->lock);
}

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
    write_lock_bh(&tbl->lock);
    neigh_flush_dev(tbl, dev);
    pneigh_ifdown(tbl, dev);
    write_unlock_bh(&tbl->lock);

    del_timer_sync(&tbl->proxy_timer);
    pneigh_queue_purge(&tbl->proxy_queue);
    return 0;
}
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
    struct neighbour *n = NULL;
    unsigned long now = jiffies;
    int entries;

    entries = atomic_inc_return(&tbl->entries) - 1;
    if (entries >= tbl->gc_thresh3 ||
        (entries >= tbl->gc_thresh2 &&
         time_after(now, tbl->last_flush + 5 * HZ))) {
        if (!neigh_forced_gc(tbl) &&
            entries >= tbl->gc_thresh3)
            goto out_entries;
    }

    n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
    if (!n)
        goto out_entries;

    memset(n, 0, tbl->entry_size);

    skb_queue_head_init(&n->arp_queue);
    rwlock_init(&n->lock);
    n->updated        = n->used = now;
    n->nud_state      = NUD_NONE;
    n->output         = neigh_blackhole;
    n->parms          = neigh_parms_clone(&tbl->parms);
    init_timer(&n->timer);
    n->timer.function = neigh_timer_handler;
    n->timer.data     = (unsigned long)n;

    NEIGH_CACHE_STAT_INC(tbl, allocs);
    n->tbl            = tbl;
    atomic_set(&n->refcnt, 1);
    n->dead           = 1;
out:
    return n;

out_entries:
    atomic_dec(&tbl->entries);
    goto out;
}
static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
    unsigned long size = entries * sizeof(struct neighbour *);
    struct neighbour **ret;

    if (size <= PAGE_SIZE) {
        ret = kmalloc(size, GFP_ATOMIC);
    } else {
        ret = (struct neighbour **)
              __get_free_pages(GFP_ATOMIC, get_order(size));
    }
    if (ret)
        memset(ret, 0, size);

    return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
    unsigned long size = entries * sizeof(struct neighbour *);

    if (size <= PAGE_SIZE)
        kfree(hash);
    else
        free_pages((unsigned long)hash, get_order(size));
}
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
    struct neighbour **new_hash, **old_hash;
    unsigned int i, new_hash_mask, old_entries;

    NEIGH_CACHE_STAT_INC(tbl, hash_grows);

    BUG_ON(new_entries & (new_entries - 1));
    new_hash = neigh_hash_alloc(new_entries);
    if (!new_hash)
        return;

    old_entries = tbl->hash_mask + 1;
    new_hash_mask = new_entries - 1;
    old_hash = tbl->hash_buckets;

    get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
    for (i = 0; i < old_entries; i++) {
        struct neighbour *n, *next;

        for (n = old_hash[i]; n; n = next) {
            unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

            hash_val &= new_hash_mask;
            next = n->next;

            n->next = new_hash[hash_val];
            new_hash[hash_val] = n;
        }
    }
    tbl->hash_buckets = new_hash;
    tbl->hash_mask = new_hash_mask;

    neigh_hash_free(old_hash, old_entries);
}
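/*
 * Editor's note: the BUG_ON() above pins the table size to a power of
 * two, so "hash_val & new_hash_mask" is a cheap "hash_val % new_entries".
 * E.g. growing to new_entries = 16 gives new_hash_mask = 0xF, and an
 * entry hashing to 0x2A is rechained into bucket 0x2A & 0xF = 0xA.
 */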
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
    struct neighbour *n;
    int key_len = tbl->key_len;
    u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

    NEIGH_CACHE_STAT_INC(tbl, lookups);

    read_lock_bh(&tbl->lock);
    for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
        if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
            neigh_hold(n);
            NEIGH_CACHE_STAT_INC(tbl, hits);
            break;
        }
    }
    read_unlock_bh(&tbl->lock);
    return n;
}
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
    struct neighbour *n;
    int key_len = tbl->key_len;
    u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;

    NEIGH_CACHE_STAT_INC(tbl, lookups);

    read_lock_bh(&tbl->lock);
    for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
        if (!memcmp(n->primary_key, pkey, key_len)) {
            neigh_hold(n);
            NEIGH_CACHE_STAT_INC(tbl, hits);
            break;
        }
    }
    read_unlock_bh(&tbl->lock);
    return n;
}
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
    u32 hash_val;
    int key_len = tbl->key_len;
    int error;
    struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

    if (!n) {
        rc = ERR_PTR(-ENOBUFS);
        goto out;
    }

    memcpy(n->primary_key, pkey, key_len);
    n->dev = dev;
    dev_hold(dev);

    /* Protocol specific setup. */
    if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
        rc = ERR_PTR(error);
        goto out_neigh_release;
    }

    /* Device specific setup. */
    if (n->parms->neigh_setup &&
        (error = n->parms->neigh_setup(n)) < 0) {
        rc = ERR_PTR(error);
        goto out_neigh_release;
    }

    n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

    write_lock_bh(&tbl->lock);

    if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
        neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

    hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

    if (n->parms->dead) {
        rc = ERR_PTR(-EINVAL);
        goto out_tbl_unlock;
    }

    for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
        if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
            neigh_hold(n1);
            rc = n1;
            goto out_tbl_unlock;
        }
    }

    n->next = tbl->hash_buckets[hash_val];
    tbl->hash_buckets[hash_val] = n;
    n->dead = 0;
    neigh_hold(n);
    write_unlock_bh(&tbl->lock);
    NEIGH_PRINTK2("neigh %p is created.\n", n);
    rc = n;
out:
    return rc;
out_tbl_unlock:
    write_unlock_bh(&tbl->lock);
out_neigh_release:
    neigh_release(n);
    goto out;
}
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
                                    struct net_device *dev, int creat)
{
    struct pneigh_entry *n;
    int key_len = tbl->key_len;
    u32 hash_val = *(u32 *)(pkey + key_len - 4);

    hash_val ^= (hash_val >> 16);
    hash_val ^= hash_val >> 8;
    hash_val ^= hash_val >> 4;
    hash_val &= PNEIGH_HASHMASK;
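    /*
     * Editor's note (worked example): the xor folding above compresses
     * the last four key bytes into a 4-bit bucket index. Taking
     * hash_val = 0xC0A80001 (an IPv4 key, 192.168.0.1):
     *
     *	h ^= h >> 16;	h == 0xC0A8C0A9
     *	h ^= h >> 8;	h == 0xC0686869
     *	h ^= h >> 4;	h == 0xCC6EEEEF
     *	h &= 0xF;	h == 0xF  (bucket 15)
     */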
    read_lock_bh(&tbl->lock);

    for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
        if (!memcmp(n->key, pkey, key_len) &&
            (n->dev == dev || !n->dev)) {
            read_unlock_bh(&tbl->lock);
            goto out;
        }
    }
    read_unlock_bh(&tbl->lock);
    n = NULL;
    if (!creat)
        goto out;

    n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
    if (!n)
        goto out;

    memcpy(n->key, pkey, key_len);
    n->dev = dev;
    if (dev)
        dev_hold(dev);

    if (tbl->pconstructor && tbl->pconstructor(n)) {
        if (dev)
            dev_put(dev);
        kfree(n);
        n = NULL;
        goto out;
    }

    write_lock_bh(&tbl->lock);
    n->next = tbl->phash_buckets[hash_val];
    tbl->phash_buckets[hash_val] = n;
    write_unlock_bh(&tbl->lock);
out:
    return n;
}
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
                  struct net_device *dev)
{
    struct pneigh_entry *n, **np;
    int key_len = tbl->key_len;
    u32 hash_val = *(u32 *)(pkey + key_len - 4);

    hash_val ^= (hash_val >> 16);
    hash_val ^= hash_val >> 8;
    hash_val ^= hash_val >> 4;
    hash_val &= PNEIGH_HASHMASK;

    write_lock_bh(&tbl->lock);
    for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
         np = &n->next) {
        if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
            *np = n->next;
            write_unlock_bh(&tbl->lock);
            if (tbl->pdestructor)
                tbl->pdestructor(n);
            if (n->dev)
                dev_put(n->dev);
            kfree(n);
            return 0;
        }
    }
    write_unlock_bh(&tbl->lock);
    return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
    struct pneigh_entry *n, **np;
    u32 h;

    for (h = 0; h <= PNEIGH_HASHMASK; h++) {
        np = &tbl->phash_buckets[h];
        while ((n = *np) != NULL) {
            if (!dev || n->dev == dev) {
                *np = n->next;
                if (tbl->pdestructor)
                    tbl->pdestructor(n);
                if (n->dev)
                    dev_put(n->dev);
                kfree(n);
                continue;
            }
            np = &n->next;
        }
    }
    return -ENOENT;
}
/*
 *	The neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
    struct hh_cache *hh;

    NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

    if (!neigh->dead) {
        printk(KERN_WARNING
               "Destroying alive neighbour %p\n", neigh);
        dump_stack();
        return;
    }

    if (neigh_del_timer(neigh))
        printk(KERN_WARNING "Impossible event.\n");

    while ((hh = neigh->hh) != NULL) {
        neigh->hh = hh->hh_next;
        hh->hh_next = NULL;
        write_lock_bh(&hh->hh_lock);
        hh->hh_output = neigh_blackhole;
        write_unlock_bh(&hh->hh_lock);
        if (atomic_dec_and_test(&hh->hh_refcnt))
            kfree(hh);
    }

    if (neigh->ops && neigh->ops->destructor)
        (neigh->ops->destructor)(neigh);

    skb_queue_purge(&neigh->arp_queue);

    dev_put(neigh->dev);
    neigh_parms_put(neigh->parms);

    NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

    atomic_dec(&neigh->tbl->entries);
    kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/* Neighbour state is suspicious;
   disable the fast path.

   Called with write-locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
    struct hh_cache *hh;

    NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

    neigh->output = neigh->ops->output;

    for (hh = neigh->hh; hh; hh = hh->hh_next)
        hh->hh_output = neigh->ops->output;
}
/* Neighbour state is OK;
   enable the fast path.

   Called with write-locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
    struct hh_cache *hh;

    NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

    neigh->output = neigh->ops->connected_output;

    for (hh = neigh->hh; hh; hh = hh->hh_next)
        hh->hh_output = neigh->ops->hh_output;
}
static void neigh_periodic_timer(unsigned long arg)
{
    struct neigh_table *tbl = (struct neigh_table *)arg;
    struct neighbour *n, **np;
    unsigned long expire, now = jiffies;

    NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

    write_lock(&tbl->lock);

    /*
     *	periodically recompute ReachableTime from random function
     */
    if (time_after(now, tbl->last_rand + 300 * HZ)) {
        struct neigh_parms *p;
        tbl->last_rand = now;
        for (p = &tbl->parms; p; p = p->next)
            p->reachable_time =
                neigh_rand_reach_time(p->base_reachable_time);
    }

    np = &tbl->hash_buckets[tbl->hash_chain_gc];
    tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

    while ((n = *np) != NULL) {
        unsigned int state;

        write_lock(&n->lock);

        state = n->nud_state;
        if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
            write_unlock(&n->lock);
            goto next_elt;
        }

        if (time_before(n->used, n->confirmed))
            n->used = n->confirmed;

        if (atomic_read(&n->refcnt) == 1 &&
            (state == NUD_FAILED ||
             time_after(now, n->used + n->parms->gc_staletime))) {
            *np = n->next;
            n->dead = 1;
            write_unlock(&n->lock);
            neigh_release(n);
            continue;
        }
        write_unlock(&n->lock);

next_elt:
        np = &n->next;
    }

    /* Cycle through all hash buckets every base_reachable_time/2 ticks.
     * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
     * base_reachable_time.
     */
    expire = tbl->parms.base_reachable_time >> 1;
    expire /= (tbl->hash_mask + 1);
    if (!expire)
        expire = 1;

    mod_timer(&tbl->gc_timer, now + expire);

    write_unlock(&tbl->lock);
}
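/*
 * Editor's note (worked example): with base_reachable_time = 30 * HZ and
 * a 32-bucket table (hash_mask = 31), expire = (30 * HZ / 2) / 32, so the
 * timer fires roughly every 15/32 of a second and one full sweep of the
 * table takes about 15 seconds, i.e. base_reachable_time/2 as the comment
 * above says.
 */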
static __inline__ int neigh_max_probes(struct neighbour *n)
{
    struct neigh_parms *p = n->parms;
    return (n->nud_state & NUD_PROBE ?
            p->ucast_probes :
            p->ucast_probes + p->app_probes + p->mcast_probes);
}
static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
{
    if (unlikely(mod_timer(&n->timer, when))) {
        printk("NEIGH: BUG, double timer add, state is %x\n",
               n->nud_state);
        dump_stack();
    }
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
    unsigned long now, next;
    struct neighbour *neigh = (struct neighbour *)arg;
    unsigned state;
    int notify = 0;

    write_lock(&neigh->lock);

    state = neigh->nud_state;
    now = jiffies;
    next = now + HZ;

    if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
        printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
        goto out;
    }

    if (state & NUD_REACHABLE) {
        if (time_before_eq(now,
                           neigh->confirmed + neigh->parms->reachable_time)) {
            NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
            next = neigh->confirmed + neigh->parms->reachable_time;
        } else if (time_before_eq(now,
                                  neigh->used + neigh->parms->delay_probe_time)) {
            NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
            neigh->nud_state = NUD_DELAY;
            neigh_suspect(neigh);
            next = now + neigh->parms->delay_probe_time;
        } else {
            NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
            neigh->nud_state = NUD_STALE;
            neigh_suspect(neigh);
        }
    } else if (state & NUD_DELAY) {
        if (time_before_eq(now,
                           neigh->confirmed + neigh->parms->delay_probe_time)) {
            NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
            neigh->nud_state = NUD_REACHABLE;
            neigh_connect(neigh);
            next = neigh->confirmed + neigh->parms->reachable_time;
        } else {
            NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
            neigh->nud_state = NUD_PROBE;
            atomic_set(&neigh->probes, 0);
            next = now + neigh->parms->retrans_time;
        }
    } else {
        /* NUD_PROBE|NUD_INCOMPLETE */
        next = now + neigh->parms->retrans_time;
    }

    if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
        atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
        struct sk_buff *skb;

        neigh->nud_state = NUD_FAILED;
        notify = 1;
        NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
        NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

        /* This is a very thin place. report_unreachable() is a very
           complicated routine; in particular, it can hit the same
           neighbour entry! So we try to be careful here and avoid
           a dead loop. --ANK
         */
        while (neigh->nud_state == NUD_FAILED &&
               (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
            write_unlock(&neigh->lock);
            neigh->ops->error_report(neigh, skb);
            write_lock(&neigh->lock);
        }
        skb_queue_purge(&neigh->arp_queue);
    }

    if (neigh->nud_state & NUD_IN_TIMER) {
        if (time_before(next, jiffies + HZ/2))
            next = jiffies + HZ/2;
        if (!mod_timer(&neigh->timer, next))
            neigh_hold(neigh);
    }
    if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
        struct sk_buff *skb = skb_peek(&neigh->arp_queue);
        /* keep skb alive even if arp_queue overflows */
        if (skb)
            skb_get(skb);
        write_unlock(&neigh->lock);
        neigh->ops->solicit(neigh, skb);
        atomic_inc(&neigh->probes);
        if (skb)
            kfree_skb(skb);
    } else {
out:
        write_unlock(&neigh->lock);
    }

#ifdef CONFIG_ARPD
    if (notify && neigh->parms->app_probes)
        neigh_app_notify(neigh);
#endif
    neigh_release(neigh);
}
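/*
 * Editor's note: together with neigh_update() and __neigh_event_send(),
 * the handler above walks the usual NUD state machine. A common lifetime,
 * under default parameters:
 *
 *	NUD_INCOMPLETE --(reply, via neigh_update)--> NUD_REACHABLE
 *	NUD_REACHABLE  --(reachable_time expires)---> NUD_STALE
 *	NUD_STALE      --(output traffic)-----------> NUD_DELAY
 *	NUD_DELAY      --(no confirmation)----------> NUD_PROBE
 *	NUD_PROBE      --(neigh_max_probes() spent)-> NUD_FAILED
 */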
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
    int rc;
    unsigned long now;

    write_lock_bh(&neigh->lock);

    rc = 0;
    if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
        goto out_unlock_bh;

    now = jiffies;

    if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
        if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
            atomic_set(&neigh->probes, neigh->parms->ucast_probes);
            neigh->nud_state = NUD_INCOMPLETE;
            neigh_hold(neigh);
            neigh_add_timer(neigh, now + 1);
        } else {
            neigh->nud_state = NUD_FAILED;
            write_unlock_bh(&neigh->lock);

            if (skb)
                kfree_skb(skb);
            return 1;
        }
    } else if (neigh->nud_state & NUD_STALE) {
        NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
        neigh_hold(neigh);
        neigh->nud_state = NUD_DELAY;
        neigh_add_timer(neigh,
                        jiffies + neigh->parms->delay_probe_time);
    }

    if (neigh->nud_state == NUD_INCOMPLETE) {
        if (skb) {
            if (skb_queue_len(&neigh->arp_queue) >=
                neigh->parms->queue_len) {
                struct sk_buff *buff;
                buff = neigh->arp_queue.next;
                __skb_unlink(buff, &neigh->arp_queue);
                kfree_skb(buff);
            }
            __skb_queue_tail(&neigh->arp_queue, skb);
        }
        rc = 1;
    }
out_unlock_bh:
    write_unlock_bh(&neigh->lock);
    return rc;
}
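/*
 * Editor's note: callers normally reach this through the
 * neigh_event_send() inline from <net/neighbour.h>, which updates
 * neigh->used and short-circuits NUD_CONNECTED/NUD_DELAY/NUD_PROBE
 * entries, so the slow path above only runs when resolution work may
 * actually be needed.
 */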
static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
    struct hh_cache *hh;
    void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
        neigh->dev->header_cache_update;

    if (update) {
        for (hh = neigh->hh; hh; hh = hh->hh_next) {
            write_lock_bh(&hh->hh_lock);
            update(hh, neigh->dev, neigh->ha);
            write_unlock_bh(&hh->hh_lock);
        }
    }
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding the existing lladdr,
				if it differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect the existing "connected"
				lladdr instead of overriding it
				if it differs. It also allows retaining the
				current state if the lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				to be a router.

   Caller MUST hold a reference count on the entry.
 */
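/*
 * Editor's sketch (not part of the original file): a typical caller, such
 * as ARP input confirming a resolved address, would use the routine below
 * roughly like this, holding its own reference as required above. arp_tbl
 * lives in net/ipv4/arp.c; sip/sha (sender IP and hardware address) are
 * assumed names here. neigh_lookup() returns the entry with its reference
 * count already taken, which neigh_release() then drops.
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &sip, dev);
 *
 *	if (n) {
 *		neigh_update(n, sha, NUD_REACHABLE,
 *			     NEIGH_UPDATE_F_OVERRIDE);
 *		neigh_release(n);
 *	}
 */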
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                 u32 flags)
{
    u8 old;
    int err;
    int notify = 0;
    struct net_device *dev;
    int update_isrouter = 0;

    write_lock_bh(&neigh->lock);

    dev = neigh->dev;
    old = neigh->nud_state;
    err = -EPERM;

    if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
        (old & (NUD_NOARP | NUD_PERMANENT)))
        goto out;

    if (!(new & NUD_VALID)) {
        neigh_del_timer(neigh);
        if (old & NUD_CONNECTED)
            neigh_suspect(neigh);
        neigh->nud_state = new;
        err = 0;
        notify = old & NUD_VALID;
        goto out;
    }

    /* Compare the new lladdr with the cached one. */
    if (!dev->addr_len) {
        /* First case: device needs no address. */
        lladdr = neigh->ha;
    } else if (lladdr) {
        /* The second case: if something is already cached
           and a new address is proposed:
           - compare new & old
           - if they are different, check the override flag
         */
        if ((old & NUD_VALID) &&
            !memcmp(lladdr, neigh->ha, dev->addr_len))
            lladdr = neigh->ha;
    } else {
        /* No address is supplied; if we know something,
           use it, otherwise discard the request.
         */
        err = -EINVAL;
        if (!(old & NUD_VALID))
            goto out;
        lladdr = neigh->ha;
    }

    if (new & NUD_CONNECTED)
        neigh->confirmed = jiffies;
    neigh->updated = jiffies;

    /* If the entry was valid and the address is unchanged,
       do not change the entry state if the new one is STALE.
     */
    err = 0;
    update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
    if (old & NUD_VALID) {
        if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
            update_isrouter = 0;
            if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
                (old & NUD_CONNECTED)) {
                lladdr = neigh->ha;
                new = NUD_STALE;
            } else
                goto out;
        } else {
            if (lladdr == neigh->ha && new == NUD_STALE &&
                ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
                 (old & NUD_CONNECTED)))
                new = old;
        }
    }

    if (new != old) {
        neigh_del_timer(neigh);
        if (new & NUD_IN_TIMER) {
            neigh_hold(neigh);
            neigh_add_timer(neigh, (jiffies +
                                    ((new & NUD_REACHABLE) ?
                                     neigh->parms->reachable_time :
                                     0)));
        }
        neigh->nud_state = new;
    }

    if (lladdr != neigh->ha) {
        memcpy(&neigh->ha, lladdr, dev->addr_len);
        neigh_update_hhs(neigh);
        if (!(new & NUD_CONNECTED))
            neigh->confirmed = jiffies -
                               (neigh->parms->base_reachable_time << 1);
        notify = 1;
    }
    if (new == old)
        goto out;
    if (new & NUD_CONNECTED)
        neigh_connect(neigh);
    else
        neigh_suspect(neigh);
    if (!(old & NUD_VALID)) {
        struct sk_buff *skb;

        /* Again: avoid a dead loop if something went wrong. */
        while (neigh->nud_state & NUD_VALID &&
               (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
            struct neighbour *n1 = neigh;
            write_unlock_bh(&neigh->lock);
            /* On shaper/eql skb->dst->neighbour != neigh :( */
            if (skb->dst && skb->dst->neighbour)
                n1 = skb->dst->neighbour;
            n1->output(skb);
            write_lock_bh(&neigh->lock);
        }
        skb_queue_purge(&neigh->arp_queue);
    }
out:
    if (update_isrouter) {
        neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
                       (neigh->flags | NTF_ROUTER) :
                       (neigh->flags & ~NTF_ROUTER);
    }
    write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
    if (notify && neigh->parms->app_probes)
        neigh_app_notify(neigh);
#endif
    return err;
}
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
                                 u8 *lladdr, void *saddr,
                                 struct net_device *dev)
{
    struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
                                             lladdr || !dev->addr_len);
    if (neigh)
        neigh_update(neigh, lladdr, NUD_STALE,
                     NEIGH_UPDATE_F_OVERRIDE);
    return neigh;
}
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
                          u16 protocol)
{
    struct hh_cache *hh;
    struct net_device *dev = dst->dev;

    for (hh = n->hh; hh; hh = hh->hh_next)
        if (hh->hh_type == protocol)
            break;

    if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
        memset(hh, 0, sizeof(struct hh_cache));
        rwlock_init(&hh->hh_lock);
        hh->hh_type = protocol;
        atomic_set(&hh->hh_refcnt, 0);
        hh->hh_next = NULL;
        if (dev->hard_header_cache(n, hh)) {
            kfree(hh);
            hh = NULL;
        } else {
            atomic_inc(&hh->hh_refcnt);
            hh->hh_next = n->hh;
            n->hh = hh;
            if (n->nud_state & NUD_CONNECTED)
                hh->hh_output = n->ops->hh_output;
            else
                hh->hh_output = n->ops->output;
        }
    }
    if (hh) {
        atomic_inc(&hh->hh_refcnt);
        dst->hh = hh;
    }
}
/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql,
   shaper), but resolution is not yet done.
 */

int neigh_compat_output(struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;

    __skb_pull(skb, skb->nh.raw - skb->data);

    if (dev->hard_header &&
        dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
                         skb->len) < 0 &&
        dev->rebuild_header(skb))
        return 0;

    return dev_queue_xmit(skb);
}
/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
    struct dst_entry *dst = skb->dst;
    struct neighbour *neigh;
    int rc = 0;

    if (!dst || !(neigh = dst->neighbour))
        goto discard;

    __skb_pull(skb, skb->nh.raw - skb->data);

    if (!neigh_event_send(neigh, skb)) {
        int err;
        struct net_device *dev = neigh->dev;
        if (dev->hard_header_cache && !dst->hh) {
            write_lock_bh(&neigh->lock);
            if (!dst->hh)
                neigh_hh_init(neigh, dst, dst->ops->protocol);
            err = dev->hard_header(skb, dev, ntohs(skb->protocol),
                                   neigh->ha, NULL, skb->len);
            write_unlock_bh(&neigh->lock);
        } else {
            read_lock_bh(&neigh->lock);
            err = dev->hard_header(skb, dev, ntohs(skb->protocol),
                                   neigh->ha, NULL, skb->len);
            read_unlock_bh(&neigh->lock);
        }
        if (err >= 0)
            rc = neigh->ops->queue_xmit(skb);
        else
            goto out_kfree_skb;
    }
out:
    return rc;
discard:
    NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
                  dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
    rc = -EINVAL;
    kfree_skb(skb);
    goto out;
}
/* As fast as possible without the hh cache. */

int neigh_connected_output(struct sk_buff *skb)
{
    int err;
    struct dst_entry *dst = skb->dst;
    struct neighbour *neigh = dst->neighbour;
    struct net_device *dev = neigh->dev;

    __skb_pull(skb, skb->nh.raw - skb->data);

    read_lock_bh(&neigh->lock);
    err = dev->hard_header(skb, dev, ntohs(skb->protocol),
                           neigh->ha, NULL, skb->len);
    read_unlock_bh(&neigh->lock);
    if (err >= 0)
        err = neigh->ops->queue_xmit(skb);
    else {
        err = -EINVAL;
        kfree_skb(skb);
    }
    return err;
}
static void neigh_proxy_process(unsigned long arg)
{
    struct neigh_table *tbl = (struct neigh_table *)arg;
    long sched_next = 0;
    unsigned long now = jiffies;
    struct sk_buff *skb;

    spin_lock(&tbl->proxy_queue.lock);

    skb = tbl->proxy_queue.next;

    while (skb != (struct sk_buff *)&tbl->proxy_queue) {
        struct sk_buff *back = skb;
        long tdif = NEIGH_CB(back)->sched_next - now;

        skb = skb->next;
        if (tdif <= 0) {
            struct net_device *dev = back->dev;
            __skb_unlink(back, &tbl->proxy_queue);
            if (tbl->proxy_redo && netif_running(dev))
                tbl->proxy_redo(back);
            else
                kfree_skb(back);

            dev_put(dev);
        } else if (!sched_next || tdif < sched_next)
            sched_next = tdif;
    }
    del_timer(&tbl->proxy_timer);
    if (sched_next)
        mod_timer(&tbl->proxy_timer, jiffies + sched_next);
    spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
                    struct sk_buff *skb)
{
    unsigned long now = jiffies;
    unsigned long sched_next = now + (net_random() % p->proxy_delay);

    if (tbl->proxy_queue.qlen > p->proxy_qlen) {
        kfree_skb(skb);
        return;
    }

    NEIGH_CB(skb)->sched_next = sched_next;
    NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

    spin_lock(&tbl->proxy_queue.lock);
    if (del_timer(&tbl->proxy_timer)) {
        if (time_before(tbl->proxy_timer.expires, sched_next))
            sched_next = tbl->proxy_timer.expires;
    }
    dst_release(skb->dst);
    skb->dst = NULL;
    dev_hold(skb->dev);
    __skb_queue_tail(&tbl->proxy_queue, skb);
    mod_timer(&tbl->proxy_timer, sched_next);
    spin_unlock(&tbl->proxy_queue.lock);
}
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                                      struct neigh_table *tbl)
{
    struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

    if (p) {
        memcpy(p, &tbl->parms, sizeof(*p));
        p->tbl = tbl;
        atomic_set(&p->refcnt, 1);
        INIT_RCU_HEAD(&p->rcu_head);
        p->reachable_time =
            neigh_rand_reach_time(p->base_reachable_time);
        if (dev) {
            if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
                kfree(p);
                return NULL;
            }

            dev_hold(dev);
            p->dev = dev;
        }
        p->sysctl_table = NULL;
        write_lock_bh(&tbl->lock);
        p->next = tbl->parms.next;
        tbl->parms.next = p;
        write_unlock_bh(&tbl->lock);
    }
    return p;
}
static void neigh_rcu_free_parms(struct rcu_head *head)
{
    struct neigh_parms *parms =
        container_of(head, struct neigh_parms, rcu_head);

    neigh_parms_put(parms);
}
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
    struct neigh_parms **p;

    if (!parms || parms == &tbl->parms)
        return;
    write_lock_bh(&tbl->lock);
    for (p = &tbl->parms.next; *p; p = &(*p)->next) {
        if (*p == parms) {
            *p = parms->next;
            parms->dead = 1;
            write_unlock_bh(&tbl->lock);
            if (parms->dev)
                dev_put(parms->dev);
            call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
            return;
        }
    }
    write_unlock_bh(&tbl->lock);
    NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
void neigh_parms_destroy(struct neigh_parms *parms)
{
    kfree(parms);
}
void neigh_table_init(struct neigh_table *tbl)
{
    unsigned long now = jiffies;
    unsigned long phsize;

    atomic_set(&tbl->parms.refcnt, 1);
    INIT_RCU_HEAD(&tbl->parms.rcu_head);
    tbl->parms.reachable_time =
        neigh_rand_reach_time(tbl->parms.base_reachable_time);

    if (!tbl->kmem_cachep)
        tbl->kmem_cachep = kmem_cache_create(tbl->id,
                                             tbl->entry_size,
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
    if (!tbl->kmem_cachep)
        panic("cannot create neighbour cache");

    tbl->stats = alloc_percpu(struct neigh_statistics);
    if (!tbl->stats)
        panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
    tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
    if (!tbl->pde)
        panic("cannot create neighbour proc dir entry");
    tbl->pde->proc_fops = &neigh_stat_seq_fops;
    tbl->pde->data = tbl;
#endif

    tbl->hash_mask = 1;
    tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

    phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
    tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);

    if (!tbl->hash_buckets || !tbl->phash_buckets)
        panic("cannot allocate neighbour cache hashes");

    memset(tbl->phash_buckets, 0, phsize);

    get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

    rwlock_init(&tbl->lock);
    init_timer(&tbl->gc_timer);
    tbl->gc_timer.data     = (unsigned long)tbl;
    tbl->gc_timer.function = neigh_periodic_timer;
    tbl->gc_timer.expires  = now + 1;
    add_timer(&tbl->gc_timer);

    init_timer(&tbl->proxy_timer);
    tbl->proxy_timer.data     = (unsigned long)tbl;
    tbl->proxy_timer.function = neigh_proxy_process;
    skb_queue_head_init(&tbl->proxy_queue);

    tbl->last_flush = now;
    tbl->last_rand  = now + tbl->parms.reachable_time * 20;
    write_lock(&neigh_tbl_lock);
    tbl->next    = neigh_tables;
    neigh_tables = tbl;
    write_unlock(&neigh_tbl_lock);
}
int neigh_table_clear(struct neigh_table *tbl)
{
    struct neigh_table **tp;

    /* It is not clean... Fix it to unload the IPv6 module safely. */
    del_timer_sync(&tbl->gc_timer);
    del_timer_sync(&tbl->proxy_timer);
    pneigh_queue_purge(&tbl->proxy_queue);
    neigh_ifdown(tbl, NULL);
    if (atomic_read(&tbl->entries))
        printk(KERN_CRIT "neighbour leakage\n");
    write_lock(&neigh_tbl_lock);
    for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
        if (*tp == tbl) {
            *tp = tbl->next;
            break;
        }
    }
    write_unlock(&neigh_tbl_lock);

    neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
    tbl->hash_buckets = NULL;

    kfree(tbl->phash_buckets);
    tbl->phash_buckets = NULL;

    return 0;
}
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
    struct ndmsg *ndm = NLMSG_DATA(nlh);
    struct rtattr **nda = arg;
    struct neigh_table *tbl;
    struct net_device *dev = NULL;
    int err = -ENODEV;

    if (ndm->ndm_ifindex &&
        (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
        goto out;

    read_lock(&neigh_tbl_lock);
    for (tbl = neigh_tables; tbl; tbl = tbl->next) {
        struct rtattr *dst_attr = nda[NDA_DST - 1];
        struct neighbour *n;

        if (tbl->family != ndm->ndm_family)
            continue;
        read_unlock(&neigh_tbl_lock);

        err = -EINVAL;
        if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
            goto out_dev_put;

        if (ndm->ndm_flags & NTF_PROXY) {
            err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
            goto out_dev_put;
        }

        if (!dev)
            goto out;

        n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
        if (n) {
            err = neigh_update(n, NULL, NUD_FAILED,
                               NEIGH_UPDATE_F_OVERRIDE|
                               NEIGH_UPDATE_F_ADMIN);
            neigh_release(n);
        }
        goto out_dev_put;
    }
    read_unlock(&neigh_tbl_lock);
    err = -EADDRNOTAVAIL;
out_dev_put:
    if (dev)
        dev_put(dev);
out:
    return err;
}
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
    struct ndmsg *ndm = NLMSG_DATA(nlh);
    struct rtattr **nda = arg;
    struct neigh_table *tbl;
    struct net_device *dev = NULL;
    int err = -ENODEV;

    if (ndm->ndm_ifindex &&
        (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
        goto out;

    read_lock(&neigh_tbl_lock);
    for (tbl = neigh_tables; tbl; tbl = tbl->next) {
        struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
        struct rtattr *dst_attr = nda[NDA_DST - 1];
        int override = 1;
        struct neighbour *n;

        if (tbl->family != ndm->ndm_family)
            continue;
        read_unlock(&neigh_tbl_lock);

        err = -EINVAL;
        if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
            goto out_dev_put;

        if (ndm->ndm_flags & NTF_PROXY) {
            err = -ENOBUFS;
            if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
                err = 0;
            goto out_dev_put;
        }

        if (!dev)
            goto out;
        if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
            goto out_dev_put;

        n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
        if (n) {
            if (nlh->nlmsg_flags & NLM_F_EXCL) {
                err = -EEXIST;
                neigh_release(n);
                goto out_dev_put;
            }

            override = nlh->nlmsg_flags & NLM_F_REPLACE;
        } else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
            err = -ENOENT;
            goto out_dev_put;
        } else {
            n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
            if (IS_ERR(n)) {
                err = PTR_ERR(n);
                goto out_dev_put;
            }
        }

        err = neigh_update(n,
                           lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
                           ndm->ndm_state,
                           (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
                           NEIGH_UPDATE_F_ADMIN);

        neigh_release(n);
        goto out_dev_put;
    }

    read_unlock(&neigh_tbl_lock);
    err = -EADDRNOTAVAIL;
out_dev_put:
    if (dev)
        dev_put(dev);
out:
    return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
    struct rtattr *nest = NULL;

    nest = RTA_NEST(skb, NDTA_PARMS);

    if (parms->dev)
        RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

    RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
    RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
    RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
    RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
    RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
    RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
    RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
    RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
                  parms->base_reachable_time);
    RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
    RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
    RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
    RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
    RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
    RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

    return RTA_NEST_END(skb, nest);

rtattr_failure:
    return RTA_NEST_CANCEL(skb, nest);
}
static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
                              struct netlink_callback *cb)
{
    struct nlmsghdr *nlh;
    struct ndtmsg *ndtmsg;

    nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
                           NLM_F_MULTI);

    ndtmsg = NLMSG_DATA(nlh);

    read_lock_bh(&tbl->lock);
    ndtmsg->ndtm_family = tbl->family;
    ndtmsg->ndtm_pad1   = 0;
    ndtmsg->ndtm_pad2   = 0;

    RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
    RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
    RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
    RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
    RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

    {
        unsigned long now = jiffies;
        unsigned int flush_delta = now - tbl->last_flush;
        unsigned int rand_delta = now - tbl->last_rand;

        struct ndt_config ndc = {
            .ndtc_key_len       = tbl->key_len,
            .ndtc_entry_size    = tbl->entry_size,
            .ndtc_entries       = atomic_read(&tbl->entries),
            .ndtc_last_flush    = jiffies_to_msecs(flush_delta),
            .ndtc_last_rand     = jiffies_to_msecs(rand_delta),
            .ndtc_hash_rnd      = tbl->hash_rnd,
            .ndtc_hash_mask     = tbl->hash_mask,
            .ndtc_hash_chain_gc = tbl->hash_chain_gc,
            .ndtc_proxy_qlen    = tbl->proxy_queue.qlen,
        };

        RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
    }

    {
        int cpu;
        struct ndt_stats ndst;

        memset(&ndst, 0, sizeof(ndst));

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            struct neigh_statistics *st;

            if (!cpu_possible(cpu))
                continue;

            st = per_cpu_ptr(tbl->stats, cpu);
            ndst.ndts_allocs           += st->allocs;
            ndst.ndts_destroys         += st->destroys;
            ndst.ndts_hash_grows       += st->hash_grows;
            ndst.ndts_res_failed       += st->res_failed;
            ndst.ndts_lookups          += st->lookups;
            ndst.ndts_hits             += st->hits;
            ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
            ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
            ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
            ndst.ndts_forced_gc_runs   += st->forced_gc_runs;
        }

        RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
    }

    BUG_ON(tbl->parms.dev);
    if (neightbl_fill_parms(skb, &tbl->parms) < 0)
        goto rtattr_failure;

    read_unlock_bh(&tbl->lock);
    return NLMSG_END(skb, nlh);

rtattr_failure:
    read_unlock_bh(&tbl->lock);
    return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
    return -1;
}
static int neightbl_fill_param_info(struct neigh_table *tbl,
                                    struct neigh_parms *parms,
                                    struct sk_buff *skb,
                                    struct netlink_callback *cb)
{
    struct ndtmsg *ndtmsg;
    struct nlmsghdr *nlh;

    nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
                           NLM_F_MULTI);

    ndtmsg = NLMSG_DATA(nlh);

    read_lock_bh(&tbl->lock);
    ndtmsg->ndtm_family = tbl->family;
    ndtmsg->ndtm_pad1   = 0;
    ndtmsg->ndtm_pad2   = 0;
    RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);

    if (neightbl_fill_parms(skb, parms) < 0)
        goto rtattr_failure;

    read_unlock_bh(&tbl->lock);
    return NLMSG_END(skb, nlh);

rtattr_failure:
    read_unlock_bh(&tbl->lock);
    return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
    return -1;
}
static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
                                                      int ifindex)
{
    struct neigh_parms *p;

    for (p = &tbl->parms; p; p = p->next)
        if ((p->dev && p->dev->ifindex == ifindex) ||
            (!p->dev && !ifindex))
            return p;

    return NULL;
}
int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
    struct neigh_table *tbl;
    struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
    struct rtattr **tb = arg;
    int err = -EINVAL;

    if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
        return -EINVAL;

    read_lock(&neigh_tbl_lock);
    for (tbl = neigh_tables; tbl; tbl = tbl->next) {
        if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
            continue;

        if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
            break;
    }

    if (tbl == NULL) {
        err = -ENOENT;
        goto errout;
    }

    /*
     * We acquire tbl->lock to be nice to the periodic timers and
     * make sure they always see a consistent set of values.
     */
    write_lock_bh(&tbl->lock);

    if (tb[NDTA_THRESH1 - 1])
        tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);

    if (tb[NDTA_THRESH2 - 1])
        tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);

    if (tb[NDTA_THRESH3 - 1])
        tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);

    if (tb[NDTA_GC_INTERVAL - 1])
        tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);

    if (tb[NDTA_PARMS - 1]) {
        struct rtattr *tbp[NDTPA_MAX];
        struct neigh_parms *p;
        u32 ifindex = 0;

        if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
            goto rtattr_failure;

        if (tbp[NDTPA_IFINDEX - 1])
            ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);

        p = lookup_neigh_params(tbl, ifindex);
        if (p == NULL) {
            err = -ENOENT;
            goto rtattr_failure;
        }

        if (tbp[NDTPA_QUEUE_LEN - 1])
            p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);

        if (tbp[NDTPA_PROXY_QLEN - 1])
            p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);

        if (tbp[NDTPA_APP_PROBES - 1])
            p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);

        if (tbp[NDTPA_UCAST_PROBES - 1])
            p->ucast_probes =
                RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);

        if (tbp[NDTPA_MCAST_PROBES - 1])
            p->mcast_probes =
                RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);

        if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
            p->base_reachable_time =
                RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);

        if (tbp[NDTPA_GC_STALETIME - 1])
            p->gc_staletime =
                RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);

        if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
            p->delay_probe_time =
                RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);

        if (tbp[NDTPA_RETRANS_TIME - 1])
            p->retrans_time =
                RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);

        if (tbp[NDTPA_ANYCAST_DELAY - 1])
            p->anycast_delay =
                RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);

        if (tbp[NDTPA_PROXY_DELAY - 1])
            p->proxy_delay =
                RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);

        if (tbp[NDTPA_LOCKTIME - 1])
            p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
    }

    err = 0;

rtattr_failure:
    write_unlock_bh(&tbl->lock);
errout:
    read_unlock(&neigh_tbl_lock);
    return err;
}
int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
    int idx, family;
    int s_idx = cb->args[0];
    struct neigh_table *tbl;

    family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;

    read_lock(&neigh_tbl_lock);
    for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
        struct neigh_parms *p;

        if (idx < s_idx || (family && tbl->family != family))
            continue;

        if (neightbl_fill_info(tbl, skb, cb) <= 0)
            break;

        for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
            if (idx < s_idx)
                continue;

            if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
                goto out;
        }
    }
out:
    read_unlock(&neigh_tbl_lock);
    cb->args[0] = idx;

    return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
                           u32 pid, u32 seq, int event, unsigned int flags)
{
    unsigned long now = jiffies;
    unsigned char *b = skb->tail;
    struct nda_cacheinfo ci;
    int locked = 0;
    u32 probes;
    struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
                                     sizeof(struct ndmsg), flags);
    struct ndmsg *ndm = NLMSG_DATA(nlh);

    ndm->ndm_family  = n->ops->family;
    ndm->ndm_pad1    = 0;
    ndm->ndm_pad2    = 0;
    ndm->ndm_flags   = n->flags;
    ndm->ndm_type    = n->type;
    ndm->ndm_ifindex = n->dev->ifindex;
    RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
    read_lock_bh(&n->lock);
    locked           = 1;
    ndm->ndm_state   = n->nud_state;
    if (n->nud_state & NUD_VALID)
        RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
    ci.ndm_used      = now - n->used;
    ci.ndm_confirmed = now - n->confirmed;
    ci.ndm_updated   = now - n->updated;
    ci.ndm_refcnt    = atomic_read(&n->refcnt) - 1;
    probes           = atomic_read(&n->probes);
    read_unlock_bh(&n->lock);
    locked           = 0;
    RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
    RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
    nlh->nlmsg_len   = skb->tail - b;
    return skb->len;

nlmsg_failure:
rtattr_failure:
    if (locked)
        read_unlock_bh(&n->lock);
    skb_trim(skb, b - skb->data);
    return -1;
}
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                            struct netlink_callback *cb)
{
    struct neighbour *n;
    int rc, h, s_h = cb->args[1];
    int idx, s_idx = idx = cb->args[2];

    for (h = 0; h <= tbl->hash_mask; h++) {
        if (h < s_h)
            continue;
        if (h > s_h)
            s_idx = 0;
        read_lock_bh(&tbl->lock);
        for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
            if (idx < s_idx)
                continue;
            if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
                                cb->nlh->nlmsg_seq,
                                RTM_NEWNEIGH,
                                NLM_F_MULTI) <= 0) {
                read_unlock_bh(&tbl->lock);
                rc = -1;
                goto out;
            }
        }
        read_unlock_bh(&tbl->lock);
    }
    rc = skb->len;
out:
    cb->args[1] = h;
    cb->args[2] = idx;
    return rc;
}
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
    struct neigh_table *tbl;
    int t, family, s_t;

    read_lock(&neigh_tbl_lock);
    family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
    s_t = cb->args[0];

    for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
        if (t < s_t || (family && tbl->family != family))
            continue;
        if (t > s_t)
            memset(&cb->args[1], 0, sizeof(cb->args) -
                                    sizeof(cb->args[0]));
        if (neigh_dump_table(tbl, skb, cb) < 0)
            break;
    }
    read_unlock(&neigh_tbl_lock);

    cb->args[0] = t;
    return skb->len;
}
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
    int chain;

    read_lock_bh(&tbl->lock);
    for (chain = 0; chain <= tbl->hash_mask; chain++) {
        struct neighbour *n;

        for (n = tbl->hash_buckets[chain]; n; n = n->next)
            cb(n, cookie);
    }
    read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
                              int (*cb)(struct neighbour *))
{
    int chain;

    for (chain = 0; chain <= tbl->hash_mask; chain++) {
        struct neighbour *n, **np;

        np = &tbl->hash_buckets[chain];
        while ((n = *np) != NULL) {
            int release;

            write_lock(&n->lock);
            release = cb(n);
            if (release) {
                *np = n->next;
                n->dead = 1;
            } else
                np = &n->next;
            write_unlock(&n->lock);
            if (release)
                neigh_release(n);
        }
    }
}
EXPORT_SYMBOL(__neigh_for_each_release);
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
    struct neigh_seq_state *state = seq->private;
    struct neigh_table *tbl = state->tbl;
    struct neighbour *n = NULL;
    int bucket = state->bucket;

    state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
    for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
        n = tbl->hash_buckets[bucket];

        while (n) {
            if (state->neigh_sub_iter) {
                loff_t fakep = 0;
                void *v;

                v = state->neigh_sub_iter(state, n, &fakep);
                if (!v)
                    goto next;
            }
            if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
                break;
            if (n->nud_state & ~NUD_NOARP)
                break;
        next:
            n = n->next;
        }

        if (n)
            break;
    }
    state->bucket = bucket;

    return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
                                        struct neighbour *n,
                                        loff_t *pos)
{
    struct neigh_seq_state *state = seq->private;
    struct neigh_table *tbl = state->tbl;

    if (state->neigh_sub_iter) {
        void *v = state->neigh_sub_iter(state, n, pos);
        if (v)
            return n;
    }
    n = n->next;

    while (1) {
        while (n) {
            if (state->neigh_sub_iter) {
                void *v = state->neigh_sub_iter(state, n, pos);
                if (v)
                    return n;
                goto next;
            }
            if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
                break;
            if (n->nud_state & ~NUD_NOARP)
                break;
        next:
            n = n->next;
        }

        if (n)
            break;

        if (++state->bucket > tbl->hash_mask)
            break;

        n = tbl->hash_buckets[state->bucket];
    }

    if (n && pos)
        --(*pos);
    return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
    struct neighbour *n = neigh_get_first(seq);

    if (n) {
        while (*pos) {
            n = neigh_get_next(seq, n, pos);
            if (!n)
                break;
        }
    }
    return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
    struct neigh_seq_state *state = seq->private;
    struct neigh_table *tbl = state->tbl;
    struct pneigh_entry *pn = NULL;
    int bucket = state->bucket;

    state->flags |= NEIGH_SEQ_IS_PNEIGH;
    for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
        pn = tbl->phash_buckets[bucket];
        if (pn)
            break;
    }
    state->bucket = bucket;

    return pn;
}
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
                                            struct pneigh_entry *pn,
                                            loff_t *pos)
{
    struct neigh_seq_state *state = seq->private;
    struct neigh_table *tbl = state->tbl;

    pn = pn->next;
    while (!pn) {
        if (++state->bucket > PNEIGH_HASHMASK)
            break;
        pn = tbl->phash_buckets[state->bucket];
    }

    if (pn && pos)
        --(*pos);

    return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
    struct pneigh_entry *pn = pneigh_get_first(seq);

    if (pn) {
        while (*pos) {
            pn = pneigh_get_next(seq, pn, pos);
            if (!pn)
                break;
        }
    }
    return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
    struct neigh_seq_state *state = seq->private;
    void *rc;

    rc = neigh_get_idx(seq, pos);
    if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
        rc = pneigh_get_idx(seq, pos);

    return rc;
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
{
    struct neigh_seq_state *state = seq->private;
    loff_t pos_minus_one;

    state->tbl = tbl;
    state->bucket = 0;
    state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

    read_lock_bh(&tbl->lock);

    pos_minus_one = *pos - 1;
    return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    struct neigh_seq_state *state;
    void *rc;

    if (v == SEQ_START_TOKEN) {
        rc = neigh_get_idx(seq, pos);
        goto out;
    }

    state = seq->private;
    if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
        rc = neigh_get_next(seq, v, NULL);
        if (rc)
            goto out;
        if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
            rc = pneigh_get_first(seq);
    } else {
        BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
        rc = pneigh_get_next(seq, v, NULL);
    }
out:
    ++(*pos);
    return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
{
    struct neigh_seq_state *state = seq->private;
    struct neigh_table *tbl = state->tbl;

    read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
    struct proc_dir_entry *pde = seq->private;
    struct neigh_table *tbl = pde->data;
    int cpu;

    if (*pos == 0)
        return SEQ_START_TOKEN;

    for (cpu = *pos - 1; cpu < NR_CPUS; ++cpu) {
        if (!cpu_possible(cpu))
            continue;
        *pos = cpu + 1;
        return per_cpu_ptr(tbl->stats, cpu);
    }
    return NULL;
}
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    struct proc_dir_entry *pde = seq->private;
    struct neigh_table *tbl = pde->data;
    int cpu;

    for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
        if (!cpu_possible(cpu))
            continue;
        *pos = cpu + 1;
        return per_cpu_ptr(tbl->stats, cpu);
    }
    return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
    struct proc_dir_entry *pde = seq->private;
    struct neigh_table *tbl = pde->data;
    struct neigh_statistics *st = v;

    if (v == SEQ_START_TOKEN) {
        seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
        return 0;
    }

    seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
                    "%08lx %08lx %08lx %08lx\n",
               atomic_read(&tbl->entries),
               st->allocs, st->destroys, st->hash_grows,
               st->lookups, st->hits, st->res_failed,
               st->rcv_probes_mcast, st->rcv_probes_ucast,
               st->periodic_gc_runs, st->forced_gc_runs);

    return 0;
}
static struct seq_operations neigh_stat_seq_ops = {
    .start = neigh_stat_seq_start,
    .next  = neigh_stat_seq_next,
    .stop  = neigh_stat_seq_stop,
    .show  = neigh_stat_seq_show,
};
static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
    int ret = seq_open(file, &neigh_stat_seq_ops);

    if (!ret) {
        struct seq_file *sf = file->private_data;
        sf->private = PDE(inode);
    }
    return ret;
}
static struct file_operations neigh_stat_seq_fops = {
    .owner   = THIS_MODULE,
    .open    = neigh_stat_seq_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = seq_release,
};

#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
    struct nlmsghdr *nlh;
    int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
    struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

    if (!skb)
        return;

    if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
        kfree_skb(skb);
        return;
    }
    nlh                       = (struct nlmsghdr *)skb->data;
    nlh->nlmsg_flags          = NLM_F_REQUEST;
    NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
    netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}
static void neigh_app_notify(struct neighbour *n)
{
    struct nlmsghdr *nlh;
    int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
    struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

    if (!skb)
        return;

    if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
        kfree_skb(skb);
        return;
    }
    nlh                       = (struct nlmsghdr *)skb->data;
    NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
    netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}

#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL

static struct neigh_sysctl_table {
    struct ctl_table_header *sysctl_header;
    ctl_table               neigh_vars[__NET_NEIGH_MAX];
    ctl_table               neigh_dev[2];
    ctl_table               neigh_neigh_dir[2];
    ctl_table               neigh_proto_dir[2];
    ctl_table               neigh_root_dir[2];
} neigh_sysctl_template = {
    .neigh_vars = {
        {
            .ctl_name     = NET_NEIGH_MCAST_SOLICIT,
            .procname     = "mcast_solicit",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec,
        },
        {
            .ctl_name     = NET_NEIGH_UCAST_SOLICIT,
            .procname     = "ucast_solicit",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec,
        },
        {
            .ctl_name     = NET_NEIGH_APP_SOLICIT,
            .procname     = "app_solicit",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec,
        },
        {
            .ctl_name     = NET_NEIGH_RETRANS_TIME,
            .procname     = "retrans_time",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec_userhz_jiffies,
        },
        {
            .ctl_name     = NET_NEIGH_REACHABLE_TIME,
            .procname     = "base_reachable_time",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec_jiffies,
            .strategy     = &sysctl_jiffies,
        },
        {
            .ctl_name     = NET_NEIGH_DELAY_PROBE_TIME,
            .procname     = "delay_first_probe_time",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec_jiffies,
            .strategy     = &sysctl_jiffies,
        },
        {
            .ctl_name     = NET_NEIGH_GC_STALE_TIME,
            .procname     = "gc_stale_time",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec_jiffies,
            .strategy     = &sysctl_jiffies,
        },
        {
            .ctl_name     = NET_NEIGH_UNRES_QLEN,
            .procname     = "unres_qlen",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec,
        },
        {
            .ctl_name     = NET_NEIGH_PROXY_QLEN,
            .procname     = "proxy_qlen",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec,
        },
        {
            .ctl_name     = NET_NEIGH_ANYCAST_DELAY,
            .procname     = "anycast_delay",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec_userhz_jiffies,
        },
        {
            .ctl_name     = NET_NEIGH_PROXY_DELAY,
            .procname     = "proxy_delay",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec_userhz_jiffies,
        },
        {
            .ctl_name     = NET_NEIGH_LOCKTIME,
            .procname     = "locktime",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec_userhz_jiffies,
        },
        {
            .ctl_name     = NET_NEIGH_GC_INTERVAL,
            .procname     = "gc_interval",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec_jiffies,
            .strategy     = &sysctl_jiffies,
        },
        {
            .ctl_name     = NET_NEIGH_GC_THRESH1,
            .procname     = "gc_thresh1",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec,
        },
        {
            .ctl_name     = NET_NEIGH_GC_THRESH2,
            .procname     = "gc_thresh2",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec,
        },
        {
            .ctl_name     = NET_NEIGH_GC_THRESH3,
            .procname     = "gc_thresh3",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec,
        },
        {
            .ctl_name     = NET_NEIGH_RETRANS_TIME_MS,
            .procname     = "retrans_time_ms",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec_ms_jiffies,
            .strategy     = &sysctl_ms_jiffies,
        },
        {
            .ctl_name     = NET_NEIGH_REACHABLE_TIME_MS,
            .procname     = "base_reachable_time_ms",
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec_ms_jiffies,
            .strategy     = &sysctl_ms_jiffies,
        },
    },
    .neigh_dev = {
        {
            .ctl_name = NET_PROTO_CONF_DEFAULT,
            .procname = "default",
            .mode     = 0555,
        },
    },
    .neigh_neigh_dir = {
        {
            .procname = "neigh",
            .mode     = 0555,
        },
    },
    .neigh_proto_dir = {
        {
            .mode = 0555,
        },
    },
    .neigh_root_dir = {
        {
            .ctl_name = CTL_NET,
            .procname = "net",
            .mode     = 0555,
        },
    },
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                          int p_id, int pdev_id, char *p_name,
                          proc_handler *handler, ctl_handler *strategy)
{
    struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
    const char *dev_name_source = NULL;
    char *dev_name = NULL;
    int err = 0;

    if (!t)
        return -ENOBUFS;
    memcpy(t, &neigh_sysctl_template, sizeof(*t));
    t->neigh_vars[0].data  = &p->mcast_probes;
    t->neigh_vars[1].data  = &p->ucast_probes;
    t->neigh_vars[2].data  = &p->app_probes;
    t->neigh_vars[3].data  = &p->retrans_time;
    t->neigh_vars[4].data  = &p->base_reachable_time;
    t->neigh_vars[5].data  = &p->delay_probe_time;
    t->neigh_vars[6].data  = &p->gc_staletime;
    t->neigh_vars[7].data  = &p->queue_len;
    t->neigh_vars[8].data  = &p->proxy_qlen;
    t->neigh_vars[9].data  = &p->anycast_delay;
    t->neigh_vars[10].data = &p->proxy_delay;
    t->neigh_vars[11].data = &p->locktime;

    if (dev) {
        dev_name_source = dev->name;
        t->neigh_dev[0].ctl_name = dev->ifindex;
        /* The per-table gc knobs only exist in the "default" tree. */
        t->neigh_vars[12].procname = NULL;
        t->neigh_vars[13].procname = NULL;
        t->neigh_vars[14].procname = NULL;
        t->neigh_vars[15].procname = NULL;
    } else {
        dev_name_source = t->neigh_dev[0].procname;
        t->neigh_vars[12].data = (int *)(p + 1);
        t->neigh_vars[13].data = (int *)(p + 1) + 1;
        t->neigh_vars[14].data = (int *)(p + 1) + 2;
        t->neigh_vars[15].data = (int *)(p + 1) + 3;
    }

    t->neigh_vars[16].data = &p->retrans_time;
    t->neigh_vars[17].data = &p->base_reachable_time;

    if (handler || strategy) {
        /* RetransTime */
        t->neigh_vars[3].proc_handler = handler;
        t->neigh_vars[3].strategy = strategy;
        t->neigh_vars[3].extra1 = dev;
        /* ReachableTime */
        t->neigh_vars[4].proc_handler = handler;
        t->neigh_vars[4].strategy = strategy;
        t->neigh_vars[4].extra1 = dev;
        /* RetransTime (in milliseconds) */
        t->neigh_vars[16].proc_handler = handler;
        t->neigh_vars[16].strategy = strategy;
        t->neigh_vars[16].extra1 = dev;
        /* ReachableTime (in milliseconds) */
        t->neigh_vars[17].proc_handler = handler;
        t->neigh_vars[17].strategy = strategy;
        t->neigh_vars[17].extra1 = dev;
    }

    dev_name = kstrdup(dev_name_source, GFP_KERNEL);
    if (!dev_name) {
        err = -ENOBUFS;
        goto free;
    }

    t->neigh_dev[0].procname = dev_name;

    t->neigh_neigh_dir[0].ctl_name = pdev_id;

    t->neigh_proto_dir[0].procname = p_name;
    t->neigh_proto_dir[0].ctl_name = p_id;

    t->neigh_dev[0].child       = t->neigh_vars;
    t->neigh_neigh_dir[0].child = t->neigh_dev;
    t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
    t->neigh_root_dir[0].child  = t->neigh_proto_dir;
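    /*
     * Editor's note: the child chaining above produces the sysctl tree
     * net.<p_name>.neigh.<dev_name>.<var>. For the IPv4 ARP table on
     * eth0, for example, the variables surface as
     * /proc/sys/net/ipv4/neigh/eth0/retrans_time and friends.
     */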
    t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
    if (!t->sysctl_header) {
        err = -ENOBUFS;
        goto free_procname;
    }
    p->sysctl_table = t;
    return 0;

    /* error path */
free_procname:
    kfree(dev_name);
free:
    kfree(t);

    return err;
}
void neigh_sysctl_unregister(struct neigh_parms *p)
{
    if (p->sysctl_table) {
        struct neigh_sysctl_table *t = p->sysctl_table;
        p->sysctl_table = NULL;
        unregister_sysctl_table(t->sysctl_header);
        kfree(t->neigh_dev[0].procname);
        kfree(t);
    }
}

#endif /* CONFIG_SYSCTL */
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_add);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_delete);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
EXPORT_SYMBOL(neightbl_dump_info);
EXPORT_SYMBOL(neightbl_set);

#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif