/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif
#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
#ifdef CONFIG_ARPD
static void neigh_app_notify(struct neighbour *n);
#endif
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static struct file_operations neigh_stat_seq_fops;
#endif
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     to protocol backends, no attempts to send something to the network.
     Doing so will deadlock if the backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity
   state.  However, the same lock is also used to protect other entry
   fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process
   context.
 */
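/*
 * A minimal sketch (illustrative only, not part of the original file)
 * of the pattern the rules above imply: take a reference under
 * tbl->lock, drop the lock, and only then do anything non-trivial with
 * the entry.  example_process() is a hypothetical callback.
 */
#if 0
static void example_walk_bucket(struct neigh_table *tbl, unsigned int i,
				void (*example_process)(struct neighbour *))
{
	struct neighbour *n;

	read_lock_bh(&tbl->lock);
	n = tbl->hash_buckets[i];
	if (n)
		neigh_hold(n);		/* the refcount pins the entry */
	read_unlock_bh(&tbl->lock);	/* no callbacks under tbl->lock */

	if (n) {
		example_process(n);	/* may sleep, send packets, etc. */
		neigh_release(n);
	}
}
#endif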
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

/*
 * Returns a value uniformly distributed over the interval
 * (1/2)*base...(3/2)*base.  This corresponds to the default IPv6
 * settings and is deliberately not overridable, because it is a
 * reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (net_random() % base) + (base >> 1) : 0;
}
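/*
 * Worked example (illustrative arithmetic, not from the original file):
 * with base = 30 * HZ, net_random() % base lies in [0, 30*HZ) and
 * adding base >> 1 = 15*HZ yields a value in [15*HZ, 45*HZ), i.e.
 * exactly the (1/2)*base ... (3/2)*base interval described above.
 */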
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_release(n);
		}
	}
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (!n)
		goto out_entries;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data	  = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(new_entries & (new_entries - 1));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
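/*
 * A minimal usage sketch (illustrative only, not part of the original
 * file): a successful lookup returns the entry with its reference count
 * held, so the caller must drop it with neigh_release().  arp_tbl and
 * the address arguments stand in for a real caller's context.
 */
#if 0
static int example_lookup(struct net_device *dev, __be32 *ip)
{
	struct neighbour *n = neigh_lookup(&arp_tbl, ip, dev);

	if (!n)
		return -ENOENT;
	/* ... inspect n->ha, n->nud_state under read_lock(&n->lock) ... */
	neigh_release(n);	/* drop the reference neigh_lookup() took */
	return 0;
}
#endif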
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	/* Fold the last four key bytes down to a 4-bit bucket index. */
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
/*
 *	The neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	if (neigh->parms->neigh_destructor)
		(neigh->parms->neigh_destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/* Neighbour state is suspicious;
   disable the fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable the fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}

static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a delicate spot: error_report is a complicated
		   routine.  In particular, it can hit this same neighbour
		   entry!

		   So we try to be careful and avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now = jiffies;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_hold(neigh);
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_lock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_unlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				to be a router.

   Caller MUST hold a reference count on the entry.
 */
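/*
 * A minimal sketch (illustrative only, not part of the original file)
 * of a typical administrative call: force a new link-layer address and
 * make the entry permanent.  "n" and "new_mac" are assumed to come from
 * the caller, with the reference count already held as required above.
 */
#if 0
static int example_force_lladdr(struct neighbour *n, const u8 *new_mac)
{
	return neigh_update(n, new_mac, NUD_PERMANENT,
			    NEIGH_UPDATE_F_OVERRIDE |
			    NEIGH_UPDATE_F_ADMIN);
}
#endif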
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache	*hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		rwlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh)	{
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
/* This function can be used in contexts where only the old
   dev_queue_xmit path worked, e.g. if you want to override the normal
   output path (eql, shaper) but resolution has not been made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}
/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
			       neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		memcpy(p, &tbl->parms, sizeof(*p));
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
		if (dev) {
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     tbl->entry_size,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);

	if (!tbl->kmem_cachep)
		panic("cannot create neighbour cache");

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data	  = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}
void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	return 0;
}
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
			goto out_dev_put;
		}

		if (!dev)
			goto out;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			err = neigh_update(n, NULL, NUD_FAILED,
					   NEIGH_UPDATE_F_OVERRIDE|
					   NEIGH_UPDATE_F_ADMIN);
			neigh_release(n);
		}
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		int override = 1;
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = -ENOBUFS;
			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
				err = 0;
			goto out_dev_put;
		}

		err = -EINVAL;
		if (!dev)
			goto out;
		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
			goto out_dev_put;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(n);
				goto out_dev_put;
			}

			override = nlh->nlmsg_flags & NLM_F_REPLACE;
		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out_dev_put;
		} else {
			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
			if (IS_ERR(n)) {
				err = PTR_ERR(n);
				goto out_dev_put;
			}
		}

		err = neigh_update(n,
				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
				   ndm->ndm_state,
				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
				   NEIGH_UPDATE_F_ADMIN);

		neigh_release(n);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct rtattr *nest = NULL;

	nest = RTA_NEST(skb, NDTA_PARMS);

	if (parms->dev)
		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return RTA_NEST_END(skb, nest);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, nest);
}
static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
static int neightbl_fill_param_info(struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;
	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);

	if (neightbl_fill_parms(skb, parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next)
		if ((p->dev && p->dev->ifindex == ifindex) ||
		    (!p->dev && !ifindex))
			return p;

	return NULL;
}
int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
	struct rtattr **tb = arg;
	int err = -EINVAL;

	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
		return -EINVAL;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_THRESH1 - 1])
		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);

	if (tb[NDTA_THRESH2 - 1])
		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);

	if (tb[NDTA_THRESH3 - 1])
		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);

	if (tb[NDTA_GC_INTERVAL - 1])
		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);

	if (tb[NDTA_PARMS - 1]) {
		struct rtattr *tbp[NDTPA_MAX];
		struct neigh_parms *p;
		u32 ifindex = 0;

		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
			goto rtattr_failure;

		if (tbp[NDTPA_IFINDEX - 1])
			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto rtattr_failure;
		}

		if (tbp[NDTPA_QUEUE_LEN - 1])
			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);

		if (tbp[NDTPA_PROXY_QLEN - 1])
			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);

		if (tbp[NDTPA_APP_PROBES - 1])
			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);

		if (tbp[NDTPA_UCAST_PROBES - 1])
			p->ucast_probes =
				RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);

		if (tbp[NDTPA_MCAST_PROBES - 1])
			p->mcast_probes =
				RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);

		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
			p->base_reachable_time =
				RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);

		if (tbp[NDTPA_GC_STALETIME - 1])
			p->gc_staletime =
				RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);

		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
			p->delay_probe_time =
				RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);

		if (tbp[NDTPA_RETRANS_TIME - 1])
			p->retrans_time =
				RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);

		if (tbp[NDTPA_ANYCAST_DELAY - 1])
			p->anycast_delay =
				RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);

		if (tbp[NDTPA_PROXY_DELAY - 1])
			p->proxy_delay =
				RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);

		if (tbp[NDTPA_LOCKTIME - 1])
			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
	}

	err = 0;

rtattr_failure:
	write_unlock_bh(&tbl->lock);
errout:
	read_unlock(&neigh_tbl_lock);
	return err;
}
int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx, family;
	int s_idx = cb->args[0];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
		struct neigh_parms *p;

		if (idx < s_idx || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(tbl, skb, cb) <= 0)
			break;

		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
			if (idx < s_idx)
				continue;

			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
				goto out;
		}

	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = idx;

	return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
			   u32 pid, u32 seq, int event, unsigned int flags)
{
	unsigned long now = jiffies;
	unsigned char *b = skb->tail;
	struct nda_cacheinfo ci;
	int locked = 0;
	u32 probes;
	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
					 sizeof(struct ndmsg), flags);
	struct ndmsg *ndm = NLMSG_DATA(nlh);

	ndm->ndm_family	 = n->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = n->flags;
	ndm->ndm_type	 = n->type;
	ndm->ndm_ifindex = n->dev->ifindex;
	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
	read_lock_bh(&n->lock);
	locked		 = 1;
	ndm->ndm_state	 = n->nud_state;
	if (n->nud_state & NUD_VALID)
		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
	ci.ndm_used	 = now - n->used;
	ci.ndm_confirmed = now - n->confirmed;
	ci.ndm_updated	 = now - n->updated;
	ci.ndm_refcnt	 = atomic_read(&n->refcnt) - 1;
	probes = atomic_read(&n->probes);
	read_unlock_bh(&n->lock);
	locked		 = 0;
	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
	nlh->nlmsg_len	 = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	if (locked)
		read_unlock_bh(&n->lock);
	skb_trim(skb, b - skb->data);
	return -1;
}
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		read_lock_bh(&tbl->lock);
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
		read_unlock_bh(&tbl->lock);
	}
	rc = skb->len;
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;

	read_lock_bh(&tbl->lock);
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n;

		for (n = tbl->hash_buckets[chain]; n; n = n->next)
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);
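/*
 * A minimal usage sketch (illustrative only, not part of the original
 * file): counting entries with neigh_for_each().  The callback runs
 * under tbl->lock, so it must stay trivial, per the locking rules at
 * the top of this file.
 */
#if 0
static void example_count_cb(struct neighbour *n, void *cookie)
{
	(*(int *)cookie)++;	/* no sleeping, no table callbacks here */
}

static int example_count(struct neigh_table *tbl)
{
	int count = 0;

	neigh_for_each(tbl, example_count_cb, &count);
	return count;
}
#endif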
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	pn = pn->next;
	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;

	rc = neigh_get_idx(seq, pos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, pos);

	return rc;
}

void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
{
	struct neigh_seq_state *state = seq->private;
	loff_t pos_minus_one;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	pos_minus_one = *pos - 1;
	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
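/*
 * A minimal wiring sketch (illustrative only, not part of the original
 * file): how a protocol's /proc seq_file hooks into neigh_seq_start/
 * next/stop.  example_tbl and example_seq_show are hypothetical; ARP's
 * arp_seq_ops in net/ipv4/arp.c is the real in-tree user of this API.
 */
#if 0
static int example_seq_show(struct seq_file *seq, void *v);

static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
	return neigh_seq_start(seq, pos, &example_tbl, 0);
}

static struct seq_operations example_seq_ops = {
	.start	= example_seq_start,
	.next	= neigh_seq_next,
	.stop	= neigh_seq_stop,
	.show	= example_seq_show,
};
#endif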
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),
		   st->allocs,
		   st->destroys,
		   st->hash_grows,
		   st->lookups,
		   st->hits,
		   st->res_failed,
		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,
		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );

	return 0;
}
static struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode);
	}
	return ret;
}

static struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			  = (struct nlmsghdr *)skb->data;
	nlh->nlmsg_flags	  = NLM_F_REQUEST;
	NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}

static void neigh_app_notify(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			  = (struct nlmsghdr *)skb->data;
	NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}

#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table		neigh_vars[__NET_NEIGH_MAX];
	ctl_table		neigh_dev[2];
	ctl_table		neigh_neigh_dir[2];
	ctl_table		neigh_proto_dir[2];
	ctl_table		neigh_root_dir[2];
} neigh_sysctl_template = {
	.neigh_vars = {
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME,
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_DELAY,
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_LOCKTIME,
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
	},
	.neigh_dev = {
		{
			.ctl_name	= NET_PROTO_CONF_DEFAULT,
			.procname	= "default",
			.mode		= 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname	= "neigh",
			.mode		= 0555,
		},
	},
	.neigh_proto_dir = {
		{
			.mode		= 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name	= CTL_NET,
			.procname	= "net",
			.mode		= 0555,
		},
	},
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
	const char *dev_name_source = NULL;
	char *dev_name = NULL;
	int err = 0;

	if (!t)
		return -ENOBUFS;
	memcpy(t, &neigh_sysctl_template, sizeof(*t));
	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;

	if (dev) {
		dev_name_source = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		/* Terminate the table early: no per-device gc_* entries */
		t->neigh_vars[12].procname = NULL;
		t->neigh_vars[13].procname = NULL;
		t->neigh_vars[14].procname = NULL;
		t->neigh_vars[15].procname = NULL;
	} else {
		dev_name_source = t->neigh_dev[0].procname;
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
		t->neigh_vars[15].data = (int *)(p + 1) + 3;
	}

	t->neigh_vars[16].data = &p->retrans_time;
	t->neigh_vars[17].data = &p->base_reachable_time;

	if (handler || strategy) {
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[16].proc_handler = handler;
		t->neigh_vars[16].strategy = strategy;
		t->neigh_vars[16].extra1 = dev;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[17].proc_handler = handler;
		t->neigh_vars[17].strategy = strategy;
		t->neigh_vars[17].extra1 = dev;
	}

	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!dev_name) {
		err = -ENOBUFS;
		goto free;
	}

	t->neigh_dev[0].procname = dev_name;

	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	t->neigh_dev[0].child	    = t->neigh_vars;
	t->neigh_neigh_dir[0].child = t->neigh_dev;
	t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child  = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
	if (!t->sysctl_header) {
		err = -ENOBUFS;
		goto free_procname;
	}
	p->sysctl_table = t;
	return 0;

free_procname:
	kfree(dev_name);
free:
	kfree(t);

	return err;
}
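/*
 * A minimal usage sketch (illustrative only, not part of the original
 * file): a protocol registering its default parms under its own sysctl
 * ids.  NET_EXAMPLE and NET_EXAMPLE_NEIGH are hypothetical ctl ids;
 * compare how ARP registers arp_tbl.parms under NET_IPV4.
 */
#if 0
static void example_sysctl_init(struct neigh_table *tbl)
{
	/* NULL dev registers the "default" directory; NULL handler and
	 * strategy keep the template's jiffies-based handlers. */
	neigh_sysctl_register(NULL, &tbl->parms, NET_EXAMPLE,
			      NET_EXAMPLE_NEIGH, "example", NULL, NULL);
}
#endif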
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t->neigh_dev[0].procname);
		kfree(t);
	}
}

#endif	/* CONFIG_SYSCTL */
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_add);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_delete);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_table_init_no_netlink);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
EXPORT_SYMBOL(neightbl_dump_info);
EXPORT_SYMBOL(neightbl_set);
#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif