/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/neighbour.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif
#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
#ifdef CONFIG_ARPD
static void neigh_app_notify(struct neighbour *n);
#endif
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static struct file_operations neigh_stat_seq_fops;
#endif
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     That would deadlock if the backend/driver wants to use the
     neighbour cache.
   - If an entry requires some non-trivial action, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the link-layer address data and its
   validity state.  However, the same lock is also used to protect other
   entry fields:
   - timer
   - resolution queue

   Again, nothing clever shall be done under neigh->lock; the most
   complicated procedure we allow there is dev->hard_header.  It is
   assumed that dev->hard_header is simple and does not make callbacks
   into the neighbour tables.

   The last lock is neigh_tbl_lock.  It is a pure SMP lock, protecting
   the list of neighbour tables.  This list is used only in process
   context.
 */
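/*
 * Illustrative sketch (an addition, not part of the original code):
 * the pattern the rules above require.  neigh_lookup() takes the
 * entry reference while tbl->lock is held; anything non-trivial
 * happens only after the lock has been dropped, and the reference is
 * released afterwards.  example_resolve_and_send() is a hypothetical
 * caller, not a kernel interface.
 */
static void example_resolve_and_send(struct neigh_table *tbl,
				     const void *pkey,
				     struct net_device *dev,
				     struct sk_buff *skb)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (!n) {
		kfree_skb(skb);
		return;
	}
	/* No table lock held here: safe to call into the driver. */
	n->output(skb);
	neigh_release(n);
}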
static DEFINE_RWLOCK(neigh_tbl_lock);
static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

/*
 * The result is a random value in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}
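/*
 * Worked example (added for clarity): with the common default of
 * base_reachable_time = 30 * HZ, the value returned above is uniformly
 * distributed over [15 * HZ, 45 * HZ), i.e. 15 to 45 seconds.
 */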
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* A neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent.
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}
static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation:
				   we must destroy the neighbour entry,
				   but someone still uses it.

				   The destruction will be delayed until
				   the last user releases the entry, but
				   we must kill its timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_release(n);
		}
	}
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (!n)
		goto out_entries;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data	  = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}
static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(new_entries & (new_entries - 1));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
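/*
 * Note (added for clarity): the BUG_ON() above enforces a power-of-two
 * bucket count, so "hash & new_hash_mask" is equivalent to a modulo
 * reduction, and hash_rnd is re-randomized on every grow so that chain
 * placement cannot be predicted across resizes.
 */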
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
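/*
 * Note (added for clarity): the proxy hash above takes the last four
 * bytes of the key (for IPv4 the whole address), XOR-folds them down
 * (>> 16, >> 8, >> 4) and masks the result to PNEIGH_HASHMASK, giving
 * PNEIGH_HASHMASK + 1 = 16 buckets.
 */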
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
/*
 *	The neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;
		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	if (neigh->parms->neigh_destructor)
		(neigh->parms->neigh_destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/* The neighbour state is suspicious;
   disable the fast path.

   Called with a write-locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}
/* The neighbour state is OK;
   enable the fast path.

   Called with a write-locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */
	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}
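/*
 * Worked example (added for clarity; numbers are the usual IPv4 ARP
 * defaults): with ucast_probes = 3, mcast_probes = 3 and app_probes = 0,
 * an entry in NUD_PROBE gives up after 3 unicast probes, while one
 * still in NUD_INCOMPLETE gives up after 3 + 0 + 3 = 6 probes in total.
 */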
static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a very thin place.  report_unreachable is a very
		   complicated routine; in particular, it can hit this same
		   neighbour entry!  So we try to be careful here and avoid
		   a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_hold(neigh);
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
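/*
 * Note (added for clarity): a zero return from __neigh_event_send()
 * means the entry is usable and the caller may transmit immediately;
 * a non-zero return means the skb was either queued on arp_queue
 * pending resolution or dropped because resolution has failed.
 */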
static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_lock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_unlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
		if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
		lladdr instead of overriding it
		if it is different.
		It also allows retaining the current state
		if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
		NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as
		a router.

   The caller MUST hold a reference count on the entry.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED)))
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid a dead loop if something went wrong */
		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}
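/*
 * Illustrative usage sketch (an addition, not part of the original
 * code): how an administrative caller, such as the rtnetlink
 * neigh_add() handler further down, would pin a new link-layer
 * address on an entry it holds a reference to.
 */
static inline int example_admin_set_lladdr(struct neighbour *n,
					   const u8 *lladdr)
{
	return neigh_update(n, lladdr, NUD_PERMANENT,
			    NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
}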
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache	*hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		rwlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper),
   but resolution is not made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}
/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
/* As fast as possible without the hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
			       neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else
		err = -EINVAL;
	return err;
}
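/*
 * Summary (added for clarity): neigh->output is switched between the
 * paths above as the entry changes state -- neigh_resolve_output()
 * while the address may still need (re)validation,
 * neigh_connected_output() once the entry is NUD_CONNECTED, and the
 * cached hh->hh_output short-circuits even the header rebuild for
 * protocols with a struct hh_cache.
 */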
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
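/*
 * Note (added for clarity): with ARP's default proxy_delay of
 * (8 * HZ) / 10, a proxied request is answered after a random pause of
 * up to 0.8 seconds, so a proxy does not always beat the real owner of
 * the address onto the wire.
 */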
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		memcpy(p, &tbl->parms, sizeof(*p));
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
		if (dev) {
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     tbl->entry_size,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);
	if (!tbl->kmem_cachep)
		panic("cannot create neighbour cache");

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data	  = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}
void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload the IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	return 0;
}
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
			goto out_dev_put;
		}

		if (!dev)
			goto out;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			err = neigh_update(n, NULL, NUD_FAILED,
					   NEIGH_UPDATE_F_OVERRIDE|
					   NEIGH_UPDATE_F_ADMIN);
			neigh_release(n);
		}
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		int override = 1;
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = -ENOBUFS;
			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
				err = 0;
			goto out_dev_put;
		}

		err = -EINVAL;
		if (!dev)
			goto out;
		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
			goto out_dev_put;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(n);
				goto out_dev_put;
			}
			override = nlh->nlmsg_flags & NLM_F_REPLACE;
		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out_dev_put;
		} else {
			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
			if (IS_ERR(n)) {
				err = PTR_ERR(n);
				goto out_dev_put;
			}
		}

		err = neigh_update(n,
				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
				   ndm->ndm_state,
				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(n);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct rtattr *nest = NULL;

	nest = RTA_NEST(skb, NDTA_PARMS);

	if (parms->dev)
		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return RTA_NEST_END(skb, nest);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, nest);
}
static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
static int neightbl_fill_param_info(struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;
	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);

	if (neightbl_fill_parms(skb, parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next)
		if ((p->dev && p->dev->ifindex == ifindex) ||
		    (!p->dev && !ifindex))
			return p;

	return NULL;
}
int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
	struct rtattr **tb = arg;
	int err = -EINVAL;

	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
		return -EINVAL;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_THRESH1 - 1])
		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);
	if (tb[NDTA_THRESH2 - 1])
		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);
	if (tb[NDTA_THRESH3 - 1])
		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);
	if (tb[NDTA_GC_INTERVAL - 1])
		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);

	if (tb[NDTA_PARMS - 1]) {
		struct rtattr *tbp[NDTPA_MAX];
		struct neigh_parms *p;
		u32 ifindex = 0;

		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
			goto rtattr_failure;

		if (tbp[NDTPA_IFINDEX - 1])
			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto rtattr_failure;
		}

		if (tbp[NDTPA_QUEUE_LEN - 1])
			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);
		if (tbp[NDTPA_PROXY_QLEN - 1])
			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);
		if (tbp[NDTPA_APP_PROBES - 1])
			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);
		if (tbp[NDTPA_UCAST_PROBES - 1])
			p->ucast_probes =
			   RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);
		if (tbp[NDTPA_MCAST_PROBES - 1])
			p->mcast_probes =
			   RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);
		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
			p->base_reachable_time =
			   RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);
		if (tbp[NDTPA_GC_STALETIME - 1])
			p->gc_staletime =
			   RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);
		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
			p->delay_probe_time =
			   RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);
		if (tbp[NDTPA_RETRANS_TIME - 1])
			p->retrans_time =
			   RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);
		if (tbp[NDTPA_ANYCAST_DELAY - 1])
			p->anycast_delay =
			   RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);
		if (tbp[NDTPA_PROXY_DELAY - 1])
			p->proxy_delay =
			   RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);
		if (tbp[NDTPA_LOCKTIME - 1])
			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
	}

	err = 0;

rtattr_failure:
	write_unlock_bh(&tbl->lock);
errout:
	read_unlock(&neigh_tbl_lock);
	return err;
}
int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx, family;
	int s_idx = cb->args[0];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
		struct neigh_parms *p;

		if (idx < s_idx || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(tbl, skb, cb) <= 0)
			break;

		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
			if (idx < s_idx)
				continue;

			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
				goto out;
		}
	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = idx;

	return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
			   u32 pid, u32 seq, int event, unsigned int flags)
{
	unsigned long now = jiffies;
	unsigned char *b = skb->tail;
	struct nda_cacheinfo ci;
	int locked = 0;
	u32 probes;
	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
					 sizeof(struct ndmsg), flags);
	struct ndmsg *ndm = NLMSG_DATA(nlh);

	ndm->ndm_family	 = n->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = n->flags;
	ndm->ndm_type	 = n->type;
	ndm->ndm_ifindex = n->dev->ifindex;
	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
	read_lock_bh(&n->lock);
	locked		 = 1;
	ndm->ndm_state	 = n->nud_state;
	if (n->nud_state & NUD_VALID)
		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
	ci.ndm_used	 = now - n->used;
	ci.ndm_confirmed = now - n->confirmed;
	ci.ndm_updated	 = now - n->updated;
	ci.ndm_refcnt	 = atomic_read(&n->refcnt) - 1;
	probes = atomic_read(&n->probes);
	read_unlock_bh(&n->lock);
	locked		 = 0;
	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
	nlh->nlmsg_len	 = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	if (locked)
		read_unlock_bh(&n->lock);
	skb_trim(skb, b - skb->data);
	return -1;
}
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		read_lock_bh(&tbl->lock);
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
		read_unlock_bh(&tbl->lock);
	}
	rc = skb->len;
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;

	read_lock_bh(&tbl->lock);
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n;

		for (n = tbl->hash_buckets[chain]; n; n = n->next)
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	pn = pn->next;
	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;

	rc = neigh_get_idx(seq, pos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, pos);

	return rc;
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
{
	struct neigh_seq_state *state = seq->private;
	loff_t pos_minus_one;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	pos_minus_one = *pos - 1;
	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),
		   st->allocs,
		   st->destroys,
		   st->hash_grows,
		   st->lookups,
		   st->hits,
		   st->res_failed,
		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,
		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );

	return 0;
}
static struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode);
	}
	return ret;
}
static struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			  = (struct nlmsghdr *)skb->data;
	nlh->nlmsg_flags	  = NLM_F_REQUEST;
	NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}
static void neigh_app_notify(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			  = (struct nlmsghdr *)skb->data;
	NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}

#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table		neigh_vars[__NET_NEIGH_MAX];
	ctl_table		neigh_dev[2];
	ctl_table		neigh_neigh_dir[2];
	ctl_table		neigh_proto_dir[2];
	ctl_table		neigh_root_dir[2];
} neigh_sysctl_template = {
	.neigh_vars = {
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME,
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_DELAY,
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_LOCKTIME,
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
	},
	.neigh_dev = {
		{
			.ctl_name	= NET_PROTO_CONF_DEFAULT,
			.procname	= "default",
			.mode		= 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname	= "neigh",
			.mode		= 0555,
		},
	},
	.neigh_proto_dir = {
		{
			.mode		= 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name	= CTL_NET,
			.procname	= "net",
			.mode		= 0555,
		},
	},
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
	const char *dev_name_source = NULL;
	char *dev_name = NULL;
	int err = 0;

	if (!t)
		return -ENOBUFS;
	memcpy(t, &neigh_sysctl_template, sizeof(*t));
	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;

	if (dev) {
		dev_name_source = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		t->neigh_vars[12].procname = NULL;
		t->neigh_vars[13].procname = NULL;
		t->neigh_vars[14].procname = NULL;
		t->neigh_vars[15].procname = NULL;
	} else {
		dev_name_source = t->neigh_dev[0].procname;
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
		t->neigh_vars[15].data = (int *)(p + 1) + 3;
	}

	t->neigh_vars[16].data = &p->retrans_time;
	t->neigh_vars[17].data = &p->base_reachable_time;

	if (handler || strategy) {
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[16].proc_handler = handler;
		t->neigh_vars[16].strategy = strategy;
		t->neigh_vars[16].extra1 = dev;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[17].proc_handler = handler;
		t->neigh_vars[17].strategy = strategy;
		t->neigh_vars[17].extra1 = dev;
	}

	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!dev_name) {
		err = -ENOBUFS;
		goto free;
	}

	t->neigh_dev[0].procname = dev_name;

	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	t->neigh_dev[0].child	    = t->neigh_vars;
	t->neigh_neigh_dir[0].child = t->neigh_dev;
	t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child  = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
	if (!t->sysctl_header) {
		err = -ENOBUFS;
		goto free_procname;
	}
	p->sysctl_table = t;
	return 0;

	/* error path */
free_procname:
	kfree(dev_name);
free:
	kfree(t);

	return err;
}
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t->neigh_dev[0].procname);
		kfree(t);
	}
}

#endif	/* CONFIG_SYSCTL */
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_add);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_delete);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_table_init_no_netlink);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
EXPORT_SYMBOL(neightbl_dump_info);
EXPORT_SYMBOL(neightbl_set);

#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif