/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif
#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     to protocol backends, no attempts to send anything to the network.
     That would deadlock as soon as the backend/driver wants to use the
     neighbour cache.
   - If an entry requires non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is used to protect other entry fields as well:
   - the timer
   - the resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow there is dev->hard_header.
   dev->hard_header is assumed to be simplistic and to make no
   callbacks into the neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock protecting
   the list of neighbour tables. The list is used only in process context,
   so plain read_lock()/write_lock() without BH disabling suffices.
 */
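/*
 * A minimal illustration of the rule above (a hedged sketch; this helper
 * is hypothetical and not part of the original file). neigh_lookup()
 * takes tbl->lock internally and returns a *referenced* entry, so any
 * non-trivial work happens only after the table lock has been dropped.
 */
static inline void neigh_example_refresh_used(struct neigh_table *tbl,
					      const void *pkey,
					      struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);	/* takes a ref */

	if (n) {
		write_lock_bh(&n->lock);	/* per-entry lock, not tbl->lock */
		n->used = jiffies;		/* only trivial work held here */
		write_unlock_bh(&n->lock);
		neigh_release(n);		/* balance the lookup reference */
	}
}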
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}
/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}
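/*
 * Worked example (plain arithmetic, not from the original source): with
 * base = 30 * HZ, net_random() % base is uniform over [0, 30*HZ) and
 * base >> 1 contributes 15*HZ, so the result is uniform over
 * [15*HZ, 45*HZ) -- i.e. from (1/2)*base up to just under (3/2)*base.
 */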
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data	  = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
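/*
 * Worked example of the masking trick above (not from the original
 * source): because the table size is a power of two, hash_val &
 * new_hash_mask equals hash_val % new_entries. With new_entries = 32
 * the mask is 0x1f, so hash_val 0x2a3 lands in bucket 0x2a3 & 0x1f = 3.
 */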
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
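/*
 * Usage sketch (hypothetical helper, not part of the original file):
 * neigh_lookup() returns a referenced entry or NULL, so every successful
 * lookup must be balanced with neigh_release(). The nud_state read here
 * is an unlocked snapshot, which is fine for an illustrative check.
 */
static inline int neigh_example_is_valid(struct neigh_table *tbl,
					 const void *pkey,
					 struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
	int valid = 0;

	if (n) {
		valid = !!(n->nud_state & NUD_VALID);	/* racy snapshot */
		neigh_release(n);	/* drop the reference taken above */
	}
	return valid;
}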
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
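/*
 * Usage sketch (hypothetical caller, not in the original file): with
 * creat == 0, pneigh_lookup() is a pure lookup; with creat == 1 a
 * missing proxy entry is allocated and inserted, which is how the
 * RTM_NEWNEIGH handler below services NTF_PROXY requests:
 *
 *	struct pneigh_entry *pn = pneigh_lookup(tbl, dst, dev, 1);
 *	if (pn)
 *		pn->flags = ndm_flags;
 *
 * where "dst", "dev" and "ndm_flags" stand in for the caller's key,
 * device and netlink-supplied flags.
 */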
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */
	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	if (expire > HZ)
		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
	else
		mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
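/*
 * Worked example for the expiry computation above (plain arithmetic,
 * not from the original source): with base_reachable_time = 30 * HZ
 * and 128 buckets, expire = (30*HZ / 2) / 128, i.e. roughly HZ/8 --
 * one bucket is scanned about every eighth of a second, so the whole
 * table is covered in the intended 15 seconds.
 */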
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}

static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a delicate spot: error_report() is a complicated
		   routine and, in particular, it can hit this same neighbour
		   entry again!

		   So we try to be careful and avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_hold(neigh);
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it differs. It also allows retaining
				the current state if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				to be a router.

   The caller MUST hold a reference count on the entry.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED)))
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */
		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
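/*
 * Usage sketch (hypothetical helper, not part of the original file):
 * administratively force an entry into NUD_FAILED regardless of its
 * current lladdr -- the same flag combination the RTM_DELNEIGH handler
 * below uses. The caller must hold a reference on "n".
 */
static inline void neigh_example_invalidate(struct neighbour *n)
{
	neigh_update(n, NULL, NUD_FAILED,
		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
}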
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  __be16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		seqlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
/* This function can be used in contexts where only the old dev_queue_xmit
   interface worked, e.g. if you want to override the normal output path
   (eql, shaper), but resolution has not been made yet.
 */
int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb_network_offset(skb));

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}
/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb_network_offset(skb));

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb_network_offset(skb));

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
			       neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);

	if (p) {
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
		if (dev) {
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
static struct lock_class_key neigh_table_proxy_queue_class;

void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep =
			kmem_cache_create(tbl->id, tbl->entry_size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					  NULL, NULL);
	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data	  = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init_class(&tbl->proxy_queue,
				  &neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}
void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *neigh;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, nla_data(dst_attr), dev);
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
		if (neigh == NULL) {
			err = -ENOENT;
			goto out_dev_put;
		}

		err = neigh_update(neigh, NULL, NUD_FAILED,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out_dev_put;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
		struct neighbour *neigh;
		void *dst, *lladdr;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(tb[NDA_DST]) < tbl->key_len)
			goto out_dev_put;
		dst = nla_data(tb[NDA_DST]);
		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

		if (ndm->ndm_flags & NTF_PROXY) {
			struct pneigh_entry *pn;

			err = -ENOBUFS;
			pn = pneigh_lookup(tbl, dst, dev, 1);
			if (pn) {
				pn->flags = ndm->ndm_flags;
				err = 0;
			}
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, dst, dev);
		if (neigh == NULL) {
			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
				err = -ENOENT;
				goto out_dev_put;
			}

			neigh = __neigh_lookup_errno(tbl, dst, dev);
			if (IS_ERR(neigh)) {
				err = PTR_ERR(neigh);
				goto out_dev_put;
			}
		} else {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(neigh);
				goto out_dev_put;
			}

			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
		}

		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
		neigh_release(neigh);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if (parms->dev)
		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return nla_nest_end(skb, nest);

nla_put_failure:
	return nla_nest_cancel(skb, nest);
}
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next)
		if ((p->dev && p->dev->ifindex == ifindex) ||
		    (!p->dev && !ifindex))
			return p;

	return NULL;
}
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
			  nl_neightbl_policy);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);
	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout_locked;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
				       nl_ntbl_parm_policy);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				p->queue_len = nla_get_u32(tbp[i]);
				break;
			case NDTPA_PROXY_QLEN:
				p->proxy_qlen = nla_get_u32(tbp[i]);
				break;
			case NDTPA_APP_PROBES:
				p->app_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_UCAST_PROBES:
				p->ucast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_MCAST_PROBES:
				p->mcast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				p->base_reachable_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_GC_STALETIME:
				p->gc_staletime = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_DELAY_PROBE_TIME:
				p->delay_probe_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_RETRANS_TIME:
				p->retrans_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_ANYCAST_DELAY:
				p->anycast_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_PROXY_DELAY:
				p->proxy_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_LOCKTIME:
				p->locktime = nla_get_msecs(tbp[i]);
				break;
			}
		}
	}

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout_locked:
	read_unlock(&neigh_tbl_lock);
errout:
	return err;
}
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
		struct neigh_parms *p;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) <= 0)
			break;

		for (nidx = 0, p = tbl->parms.next; p; p = p->next, nidx++) {
			if (nidx < neigh_skip)
				continue;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).pid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) <= 0)
				goto out;
		}

		neigh_skip = 0;
	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh->flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);

	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if ((neigh->nud_state & NUD_VALID) &&
	    nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
		read_unlock_bh(&neigh->lock);
		goto nla_put_failure;
	}

	ci.ndm_used	 = now - neigh->used;
	ci.ndm_confirmed = now - neigh->confirmed;
	ci.ndm_updated	 = now - neigh->updated;
	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void neigh_update_notify(struct neighbour *neigh)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
}
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	read_lock_bh(&tbl->lock);
	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
	}
	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
void neigh_for_each(struct neigh_table *tbl,
		    void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;

	read_lock_bh(&tbl->lock);
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n;

		for (n = tbl->hash_buckets[chain]; n; n = n->next)
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);
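/*
 * Usage sketch (hypothetical callback, not part of the original file):
 * count the entries in a table. The callback runs under tbl->lock, so
 * it must stay trivial -- no sleeping, no re-entry into the table.
 */
static void neigh_example_counter(struct neighbour *n, void *cookie)
{
	(*(int *)cookie)++;		/* cookie points at the running count */
}

static inline int neigh_example_count(struct neigh_table *tbl)
{
	int count = 0;

	neigh_for_each(tbl, neigh_example_counter, &count);
	return count;
}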
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}
		if (n)
			break;
		if (++state->bucket > tbl->hash_mask)
			break;
		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	pn = pn->next;
	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;

	rc = neigh_get_idx(seq, pos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, pos);

	return rc;
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos,
		      struct neigh_table *tbl, unsigned int neigh_seq_flags)
{
	struct neigh_seq_state *state = seq->private;
	loff_t pos_minus_one;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	pos_minus_one = *pos - 1;
	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
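/*
 * Usage sketch (hedged; "example_tbl" and the seq_operations wiring are
 * hypothetical, not from this file): a protocol's /proc iterator simply
 * delegates to the three exported helpers above.
 *
 *	static void *example_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &example_tbl, 0);
 *	}
 *
 * with neigh_seq_next and neigh_seq_stop plugged directly into the
 * .next and .stop members of its struct seq_operations.
 */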
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),
		   st->allocs,
		   st->destroys,
		   st->hash_grows,
		   st->lookups,
		   st->hits,
		   st->res_failed,
		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,
		   st->periodic_gc_runs,
		   st->forced_gc_runs);

	return 0;
}

static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode);
	}
	return ret;
}

static const struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */
static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4); /* NDA_PROBES */
}
static void __neigh_notify(struct neighbour *n, int type, int flags)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, 0, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
errout:
	if (err < 0)
		rtnl_set_sk_err(RTNLGRP_NEIGH, err);
}

#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table		neigh_vars[__NET_NEIGH_MAX];
	ctl_table		neigh_dev[2];
	ctl_table		neigh_neigh_dir[2];
	ctl_table		neigh_proto_dir[2];
	ctl_table		neigh_root_dir[2];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME,
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_DELAY,
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_LOCKTIME,
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
	},
	.neigh_dev = {
		{
			.ctl_name	= NET_PROTO_CONF_DEFAULT,
			.procname	= "default",
			.mode		= 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname	= "neigh",
			.mode		= 0555,
		},
	},
	.neigh_proto_dir = {
		{
			.mode		= 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name	= CTL_NET,
			.procname	= "net",
			.mode		= 0555,
		},
	},
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t = kmemdup(&neigh_sysctl_template,
					       sizeof(*t), GFP_KERNEL);
	const char *dev_name_source = NULL;
	char *dev_name = NULL;
	int err = 0;

	if (!t)
		return -ENOBUFS;
	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;

	if (dev) {
		dev_name_source = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		/* Terminate the table early */
		t->neigh_vars[12].procname = NULL;
		t->neigh_vars[13].procname = NULL;
		t->neigh_vars[14].procname = NULL;
		t->neigh_vars[15].procname = NULL;
	} else {
		dev_name_source = t->neigh_dev[0].procname;
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
		t->neigh_vars[15].data = (int *)(p + 1) + 3;
	}

	t->neigh_vars[16].data = &p->retrans_time;
	t->neigh_vars[17].data = &p->base_reachable_time;

	if (handler || strategy) {
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[16].proc_handler = handler;
		t->neigh_vars[16].strategy = strategy;
		t->neigh_vars[16].extra1 = dev;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[17].proc_handler = handler;
		t->neigh_vars[17].strategy = strategy;
		t->neigh_vars[17].extra1 = dev;
	}

	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!dev_name) {
		err = -ENOBUFS;
		goto free;
	}

	t->neigh_dev[0].procname = dev_name;

	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	t->neigh_dev[0].child	    = t->neigh_vars;
	t->neigh_neigh_dir[0].child = t->neigh_dev;
	t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child  = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir);
	if (!t->sysctl_header) {
		err = -ENOBUFS;
		goto free_procname;
	}
	p->sysctl_table = t;
	return 0;

free_procname:
	kfree(dev_name);
free:
	kfree(t);
	return err;
}

void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t->neigh_dev[0].procname);
		kfree(t);
	}
}
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);

	return 0;
}

subsys_initcall(neigh_init);
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_table_init_no_netlink);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);

#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif