/* linux-2.6: net/core/neighbour.c */
1 /*
2  *      Generic address resolution entity
3  *
4  *      Authors:
5  *      Pedro Roque             <roque@di.fc.ul.pt>
6  *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
7  *
8  *      This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *      Fixes:
14  *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
15  *      Harald Welte            Add neighbour cache statistics like rtstat
16  */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/dst.h>
31 #include <net/sock.h>
32 #include <net/netevent.h>
33 #include <net/netlink.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/random.h>
36 #include <linux/string.h>
37 #include <linux/log2.h>
38
39 #define NEIGH_DEBUG 1
40
41 #define NEIGH_PRINTK(x...) printk(x)
42 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
43 #define NEIGH_PRINTK0 NEIGH_PRINTK
44 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
45 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
46
47 #if NEIGH_DEBUG >= 1
48 #undef NEIGH_PRINTK1
49 #define NEIGH_PRINTK1 NEIGH_PRINTK
50 #endif
51 #if NEIGH_DEBUG >= 2
52 #undef NEIGH_PRINTK2
53 #define NEIGH_PRINTK2 NEIGH_PRINTK
54 #endif
55
56 #define PNEIGH_HASHMASK         0xF
57
58 static void neigh_timer_handler(unsigned long arg);
59 static void __neigh_notify(struct neighbour *n, int type, int flags);
60 static void neigh_update_notify(struct neighbour *neigh);
61 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
63
64 static struct neigh_table *neigh_tables;
65 #ifdef CONFIG_PROC_FS
66 static const struct file_operations neigh_stat_seq_fops;
67 #endif
68
69 /*
70    Neighbour hash table buckets are protected with rwlock tbl->lock.
71
72    - All the scans/updates to hash buckets MUST be made under this lock.
73    - NOTHING clever should be made under this lock: no callbacks
74      to protocol backends, no attempts to send something to network.
75      It will result in deadlocks, if backend/driver wants to use neighbour
76      cache.
77    - If the entry requires some non-trivial actions, increase
78      its reference count and release table lock.
79
80    Neighbour entries are protected:
81    - with reference count.
82    - with rwlock neigh->lock
83
84    Reference count prevents destruction.
85
86    neigh->lock mainly serializes ll address data and its validity state.
87    However, the same lock is used to protect another entry fields:
88     - timer
89     - resolution queue
90
91    Again, nothing clever shall be made under neigh->lock,
92    the most complicated procedure, which we allow is dev->hard_header.
93    It is supposed, that dev->hard_header is simplistic and does
94    not make callbacks to neighbour tables.
95
   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */
99
100 static DEFINE_RWLOCK(neigh_tbl_lock);
101
102 static int neigh_blackhole(struct sk_buff *skb)
103 {
104         kfree_skb(skb);
105         return -ENETDOWN;
106 }
107
/* Final teardown for an entry already unlinked from its table: run the
 * per-parms cleanup hook (if any), emit an RTM_DELNEIGH netlink
 * notification, then drop the table's reference.  The release may free
 * the neighbour outright once the refcount reaches zero, so the
 * notification must go out first. */
static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}
116
117 /*
118  * It is random distribution in the interval (1/2)*base...(3/2)*base.
119  * It corresponds to default IPv6 settings and is not overridable,
120  * because it is really reasonable choice.
121  */
122
/* Draw a random reachable time uniformly from (base/2, 3*base/2).
 * A zero base yields zero (feature disabled). */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	if (!base)
		return 0;
	return (base >> 1) + (net_random() % base);
}
127
128
/* Synchronous table-wide collection, triggered from neigh_alloc() when
 * the entry count is above the gc thresholds.  Walks every hash chain
 * and evicts entries that only the table references (refcnt == 1) and
 * that are not NUD_PERMANENT.  Returns 1 if anything was freed, 0
 * otherwise.  Takes tbl->lock for writing; per-entry locks are nested
 * inside it. */
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np     = n->next;	/* unlink from chain */
				n->dead = 1;
				shrunk  = 1;
				write_unlock(&n->lock);
				/* drops the table's reference */
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	/* Remember the flush time so neigh_alloc() can rate-limit forced
	 * collections at the gc_thresh2 level (5 second back-off). */
	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
167
168 static int neigh_del_timer(struct neighbour *n)
169 {
170         if ((n->nud_state & NUD_IN_TIMER) &&
171             del_timer(&n->timer)) {
172                 neigh_release(n);
173                 return 1;
174         }
175         return 0;
176 }
177
178 static void pneigh_queue_purge(struct sk_buff_head *list)
179 {
180         struct sk_buff *skb;
181
182         while ((skb = skb_dequeue(list)) != NULL) {
183                 dev_put(skb->dev);
184                 kfree_skb(skb);
185         }
186 }
187
/* Remove from the table every entry bound to @dev (all entries when
 * @dev is NULL).  Caller must hold tbl->lock for writing.  Entries that
 * are still referenced elsewhere cannot be freed here: they are marked
 * dead, their timer is stopped, their queue purged and their output
 * redirected to neigh_blackhole so late users merely drop packets. */
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;	/* unlink before taking n->lock */
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				/* keep NOARP-style validity if the entry
				 * had a valid link-layer address */
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
228
/* Flush every cached entry for @dev (e.g. after its hardware address
 * changed).  Unlike neigh_ifdown(), the proxy timer and proxy queue
 * are left untouched. */
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
235
/* Device is going down: flush its neighbour and proxy entries, stop
 * the proxy timer and free any queued proxy packets.  Always returns
 * 0. */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	/* The timer must be fully dead (del_timer_sync) before the queue
	 * is purged, so it cannot re-queue packets behind our back. */
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
247
/* Allocate and minimally initialise a neighbour entry for @tbl.  May
 * run a synchronous forced GC when the table is over gc_thresh2/3.
 * Returns NULL on allocation failure or when the table refuses to
 * grow.  The entry comes back with refcnt 1 and dead = 1; it only
 * becomes live once neigh_create() links it into the hash. */
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	/* Account for ourselves up front; backed out via out_entries. */
	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		/* Above thresh3 we must reclaim something or fail; above
		 * thresh2 we only GC if the last flush is >5s old. */
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated        = n->used = now;
	n->nud_state      = NUD_NONE;
	n->output         = neigh_blackhole;	/* nothing resolved yet: drop */
	n->parms          = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data     = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl            = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead           = 1;	/* not hashed into the table yet */
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
288
289 static struct neighbour **neigh_hash_alloc(unsigned int entries)
290 {
291         unsigned long size = entries * sizeof(struct neighbour *);
292         struct neighbour **ret;
293
294         if (size <= PAGE_SIZE) {
295                 ret = kzalloc(size, GFP_ATOMIC);
296         } else {
297                 ret = (struct neighbour **)
298                       __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
299         }
300         return ret;
301 }
302
303 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
304 {
305         unsigned long size = entries * sizeof(struct neighbour *);
306
307         if (size <= PAGE_SIZE)
308                 kfree(hash);
309         else
310                 free_pages((unsigned long)hash, get_order(size));
311 }
312
/* Rehash the table into @new_entries buckets (must be a power of two).
 * Caller holds tbl->lock for writing.  A fresh hash_rnd is drawn so
 * the new bucket distribution is unpredictable.  On allocation failure
 * the old table is simply kept. */
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		/* Re-bucket each entry under the new mask; entries are
		 * pushed at the head, so per-chain order may reverse. */
		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
348
349 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
350                                struct net_device *dev)
351 {
352         struct neighbour *n;
353         int key_len = tbl->key_len;
354         u32 hash_val = tbl->hash(pkey, dev);
355
356         NEIGH_CACHE_STAT_INC(tbl, lookups);
357
358         read_lock_bh(&tbl->lock);
359         for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
360                 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
361                         neigh_hold(n);
362                         NEIGH_CACHE_STAT_INC(tbl, hits);
363                         break;
364                 }
365         }
366         read_unlock_bh(&tbl->lock);
367         return n;
368 }
369
370 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
371 {
372         struct neighbour *n;
373         int key_len = tbl->key_len;
374         u32 hash_val = tbl->hash(pkey, NULL);
375
376         NEIGH_CACHE_STAT_INC(tbl, lookups);
377
378         read_lock_bh(&tbl->lock);
379         for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
380                 if (!memcmp(n->primary_key, pkey, key_len)) {
381                         neigh_hold(n);
382                         NEIGH_CACHE_STAT_INC(tbl, hits);
383                         break;
384                 }
385         }
386         read_unlock_bh(&tbl->lock);
387         return n;
388 }
389
/* Create the entry for (@pkey, @dev) in @tbl, or return the existing
 * one if another CPU raced us.  Protocol constructor and the device's
 * neigh_setup hook run before tbl->lock is taken.  Returns the entry
 * with an extra reference for the caller, or an ERR_PTR()
 * (-ENOBUFS, -EINVAL, or a constructor/setup error). */
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Backdate confirmation so the new entry does not immediately
	 * count as reachable. */
	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	/* parms may have been marked dead while we ran unlocked */
	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	/* Someone may have inserted the same key while we were setting
	 * up: prefer the existing entry and drop ours. */
	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;	/* now visible in the table */
	neigh_hold(n);	/* the reference handed back to the caller */
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
457
/* Look up a proxy-neighbour entry for @pkey.  An entry with a NULL dev
 * proxies for all devices.  When @creat is non-zero and nothing
 * matches, a new entry is allocated with GFP_KERNEL (hence the
 * ASSERT_RTNL) and linked in.  Returns the entry or NULL.
 * NOTE(review): the hash reads the last 4 key bytes, so it assumes
 * key_len >= 4 -- confirm for any new table type. */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	/* fold 32 bits down to a PNEIGH_HASHMASK-sized bucket index */
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	/* Miss: fall through to creation (sleeps, so outside the lock). */
	n = NULL;
	if (!creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		/* constructor refused the entry: undo and report miss */
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
510
511
/* Remove and free the proxy entry exactly matching (@pkey, @dev).
 * Uses the same last-4-bytes hash fold as pneigh_lookup().  Returns 0
 * on success, -ENOENT when no such entry exists. */
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;	/* unlink, then destroy unlocked */
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
541
/* Drop every proxy entry bound to @dev (all entries when @dev is
 * NULL).  The caller holds tbl->lock for writing (see neigh_ifdown()),
 * so no locking is done here and pdestructor runs under that lock.
 * Always returns -ENOENT. */
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
564
565
566 /*
567  *      neighbour must already be out of the table;
568  *
569  */
/* Final destructor, reached via neigh_release() once the last
 * reference is gone.  The entry must already be unlinked and marked
 * dead; a still-live entry is reported and deliberately leaked rather
 * than corrupted.  Cached hardware headers are detached and
 * neutralised, queued packets purged, and the object returned to the
 * slab cache. */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	/* Unhook each cached header; late users of the hh now fall
	 * through to neigh_blackhole and just drop. */
	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
607
608 /* Neighbour state is suspicious;
609    disable fast path.
610
611    Called with write_locked neigh.
612  */
613 static void neigh_suspect(struct neighbour *neigh)
614 {
615         struct hh_cache *hh;
616
617         NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
618
619         neigh->output = neigh->ops->output;
620
621         for (hh = neigh->hh; hh; hh = hh->hh_next)
622                 hh->hh_output = neigh->ops->output;
623 }
624
625 /* Neighbour state is OK;
626    enable fast path.
627
628    Called with write_locked neigh.
629  */
630 static void neigh_connect(struct neighbour *neigh)
631 {
632         struct hh_cache *hh;
633
634         NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
635
636         neigh->output = neigh->ops->connected_output;
637
638         for (hh = neigh->hh; hh; hh = hh->hh_next)
639                 hh->hh_output = neigh->ops->hh_output;
640 }
641
/* Table GC timer: processes one hash chain per invocation, evicting
 * entries that are unreferenced and either failed or idle longer than
 * gc_staletime.  Also refreshes each parms' reachable_time from the
 * random distribution every 300 seconds, then re-arms itself so the
 * whole table is covered every base_reachable_time/2. */
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	/* One chain per run; hash_chain_gc records where to resume. */
	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		/* permanent entries and those with a live state timer
		 * are never collected here */
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;	/* unlink and destroy */
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	if (expire>HZ)
		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
	else
		mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
712
713 static __inline__ int neigh_max_probes(struct neighbour *n)
714 {
715         struct neigh_parms *p = n->parms;
716         return (n->nud_state & NUD_PROBE ?
717                 p->ucast_probes :
718                 p->ucast_probes + p->app_probes + p->mcast_probes);
719 }
720
721 static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
722 {
723         if (unlikely(mod_timer(&n->timer, when))) {
724                 printk("NEIGH: BUG, double timer add, state is %x\n",
725                        n->nud_state);
726                 dump_stack();
727         }
728 }
729
730 /* Called when a timer expires for a neighbour entry. */
731
/* Per-entry timer callback driving the NUD state machine
 * (REACHABLE -> DELAY -> PROBE -> FAILED, plus INCOMPLETE probing).
 * Runs in timer (softirq) context holding a reference taken when the
 * timer was armed; that reference is dropped at the bottom, after any
 * re-arm has taken its own. */
static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;	/* default re-arm: one second out */

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		/* On SMP a state change can race timer expiry, so the
		 * warning is only meaningful on UP. */
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			/* recently confirmed: stay REACHABLE */
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			/* used but unconfirmed: delay before probing */
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			/* idle and unconfirmed: demote to STALE */
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			/* confirmation arrived during the delay window */
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			/* delay expired: start active probing */
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		/* probe budget exhausted: resolution has failed */
		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* It is very thin place. report_unreachable is very complicated
		   routine. Particularly, it can hit the same neighbour entry!

		   So that, we try to be accurate and avoid dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		/* never re-arm closer than HZ/2 to avoid tight loops */
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);	/* reference for the new timer */
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);	/* drop the expired timer's reference */
}
842
/* Slow path taken when an entry is not in a directly usable state.
 * May start resolution (-> NUD_INCOMPLETE with timer armed), schedule
 * re-verification of a stale entry (-> NUD_DELAY), queue @skb on the
 * bounded arp_queue, or fail outright when no probing is possible.
 * Returns 0 when the caller may transmit immediately, 1 when the
 * packet was queued or discarded. */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	/* already usable (or under verification): caller can send */
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			/* begin resolution; neigh_hold pairs with the
			 * reference the armed timer will release */
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_hold(neigh);
			neigh_add_timer(neigh, now + 1);
		} else {
			/* no probing mechanism configured: fail now */
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		/* stale entry touched: re-verify after delay_probe_time */
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			/* arp_queue is bounded by parms->queue_len:
			 * drop the oldest packet on overflow */
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
898
899 static void neigh_update_hhs(struct neighbour *neigh)
900 {
901         struct hh_cache *hh;
902         void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
903                 = neigh->dev->header_ops->cache_update;
904
905         if (update) {
906                 for (hh = neigh->hh; hh; hh = hh->hh_next) {
907                         write_seqlock_bh(&hh->hh_lock);
908                         update(hh, neigh->dev, neigh->ha);
909                         write_sequnlock_bh(&hh->hh_lock);
910                 }
911         }
912 }
913
914
915
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows the current state to be
				retained if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				to be a router.

   Caller MUST hold a reference count on the entry.
 */
936
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;		/* set when the change should be reported */
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	/* Static (NOARP/PERMANENT) entries may only be changed by admin. */
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	/* Transition to an invalid state: stop any pending timer, mark a
	 * previously connected entry suspect, and notify if it was valid.
	 */
	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				/* Keep the cached address but demote the
				 * entry to STALE so it is re-verified.
				 */
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			/* Timer states hold an extra reference; the timer
			 * handler drops it.
			 */
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		/* Install the new address and refresh the cached hardware
		 * headers derived from it.
		 */
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		/* Flush packets queued while the entry was unresolved.
		 * The lock is dropped around each transmit, so re-check
		 * nud_state every iteration.
		 */
		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
1072
1073 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1074                                  u8 *lladdr, void *saddr,
1075                                  struct net_device *dev)
1076 {
1077         struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1078                                                  lladdr || !dev->addr_len);
1079         if (neigh)
1080                 neigh_update(neigh, lladdr, NUD_STALE,
1081                              NEIGH_UPDATE_F_OVERRIDE);
1082         return neigh;
1083 }
1084
/* Attach a cached hardware header for @protocol to @dst, creating and
 * linking a new hh_cache entry on the neighbour if none exists yet.
 * NOTE(review): assumes the neighbour lock is held by the caller (see
 * neigh_resolve_output) — confirm before reuse elsewhere.
 */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  __be16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	/* Reuse an existing entry for this protocol if present. */
	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		seqlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;

		/* Let the device fill in the cached header bytes. */
		if (dev->header_ops->cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			/* First reference: the neighbour's hh list. */
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh       = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		/* Second reference: the dst now caching this header. */
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
1119
1120 /* This function can be used in contexts, where only old dev_queue_xmit
1121    worked, f.e. if you want to override normal output path (eql, shaper),
1122    but resolution is not made yet.
1123  */
1124
1125 int neigh_compat_output(struct sk_buff *skb)
1126 {
1127         struct net_device *dev = skb->dev;
1128
1129         __skb_pull(skb, skb_network_offset(skb));
1130
1131         if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1132                             skb->len) < 0 &&
1133             dev->header_ops->rebuild(skb))
1134                 return 0;
1135
1136         return dev_queue_xmit(skb);
1137 }
1138
1139 /* Slow and careful. */
1140
int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	/* Strip back to the network header before prepending L2. */
	__skb_pull(skb, skb_network_offset(skb));

	/* neigh_event_send() returns 0 when the neighbour is usable;
	 * otherwise it has queued or dropped the skb itself.
	 */
	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->header_ops->cache && !dst->hh) {
			/* Build the hh cache under the write lock;
			 * re-check dst->hh in case another CPU raced us.
			 */
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			/* Read lock suffices to sample a stable neigh->ha. */
			read_lock_bh(&neigh->lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
1183
1184 /* As fast as possible without hh cache */
1185
1186 int neigh_connected_output(struct sk_buff *skb)
1187 {
1188         int err;
1189         struct dst_entry *dst = skb->dst;
1190         struct neighbour *neigh = dst->neighbour;
1191         struct net_device *dev = neigh->dev;
1192
1193         __skb_pull(skb, skb_network_offset(skb));
1194
1195         read_lock_bh(&neigh->lock);
1196         err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1197                               neigh->ha, NULL, skb->len);
1198         read_unlock_bh(&neigh->lock);
1199         if (err >= 0)
1200                 err = neigh->ops->queue_xmit(skb);
1201         else {
1202                 err = -EINVAL;
1203                 kfree_skb(skb);
1204         }
1205         return err;
1206 }
1207
/* Proxy-queue timer handler: hand every skb whose scheduled time has
 * arrived to tbl->proxy_redo (or drop it if the device went down), and
 * re-arm the timer for the earliest remaining entry.
 */
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;	/* ticks until next pending skb; 0 = none */
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	/* Manual walk; the queue head itself marks the end of the list. */
	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		/* Advance before possibly unlinking 'back'. */
		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			/* Drop the device ref taken in pneigh_enqueue(). */
			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1241
/* Queue @skb for delayed proxy processing, scheduling it a random
 * amount (up to p->proxy_delay) into the future.  Drops the skb when
 * the proxy queue already exceeds p->proxy_qlen.
 */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	/* NOTE(review): assumes p->proxy_delay != 0 (modulo) — confirm
	 * callers guarantee this.
	 */
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	/* Keep an earlier pending expiry if the timer was already armed. */
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	/* The queued skb holds a device reference but no dst. */
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1268
1269
1270 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1271                                       struct neigh_table *tbl)
1272 {
1273         struct neigh_parms *p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1274
1275         if (p) {
1276                 p->tbl            = tbl;
1277                 atomic_set(&p->refcnt, 1);
1278                 INIT_RCU_HEAD(&p->rcu_head);
1279                 p->reachable_time =
1280                                 neigh_rand_reach_time(p->base_reachable_time);
1281                 if (dev) {
1282                         if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1283                                 kfree(p);
1284                                 return NULL;
1285                         }
1286
1287                         dev_hold(dev);
1288                         p->dev = dev;
1289                 }
1290                 p->sysctl_table = NULL;
1291                 write_lock_bh(&tbl->lock);
1292                 p->next         = tbl->parms.next;
1293                 tbl->parms.next = p;
1294                 write_unlock_bh(&tbl->lock);
1295         }
1296         return p;
1297 }
1298
1299 static void neigh_rcu_free_parms(struct rcu_head *head)
1300 {
1301         struct neigh_parms *parms =
1302                 container_of(head, struct neigh_parms, rcu_head);
1303
1304         neigh_parms_put(parms);
1305 }
1306
/* Unlink @parms from @tbl's list, mark it dead, drop its device
 * reference, and schedule the final put via RCU so concurrent readers
 * finish first.
 */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	/* The table's built-in default parms are never released. */
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
1328
/* Final free of a neigh_parms once its refcount reaches zero (called
 * from neigh_parms_put after the RCU grace period).
 */
void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
1333
1334 static struct lock_class_key neigh_table_proxy_queue_class;
1335
/* One-time initialisation of a neighbour table: default parms, entry
 * slab cache, per-cpu statistics, proc entry, hash tables and the
 * gc/proxy timers.  Panics on allocation failure, so it must only run
 * from boot/module-init paths.
 */
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep =
			kmem_cache_create(tbl->id, tbl->entry_size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					  NULL);
	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	/* Start with a two-bucket hash; it grows on demand. */
	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	/* Randomise the hash to make chain-collision attacks harder. */
	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data     = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand  = now + tbl->parms.reachable_time * 20;
}
1390
1391 void neigh_table_init(struct neigh_table *tbl)
1392 {
1393         struct neigh_table *tmp;
1394
1395         neigh_table_init_no_netlink(tbl);
1396         write_lock(&neigh_tbl_lock);
1397         for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1398                 if (tmp->family == tbl->family)
1399                         break;
1400         }
1401         tbl->next       = neigh_tables;
1402         neigh_tables    = tbl;
1403         write_unlock(&neigh_tbl_lock);
1404
1405         if (unlikely(tmp)) {
1406                 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1407                        "family %d\n", tbl->family);
1408                 dump_stack();
1409         }
1410 }
1411
/* Tear down a neighbour table at module unload: stop its timers, flush
 * queues and entries, unlink it from the global list, then free the
 * hashes, stats and slab cache.  Always returns 0.
 */
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	/* Any surviving entries indicate a refcount leak somewhere. */
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	kmem_cache_destroy(tbl->kmem_cachep);
	tbl->kmem_cachep = NULL;

	return 0;
}
1446
/* RTM_DELNEIGH handler: delete the (proxy) neighbour entry described by
 * the netlink message.  Regular entries are not freed directly; they
 * are forced to NUD_FAILED via neigh_update().
 */
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *neigh;

		if (tbl->family != ndm->ndm_family)
			continue;
		/* Found the family's table; the lock can be dropped since
		 * tables are never removed at runtime.
		 */
		read_unlock(&neigh_tbl_lock);

		if (nla_len(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, nla_data(dst_attr), dev);
			goto out_dev_put;
		}

		/* Non-proxy deletes require a device. */
		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
		if (neigh == NULL) {
			err = -ENOENT;
			goto out_dev_put;
		}

		err = neigh_update(neigh, NULL, NUD_FAILED,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1512
/* RTM_NEWNEIGH handler: create or update a (proxy) neighbour entry
 * from a netlink message.  NLM_F_CREATE / NLM_F_EXCL / NLM_F_REPLACE
 * carry their usual netlink semantics.
 */
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		/* A supplied lladdr must be long enough for this device. */
		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out_dev_put;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
		struct neighbour *neigh;
		void *dst, *lladdr;

		if (tbl->family != ndm->ndm_family)
			continue;
		/* Found the family's table; the lock can be dropped since
		 * tables are never removed at runtime.
		 */
		read_unlock(&neigh_tbl_lock);

		if (nla_len(tb[NDA_DST]) < tbl->key_len)
			goto out_dev_put;
		dst = nla_data(tb[NDA_DST]);
		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

		if (ndm->ndm_flags & NTF_PROXY) {
			/* Proxy entries live in the pneigh hash, not the
			 * regular neighbour cache.
			 */
			struct pneigh_entry *pn;

			err = -ENOBUFS;
			pn = pneigh_lookup(tbl, dst, dev, 1);
			if (pn) {
				pn->flags = ndm->ndm_flags;
				err = 0;
			}
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, dst, dev);
		if (neigh == NULL) {
			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
				err = -ENOENT;
				goto out_dev_put;
			}

			neigh = __neigh_lookup_errno(tbl, dst, dev);
			if (IS_ERR(neigh)) {
				err = PTR_ERR(neigh);
				goto out_dev_put;
			}
		} else {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(neigh);
				goto out_dev_put;
			}

			/* Without NLM_F_REPLACE, do not force-override an
			 * existing lladdr.
			 */
			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
		}

		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
		neigh_release(neigh);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1609
/* Dump one neigh_parms set as a nested NDTA_PARMS attribute.  The
 * NLA_PUT_* macros jump to nla_put_failure on lack of skb space, in
 * which case the partial nest is cancelled.
 */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	/* NDTPA_IFINDEX only appears for per-device parms. */
	if (parms->dev)
		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return nla_nest_end(skb, nest);

nla_put_failure:
	return nla_nest_cancel(skb, nest);
}
1642
/* Fill one netlink message describing @tbl: name, gc thresholds, a
 * config snapshot, summed per-cpu statistics and the table's default
 * parms.  Returns nlmsg_end() on success or -EMSGSIZE, cancelling the
 * partial message.
 */
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	/* Hold the table lock so the dumped values are self-consistent. */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		/* Sum the per-cpu counters into one report. */
		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	/* Default parms must never be bound to a device. */
	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
1723
/* Dump one neigh_parms set of a neighbour table as an RTM_NEWNEIGHTBL
 * message: table name (NDTA_NAME) plus the nested NDTA_PARMS attribute.
 * Returns the resulting message length, or -EMSGSIZE if @skb ran out of
 * room (the partially built message is cancelled).
 */
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	/* Hold tbl->lock so the parms values are read consistently. */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
1755
1756 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1757                                                       int ifindex)
1758 {
1759         struct neigh_parms *p;
1760
1761         for (p = &tbl->parms; p; p = p->next)
1762                 if ((p->dev && p->dev->ifindex == ifindex) ||
1763                     (!p->dev && !ifindex))
1764                         return p;
1765
1766         return NULL;
1767 }
1768
/* Attribute validation policy for RTM_SETNEIGHTBL table-level
 * attributes.  NDTA_GC_INTERVAL is a u64 carrying milliseconds (read
 * via nla_get_msecs() in neightbl_set()).
 */
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};
1777
/* Attribute validation policy for the nested NDTA_PARMS attributes.
 * All u64 values are times in milliseconds (read via nla_get_msecs()
 * in neightbl_set()).
 */
static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
1793
/* RTM_SETNEIGHTBL handler: update tunables of a neighbour table and/or
 * one of its per-device parameter sets.  The table is selected by the
 * mandatory NDTA_NAME attribute (optionally narrowed by ndtm_family);
 * the parms set is selected by NDTPA_IFINDEX inside NDTA_PARMS.
 * Returns 0 on success or a negative errno.
 */
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
			  nl_neightbl_policy);
	if (err < 0)
		goto errout;

	/* The table name is the lookup key and therefore mandatory. */
	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);
	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout_locked;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
				       nl_ntbl_parm_policy);
		if (err < 0)
			goto errout_tbl_lock;

		/* ifindex 0 (attribute absent) selects the default parms. */
		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		/* Apply every attribute that is present; time values
		 * arrive in milliseconds and are converted to jiffies.
		 */
		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				p->queue_len = nla_get_u32(tbp[i]);
				break;
			case NDTPA_PROXY_QLEN:
				p->proxy_qlen = nla_get_u32(tbp[i]);
				break;
			case NDTPA_APP_PROBES:
				p->app_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_UCAST_PROBES:
				p->ucast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_MCAST_PROBES:
				p->mcast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				p->base_reachable_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_GC_STALETIME:
				p->gc_staletime = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_DELAY_PROBE_TIME:
				p->delay_probe_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_RETRANS_TIME:
				p->retrans_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_ANYCAST_DELAY:
				p->anycast_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_PROXY_DELAY:
				p->proxy_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_LOCKTIME:
				p->locktime = nla_get_msecs(tbp[i]);
				break;
			}
		}
	}

	/* Table-wide GC thresholds and interval. */
	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout_locked:
	read_unlock(&neigh_tbl_lock);
errout:
	return err;
}
1917
/* RTM_GETNEIGHTBL dump callback: emit one message per neighbour table
 * plus one per attached per-device parms set.  cb->args[0]/[1] carry
 * the table/parms resume position so a partially filled skb can be
 * continued on the next invocation.
 */
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
		struct neigh_parms *p;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) <= 0)
			break;

		/* Device-specific parms start at tbl->parms.next; the
		 * default set was already dumped inside fill_info above.
		 */
		for (nidx = 0, p = tbl->parms.next; p; p = p->next, nidx++) {
			if (nidx < neigh_skip)
				continue;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).pid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) <= 0)
				goto out;
		}

		/* Only skip parms entries within the first resumed table. */
		neigh_skip = 0;
	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
1960
/* Build one RTM neighbour message (ndmsg + NDA_* attributes) for
 * @neigh into @skb.  Returns the message length, or -EMSGSIZE when the
 * skb has no room.  Note: the NLA_PUT* macros jump to nla_put_failure
 * on failure.
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = neigh->flags;
	ndm->ndm_type    = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);

	/* State, link-layer address and timestamps must be read under
	 * neigh->lock so they form a consistent snapshot; the lock is
	 * dropped manually on the NDA_LLADDR failure path.
	 */
	read_lock_bh(&neigh->lock);
	ndm->ndm_state   = neigh->nud_state;
	if ((neigh->nud_state & NUD_VALID) &&
	    nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
		read_unlock_bh(&neigh->lock);
		goto nla_put_failure;
	}

	ci.ndm_used      = now - neigh->used;
	ci.ndm_confirmed = now - neigh->confirmed;
	ci.ndm_updated   = now - neigh->updated;
	ci.ndm_refcnt    = atomic_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	/* probes is atomic, so no lock is needed here. */
	NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2006
/* Broadcast a neighbour change: first to in-kernel netevent listeners,
 * then to userspace via an RTM_NEWNEIGH rtnetlink message.
 */
static void neigh_update_notify(struct neighbour *neigh)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
}
2012
/* Dump all neighbours of one table into @skb for an RTM_GETNEIGH dump.
 * cb->args[1]/[2] hold the hash-bucket/entry resume position.  Returns
 * the skb length on completion or -1 when the skb filled up mid-dump.
 */
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	read_lock_bh(&tbl->lock);
	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		/* Entry skipping only applies within the resumed bucket. */
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
	}
	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	/* Record where to resume on the next callback invocation. */
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
2046
/* RTM_GETNEIGH dump callback: iterate over all neighbour tables
 * (optionally filtered by family) and dump each via neigh_dump_table().
 * cb->args[0] holds the table resume index.
 */
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		/* Moving past the resumed table: clear the per-table
		 * resume state (args[1..]) kept by neigh_dump_table().
		 */
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
2070
/* Invoke @cb(@neighbour, @cookie) for every entry in @tbl's hash table.
 * The table read lock is held (BH disabled) for the whole walk, so @cb
 * must not sleep or take the table lock itself.
 */
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;

	read_lock_bh(&tbl->lock);
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n;

		for (n = tbl->hash_buckets[chain]; n; n = n->next)
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);
2085
/* The tbl->lock must be held as a writer and BH disabled.
 *
 * Walk every hash chain and ask @cb whether each neighbour should be
 * released; entries for which @cb returns non-zero are unlinked, marked
 * dead, and handed to neigh_cleanup_and_release().
 */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			/* Per-entry lock serialises against readers of
			 * this neighbour's state.
			 */
			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			/* Release only after dropping n->lock. */
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
2113
2114 #ifdef CONFIG_PROC_FS
2115
/* seq_file helper: find the first neighbour entry, scanning the hash
 * buckets from 0 and honouring the NEIGH_SEQ_SKIP_NOARP filter and the
 * optional per-protocol sub-iterator.  Updates state->bucket.  Caller
 * holds tbl->lock (taken in neigh_seq_start()).
 */
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;	/* initial value unused; reset below */

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			/* Skip entries that are purely NUD_NOARP. */
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
2151
/* seq_file helper: advance from neighbour @n to the next acceptable
 * entry, crossing hash buckets as needed and applying the same filters
 * as neigh_get_first().  When @pos is non-NULL, *pos is decremented for
 * each entry consumed (used by neigh_get_idx() to seek).
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	/* Let the protocol's sub-iterator exhaust @n first. */
	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			/* Skip entries that are purely NUD_NOARP. */
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}
2196
2197 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2198 {
2199         struct neighbour *n = neigh_get_first(seq);
2200
2201         if (n) {
2202                 while (*pos) {
2203                         n = neigh_get_next(seq, n, pos);
2204                         if (!n)
2205                                 break;
2206                 }
2207         }
2208         return *pos ? NULL : n;
2209 }
2210
2211 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2212 {
2213         struct neigh_seq_state *state = seq->private;
2214         struct neigh_table *tbl = state->tbl;
2215         struct pneigh_entry *pn = NULL;
2216         int bucket = state->bucket;
2217
2218         state->flags |= NEIGH_SEQ_IS_PNEIGH;
2219         for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2220                 pn = tbl->phash_buckets[bucket];
2221                 if (pn)
2222                         break;
2223         }
2224         state->bucket = bucket;
2225
2226         return pn;
2227 }
2228
2229 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2230                                             struct pneigh_entry *pn,
2231                                             loff_t *pos)
2232 {
2233         struct neigh_seq_state *state = seq->private;
2234         struct neigh_table *tbl = state->tbl;
2235
2236         pn = pn->next;
2237         while (!pn) {
2238                 if (++state->bucket > PNEIGH_HASHMASK)
2239                         break;
2240                 pn = tbl->phash_buckets[state->bucket];
2241                 if (pn)
2242                         break;
2243         }
2244
2245         if (pn && pos)
2246                 --(*pos);
2247
2248         return pn;
2249 }
2250
2251 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2252 {
2253         struct pneigh_entry *pn = pneigh_get_first(seq);
2254
2255         if (pn) {
2256                 while (*pos) {
2257                         pn = pneigh_get_next(seq, pn, pos);
2258                         if (!pn)
2259                                 break;
2260                 }
2261         }
2262         return *pos ? NULL : pn;
2263 }
2264
2265 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2266 {
2267         struct neigh_seq_state *state = seq->private;
2268         void *rc;
2269
2270         rc = neigh_get_idx(seq, pos);
2271         if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2272                 rc = pneigh_get_idx(seq, pos);
2273
2274         return rc;
2275 }
2276
/* Generic ->start for protocol /proc neighbour files (e.g. arp).
 * Initialises the iterator state and takes tbl->lock for reading; the
 * lock is held until neigh_seq_stop().  Position 0 yields
 * SEQ_START_TOKEN so ->show can print a header line.
 */
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
{
	struct neigh_seq_state *state = seq->private;
	loff_t pos_minus_one;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	/* *pos counts the header token too, so seek to *pos - 1. */
	pos_minus_one = *pos - 1;
	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
2292
/* Generic ->next for protocol /proc neighbour files.  After the header
 * token comes the neighbour table; once it is exhausted the walk
 * continues into the proxy entries unless NEIGH_SEQ_NEIGH_ONLY is set.
 */
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		/* Neighbours exhausted: switch to the proxy list. */
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
2319
/* Generic ->stop for protocol /proc neighbour files: drop the table
 * lock taken in neigh_seq_start().
 */
void neigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
2328
2329 /* statistics via seq_file */
2330
/* ->start for the per-table statistics file: position 0 is the header
 * token; positions >= 1 map to possible CPUs (cpu = *pos - 1), and *pos
 * is left pointing one past the CPU returned.
 * NOTE(review): pde->data is assumed to be the neigh_table, presumably
 * set when the proc entry is registered — confirm at the creation site.
 */
static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
2348
/* ->next for the statistics file: advance to the next possible CPU's
 * per-CPU counters; *pos encodes cpu + 1 as in neigh_stat_seq_start().
 */
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
2363
/* ->stop for the statistics file: nothing to release — ->start takes
 * no locks.
 */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
2368
/* ->show for the statistics file: print the column header for the
 * SEQ_START_TOKEN position, otherwise one line with the table's total
 * entry count followed by this CPU's counters.
 */
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
			"%08lx %08lx  %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );

	return 0;
}
2402
/* seq_file iterator operations for /proc/net/stat/<table>. */
static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
2409
2410 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2411 {
2412         int ret = seq_open(file, &neigh_stat_seq_ops);
2413
2414         if (!ret) {
2415                 struct seq_file *sf = file->private_data;
2416                 sf->private = PDE(inode);
2417         }
2418         return ret;
2419 };
2420
/* File operations for /proc/net/stat/<table>; reads go through the
 * seq_file machinery set up by neigh_stat_seq_open().
 */
static const struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
2428
2429 #endif /* CONFIG_PROC_FS */
2430
/* Worst-case payload size of one RTM neighbour message, used to size
 * the notification skb in __neigh_notify().  Addresses are budgeted at
 * MAX_ADDR_LEN since the real key/lladdr lengths vary per table/device.
 */
static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4); /* NDA_PROBES */
}
2439
/* Send an rtnetlink notification of @type for neighbour @n to the
 * RTNLGRP_NEIGH multicast group.  On any failure the error is recorded
 * against the group via rtnl_set_sk_err() so listeners see ENOBUFS.
 * Uses GFP_ATOMIC: may be called from timer/softirq context.
 */
static void __neigh_notify(struct neighbour *n, int type, int flags)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, 0, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
errout:
	if (err < 0)
		rtnl_set_sk_err(RTNLGRP_NEIGH, err);
}
2461
#ifdef CONFIG_ARPD
/* Ask a userspace ARP daemon to resolve @n: emit an RTM_GETNEIGH
 * request-style notification to the RTNLGRP_NEIGH group.
 */
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
#endif /* CONFIG_ARPD */
2468
2469 #ifdef CONFIG_SYSCTL
2470
2471 static struct neigh_sysctl_table {
2472         struct ctl_table_header *sysctl_header;
2473         ctl_table               neigh_vars[__NET_NEIGH_MAX];
2474         ctl_table               neigh_dev[2];
2475         ctl_table               neigh_neigh_dir[2];
2476         ctl_table               neigh_proto_dir[2];
2477         ctl_table               neigh_root_dir[2];
2478 } neigh_sysctl_template __read_mostly = {
2479         .neigh_vars = {
2480                 {
2481                         .ctl_name       = NET_NEIGH_MCAST_SOLICIT,
2482                         .procname       = "mcast_solicit",
2483                         .maxlen         = sizeof(int),
2484                         .mode           = 0644,
2485                         .proc_handler   = &proc_dointvec,
2486                 },
2487                 {
2488                         .ctl_name       = NET_NEIGH_UCAST_SOLICIT,
2489                         .procname       = "ucast_solicit",
2490                         .maxlen         = sizeof(int),
2491                         .mode           = 0644,
2492                         .proc_handler   = &proc_dointvec,
2493                 },
2494                 {
2495                         .ctl_name       = NET_NEIGH_APP_SOLICIT,
2496                         .procname       = "app_solicit",
2497                         .maxlen         = sizeof(int),
2498                         .mode           = 0644,
2499                         .proc_handler   = &proc_dointvec,
2500                 },
2501                 {
2502                         .procname       = "retrans_time",
2503                         .maxlen         = sizeof(int),
2504                         .mode           = 0644,
2505                         .proc_handler   = &proc_dointvec_userhz_jiffies,
2506                 },
2507                 {
2508                         .ctl_name       = NET_NEIGH_REACHABLE_TIME,
2509                         .procname       = "base_reachable_time",
2510                         .maxlen         = sizeof(int),
2511                         .mode           = 0644,
2512                         .proc_handler   = &proc_dointvec_jiffies,
2513                         .strategy       = &sysctl_jiffies,
2514                 },
2515                 {
2516                         .ctl_name       = NET_NEIGH_DELAY_PROBE_TIME,
2517                         .procname       = "delay_first_probe_time",
2518                         .maxlen         = sizeof(int),
2519                         .mode           = 0644,
2520                         .proc_handler   = &proc_dointvec_jiffies,
2521                         .strategy       = &sysctl_jiffies,
2522                 },
2523                 {
2524                         .ctl_name       = NET_NEIGH_GC_STALE_TIME,
2525                         .procname       = "gc_stale_time",
2526                         .maxlen         = sizeof(int),
2527                         .mode           = 0644,
2528                         .proc_handler   = &proc_dointvec_jiffies,
2529                         .strategy       = &sysctl_jiffies,
2530                 },
2531                 {
2532                         .ctl_name       = NET_NEIGH_UNRES_QLEN,
2533                         .procname       = "unres_qlen",
2534                         .maxlen         = sizeof(int),
2535                         .mode           = 0644,
2536                         .proc_handler   = &proc_dointvec,
2537                 },
2538                 {
2539                         .ctl_name       = NET_NEIGH_PROXY_QLEN,
2540                         .procname       = "proxy_qlen",
2541                         .maxlen         = sizeof(int),
2542                         .mode           = 0644,
2543                         .proc_handler   = &proc_dointvec,
2544                 },
2545                 {
2546                         .procname       = "anycast_delay",
2547                         .maxlen         = sizeof(int),
2548                         .mode           = 0644,
2549                         .proc_handler   = &proc_dointvec_userhz_jiffies,
2550                 },
2551                 {
2552                         .procname       = "proxy_delay",
2553                         .maxlen         = sizeof(int),
2554                         .mode           = 0644,
2555                         .proc_handler   = &proc_dointvec_userhz_jiffies,
2556                 },
2557                 {
2558                         .procname       = "locktime",
2559                         .maxlen         = sizeof(int),
2560                         .mode           = 0644,
2561                         .proc_handler   = &proc_dointvec_userhz_jiffies,
2562                 },
2563                 {
2564                         .ctl_name       = NET_NEIGH_RETRANS_TIME_MS,
2565                         .procname       = "retrans_time_ms",
2566                         .maxlen         = sizeof(int),
2567                         .mode           = 0644,
2568                         .proc_handler   = &proc_dointvec_ms_jiffies,
2569                         .strategy       = &sysctl_ms_jiffies,
2570                 },
2571                 {
2572                         .ctl_name       = NET_NEIGH_REACHABLE_TIME_MS,
2573                         .procname       = "base_reachable_time_ms",
2574                         .maxlen         = sizeof(int),
2575                         .mode           = 0644,
2576                         .proc_handler   = &proc_dointvec_ms_jiffies,
2577                         .strategy       = &sysctl_ms_jiffies,
2578                 },
2579                 {
2580                         .ctl_name       = NET_NEIGH_GC_INTERVAL,
2581                         .procname       = "gc_interval",
2582                         .maxlen         = sizeof(int),
2583                         .mode           = 0644,
2584                         .proc_handler   = &proc_dointvec_jiffies,
2585                         .strategy       = &sysctl_jiffies,
2586                 },
2587                 {
2588                         .ctl_name       = NET_NEIGH_GC_THRESH1,
2589                         .procname       = "gc_thresh1",
2590                         .maxlen         = sizeof(int),
2591                         .mode           = 0644,
2592                         .proc_handler   = &proc_dointvec,
2593                 },
2594                 {
2595                         .ctl_name       = NET_NEIGH_GC_THRESH2,
2596                         .procname       = "gc_thresh2",
2597                         .maxlen         = sizeof(int),
2598                         .mode           = 0644,
2599                         .proc_handler   = &proc_dointvec,
2600                 },
2601                 {
2602                         .ctl_name       = NET_NEIGH_GC_THRESH3,
2603                         .procname       = "gc_thresh3",
2604                         .maxlen         = sizeof(int),
2605                         .mode           = 0644,
2606                         .proc_handler   = &proc_dointvec,
2607                 },
2608                 {}
2609         },
2610         .neigh_dev = {
2611                 {
2612                         .ctl_name       = NET_PROTO_CONF_DEFAULT,
2613                         .procname       = "default",
2614                         .mode           = 0555,
2615                 },
2616         },
2617         .neigh_neigh_dir = {
2618                 {
2619                         .procname       = "neigh",
2620                         .mode           = 0555,
2621                 },
2622         },
2623         .neigh_proto_dir = {
2624                 {
2625                         .mode           = 0555,
2626                 },
2627         },
2628         .neigh_root_dir = {
2629                 {
2630                         .ctl_name       = CTL_NET,
2631                         .procname       = "net",
2632                         .mode           = 0555,
2633                 },
2634         },
2635 };
2636
/*
 * neigh_sysctl_register - build and register the sysctl subtree
 *	net/<proto>/neigh/<dev> exposing the tunable fields of @p.
 * @dev:      device the parms belong to, or NULL for the per-protocol
 *            "default" entry
 * @p:        neigh_parms whose fields back the sysctl variables
 * @p_id:     binary-sysctl ctl_name for the protocol directory
 * @pdev_id:  binary-sysctl ctl_name for the protocol's "neigh" directory
 * @p_name:   procname for the protocol directory (e.g. "ipv4")
 * @handler:  optional proc_handler override applied to the retrans/
 *            reachable time entries (may be NULL)
 * @strategy: optional binary-sysctl strategy override for the same
 *            entries (may be NULL)
 *
 * Returns 0 on success, or -ENOBUFS if an allocation or the sysctl
 * registration fails.  Undo with neigh_sysctl_unregister().
 */
2637 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2638                           int p_id, int pdev_id, char *p_name,
2639                           proc_handler *handler, ctl_handler *strategy)
2640 {
	/* Work on a private copy of the template: the template itself is
	 * shared by every caller and must stay pristine. */
2641         struct neigh_sysctl_table *t = kmemdup(&neigh_sysctl_template,
2642                                                sizeof(*t), GFP_KERNEL);
2643         const char *dev_name_source = NULL;
2644         char *dev_name = NULL;
2645         int err = 0;
2646
2647         if (!t)
2648                 return -ENOBUFS;
	/* Point each copied ctl_table entry at the matching field of @p.
	 * These indices must stay in sync with the entry order of
	 * neigh_sysctl_template.neigh_vars. */
2649         t->neigh_vars[0].data  = &p->mcast_probes;
2650         t->neigh_vars[1].data  = &p->ucast_probes;
2651         t->neigh_vars[2].data  = &p->app_probes;
2652         t->neigh_vars[3].data  = &p->retrans_time;
2653         t->neigh_vars[4].data  = &p->base_reachable_time;
2654         t->neigh_vars[5].data  = &p->delay_probe_time;
2655         t->neigh_vars[6].data  = &p->gc_staletime;
2656         t->neigh_vars[7].data  = &p->queue_len;
2657         t->neigh_vars[8].data  = &p->proxy_qlen;
2658         t->neigh_vars[9].data  = &p->anycast_delay;
2659         t->neigh_vars[10].data = &p->proxy_delay;
2660         t->neigh_vars[11].data = &p->locktime;
2661         t->neigh_vars[12].data  = &p->retrans_time;
2662         t->neigh_vars[13].data  = &p->base_reachable_time;
2663
	/* For a real device, name the directory after it and key the binary
	 * sysctl by ifindex; slots 14-17 belong only to the "default"
	 * subtree, so the table is cut off before them in that case. */
2664         if (dev) {
2665                 dev_name_source = dev->name;
2666                 t->neigh_dev[0].ctl_name = dev->ifindex;
2667                 /* Terminate the table early */
2668                 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
2669         } else {
		/* NOTE(review): slots 14-17 address four ints assumed to be
		 * laid out immediately after the neigh_parms allocation by
		 * the owning protocol - confirm against the callers. */
2670                 dev_name_source = t->neigh_dev[0].procname;
2671                 t->neigh_vars[14].data = (int *)(p + 1);
2672                 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2673                 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2674                 t->neigh_vars[17].data = (int *)(p + 1) + 3;
2675         }
2676
2677
	/* Let the caller intercept the four time-valued entries.  When no
	 * binary-sysctl strategy is supplied the entry must not keep a
	 * numeric ctl_name, hence the switch to CTL_UNNUMBERED. */
2678         if (handler || strategy) {
2679                 /* RetransTime */
2680                 t->neigh_vars[3].proc_handler = handler;
2681                 t->neigh_vars[3].strategy = strategy;
2682                 t->neigh_vars[3].extra1 = dev;
2683                 if (!strategy)
2684                         t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
2685                 /* ReachableTime */
2686                 t->neigh_vars[4].proc_handler = handler;
2687                 t->neigh_vars[4].strategy = strategy;
2688                 t->neigh_vars[4].extra1 = dev;
2689                 if (!strategy)
2690                         t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
2691                 /* RetransTime (in milliseconds)*/
2692                 t->neigh_vars[12].proc_handler = handler;
2693                 t->neigh_vars[12].strategy = strategy;
2694                 t->neigh_vars[12].extra1 = dev;
2695                 if (!strategy)
2696                         t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
2697                 /* ReachableTime (in milliseconds) */
2698                 t->neigh_vars[13].proc_handler = handler;
2699                 t->neigh_vars[13].strategy = strategy;
2700                 t->neigh_vars[13].extra1 = dev;
2701                 if (!strategy)
2702                         t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
2703         }
2704
	/* The directory name needs its own heap copy: the table copy (and
	 * this string) are freed independently of the name's source. */
2705         dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2706         if (!dev_name) {
2707                 err = -ENOBUFS;
2708                 goto free;
2709         }
2710
2711         t->neigh_dev[0].procname = dev_name;
2712
2713         t->neigh_neigh_dir[0].ctl_name = pdev_id;
2714
2715         t->neigh_proto_dir[0].procname = p_name;
2716         t->neigh_proto_dir[0].ctl_name = p_id;
2717
	/* Chain the four levels: net -> <proto> -> neigh -> <dev>. */
2718         t->neigh_dev[0].child          = t->neigh_vars;
2719         t->neigh_neigh_dir[0].child    = t->neigh_dev;
2720         t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
2721         t->neigh_root_dir[0].child     = t->neigh_proto_dir;
2722
2723         t->sysctl_header = register_sysctl_table(t->neigh_root_dir);
2724         if (!t->sysctl_header) {
2725                 err = -ENOBUFS;
2726                 goto free_procname;
2727         }
2728         p->sysctl_table = t;
2729         return 0;
2730
2731         /* error path */
2732  free_procname:
2733         kfree(dev_name);
2734  free:
2735         kfree(t);
2736
2737         return err;
2738 }
2739
2740 void neigh_sysctl_unregister(struct neigh_parms *p)
2741 {
2742         if (p->sysctl_table) {
2743                 struct neigh_sysctl_table *t = p->sysctl_table;
2744                 p->sysctl_table = NULL;
2745                 unregister_sysctl_table(t->sysctl_header);
2746                 kfree(t->neigh_dev[0].procname);
2747                 kfree(t);
2748         }
2749 }
2750
2751 #endif  /* CONFIG_SYSCTL */
2752
/*
 * Hook the generic neighbour code into rtnetlink: add/delete/dump of
 * individual entries (RTM_NEWNEIGH, RTM_DELNEIGH, RTM_GETNEIGH) and
 * dump/tune of whole tables (RTM_GETNEIGHTBL, RTM_SETNEIGHTBL).
 * Registered family-independently under PF_UNSPEC.  Always returns 0.
 */
2753 static int __init neigh_init(void)
2754 {
2755         rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2756         rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2757         rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2758
2759         rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2760         rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2761
2762         return 0;
2763 }
2764
/* Install the rtnetlink handlers at subsys_initcall time during boot. */
2765 subsys_initcall(neigh_init);
2766
/* Symbols exported to modules - the public interface of the generic
 * neighbour cache used by protocol front ends. */
2767 EXPORT_SYMBOL(__neigh_event_send);
2768 EXPORT_SYMBOL(neigh_changeaddr);
2769 EXPORT_SYMBOL(neigh_compat_output);
2770 EXPORT_SYMBOL(neigh_connected_output);
2771 EXPORT_SYMBOL(neigh_create);
2772 EXPORT_SYMBOL(neigh_destroy);
2773 EXPORT_SYMBOL(neigh_event_ns);
2774 EXPORT_SYMBOL(neigh_ifdown);
2775 EXPORT_SYMBOL(neigh_lookup);
2776 EXPORT_SYMBOL(neigh_lookup_nodev);
2777 EXPORT_SYMBOL(neigh_parms_alloc);
2778 EXPORT_SYMBOL(neigh_parms_release);
2779 EXPORT_SYMBOL(neigh_rand_reach_time);
2780 EXPORT_SYMBOL(neigh_resolve_output);
2781 EXPORT_SYMBOL(neigh_table_clear);
2782 EXPORT_SYMBOL(neigh_table_init);
2783 EXPORT_SYMBOL(neigh_table_init_no_netlink);
2784 EXPORT_SYMBOL(neigh_update);
2785 EXPORT_SYMBOL(pneigh_enqueue);
2786 EXPORT_SYMBOL(pneigh_lookup);
2787
/* Conditionally exported: only built under the matching config options. */
2788 #ifdef CONFIG_ARPD
2789 EXPORT_SYMBOL(neigh_app_ns);
2790 #endif
2791 #ifdef CONFIG_SYSCTL
2792 EXPORT_SYMBOL(neigh_sysctl_register);
2793 EXPORT_SYMBOL(neigh_sysctl_unregister);
2794 #endif