2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
7 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
8 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
9 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
11 #include <linux/errno.h>
12 #include <linux/types.h>
13 #include <linux/socket.h>
15 #include <linux/kernel.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/string.h>
19 #include <linux/sockios.h>
20 #include <linux/net.h>
22 #include <linux/inet.h>
23 #include <linux/netdevice.h>
25 #include <linux/if_arp.h>
26 #include <linux/skbuff.h>
28 #include <asm/uaccess.h>
29 #include <asm/system.h>
30 #include <linux/fcntl.h>
31 #include <linux/termios.h> /* For TIOCINQ/OUTQ */
33 #include <linux/interrupt.h>
34 #include <linux/notifier.h>
35 #include <linux/netfilter.h>
36 #include <linux/init.h>
37 #include <linux/spinlock.h>
38 #include <net/netrom.h>
39 #include <linux/seq_file.h>
/* Monotonically increasing id handed out to each new neighbour entry
 * (shown in /proc output); never reused. */
41 static unsigned int nr_neigh_no = 1;
/* Global lists of known NET/ROM destination nodes and L2 neighbours.
 * Each list is guarded by its own spinlock, always taken with BHs
 * disabled (spin_lock_bh) since the lists are touched from softirq
 * receive paths as well as process context. */
43 static HLIST_HEAD(nr_node_list);
44 static DEFINE_SPINLOCK(nr_node_list_lock);
45 static HLIST_HEAD(nr_neigh_list);
46 static DEFINE_SPINLOCK(nr_neigh_list_lock);
/*
 * Look up a node entry by callsign under the node-list lock, taking a
 * reference (nr_node_hold) on the match.  Presumably returns the held
 * node or NULL when not found — NOTE(review): the loop tail and return
 * statement are missing from this excerpt; confirm against the full file.
 */
48 static struct nr_node *nr_node_get(ax25_address *callsign)
50 	struct nr_node *found = NULL;
51 	struct nr_node *nr_node;
52 	struct hlist_node *node;
54 	spin_lock_bh(&nr_node_list_lock);
55 	nr_node_for_each(nr_node, node, &nr_node_list)
56 		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
57 			nr_node_hold(nr_node);
61 	spin_unlock_bh(&nr_node_list_lock);
/*
 * Look up a neighbour entry by (callsign, device) pair under the
 * neighbour-list lock, taking a reference (nr_neigh_hold) on the match.
 * NOTE(review): the assignment to 'found' and the return are missing
 * from this excerpt; the visible code only holds the match.
 */
65 static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
66 	struct net_device *dev)
68 	struct nr_neigh *found = NULL;
69 	struct nr_neigh *nr_neigh;
70 	struct hlist_node *node;
72 	spin_lock_bh(&nr_neigh_list_lock);
73 	nr_neigh_for_each(nr_neigh, node, &nr_neigh_list)
74 		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
75 		    nr_neigh->dev == dev) {
76 			nr_neigh_hold(nr_neigh);
80 	spin_unlock_bh(&nr_neigh_list_lock);
84 static void nr_remove_neigh(struct nr_neigh *);
87  *	Add a new route to a node, and in the process add the node and the
88  *	neighbour if it is new.
/*
 * Each node keeps up to three routes (routes[0..2]) sorted best-first by
 * quality; nr_node->which indexes the route currently in use.  A quality
 * of 0 marks a passively learned (frame-derived) entry.
 *
 * NOTE(review): this excerpt is missing many lines (opening braces,
 * 'return' statements, 'break's, else-arms and closing braces) — the
 * comments below describe only the flow that is visible here.
 */
90 static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
91 	ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
92 	int quality, int obs_count)
94 	struct nr_node *nr_node;
95 	struct nr_neigh *nr_neigh;
96 	struct nr_route nr_route;
98 	struct net_device *odev;
	/* Refuse to add a route to one of our own local interfaces. */
100 	if ((odev=nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */
105 	nr_node = nr_node_get(nr);
107 	nr_neigh = nr_neigh_get_dev(ax25, dev);
110 	 * The L2 link to a neighbour has failed in the past
111 	 * and now a frame comes from this neighbour. We assume
112 	 * it was a temporary trouble with the link and reset the
113 	 * routes now (and not wait for a node broadcast).
115 	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
116 		struct nr_node *nr_nodet;
117 		struct hlist_node *node;
119 		spin_lock_bh(&nr_node_list_lock);
120 		nr_node_for_each(nr_nodet, node, &nr_node_list) {
121 			nr_node_lock(nr_nodet);
122 			for (i = 0; i < nr_nodet->count; i++)
123 				if (nr_nodet->routes[i].neighbour == nr_neigh)
124 					if (i < nr_nodet->which)
126 			nr_node_unlock(nr_nodet);
128 		spin_unlock_bh(&nr_node_list_lock);
	/* The neighbour is talking to us again — clear its failure count. */
131 	if (nr_neigh != NULL)
132 		nr_neigh->failed = 0;
	/* Both entries already known and nothing new to learn (quality 0):
	 * drop the references taken above and bail out. */
134 	if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
135 		nr_neigh_put(nr_neigh);
136 		nr_node_put(nr_node);
	/* First time we hear this neighbour: allocate and initialise a
	 * fresh entry (GFP_ATOMIC — may run in softirq context). */
140 	if (nr_neigh == NULL) {
141 		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
143 				nr_node_put(nr_node);
147 		nr_neigh->callsign = *ax25;
148 		nr_neigh->digipeat = NULL;
149 		nr_neigh->ax25     = NULL;
151 		nr_neigh->quality  = sysctl_netrom_default_path_quality;
152 		nr_neigh->locked   = 0;
154 		nr_neigh->number   = nr_neigh_no++;
155 		nr_neigh->failed   = 0;
156 		atomic_set(&nr_neigh->refcount, 1);
		/* Copy the digipeater path if the frame arrived via digis. */
158 		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
159 			nr_neigh->digipeat = kmemdup(ax25_digi,
162 			if (nr_neigh->digipeat == NULL) {
165 					nr_node_put(nr_node);
170 		spin_lock_bh(&nr_neigh_list_lock);
171 		hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
172 		nr_neigh_hold(nr_neigh);
173 		spin_unlock_bh(&nr_neigh_list_lock);
	/* A broadcast about the neighbour itself updates its quality,
	 * unless an operator has locked it via ioctl. */
176 	if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
177 		nr_neigh->quality = quality;
	/* First time we hear of this destination node: allocate it with the
	 * single route we just learned. */
179 	if (nr_node == NULL) {
180 		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
182 				nr_neigh_put(nr_neigh);
186 		nr_node->callsign = *nr;
187 		strcpy(nr_node->mnemonic, mnemonic);
191 		atomic_set(&nr_node->refcount, 1);
192 		spin_lock_init(&nr_node->node_lock);
194 		nr_node->routes[0].quality   = quality;
195 		nr_node->routes[0].obs_count = obs_count;
196 		nr_node->routes[0].neighbour = nr_neigh;
198 		nr_neigh_hold(nr_neigh);
201 		spin_lock_bh(&nr_node_list_lock);
202 		hlist_add_head(&nr_node->node_node, &nr_node_list);
203 		/* refcount initialized at 1 */
204 		spin_unlock_bh(&nr_node_list_lock);
	/* Existing node: update it under its per-node lock. */
208 	nr_node_lock(nr_node);
211 		strcpy(nr_node->mnemonic, mnemonic);
	/* Is this neighbour already one of the node's routes?  If so just
	 * refresh its quality/obsolescence. */
213 	for (found = 0, i = 0; i < nr_node->count; i++) {
214 		if (nr_node->routes[i].neighbour == nr_neigh) {
215 			nr_node->routes[i].quality   = quality;
216 			nr_node->routes[i].obs_count = obs_count;
223 		/* We have space at the bottom, slot it in */
224 		if (nr_node->count < 3) {
225 			nr_node->routes[2] = nr_node->routes[1];
226 			nr_node->routes[1] = nr_node->routes[0];
228 			nr_node->routes[0].quality   = quality;
229 			nr_node->routes[0].obs_count = obs_count;
230 			nr_node->routes[0].neighbour = nr_neigh;
234 			nr_neigh_hold(nr_neigh);
237 			/* It must be better than the worst */
238 			if (quality > nr_node->routes[2].quality) {
				/* Evict the worst route; drop its neighbour ref and
				 * reap the neighbour if nothing else uses it.
				 * NOTE(review): as shown, routes[2].neighbour is
				 * dereferenced after nr_neigh_put() — lines are
				 * missing here; verify ordering in the full file. */
239 				nr_node->routes[2].neighbour->count--;
240 				nr_neigh_put(nr_node->routes[2].neighbour);
242 				if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
243 					nr_remove_neigh(nr_node->routes[2].neighbour);
245 				nr_node->routes[2].quality   = quality;
246 				nr_node->routes[2].obs_count = obs_count;
247 				nr_node->routes[2].neighbour = nr_neigh;
249 				nr_neigh_hold(nr_neigh);
255 	/* Now re-sort the routes in quality order */
256 	switch (nr_node->count) {
		/* Two routes: one conditional swap, keeping 'which' pointing
		 * at the same logical route after the swap. */
258 		if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
259 			switch (nr_node->which) {
260 			case 0:  nr_node->which = 1;  break;
261 			case 1:  nr_node->which = 0;  break;
264 			nr_route           = nr_node->routes[0];
265 			nr_node->routes[0] = nr_node->routes[1];
266 			nr_node->routes[1] = nr_route;
		/* Three routes: bubble the pair (1,2) then (0,1). */
268 		if (nr_node->routes[2].quality > nr_node->routes[1].quality) {
269 			switch (nr_node->which) {
270 			case 1:  nr_node->which = 2;
273 			case 2:  nr_node->which = 1;
279 			nr_route           = nr_node->routes[1];
280 			nr_node->routes[1] = nr_node->routes[2];
281 			nr_node->routes[2] = nr_route;
284 		if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
285 			switch (nr_node->which) {
286 			case 0:  nr_node->which = 1;
289 			case 1:  nr_node->which = 0;
294 			nr_route           = nr_node->routes[0];
295 			nr_node->routes[0] = nr_node->routes[1];
296 			nr_node->routes[1] = nr_route;
	/* If the (re)learned route now sorts better than the route in use,
	 * switch over to it (visible condition: i < nr_node->which). */
302 	for (i = 0; i < nr_node->count; i++) {
303 		if (nr_node->routes[i].neighbour == nr_neigh) {
304 			if (i < nr_node->which)
	/* Drop the lookup references taken at the top of the function. */
310 	nr_neigh_put(nr_neigh);
311 	nr_node_unlock(nr_node);
312 	nr_node_put(nr_node);
/*
 * Unlink a node from nr_node_list and drop the list's reference.
 * Caller must hold nr_node_list_lock.
 */
316 static inline void __nr_remove_node(struct nr_node *nr_node)
318 	hlist_del_init(&nr_node->node_node);
319 	nr_node_put(nr_node);
/* Alias used by callers that already hold nr_node_list_lock. */
322 #define nr_remove_node_locked(__node) \
323 	__nr_remove_node(__node)
/* Remove a node from the global list, taking the list lock itself. */
325 static void nr_remove_node(struct nr_node *nr_node)
327 	spin_lock_bh(&nr_node_list_lock);
328 	__nr_remove_node(nr_node);
329 	spin_unlock_bh(&nr_node_list_lock);
/*
 * Unlink a neighbour from nr_neigh_list and drop the list's reference.
 * Caller must hold nr_neigh_list_lock.
 */
332 static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
334 	hlist_del_init(&nr_neigh->neigh_node);
335 	nr_neigh_put(nr_neigh);
/* Alias used by callers that already hold nr_neigh_list_lock. */
338 #define nr_remove_neigh_locked(__neigh) \
339 	__nr_remove_neigh(__neigh)
/* Remove a neighbour from the global list, taking the list lock itself. */
341 static void nr_remove_neigh(struct nr_neigh *nr_neigh)
343 	spin_lock_bh(&nr_neigh_list_lock);
344 	__nr_remove_neigh(nr_neigh);
345 	spin_unlock_bh(&nr_neigh_list_lock);
349  *	"Delete" a node. Strictly speaking remove a route to a node. The node
350  *	is only deleted if no routes are left to it.
/*
 * NOTE(review): several lines are missing from this excerpt (early
 * returns for "not found", the route compaction branches, and the final
 * return); comments describe only the visible flow.
 */
352 static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
354 	struct nr_node  *nr_node;
355 	struct nr_neigh *nr_neigh;
	/* Both lookups take a reference that must be dropped on every path. */
358 	nr_node = nr_node_get(callsign);
363 	nr_neigh = nr_neigh_get_dev(neighbour, dev);
365 	if (nr_neigh == NULL) {
366 		nr_node_put(nr_node);
370 	nr_node_lock(nr_node);
371 	for (i = 0; i < nr_node->count; i++) {
372 		if (nr_node->routes[i].neighbour == nr_neigh) {
			/* Route found: release the route's hold on the neighbour
			 * and reap the neighbour if it became unused/unlocked. */
374 			nr_neigh_put(nr_neigh);
376 			if (nr_neigh->count == 0 && !nr_neigh->locked)
377 				nr_remove_neigh(nr_neigh);
378 			nr_neigh_put(nr_neigh);
			/* Last route gone: delete the node itself. */
382 			if (nr_node->count == 0) {
383 				nr_remove_node(nr_node);
				/* Otherwise compact the remaining routes upward. */
387 					nr_node->routes[0] = nr_node->routes[1];
389 					nr_node->routes[1] = nr_node->routes[2];
393 				nr_node_put(nr_node);
395 			nr_node_unlock(nr_node);
	/* Neighbour was not one of this node's routes: drop references. */
400 	nr_neigh_put(nr_neigh);
401 	nr_node_unlock(nr_node);
402 	nr_node_put(nr_node);
408  *	Lock a neighbour with a quality.
/*
 * ioctl helper: pin a neighbour's quality so node broadcasts cannot
 * change it (locked = 1).  Creates the neighbour entry if it does not
 * exist yet.  NOTE(review): return statements and some braces are
 * missing from this excerpt.
 */
410 static int __must_check nr_add_neigh(ax25_address *callsign,
411 	ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
413 	struct nr_neigh *nr_neigh;
	/* Already known on this device: just update and lock it. */
415 	nr_neigh = nr_neigh_get_dev(callsign, dev);
417 		nr_neigh->quality = quality;
418 		nr_neigh->locked  = 1;
419 		nr_neigh_put(nr_neigh);
	/* Otherwise allocate a fresh, locked entry (GFP_ATOMIC context). */
423 	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
426 	nr_neigh->callsign = *callsign;
427 	nr_neigh->digipeat = NULL;
428 	nr_neigh->ax25     = NULL;
430 	nr_neigh->quality  = quality;
431 	nr_neigh->locked   = 1;
433 	nr_neigh->number   = nr_neigh_no++;
434 	nr_neigh->failed   = 0;
435 	atomic_set(&nr_neigh->refcount, 1);
437 	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
438 		nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
440 		if (nr_neigh->digipeat == NULL) {
446 	spin_lock_bh(&nr_neigh_list_lock);
447 	hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
448 	/* refcount is initialized at 1 */
449 	spin_unlock_bh(&nr_neigh_list_lock);
455  *	"Delete" a neighbour. The neighbour is only removed if the number
456  *	of nodes that may use it is zero.
/*
 * ioctl helper: unlock a neighbour (locked = 0) and remove it when no
 * node routes through it any more.  NOTE(review): the final return is
 * missing from this excerpt.
 */
458 static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
460 	struct nr_neigh *nr_neigh;
462 	nr_neigh = nr_neigh_get_dev(callsign, dev);
464 	if (nr_neigh == NULL) return -EINVAL;
466 	nr_neigh->quality = quality;
467 	nr_neigh->locked  = 0;
469 	if (nr_neigh->count == 0)
470 		nr_remove_neigh(nr_neigh);
471 	nr_neigh_put(nr_neigh);
477  *	Decrement the obsolescence count by one. If a route is reduced to a
478  *	count of zero, remove it. Also remove any unlocked neighbours with
479  *	zero nodes routing via it.
/*
 * Periodic ageing pass over every node's routes.  Uses the _safe list
 * iterator because nodes may be removed while walking.
 * NOTE(review): multiple lines (braces, case bodies, default arm,
 * return) are missing from this excerpt.
 */
481 static int nr_dec_obs(void)
483 	struct nr_neigh *nr_neigh;
485 	struct hlist_node *node, *nodet;
488 	spin_lock_bh(&nr_node_list_lock);
489 	nr_node_for_each_safe(s, node, nodet, &nr_node_list) {
491 		for (i = 0; i < s->count; i++) {
492 			switch (s->routes[i].obs_count) {
493 			case 0:		/* A locked entry */
496 			case 1:		/* From 1 -> 0 */
				/* Route expires: drop the route's neighbour ref and
				 * reap the neighbour when unused and unlocked. */
497 				nr_neigh = s->routes[i].neighbour;
500 				nr_neigh_put(nr_neigh);
502 					if (nr_neigh->count == 0 && !nr_neigh->locked)
503 						nr_remove_neigh(nr_neigh);
				/* Compact the surviving routes upward. */
509 					s->routes[0] = s->routes[1];
511 					s->routes[1] = s->routes[2];
				/* Any other count: just age the route by one tick. */
518 				s->routes[i].obs_count--;
		/* Node lost all its routes: remove it (list lock held). */
525 			nr_remove_node_locked(s);
528 	spin_unlock_bh(&nr_node_list_lock);
534  *	A device has been removed. Remove its routes and neighbours.
/*
 * Called on NETDEV going away: for every neighbour bound to 'dev',
 * strip it out of every node's route table (compacting and deleting
 * emptied nodes), then remove the neighbour itself.  Takes the
 * neighbour lock outer, node lock inner.  NOTE(review): the dev match
 * test and several braces are missing from this excerpt.
 */
536 void nr_rt_device_down(struct net_device *dev)
539 	struct hlist_node *node, *nodet, *node2, *node2t;
543 	spin_lock_bh(&nr_neigh_list_lock);
544 	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
546 			spin_lock_bh(&nr_node_list_lock);
547 			nr_node_for_each_safe(t, node2, node2t, &nr_node_list) {
549 				for (i = 0; i < t->count; i++) {
550 					if (t->routes[i].neighbour == s) {
						/* Shift the remaining routes up over the gap. */
555 							t->routes[0] = t->routes[1];
557 							t->routes[1] = t->routes[2];
				/* Node has no routes left: delete it. */
565 					nr_remove_node_locked(t);
568 			spin_unlock_bh(&nr_node_list_lock);
570 			nr_remove_neigh_locked(s);
573 	spin_unlock_bh(&nr_neigh_list_lock);
577  *	Check that the device given is a valid AX.25 interface that is "up".
578  *	Or a valid ethernet interface with an AX.25 callsign binding.
/*
 * Resolve a device name to a net_device, accepting it only if it is UP
 * and of type AX.25.  dev_get_by_name() takes a device reference; the
 * reject path (dev_put + NULL return) is missing from this excerpt.
 */
580 static struct net_device *nr_ax25_dev_get(char *devname)
582 	struct net_device *dev;
584 	if ((dev = dev_get_by_name(devname)) == NULL)
587 	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
595  *	Find the first active NET/ROM device, usually "nr0".
/*
 * Walk the device list under dev_base_lock and pick the UP NET/ROM
 * device whose name sorts first (strncmp on the first 3 chars).
 * NOTE(review): the dev_hold on 'first' and the return are missing
 * from this excerpt.
 */
597 struct net_device *nr_dev_first(void)
599 	struct net_device *dev, *first = NULL;
601 	read_lock(&dev_base_lock);
602 	for (dev = dev_base; dev != NULL; dev = dev->next) {
603 		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
604 			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
609 	read_unlock(&dev_base_lock);
615  *	Find the NET/ROM device for the given callsign.
/*
 * Match an UP NET/ROM device whose hardware address equals 'addr'.
 * NOTE(review): the dev_hold/break on match and the return are missing
 * from this excerpt — callers (e.g. nr_add_node) treat the result as
 * a held reference.
 */
617 struct net_device *nr_dev_get(ax25_address *addr)
619 	struct net_device *dev;
621 	read_lock(&dev_base_lock);
622 	for (dev = dev_base; dev != NULL; dev = dev->next) {
623 		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
629 	read_unlock(&dev_base_lock);
/*
 * Build an ax25_digi from an ioctl's digipeater array.  Uses a static
 * buffer, so the result is only valid until the next call and the
 * function is not reentrant — acceptable because it is only reached
 * from the (serialized) ioctl path.  NOTE(review): the ndigis==0 early
 * return and the final return are missing from this excerpt.
 */
633 static ax25_digi *nr_call_to_digi(int ndigis, ax25_address *digipeaters)
635 	static ax25_digi ax25_digi;
641 	for (i = 0; i < ndigis; i++) {
642 		ax25_digi.calls[i]    = digipeaters[i];
643 		ax25_digi.repeated[i] = 0;
646 	ax25_digi.ndigi      = ndigis;
647 	ax25_digi.lastrepeat = -1;
653  *	Handle the ioctls that control the routing functions.
/*
 * Visible commands: an "add" branch (copies nr_route_struct from
 * userspace, validates ndigis against AX25_MAX_DIGIS, then dispatches
 * on type to nr_add_node/nr_add_neigh) and a "del" branch (dispatches
 * to nr_del_node/nr_del_neigh).  NOTE(review): the outer switch on
 * 'cmd', the dev_put calls, default arms and returns are missing from
 * this excerpt.
 */
655 int nr_rt_ioctl(unsigned int cmd, void __user *arg)
657 	struct nr_route_struct nr_route;
658 	struct net_device *dev;
663 		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
665 		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
667 		if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) {
671 		switch (nr_route.type) {
673 			ret = nr_add_node(&nr_route.callsign,
676 				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
677 				dev, nr_route.quality,
681 			ret = nr_add_neigh(&nr_route.callsign,
682 				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
683 				dev, nr_route.quality);
692 		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
694 		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
696 		switch (nr_route.type) {
698 			ret = nr_del_node(&nr_route.callsign,
699 				&nr_route.neighbour, dev);
702 			ret = nr_del_neigh(&nr_route.callsign,
703 				dev, nr_route.quality);
722  *	A level 2 link has timed out, therefore it appears to be a poor link,
723  *	then don't use that neighbour until it is reset.
/*
 * Find the neighbour owning the failed ax25_cb, clear its ax25 binding,
 * bump its failure counter, and — once the sysctl threshold is reached —
 * advance every node currently routing through it to its next route.
 * NOTE(review): the hold-and-break inside the first loop and the
 * which++ inside the second are missing from this excerpt.
 */
725 void nr_link_failed(ax25_cb *ax25, int reason)
727 	struct nr_neigh *s, *nr_neigh = NULL;
728 	struct hlist_node *node;
729 	struct nr_node  *nr_node = NULL;
731 	spin_lock_bh(&nr_neigh_list_lock);
732 	nr_neigh_for_each(s, node, &nr_neigh_list) {
733 		if (s->ax25 == ax25) {
739 	spin_unlock_bh(&nr_neigh_list_lock);
741 	if (nr_neigh == NULL)
	/* Detach the dead L2 connection from the neighbour. */
744 	nr_neigh->ax25 = NULL;
	/* Below the failure threshold: keep using this neighbour. */
747 	if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
748 		nr_neigh_put(nr_neigh);
751 	spin_lock_bh(&nr_node_list_lock);
752 	nr_node_for_each(nr_node, node, &nr_node_list) {
753 		nr_node_lock(nr_node);
754 		if (nr_node->which < nr_node->count &&
755 		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
757 		nr_node_unlock(nr_node);
759 	spin_unlock_bh(&nr_node_list_lock);
760 	nr_neigh_put(nr_neigh);
764  *	Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
765  *	indicates an internally generated frame.
/*
 * Main forwarding entry point.  Learns a passive route back to the
 * frame's source, delivers locally when the destination is one of our
 * devices, otherwise looks up the best route and relays the frame over
 * the chosen neighbour's AX.25 link.  NOTE(review): many lines are
 * missing from this excerpt (TTL decrement, skb bookkeeping around the
 * copy, ax25s handling, returns); comments track only what is visible.
 */
767 int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
769 	ax25_address *nr_src, *nr_dest;
770 	struct nr_neigh *nr_neigh;
771 	struct nr_node  *nr_node;
772 	struct net_device *dev;
776 	struct sk_buff *skbn;
	/* NET/ROM header: source callsign at offset 0, destination at 7. */
779 	nr_src  = (ax25_address *)(skb->data + 0);
780 	nr_dest = (ax25_address *)(skb->data + 7);
	/* Passively learn a zero-quality route back towards the sender. */
783 		nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
784 			    ax25->ax25_dev->dev, 0, sysctl_netrom_obsolescence_count_initialiser);
786 	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* Its for me */
787 		if (ax25 == NULL)			/* Its from me */
788 			ret = nr_loopback_queue(skb);
790 			ret = nr_rx_frame(skb, dev);
	/* Routing (digipeating) of foreign frames can be disabled. */
795 	if (!sysctl_netrom_routing_control && ax25 != NULL)
798 	/* Its Time-To-Live has expired */
799 	if (skb->data[14] == 1) {
803 	nr_node = nr_node_get(nr_dest);
806 	nr_node_lock(nr_node);
	/* No usable route for this destination. */
808 	if (nr_node->which >= nr_node->count) {
809 		nr_node_unlock(nr_node);
810 		nr_node_put(nr_node);
814 	nr_neigh = nr_node->routes[nr_node->which].neighbour;
816 	if ((dev = nr_dev_first()) == NULL) {
817 		nr_node_unlock(nr_node);
818 		nr_node_put(nr_node);
822 	/* We are going to change the netrom headers so we should get our
823 	   own skb, we also did not know until now how much header space
824 	   we had to reserve... - RXQ */
825 	if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
826 		nr_node_unlock(nr_node);
827 		nr_node_put(nr_node);
	/* Prepend the AX.25 PID byte identifying a NET/ROM payload. */
835 	dptr  = skb_push(skb, 1);
836 	*dptr = AX25_P_NETROM;
838 	ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev);
839 	if (nr_neigh->ax25 && ax25s) {
840 		/* We were already holding this ax25_cb */
843 	nr_neigh->ax25 = ax25s;
	/* Success iff the neighbour now has a live AX.25 connection. */
846 	ret = (nr_neigh->ax25 != NULL);
847 	nr_node_unlock(nr_node);
848 	nr_node_put(nr_node);
852 #ifdef CONFIG_PROC_FS
/*
 * seq_file start: take the node-list lock (released in nr_node_stop),
 * return SEQ_START_TOKEN for position 0, else walk to the *pos-th node.
 * NOTE(review): the position-matching body of the loop is missing from
 * this excerpt.
 */
854 static void *nr_node_start(struct seq_file *seq, loff_t *pos)
856 	struct nr_node *nr_node;
857 	struct hlist_node *node;
860 	spin_lock_bh(&nr_node_list_lock);
862 		return SEQ_START_TOKEN;
864 	nr_node_for_each(nr_node, node, &nr_node_list) {
/*
 * seq_file next: after the header token start at the list head,
 * otherwise follow the current node's hlist link.
 */
873 static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
875 	struct hlist_node *node;
878 	node = (v == SEQ_START_TOKEN)
880 		: ((struct nr_node *)v)->node_node.next;
882 	return hlist_entry(node, struct nr_node, node_node);
/* seq_file stop: release the lock taken in nr_node_start. */
885 static void nr_node_stop(struct seq_file *seq, void *v)
887 	spin_unlock_bh(&nr_node_list_lock);
/*
 * seq_file show: print the table header for SEQ_START_TOKEN, otherwise
 * one line per node — callsign, mnemonic ('*' when empty), then each of
 * its routes' quality/obsolescence/neighbour number.  Per-node lock is
 * held while reading the route array.
 */
890 static int nr_node_show(struct seq_file *seq, void *v)
895 	if (v == SEQ_START_TOKEN)
897 			 "callsign  mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
899 		struct nr_node *nr_node = v;
900 		nr_node_lock(nr_node);
901 		seq_printf(seq, "%-9s %-7s  %d %d",
902 			ax2asc(buf, &nr_node->callsign),
903 			(nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
907 		for (i = 0; i < nr_node->count; i++) {
908 			seq_printf(seq, "  %3d   %d %05d",
909 				nr_node->routes[i].quality,
910 				nr_node->routes[i].obs_count,
911 				nr_node->routes[i].neighbour->number);
913 		nr_node_unlock(nr_node);
/* Iterator ops for /proc nodes listing. */
920 static struct seq_operations nr_node_seqops = {
921 	.start = nr_node_start,
922 	.next = nr_node_next,
923 	.stop = nr_node_stop,
924 	.show = nr_node_show,
/* proc open: bind the node seq_operations to this file. */
927 static int nr_node_info_open(struct inode *inode, struct file *file)
929 	return seq_open(file, &nr_node_seqops);
/* file_operations for the nodes /proc entry (non-static: registered
 * elsewhere in the NET/ROM code). */
932 struct file_operations nr_nodes_fops = {
933 	.owner = THIS_MODULE,
934 	.open = nr_node_info_open,
937 	.release = seq_release,
/*
 * seq_file start for the neighbour list; mirrors nr_node_start.
 * NOTE(review): the position-matching loop body is missing from this
 * excerpt.
 */
940 static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
942 	struct nr_neigh *nr_neigh;
943 	struct hlist_node *node;
946 	spin_lock_bh(&nr_neigh_list_lock);
948 		return SEQ_START_TOKEN;
950 	nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) {
/*
 * seq_file next for the neighbour list; mirrors nr_node_next.
 */
957 static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
959 	struct hlist_node *node;
962 	node = (v == SEQ_START_TOKEN)
963 		? nr_neigh_list.first
964 		: ((struct nr_neigh *)v)->neigh_node.next;
966 	return hlist_entry(node, struct nr_neigh, neigh_node);
/* seq_file stop: release the lock taken in nr_neigh_start. */
969 static void nr_neigh_stop(struct seq_file *seq, void *v)
971 	spin_unlock_bh(&nr_neigh_list_lock);
/*
 * seq_file show: header for SEQ_START_TOKEN, otherwise one line per
 * neighbour — number, callsign, device name ("???" if unbound), then
 * (per the header) qual/lock/count/failed and any digipeater path.
 */
974 static int nr_neigh_show(struct seq_file *seq, void *v)
979 	if (v == SEQ_START_TOKEN)
980 		seq_puts(seq, "addr  callsign  dev  qual lock count failed digipeaters\n");
982 		struct nr_neigh *nr_neigh = v;
984 		seq_printf(seq, "%05d %-9s %-4s  %3d    %d   %3d    %3d",
986 			ax2asc(buf, &nr_neigh->callsign),
987 			nr_neigh->dev ? nr_neigh->dev->name : "???",
993 		if (nr_neigh->digipeat != NULL) {
994 			for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
995 				seq_printf(seq, " %s",
996 					   ax2asc(buf, &nr_neigh->digipeat->calls[i]));
/* Iterator ops for the neighbours /proc listing. */
1004 static struct seq_operations nr_neigh_seqops = {
1005 	.start = nr_neigh_start,
1006 	.next = nr_neigh_next,
1007 	.stop = nr_neigh_stop,
1008 	.show = nr_neigh_show,
/* proc open: bind the neighbour seq_operations to this file. */
1011 static int nr_neigh_info_open(struct inode *inode, struct file *file)
1013 	return seq_open(file, &nr_neigh_seqops);
/* file_operations for the neighbours /proc entry (non-static:
 * registered elsewhere in the NET/ROM code). */
1016 struct file_operations nr_neigh_fops = {
1017 	.owner = THIS_MODULE,
1018 	.open = nr_neigh_info_open,
1020 	.llseek = seq_lseek,
1021 	.release = seq_release,
1027  *	Free all memory associated with the nodes and routes lists.
/*
 * Module-exit teardown: with both list locks held (neighbour outer,
 * node inner), unlink every node and every neighbour.  The _locked
 * removal helpers drop the list references, freeing each entry when
 * its refcount reaches zero.  NOTE(review): per-entry lock/hold lines
 * appear to be missing from this excerpt.
 */
1029 void __exit nr_rt_free(void)
1031 	struct nr_neigh *s = NULL;
1032 	struct nr_node  *t = NULL;
1033 	struct hlist_node *node, *nodet;
1035 	spin_lock_bh(&nr_neigh_list_lock);
1036 	spin_lock_bh(&nr_node_list_lock);
1037 	nr_node_for_each_safe(t, node, nodet, &nr_node_list) {
1039 		nr_remove_node_locked(t);
1042 	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
1047 		nr_remove_neigh_locked(s);
1049 	spin_unlock_bh(&nr_node_list_lock);
1050 	spin_unlock_bh(&nr_neigh_list_lock);