/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/netfilter.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>
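
/*
 * The routing tables: nr_node_list holds the known destination nodes and
 * nr_neigh_list the directly reachable neighbours. Each list has its own
 * lock, always taken with the _bh variants because the tables are also
 * updated from frame reception.
 */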
static unsigned int nr_neigh_no = 1;

static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);

static struct nr_node *nr_node_get(ax25_address *callsign)
{
	struct nr_node *found = NULL;
	struct nr_node *nr_node;
	struct hlist_node *node;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, node, &nr_node_list)
		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
			nr_node_hold(nr_node);
			found = nr_node;
			break;
		}
	spin_unlock_bh(&nr_node_list_lock);
	return found;
}

static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
					 struct net_device *dev)
{
	struct nr_neigh *found = NULL;
	struct nr_neigh *nr_neigh;
	struct hlist_node *node;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(nr_neigh, node, &nr_neigh_list)
		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
		    nr_neigh->dev == dev) {
			nr_neigh_hold(nr_neigh);
			found = nr_neigh;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);
	return found;
}

static void nr_remove_neigh(struct nr_neigh *);

/*
 * Add a new route to a node, and in the process add the node and the
 * neighbour if it is new.
 */
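/*
 * Each nr_node keeps up to three alternative routes (routes[0..2]) sorted
 * by descending quality; 'count' is the number of valid entries and
 * 'which' is the index of the route currently in use. Neighbours are
 * shared between nodes and reference counted with nr_neigh_hold() and
 * nr_neigh_put().
 */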
static int nr_add_node(ax25_address *nr, const char *mnemonic, ax25_address *ax25,
	ax25_digi *ax25_digi, struct net_device *dev, int quality, int obs_count)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	struct nr_route nr_route;
	int i, found;
	struct net_device *odev;

	if ((odev=nr_dev_get(nr)) != NULL) {	/* Can't add routes to ourself */
		dev_put(odev);
		return -EINVAL;
	}

	nr_node = nr_node_get(nr);

	nr_neigh = nr_neigh_get_dev(ax25, dev);

	/*
	 * The L2 link to a neighbour has failed in the past
	 * and now a frame comes from this neighbour. We assume
	 * it was a temporary trouble with the link and reset the
	 * routes now (and not wait for a node broadcast).
	 */
	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
		struct nr_node *nr_nodet;
		struct hlist_node *node;

		spin_lock_bh(&nr_node_list_lock);
		nr_node_for_each(nr_nodet, node, &nr_node_list) {
			nr_node_lock(nr_nodet);
			for (i = 0; i < nr_nodet->count; i++)
				if (nr_nodet->routes[i].neighbour == nr_neigh)
					if (i < nr_nodet->which)
						nr_nodet->which = i;
			nr_node_unlock(nr_nodet);
		}
		spin_unlock_bh(&nr_node_list_lock);
	}

	if (nr_neigh != NULL)
		nr_neigh->failed = 0;
	if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
		nr_neigh_put(nr_neigh);
		nr_node_put(nr_node);
		return 0;
	}
	if (nr_neigh == NULL) {
		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
			if (nr_node)
				nr_node_put(nr_node);
			return -ENOMEM;
		}

		nr_neigh->callsign = *ax25;
		nr_neigh->digipeat = NULL;
		nr_neigh->ax25     = NULL;
		nr_neigh->dev      = dev;
		nr_neigh->quality  = sysctl_netrom_default_path_quality;
		nr_neigh->locked   = 0;
		nr_neigh->count    = 0;
		nr_neigh->number   = nr_neigh_no++;
		nr_neigh->failed   = 0;
		atomic_set(&nr_neigh->refcount, 1);

		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
			if ((nr_neigh->digipeat = kmalloc(sizeof(*ax25_digi), GFP_KERNEL)) == NULL) {
				kfree(nr_neigh);
				if (nr_node)
					nr_node_put(nr_node);
				return -ENOMEM;
			}
			memcpy(nr_neigh->digipeat, ax25_digi,
			       sizeof(*ax25_digi));
		}

		spin_lock_bh(&nr_neigh_list_lock);
		hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
		nr_neigh_hold(nr_neigh);
		spin_unlock_bh(&nr_neigh_list_lock);
	}
	if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
		nr_neigh->quality = quality;

	if (nr_node == NULL) {
		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
			if (nr_neigh)
				nr_neigh_put(nr_neigh);
			return -ENOMEM;
		}

		nr_node->callsign = *nr;
		strcpy(nr_node->mnemonic, mnemonic);

		nr_node->which = 0;
		nr_node->count = 1;
		atomic_set(&nr_node->refcount, 1);
		spin_lock_init(&nr_node->node_lock);

		nr_node->routes[0].quality   = quality;
		nr_node->routes[0].obs_count = obs_count;
		nr_node->routes[0].neighbour = nr_neigh;

		nr_neigh_hold(nr_neigh);
		nr_neigh->count++;

		spin_lock_bh(&nr_node_list_lock);
		hlist_add_head(&nr_node->node_node, &nr_node_list);
		/* refcount initialized at 1 */
		spin_unlock_bh(&nr_node_list_lock);

		nr_neigh_put(nr_neigh);
		return 0;
	}
	nr_node_lock(nr_node);

	if (quality != 0)
		strcpy(nr_node->mnemonic, mnemonic);

	for (found = 0, i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_node->routes[i].quality   = quality;
			nr_node->routes[i].obs_count = obs_count;
			found = 1;
			break;
		}
	}

	if (!found) {
		/* We have space at the bottom, slot it in */
		if (nr_node->count < 3) {
			nr_node->routes[2] = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[0];

			nr_node->routes[0].quality   = quality;
			nr_node->routes[0].obs_count = obs_count;
			nr_node->routes[0].neighbour = nr_neigh;

			nr_node->which++;
			nr_node->count++;
			nr_neigh_hold(nr_neigh);
			nr_neigh->count++;
		} else {
			/* It must be better than the worst */
			if (quality > nr_node->routes[2].quality) {
				nr_node->routes[2].neighbour->count--;
				nr_neigh_put(nr_node->routes[2].neighbour);

				if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
					nr_remove_neigh(nr_node->routes[2].neighbour);

				nr_node->routes[2].quality   = quality;
				nr_node->routes[2].obs_count = obs_count;
				nr_node->routes[2].neighbour = nr_neigh;

				nr_neigh_hold(nr_neigh);
				nr_neigh->count++;
			}
		}
	}
	/* Now re-sort the routes in quality order */
	switch (nr_node->count) {
	case 3:
		if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
			switch (nr_node->which) {
			case 0: nr_node->which = 1; break;
			case 1: nr_node->which = 0; break;
			}
			nr_route           = nr_node->routes[0];
			nr_node->routes[0] = nr_node->routes[1];
			nr_node->routes[1] = nr_route;
		}
		if (nr_node->routes[2].quality > nr_node->routes[1].quality) {
			switch (nr_node->which) {
			case 1: nr_node->which = 2; break;
			case 2: nr_node->which = 1; break;
			default: break;
			}
			nr_route           = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[2];
			nr_node->routes[2] = nr_route;
		}
		/* fall through and keep the top two entries sorted as well */
	case 2:
		if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
			switch (nr_node->which) {
			case 0: nr_node->which = 1; break;
			case 1: nr_node->which = 0; break;
			default: break;
			}
			nr_route           = nr_node->routes[0];
			nr_node->routes[0] = nr_node->routes[1];
			nr_node->routes[1] = nr_route;
		}
	case 1:
		break;
	}

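	/*
	 * If the neighbour we have just heard from now sorts ahead of the
	 * route currently in use, switch back to it.
	 */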
	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			if (i < nr_node->which)
				nr_node->which = i;
			break;
		}
	}
	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);
	return 0;
}

static inline void __nr_remove_node(struct nr_node *nr_node)
{
	hlist_del_init(&nr_node->node_node);
	nr_node_put(nr_node);
}
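
/*
 * The *_locked removal helpers are for callers that already hold the
 * corresponding list lock; nr_remove_node() and nr_remove_neigh() take
 * the lock themselves.
 */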
#define nr_remove_node_locked(__node) \
	__nr_remove_node(__node)

static void nr_remove_node(struct nr_node *nr_node)
{
	spin_lock_bh(&nr_node_list_lock);
	__nr_remove_node(nr_node);
	spin_unlock_bh(&nr_node_list_lock);
}

static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	hlist_del_init(&nr_neigh->neigh_node);
	nr_neigh_put(nr_neigh);
}

#define nr_remove_neigh_locked(__neigh) \
	__nr_remove_neigh(__neigh)

static void nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	spin_lock_bh(&nr_neigh_list_lock);
	__nr_remove_neigh(nr_neigh);
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 * "Delete" a node. Strictly speaking remove a route to a node. The node
 * is only deleted if no routes are left to it.
 */
static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	int i;

	nr_node = nr_node_get(callsign);
	if (nr_node == NULL)
		return -EINVAL;

	nr_neigh = nr_neigh_get_dev(neighbour, dev);
	if (nr_neigh == NULL) {
		nr_node_put(nr_node);
		return -EINVAL;
	}

	nr_node_lock(nr_node);
	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_neigh->count--;
			nr_neigh_put(nr_neigh);

			if (nr_neigh->count == 0 && !nr_neigh->locked)
				nr_remove_neigh(nr_neigh);
			nr_neigh_put(nr_neigh);

			nr_node->count--;
			if (nr_node->count == 0) {
				nr_remove_node(nr_node);
			} else {
				switch (i) {
				case 0: nr_node->routes[0] = nr_node->routes[1];
				case 1: nr_node->routes[1] = nr_node->routes[2];
				case 2: break;
				}
				nr_node_put(nr_node);
			}
			nr_node_unlock(nr_node);
			return 0;
		}
	}
	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);
	return -EINVAL;
}

/*
 * Lock a neighbour with a quality.
 */
static int nr_add_neigh(ax25_address *callsign, ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);
	if (nr_neigh) {
		nr_neigh->quality = quality;
		nr_neigh->locked  = 1;
		nr_neigh_put(nr_neigh);
		return 0;
	}

	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	nr_neigh->callsign = *callsign;
	nr_neigh->digipeat = NULL;
	nr_neigh->ax25     = NULL;
	nr_neigh->dev      = dev;
	nr_neigh->quality  = quality;
	nr_neigh->locked   = 1;
	nr_neigh->count    = 0;
	nr_neigh->number   = nr_neigh_no++;
	nr_neigh->failed   = 0;
	atomic_set(&nr_neigh->refcount, 1);

	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
		if ((nr_neigh->digipeat = kmalloc(sizeof(*ax25_digi), GFP_KERNEL)) == NULL) {
			kfree(nr_neigh);
			return -ENOMEM;
		}
		memcpy(nr_neigh->digipeat, ax25_digi, sizeof(*ax25_digi));
	}

	spin_lock_bh(&nr_neigh_list_lock);
	hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
	/* refcount is initialized at 1 */
	spin_unlock_bh(&nr_neigh_list_lock);

	return 0;
}

/*
 * "Delete" a neighbour. The neighbour is only removed if the number
 * of nodes that may use it is zero.
 */
static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);
	if (nr_neigh == NULL) return -EINVAL;

	nr_neigh->quality = quality;
	nr_neigh->locked  = 0;

	if (nr_neigh->count == 0)
		nr_remove_neigh(nr_neigh);
	nr_neigh_put(nr_neigh);

	return 0;
}

/*
 * Decrement the obsolescence count by one. If a route is reduced to a
 * count of zero, remove it. Also remove any unlocked neighbours with
 * zero nodes routing via them.
 */
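/*
 * An obs_count of zero marks a locked (manually entered) route that is
 * never aged out; all other routes count down towards removal each time
 * this runs, which is triggered from user space via SIOCNRDECOBS.
 */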
static int nr_dec_obs(void)
{
	struct nr_neigh *nr_neigh;
	struct nr_node  *s;
	struct hlist_node *node, *nodet;
	int i;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(s, node, nodet, &nr_node_list) {
		nr_node_lock(s);
		for (i = 0; i < s->count; i++) {
			switch (s->routes[i].obs_count) {
			case 0:		/* A locked entry */
				break;

			case 1:		/* From 1 -> 0 */
				nr_neigh = s->routes[i].neighbour;

				nr_neigh->count--;
				nr_neigh_put(nr_neigh);

				if (nr_neigh->count == 0 && !nr_neigh->locked)
					nr_remove_neigh(nr_neigh);

				s->count--;

				switch (i) {
				case 0: s->routes[0] = s->routes[1];
				case 1: s->routes[1] = s->routes[2];
				case 2: break;
				}
				break;

			default:
				s->routes[i].obs_count--;
				break;
			}
		}

		if (s->count <= 0)
			nr_remove_node_locked(s);
		nr_node_unlock(s);
	}
	spin_unlock_bh(&nr_node_list_lock);

	return 0;
}

/*
 * A device has been removed. Remove its routes and neighbours.
 */
void nr_rt_device_down(struct net_device *dev)
{
	struct nr_neigh *s;
	struct hlist_node *node, *nodet, *node2, *node2t;
	struct nr_node  *t;
	int i;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
		if (s->dev == dev) {
			spin_lock_bh(&nr_node_list_lock);
			nr_node_for_each_safe(t, node2, node2t, &nr_node_list) {
				nr_node_lock(t);
				for (i = 0; i < t->count; i++) {
					if (t->routes[i].neighbour == s) {
						t->count--;

						switch (i) {
						case 0: t->routes[0] = t->routes[1];
						case 1: t->routes[1] = t->routes[2];
						case 2: break;
						}
					}
				}

				if (t->count <= 0)
					nr_remove_node_locked(t);
				nr_node_unlock(t);
			}
			spin_unlock_bh(&nr_node_list_lock);

			nr_remove_neigh_locked(s);
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 * Check that the device given is a valid AX.25 interface that is "up".
 * Or a valid ethernet interface with an AX.25 callsign binding.
 */
static struct net_device *nr_ax25_dev_get(char *devname)
{
	struct net_device *dev;

	if ((dev = dev_get_by_name(devname)) == NULL)
		return NULL;

	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
		return dev;

	dev_put(dev);
	return NULL;
}

/*
 * Find the first active NET/ROM device, usually "nr0".
 */
struct net_device *nr_dev_first(void)
{
	struct net_device *dev, *first = NULL;

	read_lock(&dev_base_lock);
	for (dev = dev_base; dev != NULL; dev = dev->next) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
				first = dev;
	}
	if (first)
		dev_hold(first);
	read_unlock(&dev_base_lock);

	return first;
}

/*
 * Find the NET/ROM device for the given callsign.
 */
struct net_device *nr_dev_get(ax25_address *addr)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for (dev = dev_base; dev != NULL; dev = dev->next) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
			dev_hold(dev);
			goto out;
		}
	}
out:
	read_unlock(&dev_base_lock);
	return dev;
}

static ax25_digi *nr_call_to_digi(int ndigis, ax25_address *digipeaters)
{
	static ax25_digi ax25_digi;
	int i;

	if (ndigis == 0)
		return NULL;

	for (i = 0; i < ndigis; i++) {
		ax25_digi.calls[i]    = digipeaters[i];
		ax25_digi.repeated[i] = 0;
	}

	ax25_digi.ndigi      = ndigis;
	ax25_digi.lastrepeat = -1;

	return &ax25_digi;
}

/*
 * Handle the ioctls that control the routing functions.
 */
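/*
 * For illustration only (a sketch, not part of this file): a user-space
 * configuration tool would typically fill in a struct nr_route_struct
 * (defined in <linux/netrom.h>) and issue the ioctl on a NET/ROM socket,
 * roughly along these lines:
 *
 *	struct nr_route_struct nr_route;
 *
 *	memset(&nr_route, 0, sizeof(nr_route));
 *	nr_route.type    = NETROM_NEIGH;
 *	nr_route.quality = 200;                 // example quality value
 *	nr_route.ndigis  = 0;
 *	strcpy(nr_route.device, "ax0");         // example device name
 *	// nr_route.callsign: the neighbour's AX.25 callsign
 *	ioctl(sock_fd, SIOCADDRT, &nr_route);   // sock_fd: a NET/ROM socket
 */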
int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
	struct nr_route_struct nr_route;
	struct net_device *dev;
	int ret;

	switch (cmd) {
	case SIOCADDRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) {
			dev_put(dev);
			return -EINVAL;
		}
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_add_node(&nr_route.callsign,
				nr_route.mnemonic,
				&nr_route.neighbour,
				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
				dev, nr_route.quality,
				nr_route.obs_count);
			break;
		case NETROM_NEIGH:
			ret = nr_add_neigh(&nr_route.callsign,
				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCDELRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_del_node(&nr_route.callsign,
				&nr_route.neighbour, dev);
			break;
		case NETROM_NEIGH:
			ret = nr_del_neigh(&nr_route.callsign,
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCNRDECOBS:
		return nr_dec_obs();

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * A level 2 link has timed out, so it appears to be a poor link; don't
 * use that neighbour until it is reset.
 */
void nr_link_failed(ax25_cb *ax25, int reason)
{
	struct nr_neigh *s, *nr_neigh = NULL;
	struct hlist_node *node;
	struct nr_node  *nr_node = NULL;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(s, node, &nr_neigh_list)
		if (s->ax25 == ax25) {
			nr_neigh_hold(s);
			nr_neigh = s;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);

	if (nr_neigh == NULL) return;

	nr_neigh->ax25 = NULL;
	ax25_cb_put(ax25);

	if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
		nr_neigh_put(nr_neigh);
		return;
	}
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, node, &nr_node_list) {
		nr_node_lock(nr_node);
		if (nr_node->which < nr_node->count && nr_node->routes[nr_node->which].neighbour == nr_neigh)
			nr_node->which++;
		nr_node_unlock(nr_node);
	}
	spin_unlock_bh(&nr_node_list_lock);
	nr_neigh_put(nr_neigh);
}

/*
 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
 * indicates an internally generated frame.
 */
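/*
 * The NET/ROM network header this relies on: bytes 0-6 hold the source
 * callsign, bytes 7-13 the destination callsign and byte 14 the
 * time-to-live.
 */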
int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	ax25_address *nr_src, *nr_dest;
	struct nr_neigh *nr_neigh;
	struct nr_node  *nr_node;
	struct net_device *dev;
	unsigned char *dptr;
	ax25_cb *ax25s;
	int ret;
	struct sk_buff *skbn;

	nr_src  = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);

	if (ax25 != NULL)
		nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
			    ax25->ax25_dev->dev, 0, sysctl_netrom_obsolescence_count_initialiser);

	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* It's for me */
		if (ax25 == NULL)			/* It's from me */
			ret = nr_loopback_queue(skb);
		else
			ret = nr_rx_frame(skb, dev);
		dev_put(dev);
		return ret;
	}

	if (!sysctl_netrom_routing_control && ax25 != NULL)
		return 0;

	/* Its Time-To-Live has expired */
	if (skb->data[14] == 1) {
		return 0;
	}

	nr_node = nr_node_get(nr_dest);
	if (nr_node == NULL)
		return 0;
	nr_node_lock(nr_node);

	if (nr_node->which >= nr_node->count) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	nr_neigh = nr_node->routes[nr_node->which].neighbour;

	if ((dev = nr_dev_first()) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	/* We are going to change the netrom headers so we should get our
	   own skb, we also did not know until now how much header space
	   we had to reserve... - RXQ */
	if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		dev_put(dev);
		return 0;
	}
	kfree_skb(skb);
	skb = skbn;
	skb->data[14]--;

	dptr  = skb_push(skb, 1);
	*dptr = AX25_P_NETROM;

	ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev);
	if (nr_neigh->ax25 && ax25s) {
		/* We were already holding this ax25_cb */
		ax25_cb_put(ax25s);
	}
	nr_neigh->ax25 = ax25s;

	dev_put(dev);
	ret = (nr_neigh->ax25 != NULL);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);
	return ret;
}

#ifdef CONFIG_PROC_FS
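
/*
 * seq_file plumbing for the routing tables. The entries themselves are
 * registered by the NET/ROM socket layer, conventionally appearing as
 * /proc/net/nr_nodes and /proc/net/nr_neigh.
 */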
static void *nr_node_start(struct seq_file *seq, loff_t *pos)
{
	struct nr_node *nr_node;
	struct hlist_node *node;
	int i = 1;

	spin_lock_bh(&nr_node_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	nr_node_for_each(nr_node, node, &nr_node_list) {
		if (i == *pos)
			return nr_node;
		++i;
	}

	return NULL;
}

static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct hlist_node *node;
	++*pos;

	node = (v == SEQ_START_TOKEN)
		? nr_node_list.first
		: ((struct nr_node *)v)->node_node.next;

	return hlist_entry(node, struct nr_node, node_node);
}

static void nr_node_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_node_list_lock);
}

static int nr_node_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
	else {
		struct nr_node *nr_node = v;
		nr_node_lock(nr_node);
		seq_printf(seq, "%-9s %-7s %d %d",
			ax2asc(buf, &nr_node->callsign),
			(nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
			nr_node->which + 1,
			nr_node->count);

		for (i = 0; i < nr_node->count; i++) {
			seq_printf(seq, " %3d %d %05d",
				nr_node->routes[i].quality,
				nr_node->routes[i].obs_count,
				nr_node->routes[i].neighbour->number);
		}
		nr_node_unlock(nr_node);

		seq_puts(seq, "\n");
	}
	return 0;
}

static struct seq_operations nr_node_seqops = {
	.start = nr_node_start,
	.next = nr_node_next,
	.stop = nr_node_stop,
	.show = nr_node_show,
};

static int nr_node_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_node_seqops);
}

struct file_operations nr_nodes_fops = {
	.owner = THIS_MODULE,
	.open = nr_node_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
{
	struct nr_neigh *nr_neigh;
	struct hlist_node *node;
	int i = 1;

	spin_lock_bh(&nr_neigh_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) {
		if (i == *pos)
			return nr_neigh;
		++i;
	}

	return NULL;
}

static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct hlist_node *node;
	++*pos;

	node = (v == SEQ_START_TOKEN)
		? nr_neigh_list.first
		: ((struct nr_neigh *)v)->neigh_node.next;

	return hlist_entry(node, struct nr_neigh, neigh_node);
}

static void nr_neigh_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_neigh_list_lock);
}

static int nr_neigh_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
	else {
		struct nr_neigh *nr_neigh = v;

		seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
			nr_neigh->number,
			ax2asc(buf, &nr_neigh->callsign),
			nr_neigh->dev ? nr_neigh->dev->name : "???",
			nr_neigh->quality,
			nr_neigh->locked,
			nr_neigh->count,
			nr_neigh->failed);

		if (nr_neigh->digipeat != NULL) {
			for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
				seq_printf(seq, " %s",
					   ax2asc(buf, &nr_neigh->digipeat->calls[i]));
		}

		seq_puts(seq, "\n");
	}
	return 0;
}

static struct seq_operations nr_neigh_seqops = {
	.start = nr_neigh_start,
	.next = nr_neigh_next,
	.stop = nr_neigh_stop,
	.show = nr_neigh_show,
};

static int nr_neigh_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_neigh_seqops);
}

struct file_operations nr_neigh_fops = {
	.owner = THIS_MODULE,
	.open = nr_neigh_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#endif

/*
 * Free all memory associated with the nodes and routes lists.
 */
void __exit nr_rt_free(void)
{
	struct nr_neigh *s = NULL;
	struct nr_node  *t = NULL;
	struct hlist_node *node, *nodet;

	spin_lock_bh(&nr_neigh_list_lock);
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(t, node, nodet, &nr_node_list) {
		nr_node_lock(t);
		nr_remove_node_locked(t);
		nr_node_unlock(t);
	}
	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
		while (s->count) {
			s->count--;
			nr_neigh_put(s);
		}
		nr_remove_neigh_locked(s);
	}
	spin_unlock_bh(&nr_node_list_lock);
	spin_unlock_bh(&nr_neigh_list_lock);
}