/*
 *      This module:
 *              This module is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      History
 *      03-01-2007      Added forwarding for x.25       Andrew Hendry
 */
#include <linux/if_arp.h>
#include <linux/init.h>
#include <net/x25.h>

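/*
 *      Each forwarded call is tracked by a struct x25_forward holding its
 *      logical channel identifier and the two devices it runs between.
 *      Entries live on x25_forward_list, protected by x25_forward_list_lock.
 */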
struct list_head x25_forward_list = LIST_HEAD_INIT(x25_forward_list);
DEFINE_RWLOCK(x25_forward_list_lock);

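/**
 *      x25_forward_call - forward an incoming X.25 call request
 *      @dest_addr: called X.121 address
 *      @from: neighbour the call request arrived on
 *      @skb: the call request frame
 *      @lci: logical channel identifier of the call
 *
 *      Looks up a route for @dest_addr, records the (lci, dev1, dev2) pair
 *      on x25_forward_list so that later data packets can be relayed, and
 *      transmits a clone of the call request to the destination neighbour.
 *
 *      Returns 1 if the call request was forwarded, -ENOMEM if the
 *      forwarding entry could not be allocated, and 0 otherwise (no route
 *      or neighbour, a loop back to @from, or a failed clone).
 */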
int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
                        struct sk_buff *skb, int lci)
{
        struct x25_route *rt;
        struct x25_neigh *neigh_new = NULL;
        struct list_head *entry;
        struct x25_forward *x25_frwd, *new_frwd;
        struct sk_buff *skbn;
        short same_lci = 0;
        int rc = 0;

        if ((rt = x25_get_route(dest_addr)) == NULL)
                goto out_no_route;

        if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
                /* This shouldn't happen; if it somehow occurs,
                 * do something sensible.
                 */
                goto out_put_route;
        }

        /* Avoid a loop: this is the normal exit path for a
         * system with only one X.25 interface and a default route.
         */
        if (rt->dev == from->dev) {
                goto out_put_nb;
        }

        /* Remote end sending a call request on an already
         * established LCI? It shouldn't happen, but just in case...
         */
        read_lock_bh(&x25_forward_list_lock);
        list_for_each(entry, &x25_forward_list) {
                x25_frwd = list_entry(entry, struct x25_forward, node);
                if (x25_frwd->lci == lci) {
                        printk(KERN_WARNING "X.25: call request for an already registered lci, forwarding but not registering a new pair\n");
                        same_lci = 1;
                }
        }
        read_unlock_bh(&x25_forward_list_lock);

        /* Save the forwarding details for future traffic */
        if (!same_lci) {
                if ((new_frwd = kmalloc(sizeof(struct x25_forward),
                                                GFP_ATOMIC)) == NULL) {
                        rc = -ENOMEM;
                        goto out_put_nb;
                }
                new_frwd->lci = lci;
                new_frwd->dev1 = rt->dev;
                new_frwd->dev2 = from->dev;
                write_lock_bh(&x25_forward_list_lock);
                list_add(&new_frwd->node, &x25_forward_list);
                write_unlock_bh(&x25_forward_list_lock);
        }

        /* Forward the call request */
        if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
                goto out_put_nb;
        }
        x25_transmit_link(skbn, neigh_new);
        rc = 1;

out_put_nb:
        x25_neigh_put(neigh_new);

out_put_route:
        x25_route_put(rt);

out_no_route:
        return rc;
}


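/**
 *      x25_forward_data - relay a data packet on a forwarded call
 *      @lci: logical channel identifier of the packet
 *      @from: neighbour the packet arrived on
 *      @skb: the packet to relay
 *
 *      Looks up the forwarding entry registered for @lci by
 *      x25_forward_call() and retransmits a copy of the packet on the
 *      device on the other side of the forwarded call.
 *
 *      Returns 1 if the packet was forwarded, 0 otherwise.
 */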
int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb)
{
        struct x25_forward *frwd;
        struct list_head *entry;
        struct net_device *peer = NULL;
        struct x25_neigh *nb;
        struct sk_buff *skbn;
        int rc = 0;

        read_lock_bh(&x25_forward_list_lock);
        list_for_each(entry, &x25_forward_list) {
                frwd = list_entry(entry, struct x25_forward, node);
                if (frwd->lci == lci) {
                        /* The call is established, either side can send */
                        if (from->dev == frwd->dev1) {
                                peer = frwd->dev2;
                        } else {
                                peer = frwd->dev1;
                        }
                        break;
                }
        }
        read_unlock_bh(&x25_forward_list_lock);

        if ((nb = x25_get_neigh(peer)) == NULL)
                goto out;

        if ((skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL) {
                /* drop the neighbour reference taken above before bailing */
                x25_neigh_put(nb);
                goto out;
        }
        x25_transmit_link(skbn, nb);

        x25_neigh_put(nb);
        rc = 1;
out:
        return rc;
}

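/**
 *      x25_clear_forward_by_lci - remove the forwarding entry for a channel
 *      @lci: logical channel identifier whose entry should be removed
 *
 *      Deletes and frees any entry on x25_forward_list registered for @lci,
 *      e.g. once a forwarded call has been cleared.
 */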
void x25_clear_forward_by_lci(unsigned int lci)
{
        struct x25_forward *fwd;
        struct list_head *entry, *tmp;

        write_lock_bh(&x25_forward_list_lock);

        list_for_each_safe(entry, tmp, &x25_forward_list) {
                fwd = list_entry(entry, struct x25_forward, node);
                if (fwd->lci == lci) {
                        list_del(&fwd->node);
                        kfree(fwd);
                }
        }
        write_unlock_bh(&x25_forward_list_lock);
}

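/**
 *      x25_clear_forward_by_dev - remove all forwarding entries for a device
 *      @dev: network device being torn down
 *
 *      Deletes and frees every entry on x25_forward_list that references
 *      @dev on either side of a forwarded call, e.g. when the device is
 *      unregistered or its link goes down.
 */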
void x25_clear_forward_by_dev(struct net_device *dev)
{
        struct x25_forward *fwd;
        struct list_head *entry, *tmp;

        write_lock_bh(&x25_forward_list_lock);

        list_for_each_safe(entry, tmp, &x25_forward_list) {
                fwd = list_entry(entry, struct x25_forward, node);
                if ((fwd->dev1 == dev) || (fwd->dev2 == dev)) {
                        list_del(&fwd->node);
                        kfree(fwd);
                }
        }
        write_unlock_bh(&x25_forward_list_lock);
}