/* net/netfilter/nf_queue.c */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <net/protocol.h>

#include "nf_internals.h"

/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static struct nf_queue_handler *queue_handler[NPROTO];
static struct nf_queue_rerouter *queue_rerouter;

static DEFINE_RWLOCK(queue_handler_lock);

/* Return -EBUSY when somebody else is registered, -EEXIST if the
 * same handler is already registered, or 0 on success. */
int nf_register_queue_handler(int pf, struct nf_queue_handler *qh)
{
	int ret;

	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	if (queue_handler[pf] == qh)
		ret = -EEXIST;
	else if (queue_handler[pf])
		ret = -EBUSY;
	else {
		queue_handler[pf] = qh;
		ret = 0;
	}
	write_unlock_bh(&queue_handler_lock);

	return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);
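
/*
 * Illustrative sketch (not part of this file's logic): a queueing
 * module such as nfnetlink_queue registers itself roughly as below.
 * All names here are hypothetical placeholders; only the
 * nf_queue_handler fields (outfn, data, name) and
 * nf_register_queue_handler() itself are real.
 */
#if 0
static int example_outfn(struct sk_buff *skb, struct nf_info *info,
			 unsigned int queuenum, void *data)
{
	/* Hand the packet to userspace; every packet accepted here must
	 * eventually come back through nf_reinject(), no matter what. */
	return 0;
}

static struct nf_queue_handler example_qh = {
	.outfn	= example_outfn,
	.name	= "example",
};

/* from the module's init function: */
/* return nf_register_queue_handler(PF_INET, &example_qh); */
#endif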

/* The caller must flush their queue before this */
int nf_unregister_queue_handler(int pf)
{
	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	queue_handler[pf] = NULL;
	write_unlock_bh(&queue_handler_lock);

	return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer)
{
	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	memcpy(&queue_rerouter[pf], rer, sizeof(queue_rerouter[pf]));
	write_unlock_bh(&queue_handler_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_register_queue_rerouter);
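
/*
 * Illustrative sketch (not part of this file): an address-family
 * module can register a rerouter so routing state is saved before a
 * packet is queued and the route can be recomputed on reinject.  The
 * names below are hypothetical; the per-packet state lives in the
 * rer_size bytes that nf_queue() allocates after the struct nf_info.
 */
#if 0
struct example_rt_info {
	u_int32_t saddr;
	u_int32_t daddr;
};

static void example_save(struct sk_buff *skb, struct nf_info *info)
{
	/* the state area starts right after the nf_info itself */
	struct example_rt_info *rt_info = (void *)info + sizeof(*info);

	rt_info->saddr = 0;	/* record addresses, marks, ... */
	rt_info->daddr = 0;
}

static int example_reroute(struct sk_buff **pskb, struct nf_info *info)
{
	/* re-route *pskb if userspace changed the relevant fields */
	return 0;
}

static struct nf_queue_rerouter example_rerouter = {
	.rer_size	= sizeof(struct example_rt_info),
	.save		= example_save,
	.reroute	= example_reroute,
};

/* nf_register_queue_rerouter(PF_INET, &example_rerouter); */
#endif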

int nf_unregister_queue_rerouter(int pf)
{
	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	memset(&queue_rerouter[pf], 0, sizeof(queue_rerouter[pf]));
	write_unlock_bh(&queue_handler_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter);

void nf_unregister_queue_handlers(struct nf_queue_handler *qh)
{
	int pf;

	write_lock_bh(&queue_handler_lock);
	for (pf = 0; pf < NPROTO; pf++) {
		if (queue_handler[pf] == qh)
			queue_handler[pf] = NULL;
	}
	write_unlock_bh(&queue_handler_lock);
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
int nf_queue(struct sk_buff **skb,
	     struct list_head *elem,
	     int pf, unsigned int hook,
	     struct net_device *indev,
	     struct net_device *outdev,
	     int (*okfn)(struct sk_buff *),
	     unsigned int queuenum)
{
	int status;
	struct nf_info *info;
#ifdef CONFIG_BRIDGE_NETFILTER
	struct net_device *physindev = NULL;
	struct net_device *physoutdev = NULL;
#endif

	/* QUEUE == DROP if no one is waiting, to be safe. */
	read_lock(&queue_handler_lock);
	if (!queue_handler[pf] || !queue_handler[pf]->outfn) {
		read_unlock(&queue_handler_lock);
		kfree_skb(*skb);
		return 1;
	}

	info = kmalloc(sizeof(*info) + queue_rerouter[pf].rer_size, GFP_ATOMIC);
	if (!info) {
		if (net_ratelimit())
			printk(KERN_ERR "OOM queueing packet %p\n",
			       *skb);
		read_unlock(&queue_handler_lock);
		kfree_skb(*skb);
		return 1;
	}

	*info = (struct nf_info) {
		(struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };

	/* If it's going away, ignore hook. */
	if (!try_module_get(info->elem->owner)) {
		read_unlock(&queue_handler_lock);
		kfree(info);
		return 0;
	}

	/* Bump dev refs so they don't vanish while packet is out */
	if (indev)
		dev_hold(indev);
	if (outdev)
		dev_hold(outdev);

#ifdef CONFIG_BRIDGE_NETFILTER
	if ((*skb)->nf_bridge) {
		physindev = (*skb)->nf_bridge->physindev;
		if (physindev)
			dev_hold(physindev);
		physoutdev = (*skb)->nf_bridge->physoutdev;
		if (physoutdev)
			dev_hold(physoutdev);
	}
#endif
	if (queue_rerouter[pf].save)
		queue_rerouter[pf].save(*skb, info);

	status = queue_handler[pf]->outfn(*skb, info, queuenum,
					  queue_handler[pf]->data);

	if (status >= 0 && queue_rerouter[pf].reroute)
		status = queue_rerouter[pf].reroute(skb, info);

	read_unlock(&queue_handler_lock);

	if (status < 0) {
		/* James M doesn't say fuck enough. */
		if (indev)
			dev_put(indev);
		if (outdev)
			dev_put(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
		if (physindev)
			dev_put(physindev);
		if (physoutdev)
			dev_put(physoutdev);
#endif
		module_put(info->elem->owner);
		kfree(info);
		kfree_skb(*skb);

		return 1;
	}

	return 1;
}
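
/*
 * For reference: the queue number travels in the upper bits of the
 * hook verdict; nf_reinject() below unpacks it with
 * "verdict >> NF_VERDICT_BITS".  A sketch of a hook that sends its
 * packets to queue 5 (the hook function itself is hypothetical):
 */
#if 0
static unsigned int example_hook(unsigned int hooknum,
				 struct sk_buff **pskb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	/* low bits carry NF_QUEUE, high bits carry the queue number */
	return (5 << NF_VERDICT_BITS) | NF_QUEUE;
}
#endif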

void nf_reinject(struct sk_buff *skb, struct nf_info *info,
		 unsigned int verdict)
{
	struct list_head *elem = &info->elem->list;
	struct list_head *i;

	rcu_read_lock();

	/* Release those devices we held, or Alexey will kill me. */
	if (info->indev)
		dev_put(info->indev);
	if (info->outdev)
		dev_put(info->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		if (skb->nf_bridge->physindev)
			dev_put(skb->nf_bridge->physindev);
		if (skb->nf_bridge->physoutdev)
			dev_put(skb->nf_bridge->physoutdev);
	}
#endif

	/* Drop reference to owner of hook which queued us. */
	module_put(info->elem->owner);

	list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
		if (i == elem)
			break;
	}

	if (i == &nf_hooks[info->pf][info->hook]) {
		/* The module which sent it to userspace is gone. */
		NFDEBUG("%s: module disappeared, dropping packet.\n",
			__FUNCTION__);
		verdict = NF_DROP;
	}

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		elem = elem->prev;
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
				     &skb, info->hook,
				     info->indev, info->outdev, &elem,
				     info->okfn, INT_MIN);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
		info->okfn(skb);
		break;

	case NF_QUEUE:
		if (!nf_queue(&skb, elem, info->pf, info->hook,
			      info->indev, info->outdev, info->okfn,
			      verdict >> NF_VERDICT_BITS))
			goto next_hook;
		break;
	}
	rcu_read_unlock();

	if (verdict == NF_DROP)
		kfree_skb(skb);

	kfree(info);
	return;
}
EXPORT_SYMBOL(nf_reinject);
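
/*
 * Illustrative sketch (hypothetical bookkeeping): a queue handler
 * keeps the skb/info pair it received in outfn() and completes the
 * packet once a verdict arrives.  nf_reinject() consumes both.
 */
#if 0
struct example_queue_entry {
	struct sk_buff *skb;
	struct nf_info *info;
};

static void example_issue_verdict(struct example_queue_entry *entry,
				  unsigned int verdict)
{
	/* verdict is e.g. NF_ACCEPT, NF_DROP or NF_REPEAT */
	nf_reinject(entry->skb, entry->info, verdict);
	kfree(entry);	/* free our own bookkeeping only */
}
#endif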

#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= NPROTO)
		return NULL;

	return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;

	if (*pos >= NPROTO)
		return NULL;

	return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{
}

static int seq_show(struct seq_file *s, void *v)
{
	int ret;
	loff_t *pos = v;
	struct nf_queue_handler *qh;

	read_lock_bh(&queue_handler_lock);
	qh = queue_handler[*pos];
	if (!qh)
		ret = seq_printf(s, "%2lld NONE\n", *pos);
	else
		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
	read_unlock_bh(&queue_handler_lock);

	return ret;
}
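
/*
 * The resulting /proc listing has one line per protocol family, for
 * example (handler names depend on what is loaded; "nf_queue" is the
 * name nfnetlink_queue registers under):
 *
 *	 0 NONE
 *	 1 NONE
 *	 2 nf_queue
 *	 3 NONE
 */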

static struct seq_operations nfqueue_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqueue_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nfqueue_seq_ops);
}

static struct file_operations nfqueue_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqueue_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif /* PROC_FS */

int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *pde;
#endif
	queue_rerouter = kmalloc(NPROTO * sizeof(struct nf_queue_rerouter),
				 GFP_KERNEL);
	if (!queue_rerouter)
		return -ENOMEM;

	/* Start with an empty rerouter table. */
	memset(queue_rerouter, 0, NPROTO * sizeof(struct nf_queue_rerouter));

#ifdef CONFIG_PROC_FS
	pde = create_proc_entry("nf_queue", S_IRUGO, proc_net_netfilter);
	if (!pde) {
		kfree(queue_rerouter);
		return -ENOMEM;
	}
	pde->proc_fops = &nfqueue_file_ops;
#endif

	return 0;
}