/*
 * net/netfilter/nfnetlink_queue.c
 *
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	struct rcu_head rcu;

	int peer_pid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_total;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	unsigned int id_sequence;		/* 'sequence' of pkt ids */

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;

	spinlock_t lock;

	struct list_head queue_list;		/* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_SPINLOCK(instances_lock);

#define INSTANCE_BUCKETS	16
static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;

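/* Map a queue number onto one of the INSTANCE_BUCKETS hash buckets. */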
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}

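/*
 * Look up the instance serving a given queue number.  Runs under either
 * rcu_read_lock() (packet path) or instances_lock (configuration path).
 */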
static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfqnl_instance *inst;

	head = &instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

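/*
 * Allocate and register a new queue instance, bound to the netlink pid
 * of the requesting userspace process, and pin the module while the
 * instance exists.  Fails with -EEXIST if the queue number is taken.
 */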
static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
	struct nfqnl_instance *inst;
	unsigned int h;
	int err;

	spin_lock(&instances_lock);
	if (instance_lookup(queue_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	inst->queue_num = queue_num;
	inst->peer_pid = pid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = 0xfffff;
	inst->copy_mode = NFQNL_COPY_NONE;
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);
	INIT_RCU_HEAD(&inst->rcu);

	if (!try_module_get(THIS_MODULE)) {
		err = -EAGAIN;
		goto out_free;
	}

	h = instance_hashfn(queue_num);
	hlist_add_head_rcu(&inst->hlist, &instance_table[h]);

	spin_unlock(&instances_lock);

	return inst;

out_free:
	kfree(inst);
out_unlock:
	spin_unlock(&instances_lock);
	return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
						   rcu);

	nfqnl_flush(inst, NULL, 0);
	kfree(inst);
	module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
	hlist_del_rcu(&inst->hlist);
	call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfqnl_instance *inst)
{
	spin_lock(&instances_lock);
	__instance_destroy(inst);
	spin_unlock(&instances_lock);
}

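/* Append an entry to the queue list; caller must hold queue->lock. */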
static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

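/*
 * Find the queued entry with the given packet id and unlink it from the
 * queue.  Returns NULL if nothing matches, e.g. when userspace sends a
 * verdict for a packet that has already been flushed.
 */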
static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry) {
		list_del(&entry->list);
		queue->queue_total--;
	}

	spin_unlock_bh(&queue->lock);

	return entry;
}

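/*
 * Reinject with NF_DROP every queued entry for which cmpfn returns true;
 * a NULL cmpfn matches everything and empties the queue.
 */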
static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}

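/*
 * Build the NFQNL_MSG_PACKET netlink message for a queued packet: packet
 * header, interface indexes, mark, hardware address and timestamp, plus
 * up to copy_range bytes of payload in NFQNL_COPY_PACKET mode.  Also
 * assigns the packet id under queue->lock.
 */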
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry)
{
	sk_buff_data_t old_tail;
	size_t size;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct nfqnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;

	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

	outdev = entry->outdev;

	spin_lock_bh(&queue->lock);

	switch ((enum nfqnl_config_mode)queue->copy_mode) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		data_len = 0;
		break;

	case NFQNL_COPY_PACKET:
		if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
		     entskb->ip_summed == CHECKSUM_COMPLETE) &&
		    skb_checksum_help(entskb)) {
			spin_unlock_bh(&queue->lock);
			return NULL;
		}
		if (queue->copy_range == 0
		    || queue->copy_range > entskb->len)
			data_len = entskb->len;
		else
			data_len = queue->copy_range;

		size += nla_total_size(data_len);
		break;
	}

	entry->id = queue->id_sequence++;

	spin_unlock_bh(&queue->lock);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = entry->pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	pmsg.packet_id		= htonl(entry->id);
	pmsg.hw_protocol	= entskb->protocol;
	pmsg.hook		= entry->hook;

	NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

	indev = entry->indev;
	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
				     htonl(indev->ifindex));
			/* this is the bridge group "brX" */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
				     htonl(indev->br_port->br->dev->ifindex));
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
				     htonl(indev->ifindex));
			if (entskb->nf_bridge && entskb->nf_bridge->physindev)
				NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
					     htonl(entskb->nf_bridge->physindev->ifindex));
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
				     htonl(outdev->ifindex));
			/* this is the bridge group "brX" */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
				     htonl(outdev->br_port->br->dev->ifindex));
		} else {
			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
				     htonl(outdev->ifindex));
			if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
				NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					     htonl(entskb->nf_bridge->physoutdev->ifindex));
		}
#endif
	}

	if (entskb->mark)
		NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));

	if (indev && entskb->dev) {
		struct nfqnl_msg_packet_hw phw;
		int len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (entskb->tstamp.tv64) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(entskb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
	}

	if (data_len) {
		struct nlattr *nla;
		int sz = nla_attr_size(data_len);

		if (skb_tailroom(skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nf_queue: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = sz;

		if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
nla_put_failure:
	if (skb)
		kfree_skb(skb);
	if (net_ratelimit())
		printk(KERN_ERR "nf_queue: error creating packet message\n");
	return NULL;
}

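/*
 * nf_queue callback: turn the queued packet into a netlink message, send
 * it to the bound userspace listener and park the entry on the queue
 * list until a verdict arrives.  Fails if no listener is bound, the
 * queue is full, or message allocation fails.
 */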
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	struct sk_buff *nskb;
	struct nfqnl_instance *queue;
	int err;

	/* rcu_read_lock()ed by nf_hook_slow() */
	queue = instance_lookup(queuenum);
	if (!queue)
		goto err_out;

	if (queue->copy_mode == NFQNL_COPY_NONE)
		goto err_out;

	nskb = nfqnl_build_packet_message(queue, entry);
	if (nskb == NULL)
		goto err_out;

	spin_lock_bh(&queue->lock);

	if (!queue->peer_pid)
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		queue->queue_dropped++;
		if (net_ratelimit())
			printk(KERN_WARNING "nf_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n",
			       queue->queue_total, queue->queue_dropped);
		goto err_out_free_nskb;
	}

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	err = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
	if (err < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	return 0;

err_out_free_nskb:
	kfree_skb(nskb);
err_out_unlock:
	spin_unlock_bh(&queue->lock);
err_out:
	return -1;
}

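/*
 * Replace the payload of a queued skb with data supplied by userspace,
 * growing or trimming the skb as needed.  The checksum is invalidated
 * since the packet contents changed.
 */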
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
	struct sk_buff *nskb;
	int diff;

	diff = data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
					       diff, GFP_ATOMIC);
			if (!nskb) {
				printk(KERN_WARNING "nf_queue: OOM "
				       "in mangle, dropping packet\n");
				return -ENOMEM;
			}
			kfree_skb(e->skb);
			e->skb = nskb;
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}

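/*
 * Update copy mode and copy range for a queue.  The range is clamped to
 * 0xffff because the payload travels in a struct nlattr, whose nla_len
 * field is only 16 bits wide.
 */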
static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status = 0;

	spin_lock_bh(&queue->lock);
	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		/* we're using struct nlattr which has 16bit nla_len */
		if (range > 0xffff)
			queue->copy_range = 0xffff;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;
	}
	spin_unlock_bh(&queue->lock);

	return status;
}

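/*
 * nfqnl_cmpfn for nfqnl_flush(): match entries whose input or output
 * device (physical or bridge) has the given ifindex.
 */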
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
	int i;

	rcu_read_lock();

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct hlist_node *tmp;
		struct nfqnl_instance *inst;
		struct hlist_head *head = &instance_table[i];

		hlist_for_each_entry_rcu(inst, tmp, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

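/* netdevice notifier: flush queued packets referencing a downed device. */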
static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev_net(dev) != &init_net)
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

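/*
 * netlink notifier: when a NETLINK_NETFILTER socket is released, tear
 * down every queue instance bound to its pid.
 */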
static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE &&
	    n->protocol == NETLINK_NETFILTER && n->pid) {
		int i;

		/* destroy all instances for this pid */
		spin_lock(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if ((n->net == &init_net) &&
				    (n->pid == inst->peer_pid))
					__instance_destroy(inst);
			}
		}
		spin_unlock(&instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
};

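/*
 * NFQNL_MSG_VERDICT handler: dequeue the referenced packet, optionally
 * mangle its payload and update its mark, then reinject it with the
 * verdict chosen by userspace.  Only the process that bound the queue
 * may issue verdicts.
 */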
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nf_queue_entry *entry;
	int err;

	rcu_read_lock();
	queue = instance_lookup(queue_num);
	if (!queue) {
		err = -ENODEV;
		goto err_out_unlock;
	}

	if (queue->peer_pid != NETLINK_CB(skb).pid) {
		err = -EPERM;
		goto err_out_unlock;
	}

	if (!nfqa[NFQA_VERDICT_HDR]) {
		err = -EINVAL;
		goto err_out_unlock;
	}

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict);

	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
		err = -EINVAL;
		goto err_out_unlock;
	}

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL) {
		err = -ENOENT;
		goto err_out_unlock;
	}
	rcu_read_unlock();

	if (nfqa[NFQA_PAYLOAD]) {
		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
			verdict = NF_DROP;
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

	nf_reinject(entry, verdict);
	return 0;

err_out_unlock:
	rcu_read_unlock();
	return err;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
	.name	= "nf_queue",
	.outfn	= &nfqnl_enqueue_packet,
};

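/*
 * NFQNL_MSG_CONFIG handler: (un)bind the queue handler for a protocol
 * family, create or destroy queue instances, and update per-queue copy
 * mode and maximum length.
 */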
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	struct nfqnl_msg_config_cmd *cmd = NULL;
	int ret = 0;

	if (nfqa[NFQA_CFG_CMD]) {
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);

		/* Commands without queue context - might sleep */
		switch (cmd->command) {
		case NFQNL_CFG_CMD_PF_BIND:
			return nf_register_queue_handler(ntohs(cmd->pf),
							 &nfqh);
		case NFQNL_CFG_CMD_PF_UNBIND:
			return nf_unregister_queue_handler(ntohs(cmd->pf),
							   &nfqh);
		}
	}

	rcu_read_lock();
	queue = instance_lookup(queue_num);
	if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
		ret = -EPERM;
		goto err_out_unlock;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue) {
				ret = -EBUSY;
				goto err_out_unlock;
			}
			queue = instance_create(queue_num, NETLINK_CB(skb).pid);
			if (IS_ERR(queue)) {
				ret = PTR_ERR(queue);
				goto err_out_unlock;
			}
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue) {
				ret = -ENODEV;
				goto err_out_unlock;
			}
			instance_destroy(queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
		case NFQNL_CFG_CMD_PF_UNBIND:
			break;
		default:
			ret = -ENOTSUPP;
			break;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		params = nla_data(nfqa[NFQA_CFG_PARAMS]);
		nfqnl_set_mode(queue, params->copy_mode,
			       ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

err_out_unlock:
	rcu_read_unlock();
	return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_policy },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .policy = nfqa_cfg_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

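/*
 * /proc/net/netfilter/nfnetlink_queue: one line of counters per queue
 * instance, walked under instances_lock.
 */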
#ifdef CONFIG_PROC_FS
struct iter_state {
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;

	if (!st)
		return NULL;

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;

	h = h->next;
	while (!h) {
		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		h = instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(instances_lock)
{
	spin_lock(&instances_lock);
	return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
	__releases(instances_lock)
{
	spin_unlock(&instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
			  inst->queue_num,
			  inst->peer_pid, inst->queue_total,
			  inst->copy_mode, inst->copy_range,
			  inst->queue_dropped, inst->queue_user_dropped,
			  inst->id_sequence, 1);
}

static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &nfqnl_seq_ops,
			sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* CONFIG_PROC_FS */

static int __init nfnetlink_queue_init(void)
{
	int i, status = -ENOMEM;

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&instance_table[i]);

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

#ifdef CONFIG_PROC_FS
	if (!proc_create("nfnetlink_queue", 0440,
			 proc_net_netfilter, &nfqnl_file_ops)) {
		status = -ENOMEM;
		goto cleanup_subsys;
	}
#endif

	register_netdevice_notifier(&nfqnl_dev_notifier);
	return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);