/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

static unsigned int nf_ct_expect_hash_rnd __read_mostly;
unsigned int nf_ct_expect_max __read_mostly;
static int nf_ct_expect_hash_rnd_initted __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect);

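/* An unused expectation's timer expired: unlink it under the conntrack
 * lock and drop the reference the timer was holding. */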
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_put(exp);
}

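/* Hash an expectation by the destination part of its tuple: the address
 * is jhashed with the l3/l4 protocol numbers and destination port mixed
 * into the seed, and the multiply-shift maps the 32-bit hash onto
 * [0, nf_ct_expect_hsize) without a modulo. */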
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
		get_random_bytes(&nf_ct_expect_hash_rnd,
				 sizeof(nf_ct_expect_hash_rnd));
		nf_ct_expect_hash_rnd_initted = 1;
	}

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd);
	return ((u64)hash * nf_ct_expect_hsize) >> 32;
}

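/* Find an expectation matching @tuple. The caller must hold
 * rcu_read_lock() (or nf_conntrack_lock); the result is only guaranteed
 * to stay valid within the read-side critical section unless a reference
 * is taken, see nf_ct_expect_find_get(). */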
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	struct hlist_node *n;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, tuple);
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

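/* Called by the conntrack core for each new connection, under
 * nf_conntrack_lock. On success the expectation is returned holding a
 * reference that the caller must put. */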
/* If an expectation for this connection is found, it is deleted from the
 * global list and returned (unless it is PERMANENT, in which case it
 * stays in the table). */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	struct hlist_node *n;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If the master is not in the hash table yet (ie. the packet hasn't
	   left this machine yet), how can the other end know about the
	   expected connection? Hence these are not the droids you are
	   looking for (if the master ct never got confirmed, we'd hold a
	   reference to it and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}

	return NULL;
}

/* Delete all expectations for this conntrack. */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *n, *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* The part covered by the intersection of the two masks must be
	   unequal, otherwise they clash. E.g. an expectation whose source
	   is fully wildcarded clashes with any other expectation for the
	   same destination. */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master && a->class == b->class
		&& nf_ct_tuple_equal(&a->tuple, &b->tuple)
		&& nf_ct_tuple_mask_equal(&a->mask, &b->mask);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_lock);
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for unfulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	atomic_set(&new->use, 1);
	INIT_RCU_HEAD(&new->rcu);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

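/* Initialize the tuple and mask of @exp. A NULL @saddr or @src leaves the
 * source address or source port wildcarded; @daddr and @dst must be
 * given. */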
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

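/*
 * Typical helper-side use of the API above (a sketch modelled on the FTP
 * helper; "dir", "port" and the error handling are illustrative only):
 *
 *	exp = nf_ct_expect_alloc(ct);
 *	if (exp == NULL)
 *		return NF_DROP;
 *	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *			  &ct->tuplehash[!dir].tuple.src.u3,
 *			  &ct->tuplehash[!dir].tuple.dst.u3,
 *			  IPPROTO_TCP, NULL, &port);
 *	if (nf_ct_expect_related(exp) != 0)
 *		ret = NF_DROP;
 *	nf_ct_expect_put(exp);
 */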
static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

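/* Caller must hold nf_conntrack_lock. Two references are taken: one for
 * the hash table, one for the expiry timer. */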
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);
	const struct nf_conntrack_expect_policy *p;
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	atomic_inc(&exp->use);

	hlist_add_head(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	p = &master_help->helper->expect_policy[exp->class];
	exp->timeout.expires = jiffies + p->timeout * HZ;
	add_timer(&exp->timeout);

	atomic_inc(&exp->use);
	NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;
	struct hlist_node *n;

	hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last && del_timer(&last->timeout)) {
		nf_ct_unlink_expect(last);
		nf_ct_expect_put(last);
	}
}

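/* Restart an existing expectation's timer with its policy timeout.
 * Returns 0 if the timer was already dying, 1 on success. */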
static inline int refresh_timer(struct nf_conntrack_expect *i)
{
	struct nf_conn_help *master_help = nfct_help(i->master);
	const struct nf_conntrack_expect_policy *p;

	if (!del_timer(&i->timeout))
		return 0;

	p = &master_help->helper->expect_policy[i->class];
	i->timeout.expires = jiffies + p->timeout * HZ;
	add_timer(&i->timeout);
	return 1;
}

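/* Caller must hold nf_conntrack_lock. Returns 1 if @expect may be
 * inserted, 0 if a matching expectation's timer was refreshed instead
 * (nothing left to insert), or a negative error code. */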
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *n;
	unsigned int h;
	int ret = 1;

	if (!master_help->helper) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(&expect->tuple);
	hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			/* Refresh timer: if it's dying, ignore it. */
			if (refresh_timer(i)) {
				ret = 0;
				goto out;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will we be over the limit? */
	p = &master_help->helper->expect_policy[expect->class];
	if (p->max_expected &&
	    master_help->expecting[expect->class] >= p->max_expected) {
		evict_oldest_expect(master, expect);
		if (master_help->expecting[expect->class] >= p->max_expected) {
			ret = -EMFILE;
			goto out;
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 pid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_lock);
	ret = __nf_ct_expect_check(expect);
	if (ret <= 0)
		goto out;

	ret = 0;
	nf_ct_expect_insert(expect);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
	return ret;
out:
	spin_unlock_bh(&nf_conntrack_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

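/* /proc/net/nf_conntrack_expect shows one expectation per line: the
 * remaining timeout in seconds, the l3/l4 protocol numbers, the expected
 * tuple, and the PERMANENT/INACTIVE flags. */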
#ifdef CONFIG_PROC_FS
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
		if (n)
			return n;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(head->next);
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE)
		seq_printf(s, "%sINACTIVE", delim);

	return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
	.owner   = THIS_MODULE,
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_PROC_FS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc;

	proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */
	return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}

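/* Expectation hash table size, settable via nf_conntrack's
 * "expect_hashsize" module parameter. */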
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600);

int nf_conntrack_expect_init(struct net *net)
{
	int err = -ENOMEM;

	if (net_eq(net, &init_net)) {
		if (!nf_ct_expect_hsize) {
			nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
			if (!nf_ct_expect_hsize)
				nf_ct_expect_hsize = 1;
		}
		nf_ct_expect_max = nf_ct_expect_hsize * 4;
	}

	net->ct.expect_count = 0;
	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
						    &net->ct.expect_vmalloc, 0);
	if (net->ct.expect_hash == NULL)
		goto err1;

	if (net_eq(net, &init_net)) {
		nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
					sizeof(struct nf_conntrack_expect),
					0, 0, NULL);
		if (!nf_ct_expect_cachep)
			goto err2;
	}

	err = exp_proc_init(net);
	if (err < 0)
		goto err3;

	return 0;

err3:
	if (net_eq(net, &init_net))
		kmem_cache_destroy(nf_ct_expect_cachep);
err2:
	nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
			     nf_ct_expect_hsize);
err1:
	return err;
}

void nf_conntrack_expect_fini(struct net *net)
{
	exp_proc_remove(net);
	if (net_eq(net, &init_net))
		kmem_cache_destroy(nf_ct_expect_cachep);
	nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
			     nf_ct_expect_hsize);
}