/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))

struct compat_delta {
	struct compat_delta *next;
	unsigned int offset;
	short delta;
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_offsets;
#endif
};

static struct xt_af *xt;

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};
/* Registration hooks for targets. */
int
xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		xt_unregister_target(&target[i]);
}
EXPORT_SYMBOL(xt_unregister_targets);
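/*
 * Usage sketch (illustrative only, not part of this file): a target module
 * usually registers a static array at init time and relies on the partial
 * unwind in xt_register_targets() if one entry fails. The names "foo_tg",
 * "foo_tg_reg" and "struct xt_foo_tginfo" are made up.
 *
 *	static struct xt_target foo_tg_reg[] __read_mostly = {
 *		{
 *			.name       = "FOO",
 *			.revision   = 0,
 *			.family     = NFPROTO_IPV4,
 *			.target     = foo_tg,
 *			.targetsize = sizeof(struct xt_foo_tginfo),
 *			.me         = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init foo_tg_init(void)
 *	{
 *		return xt_register_targets(foo_tg_reg, ARRAY_SIZE(foo_tg_reg));
 *	}
 *
 *	static void __exit foo_tg_exit(void)
 *	{
 *		xt_unregister_targets(foo_tg_reg, ARRAY_SIZE(foo_tg_reg));
 *	}
 */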
int
xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;

	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);

	return ret;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		xt_unregister_match(&match[i]);
}
EXPORT_SYMBOL(xt_unregister_matches);
/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use try_then_request_module().
 */

/* Find match, grabs ref. Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = 0;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);
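/*
 * Illustrative caller sketch: because xt_find_match() drops the mutex before
 * returning, a ruleset parser can combine it with module auto-loading
 * roughly the way the per-family table code does (error handling simplified,
 * "name"/"rev" stand in for the rule's user-supplied fields):
 *
 *	match = try_then_request_module(xt_find_match(NFPROTO_IPV4, name, rev),
 *					"ipt_%s", name);
 *	if (IS_ERR(match) || match == NULL)
 *		return match ? PTR_ERR(match) : -ENOENT;
 */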
/* Find target, grabs ref. Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = 0;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	target = try_then_request_module(xt_find_target(af, name, revision),
					 "%st_%s", xt_prefix[af], name);
	if (IS_ERR(target) || !target)
		return NULL;
	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
		*err = -EINTR;
		return 1;
	}
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all? Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
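/*
 * Semantics sketch (illustrative, field names invented): a GET_REVISION
 * style getsockopt handler would drive this roughly as follows, requesting
 * the extension module and retrying when 0 ("no such extension at all")
 * is returned:
 *
 *	int ret;
 *
 *	if (xt_find_revision(NFPROTO_IPV4, rev.name, rev.revision,
 *			     1 / * target * /, &ret))
 *		return ret;	/ * best revision, or -EPROTONOSUPPORT * /
 *	/ * else: request_module("ipt_<name>") and try again * /
 */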
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		printk("%s_tables: %s match: invalid size %Zu != %u\n",
		       xt_prefix[par->family], par->match->name,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		printk("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		printk("%s_tables: %s match: bad hook_mask %#x/%#x\n",
		       xt_prefix[par->family], par->match->name,
		       par->hook_mask, par->match->hooks);
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		printk("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL && !par->match->checkentry(par))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
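/*
 * Illustrative sketch (names invented, not from this file) of the
 * per-extension fields that xt_check_match() validates above: .matchsize
 * must equal the aligned size supplied by the ruleset, .table/.hooks/.proto
 * restrict where the match may appear, and .checkentry gets the final
 * per-rule veto.
 *
 *	static struct xt_match foo_mt_reg __read_mostly = {
 *		.name       = "foo",
 *		.revision   = 0,
 *		.family     = NFPROTO_UNSPEC,
 *		.match      = foo_mt,
 *		.checkentry = foo_mt_check,
 *		.matchsize  = sizeof(struct xt_foo_mtinfo),
 *		.table      = "filter",
 *		.hooks      = (1 << NF_INET_LOCAL_IN) |
 *		              (1 << NF_INET_FORWARD),
 *		.proto      = IPPROTO_TCP,
 *		.me         = THIS_MODULE,
 *	};
 */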
#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->offset = offset;
	tmp->delta = delta;

	if (xt[af].compat_offsets) {
		tmp->next = xt[af].compat_offsets->next;
		xt[af].compat_offsets->next = tmp;
	} else {
		xt[af].compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	struct compat_delta *tmp, *next;

	if (xt[af].compat_offsets) {
		for (tmp = xt[af].compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		xt[af].compat_offsets = NULL;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

short xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp;
	short delta;

	for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);
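/*
 * Worked example (illustrative): if three 32-bit entries start at compat
 * offsets 0, 112 and 224, and each grows by 8 bytes when converted to the
 * native layout, then after three xt_compat_add_offset() calls
 * xt_compat_calc_jump(af, 224) returns 16 - only the deltas recorded at
 * offsets strictly below 224 are summed - so a jump target of 224 in the
 * compat blob becomes 224 + 16 in the native blob.
 */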
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;

	*size += off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr,
			    unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	if (XT_ALIGN(par->target->targetsize) != size) {
		printk("%s_tables: %s target: invalid size %Zu != %u\n",
		       xt_prefix[par->family], par->target->name,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		printk("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		printk("%s_tables: %s target: bad hook_mask %#x/%#x\n",
		       xt_prefix[par->family], par->target->name,
		       par->hook_mask, par->target->hooks);
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		printk("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL && !par->target->checkentry(par))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr,
			     unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *newinfo;
	int cpu;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
		return NULL;

	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
	if (!newinfo)
		return NULL;

	newinfo->size = size;

	for_each_possible_cpu(cpu) {
		if (size <= PAGE_SIZE)
			newinfo->entries[cpu] = kmalloc_node(size,
							GFP_KERNEL,
							cpu_to_node(cpu));
		else
			newinfo->entries[cpu] = vmalloc_node(size,
							cpu_to_node(cpu));

		if (newinfo->entries[cpu] == NULL) {
			xt_free_table_info(newinfo);
			return NULL;
		}
	}

	return newinfo;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (info->size <= PAGE_SIZE)
			kfree(info->entries[cpu]);
		else
			vfree(info->entries[cpu]);
	}
	kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
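/*
 * Usage sketch (illustrative, simplified from the per-family do_replace
 * paths): the two helpers above are always paired, and the userspace blob
 * is first copied into the local CPU's buffer before translation duplicates
 * it to the other CPUs.
 *
 *	newinfo = xt_alloc_table_info(tmp.size);
 *	if (!newinfo)
 *		return -ENOMEM;
 *	if (copy_from_user(newinfo->entries[raw_smp_processor_id()],
 *			   user_entries, tmp.size) != 0) {
 *		xt_free_table_info(newinfo);
 *		return -EFAULT;
 *	}
 */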
/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
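/*
 * Usage sketch (illustrative): the 32-bit compat paths in the per-family
 * modules bracket their offset bookkeeping with these helpers, e.g.:
 *
 *	xt_compat_lock(NFPROTO_IPV4);
 *	... xt_compat_add_offset()/xt_compat_calc_jump() while translating ...
 *	xt_compat_flush_offsets(NFPROTO_IPV4);
 *	xt_compat_unlock(NFPROTO_IPV4);
 */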
DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		duprintf("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	table->private = newinfo;
	newinfo->initial_entries = private->initial_entries;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
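/*
 * Illustrative sketch of how the helpers fit together in a table
 * replacement (simplified; real callers also validate counters and handle
 * every error path, and the "put_module" label is invented here):
 *
 *	newinfo = xt_alloc_table_info(size);
 *	... copy and translate the new ruleset into newinfo ...
 *	t = xt_find_table_lock(net, af, name);
 *	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
 *	if (!oldinfo)
 *		goto put_module;
 *	... harvest counters from oldinfo ...
 *	xt_free_table_info(oldinfo);
 *	module_put(t->me);
 *	xt_table_unlock(t);
 */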
struct xt_table *xt_register_table(struct net *net, struct xt_table *table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t;

	/* Don't add one object to multiple lists. */
	table = kmemdup(table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mutex_lock_interruptible(&xt[table->af].mutex);
	if (ret != 0)
		goto out_free;

	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	duprintf("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
out_free:
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
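/*
 * Usage sketch (illustrative and heavily simplified): a table module in the
 * spirit of iptable_filter registers its table per network namespace and
 * tears it down again from its pernet exit hook. "packet_filter",
 * "bootstrap_info" and "repl_info" stand in for that module's own data.
 *
 *	table = xt_register_table(net, &packet_filter,
 *				  bootstrap_info, repl_info);
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *	...
 *	oldinfo = xt_unregister_table(table);
 *	... release per-rule references held by oldinfo ...
 *	xt_free_table_info(oldinfo);
 */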
#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};

static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (strlen(table->name))
		return seq_printf(seq, "%s\n", table->name);
	else
		return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE(inode)->data;
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		return (*match->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_match_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE(inode)->data;
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		return (*target->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_target_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE(inode)->data;
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
#define FORMAT_TABLES	"_tables_names"
#define	FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS 	"_tables_targets"

#endif /* CONFIG_PROC_FS */
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(net, buf);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(net, buf);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(net, buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc_net_remove(net, buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(net, buf);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
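/*
 * Usage sketch (illustrative): a per-family frontend (ip_tables, ip6_tables,
 * arp_tables) creates and removes its /proc entries from its own pernet
 * init/exit hooks; "foo_tables_net_init/exit" are invented names here.
 *
 *	static int __net_init foo_tables_net_init(struct net *net)
 *	{
 *		return xt_proto_init(net, NFPROTO_IPV4);
 *	}
 *
 *	static void __net_exit foo_tables_net_exit(struct net *net)
 *	{
 *		xt_proto_fini(net, NFPROTO_IPV4);
 *	}
 */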
static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
		spin_lock_init(&lock->lock);
		lock->readers = 0;
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_offsets = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);