 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
 *	- increase module usage count as soon as we have rules inside
15 #include <linux/config.h>
16 #include <linux/cache.h>
17 #include <linux/skbuff.h>
18 #include <linux/kmod.h>
19 #include <linux/vmalloc.h>
20 #include <linux/netdevice.h>
21 #include <linux/module.h>
22 #include <linux/tcp.h>
23 #include <linux/udp.h>
24 #include <linux/icmp.h>
26 #include <asm/uaccess.h>
27 #include <asm/semaphore.h>
28 #include <linux/proc_fs.h>
29 #include <linux/err.h>
31 #include <linux/netfilter_ipv4/ip_tables.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv4 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
50 #define duprintf(format, args...)
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __FUNCTION__, __FILE__, __LINE__); \
61 #define IP_NF_ASSERT(x)
63 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
65 static DECLARE_MUTEX(ipt_mutex);
68 #define ASSERT_READ_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
69 #define ASSERT_WRITE_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
70 #include <linux/netfilter_ipv4/listhelp.h>
73 /* All the better to debug you with... */
79 We keep a set of rules for each CPU, so we can avoid write-locking
80 them in the softirq when updating the counters and therefore
81 only need to read-lock in the softirq; doing a write_lock_bh() in user
82 context stops packets coming through and allows user context to read
83 the counters or update the rules.
85 To be cache friendly on SMP, we arrange them like so:
87 ... cache-align padding ...
90 Hence the start of any table is given by get_table() below. */
92 /* The table itself */
97 /* Number of entries: FIXME. --RR */
99 /* Initial number of entries. Needed for module usage count */
100 unsigned int initial_entries;
102 /* Entry points and underflows */
103 unsigned int hook_entry[NF_IP_NUMHOOKS];
104 unsigned int underflow[NF_IP_NUMHOOKS];
106 /* ipt_entry tables: one per CPU */
107 char entries[0] ____cacheline_aligned;
110 static LIST_HEAD(ipt_target);
111 static LIST_HEAD(ipt_match);
112 static LIST_HEAD(ipt_tables);
113 #define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
116 #define TABLE_OFFSET(t,p) (SMP_ALIGN((t)->size)*(p))
118 #define TABLE_OFFSET(t,p) 0
122 #define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
123 #define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
124 #define up(x) do { printk("UP:%u:" #x "\n", __LINE__); up(x); } while(0)
127 /* Returns whether matches rule or not. */
129 ip_packet_match(const struct iphdr *ip,
132 const struct ipt_ip *ipinfo,
138 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
140 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
142 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
144 dprintf("Source or dest mismatch.\n");
146 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
148 NIPQUAD(ipinfo->smsk.s_addr),
149 NIPQUAD(ipinfo->src.s_addr),
150 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
151 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
153 NIPQUAD(ipinfo->dmsk.s_addr),
154 NIPQUAD(ipinfo->dst.s_addr),
155 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
159 /* Look for ifname matches; this should unroll nicely. */
160 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
161 ret |= (((const unsigned long *)indev)[i]
162 ^ ((const unsigned long *)ipinfo->iniface)[i])
163 & ((const unsigned long *)ipinfo->iniface_mask)[i];
166 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
167 dprintf("VIA in mismatch (%s vs %s).%s\n",
168 indev, ipinfo->iniface,
169 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
173 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
174 ret |= (((const unsigned long *)outdev)[i]
175 ^ ((const unsigned long *)ipinfo->outiface)[i])
176 & ((const unsigned long *)ipinfo->outiface_mask)[i];
179 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
180 dprintf("VIA out mismatch (%s vs %s).%s\n",
181 outdev, ipinfo->outiface,
182 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
186 /* Check specific protocol */
188 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
189 dprintf("Packet protocol %hi does not match %hi.%s\n",
190 ip->protocol, ipinfo->proto,
191 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
195 /* If we have a fragment rule but the packet is not a fragment
196 * then we return zero */
197 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
198 dprintf("Fragment rule but not fragment.%s\n",
199 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
207 ip_checkentry(const struct ipt_ip *ip)
209 if (ip->flags & ~IPT_F_MASK) {
210 duprintf("Unknown flag bits set: %08X\n",
211 ip->flags & ~IPT_F_MASK);
214 if (ip->invflags & ~IPT_INV_MASK) {
215 duprintf("Unknown invflag bits set: %08X\n",
216 ip->invflags & ~IPT_INV_MASK);
223 ipt_error(struct sk_buff **pskb,
224 const struct net_device *in,
225 const struct net_device *out,
226 unsigned int hooknum,
227 const void *targinfo,
231 printk("ip_tables: error: `%s'\n", (char *)targinfo);
237 int do_match(struct ipt_entry_match *m,
238 const struct sk_buff *skb,
239 const struct net_device *in,
240 const struct net_device *out,
244 /* Stop iteration if it doesn't match */
245 if (!m->u.kernel.match->match(skb, in, out, m->data, offset, hotdrop))
/* Return the ipt_entry located `offset' bytes into a rule blob.
 * (void* arithmetic is a GNU C extension, used throughout this file.)
 * NOTE(review): braces restored; the single return statement is the
 * entire visible body. */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
257 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
259 ipt_do_table(struct sk_buff **pskb,
261 const struct net_device *in,
262 const struct net_device *out,
263 struct ipt_table *table,
266 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
271 /* Initializing verdict to NF_DROP keeps gcc happy. */
272 unsigned int verdict = NF_DROP;
273 const char *indev, *outdev;
275 struct ipt_entry *e, *back;
278 ip = (*pskb)->nh.iph;
279 datalen = (*pskb)->len - ip->ihl * 4;
280 indev = in ? in->name : nulldevname;
281 outdev = out ? out->name : nulldevname;
282 /* We handle fragments by dealing with the first fragment as
283 * if it was a normal packet. All other fragments are treated
284 * normally, except that they will NEVER match rules that ask
285 * things we don't know, ie. tcp syn flag or ports). If the
286 * rule is also a fragment-specific rule, non-fragments won't
288 offset = ntohs(ip->frag_off) & IP_OFFSET;
290 read_lock_bh(&table->lock);
291 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
292 table_base = (void *)table->private->entries
293 + TABLE_OFFSET(table->private, smp_processor_id());
294 e = get_entry(table_base, table->private->hook_entry[hook]);
296 #ifdef CONFIG_NETFILTER_DEBUG
297 /* Check noone else using our table */
298 if (((struct ipt_entry *)table_base)->comefrom != 0xdead57ac
299 && ((struct ipt_entry *)table_base)->comefrom != 0xeeeeeeec) {
300 printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n",
303 &((struct ipt_entry *)table_base)->comefrom,
304 ((struct ipt_entry *)table_base)->comefrom);
306 ((struct ipt_entry *)table_base)->comefrom = 0x57acc001;
309 /* For return from builtin chain */
310 back = get_entry(table_base, table->private->underflow[hook]);
315 (*pskb)->nfcache |= e->nfcache;
316 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
317 struct ipt_entry_target *t;
319 if (IPT_MATCH_ITERATE(e, do_match,
321 offset, &hotdrop) != 0)
324 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
326 t = ipt_get_target(e);
327 IP_NF_ASSERT(t->u.kernel.target);
328 /* Standard target? */
329 if (!t->u.kernel.target->target) {
332 v = ((struct ipt_standard_target *)t)->verdict;
334 /* Pop from stack? */
335 if (v != IPT_RETURN) {
336 verdict = (unsigned)(-v) - 1;
340 back = get_entry(table_base,
345 != (void *)e + e->next_offset) {
346 /* Save old back ptr in next entry */
347 struct ipt_entry *next
348 = (void *)e + e->next_offset;
350 = (void *)back - table_base;
351 /* set back pointer to next entry */
355 e = get_entry(table_base, v);
357 /* Targets which reenter must return
359 #ifdef CONFIG_NETFILTER_DEBUG
360 ((struct ipt_entry *)table_base)->comefrom
363 verdict = t->u.kernel.target->target(pskb,
369 #ifdef CONFIG_NETFILTER_DEBUG
370 if (((struct ipt_entry *)table_base)->comefrom
372 && verdict == IPT_CONTINUE) {
373 printk("Target %s reentered!\n",
374 t->u.kernel.target->name);
377 ((struct ipt_entry *)table_base)->comefrom
380 /* Target might have changed stuff. */
381 ip = (*pskb)->nh.iph;
382 datalen = (*pskb)->len - ip->ihl * 4;
384 if (verdict == IPT_CONTINUE)
385 e = (void *)e + e->next_offset;
393 e = (void *)e + e->next_offset;
397 #ifdef CONFIG_NETFILTER_DEBUG
398 ((struct ipt_entry *)table_base)->comefrom = 0xdead57ac;
400 read_unlock_bh(&table->lock);
402 #ifdef DEBUG_ALLOW_ALL
412 * These are weird, but module loading must not be done with mutex
413 * held (since they will register), and we have to have a single
414 * function to use try_then_request_module().
417 /* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
418 static inline struct ipt_table *find_table_lock(const char *name)
422 if (down_interruptible(&ipt_mutex) != 0)
423 return ERR_PTR(-EINTR);
425 list_for_each_entry(t, &ipt_tables, list)
426 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
432 /* Find match, grabs ref. Returns ERR_PTR() on error. */
433 static inline struct ipt_match *find_match(const char *name, u8 revision)
438 if (down_interruptible(&ipt_mutex) != 0)
439 return ERR_PTR(-EINTR);
441 list_for_each_entry(m, &ipt_match, list) {
442 if (strcmp(m->name, name) == 0) {
443 if (m->revision == revision) {
444 if (try_module_get(m->me)) {
449 err = -EPROTOTYPE; /* Found something. */
456 /* Find target, grabs ref. Returns ERR_PTR() on error. */
457 static inline struct ipt_target *find_target(const char *name, u8 revision)
459 struct ipt_target *t;
462 if (down_interruptible(&ipt_mutex) != 0)
463 return ERR_PTR(-EINTR);
465 list_for_each_entry(t, &ipt_target, list) {
466 if (strcmp(t->name, name) == 0) {
467 if (t->revision == revision) {
468 if (try_module_get(t->me)) {
473 err = -EPROTOTYPE; /* Found something. */
480 struct ipt_target *ipt_find_target(const char *name, u8 revision)
482 struct ipt_target *target;
484 target = try_then_request_module(find_target(name, revision),
486 if (IS_ERR(target) || !target)
491 static int match_revfn(const char *name, u8 revision, int *bestp)
496 list_for_each_entry(m, &ipt_match, list) {
497 if (strcmp(m->name, name) == 0) {
498 if (m->revision > *bestp)
499 *bestp = m->revision;
500 if (m->revision == revision)
507 static int target_revfn(const char *name, u8 revision, int *bestp)
509 struct ipt_target *t;
512 list_for_each_entry(t, &ipt_target, list) {
513 if (strcmp(t->name, name) == 0) {
514 if (t->revision > *bestp)
515 *bestp = t->revision;
516 if (t->revision == revision)
523 /* Returns true or false (if no such extension at all) */
524 static inline int find_revision(const char *name, u8 revision,
525 int (*revfn)(const char *, u8, int *),
528 int have_rev, best = -1;
530 if (down_interruptible(&ipt_mutex) != 0) {
534 have_rev = revfn(name, revision, &best);
537 /* Nothing at all? Return 0 to try loading module. */
545 *err = -EPROTONOSUPPORT;
550 /* All zeroes == unconditional rule. */
552 unconditional(const struct ipt_ip *ip)
556 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
557 if (((__u32 *)ip)[i])
563 /* Figures out from what hook each rule can be called: returns 0 if
564 there are loops. Puts hook bitmask in comefrom. */
566 mark_source_chains(struct ipt_table_info *newinfo, unsigned int valid_hooks)
570 /* No recursion; use packet counter to save back ptrs (reset
571 to 0 as we leave), and comefrom to save source hook bitmask */
572 for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
573 unsigned int pos = newinfo->hook_entry[hook];
575 = (struct ipt_entry *)(newinfo->entries + pos);
577 if (!(valid_hooks & (1 << hook)))
580 /* Set initial back pointer. */
581 e->counters.pcnt = pos;
584 struct ipt_standard_target *t
585 = (void *)ipt_get_target(e);
587 if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
588 printk("iptables: loop hook %u pos %u %08X.\n",
589 hook, pos, e->comefrom);
593 |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
595 /* Unconditional return/END. */
596 if (e->target_offset == sizeof(struct ipt_entry)
597 && (strcmp(t->target.u.user.name,
598 IPT_STANDARD_TARGET) == 0)
600 && unconditional(&e->ip)) {
601 unsigned int oldpos, size;
603 /* Return: backtrack through the last
606 e->comefrom ^= (1<<NF_IP_NUMHOOKS);
607 #ifdef DEBUG_IP_FIREWALL_USER
609 & (1 << NF_IP_NUMHOOKS)) {
610 duprintf("Back unset "
617 pos = e->counters.pcnt;
618 e->counters.pcnt = 0;
620 /* We're at the start. */
624 e = (struct ipt_entry *)
625 (newinfo->entries + pos);
626 } while (oldpos == pos + e->next_offset);
629 size = e->next_offset;
630 e = (struct ipt_entry *)
631 (newinfo->entries + pos + size);
632 e->counters.pcnt = pos;
635 int newpos = t->verdict;
637 if (strcmp(t->target.u.user.name,
638 IPT_STANDARD_TARGET) == 0
640 /* This a jump; chase it. */
641 duprintf("Jump rule %u -> %u\n",
644 /* ... this is a fallthru */
645 newpos = pos + e->next_offset;
647 e = (struct ipt_entry *)
648 (newinfo->entries + newpos);
649 e->counters.pcnt = pos;
654 duprintf("Finished chain %u\n", hook);
660 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
662 if (i && (*i)-- == 0)
665 if (m->u.kernel.match->destroy)
666 m->u.kernel.match->destroy(m->data,
667 m->u.match_size - sizeof(*m));
668 module_put(m->u.kernel.match->me);
673 standard_check(const struct ipt_entry_target *t,
674 unsigned int max_offset)
676 struct ipt_standard_target *targ = (void *)t;
678 /* Check standard info. */
680 != IPT_ALIGN(sizeof(struct ipt_standard_target))) {
681 duprintf("standard_check: target size %u != %u\n",
683 IPT_ALIGN(sizeof(struct ipt_standard_target)));
687 if (targ->verdict >= 0
688 && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
689 duprintf("ipt_standard_check: bad verdict (%i)\n",
694 if (targ->verdict < -NF_MAX_VERDICT - 1) {
695 duprintf("ipt_standard_check: bad negative verdict (%i)\n",
703 check_match(struct ipt_entry_match *m,
705 const struct ipt_ip *ip,
706 unsigned int hookmask,
709 struct ipt_match *match;
711 match = try_then_request_module(find_match(m->u.user.name,
713 "ipt_%s", m->u.user.name);
714 if (IS_ERR(match) || !match) {
715 duprintf("check_match: `%s' not found\n", m->u.user.name);
716 return match ? PTR_ERR(match) : -ENOENT;
718 m->u.kernel.match = match;
720 if (m->u.kernel.match->checkentry
721 && !m->u.kernel.match->checkentry(name, ip, m->data,
722 m->u.match_size - sizeof(*m),
724 module_put(m->u.kernel.match->me);
725 duprintf("ip_tables: check failed for `%s'.\n",
726 m->u.kernel.match->name);
734 static struct ipt_target ipt_standard_target;
737 check_entry(struct ipt_entry *e, const char *name, unsigned int size,
740 struct ipt_entry_target *t;
741 struct ipt_target *target;
745 if (!ip_checkentry(&e->ip)) {
746 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
751 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
753 goto cleanup_matches;
755 t = ipt_get_target(e);
756 target = try_then_request_module(find_target(t->u.user.name,
758 "ipt_%s", t->u.user.name);
759 if (IS_ERR(target) || !target) {
760 duprintf("check_entry: `%s' not found\n", t->u.user.name);
761 ret = target ? PTR_ERR(target) : -ENOENT;
762 goto cleanup_matches;
764 t->u.kernel.target = target;
766 if (t->u.kernel.target == &ipt_standard_target) {
767 if (!standard_check(t, size)) {
769 goto cleanup_matches;
771 } else if (t->u.kernel.target->checkentry
772 && !t->u.kernel.target->checkentry(name, e, t->data,
776 module_put(t->u.kernel.target->me);
777 duprintf("ip_tables: check failed for `%s'.\n",
778 t->u.kernel.target->name);
780 goto cleanup_matches;
787 IPT_MATCH_ITERATE(e, cleanup_match, &j);
792 check_entry_size_and_hooks(struct ipt_entry *e,
793 struct ipt_table_info *newinfo,
795 unsigned char *limit,
796 const unsigned int *hook_entries,
797 const unsigned int *underflows,
802 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
803 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
804 duprintf("Bad offset %p\n", e);
809 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
810 duprintf("checking: element %p size %u\n",
815 /* Check hooks & underflows */
816 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
817 if ((unsigned char *)e - base == hook_entries[h])
818 newinfo->hook_entry[h] = hook_entries[h];
819 if ((unsigned char *)e - base == underflows[h])
820 newinfo->underflow[h] = underflows[h];
823 /* FIXME: underflows must be unconditional, standard verdicts
824 < 0 (not IPT_RETURN). --RR */
826 /* Clear counters and comefrom */
827 e->counters = ((struct ipt_counters) { 0, 0 });
835 cleanup_entry(struct ipt_entry *e, unsigned int *i)
837 struct ipt_entry_target *t;
839 if (i && (*i)-- == 0)
842 /* Cleanup all matches */
843 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
844 t = ipt_get_target(e);
845 if (t->u.kernel.target->destroy)
846 t->u.kernel.target->destroy(t->data,
847 t->u.target_size - sizeof(*t));
848 module_put(t->u.kernel.target->me);
852 /* Checks and translates the user-supplied table segment (held in
855 translate_table(const char *name,
856 unsigned int valid_hooks,
857 struct ipt_table_info *newinfo,
860 const unsigned int *hook_entries,
861 const unsigned int *underflows)
866 newinfo->size = size;
867 newinfo->number = number;
869 /* Init all hooks to impossible value. */
870 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
871 newinfo->hook_entry[i] = 0xFFFFFFFF;
872 newinfo->underflow[i] = 0xFFFFFFFF;
875 duprintf("translate_table: size %u\n", newinfo->size);
877 /* Walk through entries, checking offsets. */
878 ret = IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size,
879 check_entry_size_and_hooks,
882 newinfo->entries + size,
883 hook_entries, underflows, &i);
888 duprintf("translate_table: %u not %u entries\n",
893 /* Check hooks all assigned */
894 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
895 /* Only hooks which are valid */
896 if (!(valid_hooks & (1 << i)))
898 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
899 duprintf("Invalid hook entry %u %u\n",
903 if (newinfo->underflow[i] == 0xFFFFFFFF) {
904 duprintf("Invalid underflow %u %u\n",
910 if (!mark_source_chains(newinfo, valid_hooks))
913 /* Finally, each sanity check must pass */
915 ret = IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size,
916 check_entry, name, size, &i);
919 IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size,
924 /* And one copy for every other CPU */
925 for (i = 1; i < num_possible_cpus(); i++) {
926 memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
928 SMP_ALIGN(newinfo->size));
934 static struct ipt_table_info *
935 replace_table(struct ipt_table *table,
936 unsigned int num_counters,
937 struct ipt_table_info *newinfo,
940 struct ipt_table_info *oldinfo;
942 #ifdef CONFIG_NETFILTER_DEBUG
944 struct ipt_entry *table_base;
947 for (i = 0; i < num_possible_cpus(); i++) {
949 (void *)newinfo->entries
950 + TABLE_OFFSET(newinfo, i);
952 table_base->comefrom = 0xdead57ac;
957 /* Do the substitution. */
958 write_lock_bh(&table->lock);
959 /* Check inside lock: is the old number correct? */
960 if (num_counters != table->private->number) {
961 duprintf("num_counters != table->private->number (%u/%u)\n",
962 num_counters, table->private->number);
963 write_unlock_bh(&table->lock);
967 oldinfo = table->private;
968 table->private = newinfo;
969 newinfo->initial_entries = oldinfo->initial_entries;
970 write_unlock_bh(&table->lock);
977 add_entry_to_counter(const struct ipt_entry *e,
978 struct ipt_counters total[],
981 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
988 get_counters(const struct ipt_table_info *t,
989 struct ipt_counters counters[])
994 for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
996 IPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
998 add_entry_to_counter,
1005 copy_entries_to_user(unsigned int total_size,
1006 struct ipt_table *table,
1007 void __user *userptr)
1009 unsigned int off, num, countersize;
1010 struct ipt_entry *e;
1011 struct ipt_counters *counters;
1014 /* We need atomic snapshot of counters: rest doesn't change
1015 (other than comefrom, which userspace doesn't care
1017 countersize = sizeof(struct ipt_counters) * table->private->number;
1018 counters = vmalloc(countersize);
1020 if (counters == NULL)
1023 /* First, sum counters... */
1024 memset(counters, 0, countersize);
1025 write_lock_bh(&table->lock);
1026 get_counters(table->private, counters);
1027 write_unlock_bh(&table->lock);
1029 /* ... then copy entire thing from CPU 0... */
1030 if (copy_to_user(userptr, table->private->entries, total_size) != 0) {
1035 /* FIXME: use iterator macros --RR */
1036 /* ... then go back and fix counters and names */
1037 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1039 struct ipt_entry_match *m;
1040 struct ipt_entry_target *t;
1042 e = (struct ipt_entry *)(table->private->entries + off);
1043 if (copy_to_user(userptr + off
1044 + offsetof(struct ipt_entry, counters),
1046 sizeof(counters[num])) != 0) {
1051 for (i = sizeof(struct ipt_entry);
1052 i < e->target_offset;
1053 i += m->u.match_size) {
1056 if (copy_to_user(userptr + off + i
1057 + offsetof(struct ipt_entry_match,
1059 m->u.kernel.match->name,
1060 strlen(m->u.kernel.match->name)+1)
1067 t = ipt_get_target(e);
1068 if (copy_to_user(userptr + off + e->target_offset
1069 + offsetof(struct ipt_entry_target,
1071 t->u.kernel.target->name,
1072 strlen(t->u.kernel.target->name)+1) != 0) {
1084 get_entries(const struct ipt_get_entries *entries,
1085 struct ipt_get_entries __user *uptr)
1088 struct ipt_table *t;
1090 t = find_table_lock(entries->name);
1091 if (t && !IS_ERR(t)) {
1092 duprintf("t->private->number = %u\n",
1093 t->private->number);
1094 if (entries->size == t->private->size)
1095 ret = copy_entries_to_user(t->private->size,
1096 t, uptr->entrytable);
1098 duprintf("get_entries: I've got %u not %u!\n",
1106 ret = t ? PTR_ERR(t) : -ENOENT;
1112 do_replace(void __user *user, unsigned int len)
1115 struct ipt_replace tmp;
1116 struct ipt_table *t;
1117 struct ipt_table_info *newinfo, *oldinfo;
1118 struct ipt_counters *counters;
1120 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1123 /* Hack: Causes ipchains to give correct error msg --RR */
1124 if (len != sizeof(tmp) + tmp.size)
1125 return -ENOPROTOOPT;
1127 /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
1128 if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
1131 newinfo = vmalloc(sizeof(struct ipt_table_info)
1132 + SMP_ALIGN(tmp.size) * num_possible_cpus());
1136 if (copy_from_user(newinfo->entries, user + sizeof(tmp),
1142 counters = vmalloc(tmp.num_counters * sizeof(struct ipt_counters));
1147 memset(counters, 0, tmp.num_counters * sizeof(struct ipt_counters));
1149 ret = translate_table(tmp.name, tmp.valid_hooks,
1150 newinfo, tmp.size, tmp.num_entries,
1151 tmp.hook_entry, tmp.underflow);
1153 goto free_newinfo_counters;
1155 duprintf("ip_tables: Translated table\n");
1157 t = try_then_request_module(find_table_lock(tmp.name),
1158 "iptable_%s", tmp.name);
1159 if (!t || IS_ERR(t)) {
1160 ret = t ? PTR_ERR(t) : -ENOENT;
1161 goto free_newinfo_counters_untrans;
1165 if (tmp.valid_hooks != t->valid_hooks) {
1166 duprintf("Valid hook crap: %08X vs %08X\n",
1167 tmp.valid_hooks, t->valid_hooks);
1172 oldinfo = replace_table(t, tmp.num_counters, newinfo, &ret);
1176 /* Update module usage count based on number of rules */
1177 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1178 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1179 if ((oldinfo->number > oldinfo->initial_entries) ||
1180 (newinfo->number <= oldinfo->initial_entries))
1182 if ((oldinfo->number > oldinfo->initial_entries) &&
1183 (newinfo->number <= oldinfo->initial_entries))
1186 /* Get the old counters. */
1187 get_counters(oldinfo, counters);
1188 /* Decrease module usage counts and free resource */
1189 IPT_ENTRY_ITERATE(oldinfo->entries, oldinfo->size, cleanup_entry,NULL);
1191 if (copy_to_user(tmp.counters, counters,
1192 sizeof(struct ipt_counters) * tmp.num_counters) != 0)
1201 free_newinfo_counters_untrans:
1202 IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size, cleanup_entry,NULL);
1203 free_newinfo_counters:
1210 /* We're lazy, and add to the first CPU; overflow works its fey magic
1211 * and everything is OK. */
1213 add_counter_to_entry(struct ipt_entry *e,
1214 const struct ipt_counters addme[],
1218 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1220 (long unsigned int)e->counters.pcnt,
1221 (long unsigned int)e->counters.bcnt,
1222 (long unsigned int)addme[*i].pcnt,
1223 (long unsigned int)addme[*i].bcnt);
1226 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1233 do_add_counters(void __user *user, unsigned int len)
1236 struct ipt_counters_info tmp, *paddc;
1237 struct ipt_table *t;
1240 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1243 if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct ipt_counters))
1246 paddc = vmalloc(len);
1250 if (copy_from_user(paddc, user, len) != 0) {
1255 t = find_table_lock(tmp.name);
1256 if (!t || IS_ERR(t)) {
1257 ret = t ? PTR_ERR(t) : -ENOENT;
1261 write_lock_bh(&t->lock);
1262 if (t->private->number != paddc->num_counters) {
1264 goto unlock_up_free;
1268 IPT_ENTRY_ITERATE(t->private->entries,
1270 add_counter_to_entry,
1274 write_unlock_bh(&t->lock);
1284 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1288 if (!capable(CAP_NET_ADMIN))
1292 case IPT_SO_SET_REPLACE:
1293 ret = do_replace(user, len);
1296 case IPT_SO_SET_ADD_COUNTERS:
1297 ret = do_add_counters(user, len);
1301 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1309 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1313 if (!capable(CAP_NET_ADMIN))
1317 case IPT_SO_GET_INFO: {
1318 char name[IPT_TABLE_MAXNAMELEN];
1319 struct ipt_table *t;
1321 if (*len != sizeof(struct ipt_getinfo)) {
1322 duprintf("length %u != %u\n", *len,
1323 sizeof(struct ipt_getinfo));
1328 if (copy_from_user(name, user, sizeof(name)) != 0) {
1332 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1334 t = try_then_request_module(find_table_lock(name),
1335 "iptable_%s", name);
1336 if (t && !IS_ERR(t)) {
1337 struct ipt_getinfo info;
1339 info.valid_hooks = t->valid_hooks;
1340 memcpy(info.hook_entry, t->private->hook_entry,
1341 sizeof(info.hook_entry));
1342 memcpy(info.underflow, t->private->underflow,
1343 sizeof(info.underflow));
1344 info.num_entries = t->private->number;
1345 info.size = t->private->size;
1346 memcpy(info.name, name, sizeof(info.name));
1348 if (copy_to_user(user, &info, *len) != 0)
1355 ret = t ? PTR_ERR(t) : -ENOENT;
1359 case IPT_SO_GET_ENTRIES: {
1360 struct ipt_get_entries get;
1362 if (*len < sizeof(get)) {
1363 duprintf("get_entries: %u < %u\n", *len, sizeof(get));
1365 } else if (copy_from_user(&get, user, sizeof(get)) != 0) {
1367 } else if (*len != sizeof(struct ipt_get_entries) + get.size) {
1368 duprintf("get_entries: %u != %u\n", *len,
1369 sizeof(struct ipt_get_entries) + get.size);
1372 ret = get_entries(&get, user);
1376 case IPT_SO_GET_REVISION_MATCH:
1377 case IPT_SO_GET_REVISION_TARGET: {
1378 struct ipt_get_revision rev;
1379 int (*revfn)(const char *, u8, int *);
1381 if (*len != sizeof(rev)) {
1385 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1390 if (cmd == IPT_SO_GET_REVISION_TARGET)
1391 revfn = target_revfn;
1393 revfn = match_revfn;
1395 try_then_request_module(find_revision(rev.name, rev.revision,
1397 "ipt_%s", rev.name);
1402 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
1409 /* Registration hooks for targets. */
1411 ipt_register_target(struct ipt_target *target)
1415 ret = down_interruptible(&ipt_mutex);
1418 list_add(&target->list, &ipt_target);
1424 ipt_unregister_target(struct ipt_target *target)
1427 LIST_DELETE(&ipt_target, target);
1432 ipt_register_match(struct ipt_match *match)
1436 ret = down_interruptible(&ipt_mutex);
1440 list_add(&match->list, &ipt_match);
1447 ipt_unregister_match(struct ipt_match *match)
1450 LIST_DELETE(&ipt_match, match);
1454 int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl)
1457 struct ipt_table_info *newinfo;
1458 static struct ipt_table_info bootstrap
1459 = { 0, 0, 0, { 0 }, { 0 }, { } };
1461 newinfo = vmalloc(sizeof(struct ipt_table_info)
1462 + SMP_ALIGN(repl->size) * num_possible_cpus());
1466 memcpy(newinfo->entries, repl->entries, repl->size);
1468 ret = translate_table(table->name, table->valid_hooks,
1469 newinfo, repl->size,
1478 ret = down_interruptible(&ipt_mutex);
1484 /* Don't autoload: we'd eat our tail... */
1485 if (list_named_find(&ipt_tables, table->name)) {
1490 /* Simplifies replace_table code. */
1491 table->private = &bootstrap;
1492 if (!replace_table(table, 0, newinfo, &ret))
1495 duprintf("table->private->number = %u\n",
1496 table->private->number);
1498 /* save number of initial entries */
1499 table->private->initial_entries = table->private->number;
1501 rwlock_init(&table->lock);
1502 list_prepend(&ipt_tables, table);
1513 void ipt_unregister_table(struct ipt_table *table)
1516 LIST_DELETE(&ipt_tables, table);
1519 /* Decrease module usage counts and free resources */
1520 IPT_ENTRY_ITERATE(table->private->entries, table->private->size,
1521 cleanup_entry, NULL);
1522 vfree(table->private);
/* Returns 1 if the port is matched by the range, 0 otherwise */
/* `invert' is expected to be 0 or 1 (callers pass !!flag); the XOR then
 * flips the in-range result.
 * NOTE(review): declaration, braces and `return ret;` restored — the
 * single visible statement determines the whole body. */
static inline int
port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert)
{
	int ret;

	ret = (port >= min && port <= max) ^ invert;
	return ret;
}
1536 tcp_find_option(u_int8_t option,
1537 const struct sk_buff *skb,
1538 unsigned int optlen,
1542 /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
1543 u_int8_t _opt[60 - sizeof(struct tcphdr)], *op;
1546 duprintf("tcp_match: finding option\n");
1551 /* If we don't have the whole header, drop packet. */
1552 op = skb_header_pointer(skb,
1553 skb->nh.iph->ihl*4 + sizeof(struct tcphdr),
1560 for (i = 0; i < optlen; ) {
1561 if (op[i] == option) return !invert;
1563 else i += op[i+1]?:1;
/*
 * tcp_match - ipt_match handler for -p tcp rules.
 * Checks source/destination port ranges, TCP flag mask/compare, and an
 * optional TCP option, honouring the per-field IPT_TCP_INV_* inversion
 * bits in matchinfo.  Packets it cannot examine are dropped via the
 * hotdrop mechanism (hotdrop assignments elided in this excerpt).
 */
1570 tcp_match(const struct sk_buff *skb,
1571 const struct net_device *in,
1572 const struct net_device *out,
1573 const void *matchinfo,
1577 struct tcphdr _tcph, *th;
1578 const struct ipt_tcp *tcpinfo = matchinfo;
/* Fragments with offset 1 would let an attacker overwrite the TCP
 * flags of the first fragment — treat as hostile and drop. */
1583 Don't allow a fragment of TCP 8 bytes in. Nobody normal
1584 causes this. Its a cracker trying to break in by doing a
1585 flag overwrite to pass the direction checks.
1588 duprintf("Dropping evil TCP offset=1 frag.\n");
1591 /* Must not be a fragment. */
/* XOR helper: applies the inversion bit from tcpinfo->invflags. */
1595 #define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg))
/* Linearize the fixed TCP header into _tcph if needed. */
1597 th = skb_header_pointer(skb, skb->nh.iph->ihl*4,
1598 sizeof(_tcph), &_tcph);
1600 /* We've been asked to examine this packet, and we
1601 can't. Hence, no choice but to drop. */
1602 duprintf("Dropping evil TCP offset=0 tinygram.\n");
/* Source then destination port range checks, each invertible. */
1607 if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
1609 !!(tcpinfo->invflags & IPT_TCP_INV_SRCPT)))
1611 if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
1613 !!(tcpinfo->invflags & IPT_TCP_INV_DSTPT)))
/* Byte 13 of the TCP header holds the flag bits; compare the masked
 * flags against flg_cmp. */
1615 if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
1616 == tcpinfo->flg_cmp,
1619 if (tcpinfo->option) {
/* A data offset smaller than the fixed header is malformed. */
1620 if (th->doff * 4 < sizeof(_tcph)) {
1624 if (!tcp_find_option(tcpinfo->option, skb,
1625 th->doff*4 - sizeof(_tcph),
1626 tcpinfo->invflags & IPT_TCP_INV_OPTION,
1635 tcp_checkentry(const char *tablename,
1636 const struct ipt_ip *ip,
1638 unsigned int matchsize,
1639 unsigned int hook_mask)
1641 const struct ipt_tcp *tcpinfo = matchinfo;
1643 /* Must specify proto == TCP, and no unknown invflags */
1644 return ip->proto == IPPROTO_TCP
1645 && !(ip->invflags & IPT_INV_PROTO)
1646 && matchsize == IPT_ALIGN(sizeof(struct ipt_tcp))
1647 && !(tcpinfo->invflags & ~IPT_TCP_INV_MASK);
/*
 * udp_match - ipt_match handler for -p udp rules.
 * Matches source and destination port ranges with optional inversion.
 * Packets whose UDP header cannot be read are dropped (hotdrop
 * assignment elided in this excerpt).
 */
1651 udp_match(const struct sk_buff *skb,
1652 const struct net_device *in,
1653 const struct net_device *out,
1654 const void *matchinfo,
1658 struct udphdr _udph, *uh;
1659 const struct ipt_udp *udpinfo = matchinfo;
1661 /* Must not be a fragment. */
/* Linearize the UDP header into _udph if needed. */
1665 uh = skb_header_pointer(skb, skb->nh.iph->ihl*4,
1666 sizeof(_udph), &_udph);
1668 /* We've been asked to examine this packet, and we
1669 can't. Hence, no choice but to drop. */
1670 duprintf("Dropping evil UDP tinygram.\n");
/* Both port range checks must pass; each is independently invertible. */
1675 return port_match(udpinfo->spts[0], udpinfo->spts[1],
1677 !!(udpinfo->invflags & IPT_UDP_INV_SRCPT))
1678 && port_match(udpinfo->dpts[0], udpinfo->dpts[1],
1680 !!(udpinfo->invflags & IPT_UDP_INV_DSTPT));
1683 /* Called when user tries to insert an entry of this type. */
/*
 * udp_checkentry - validate a -p udp rule at insertion time.
 * Same contract as tcp_checkentry, but written as explicit checks with
 * a duprintf diagnostic for each failure case: protocol must be UDP
 * (not inverted), matchinfosize must equal the aligned ipt_udp size,
 * and only known inversion flags may be set.
 */
1685 udp_checkentry(const char *tablename,
1686 const struct ipt_ip *ip,
1688 unsigned int matchinfosize,
1689 unsigned int hook_mask)
1691 const struct ipt_udp *udpinfo = matchinfo;
1693 /* Must specify proto == UDP, and no unknown invflags */
1694 if (ip->proto != IPPROTO_UDP || (ip->invflags & IPT_INV_PROTO)) {
1695 duprintf("ipt_udp: Protocol %u != %u\n", ip->proto,
1699 if (matchinfosize != IPT_ALIGN(sizeof(struct ipt_udp))) {
1700 duprintf("ipt_udp: matchsize %u != %u\n",
1701 matchinfosize, IPT_ALIGN(sizeof(struct ipt_udp)));
1704 if (udpinfo->invflags & ~IPT_UDP_INV_MASK) {
1705 duprintf("ipt_udp: unknown flags %X\n",
1713 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
1715 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
1716 u_int8_t type, u_int8_t code,
/* test_type 0xFF is the wildcard ("any type"); otherwise the type must
 * match exactly and the code fall in [min_code, max_code].  The result
 * is XORed with the (elided) invert argument. */
1719 return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
/*
 * icmp_match - ipt_match handler for -p icmp rules.
 * Matches the ICMP type and code range (invertible via IPT_ICMP_INV).
 * Unreadable ICMP headers cause the packet to be dropped (hotdrop
 * assignment elided in this excerpt).
 */
1724 icmp_match(const struct sk_buff *skb,
1725 const struct net_device *in,
1726 const struct net_device *out,
1727 const void *matchinfo,
1731 struct icmphdr _icmph, *ic;
1732 const struct ipt_icmp *icmpinfo = matchinfo;
1734 /* Must not be a fragment. */
/* Linearize the ICMP header into _icmph if needed. */
1738 ic = skb_header_pointer(skb, skb->nh.iph->ihl*4,
1739 sizeof(_icmph), &_icmph);
1741 /* We've been asked to examine this packet, and we
1742 * can't. Hence, no choice but to drop.
1744 duprintf("Dropping evil ICMP tinygram.\n");
/* Delegate the type/code range test; code bounds arguments are elided
 * in this excerpt. */
1749 return icmp_type_code_match(icmpinfo->type,
1753 !!(icmpinfo->invflags&IPT_ICMP_INV));
1756 /* Called when user tries to insert an entry of this type. */
/*
 * icmp_checkentry - validate a -p icmp rule at insertion time.
 * Mirrors tcp_checkentry: protocol must be ICMP (not inverted),
 * matchsize must equal the aligned ipt_icmp size, and only the single
 * IPT_ICMP_INV inversion flag is accepted.
 */
1758 icmp_checkentry(const char *tablename,
1759 const struct ipt_ip *ip,
1761 unsigned int matchsize,
1762 unsigned int hook_mask)
1764 const struct ipt_icmp *icmpinfo = matchinfo;
1766 /* Must specify proto == ICMP, and no unknown invflags */
1767 return ip->proto == IPPROTO_ICMP
1768 && !(ip->invflags & IPT_INV_PROTO)
1769 && matchsize == IPT_ALIGN(sizeof(struct ipt_icmp))
1770 && !(icmpinfo->invflags & ~IPT_ICMP_INV);
1773 /* The built-in targets: standard (NULL) and error. */
/* Standard target: resolves verdicts like ACCEPT/DROP/jump; no .target
 * function visible in this excerpt. */
1774 static struct ipt_target ipt_standard_target = {
1775 .name = IPT_STANDARD_TARGET,
/* Error target: placed at the end of each chain; its handler flags a
 * ruleset inconsistency. */
1778 static struct ipt_target ipt_error_target = {
1779 .name = IPT_ERROR_TARGET,
1780 .target = ipt_error,
/* Userspace control interface: set/get sockopt ranges for iptables(8),
 * dispatched to do_ipt_set_ctl / do_ipt_get_ctl. */
1783 static struct nf_sockopt_ops ipt_sockopts = {
1785 .set_optmin = IPT_BASE_CTL,
1786 .set_optmax = IPT_SO_SET_MAX+1,
1787 .set = do_ipt_set_ctl,
1788 .get_optmin = IPT_BASE_CTL,
1789 .get_optmax = IPT_SO_GET_MAX+1,
1790 .get = do_ipt_get_ctl,
/* Built-in "tcp" match registration (name field elided in excerpt). */
1793 static struct ipt_match tcp_matchstruct = {
1795 .match = &tcp_match,
1796 .checkentry = &tcp_checkentry,
/* Built-in "udp" match registration (name field elided in excerpt). */
1799 static struct ipt_match udp_matchstruct = {
1801 .match = &udp_match,
1802 .checkentry = &udp_checkentry,
/* Built-in "icmp" match registration (name field elided in excerpt). */
1805 static struct ipt_match icmp_matchstruct = {
1807 .match = &icmp_match,
1808 .checkentry = &icmp_checkentry,
1811 #ifdef CONFIG_PROC_FS
/*
 * print_name - helper for the /proc list iterators below.
 * Skips entries until *count reaches start_offset, then sprintf's the
 * entry's name (stored immediately after its list_head) into the proc
 * buffer at *pos.  Signals the iterator to stop when the buffer would
 * overflow (truncation/return lines elided in this excerpt).
 */
1812 static inline int print_name(const char *i,
1813 off_t start_offset, char *buffer, int length,
1814 off_t *pos, unsigned int *count)
1816 if ((*count)++ >= start_offset) {
1817 unsigned int namelen;
1819 namelen = sprintf(buffer + *pos, "%s\n",
1820 i + sizeof(struct list_head));
1821 if (*pos + namelen > length) {
1822 /* Stop iterating */
/* As print_name, but hides the two built-in targets so only loadable
 * targets appear in /proc/net/ip_tables_targets. */
1830 static inline int print_target(const struct ipt_target *t,
1831 off_t start_offset, char *buffer, int length,
1832 off_t *pos, unsigned int *count)
1834 if (t == &ipt_standard_target || t == &ipt_error_target)
1836 return print_name((char *)t, start_offset, buffer, length, pos, count);
/*
 * ipt_get_tables - /proc/net/ip_tables_names read handler.
 * Lists registered table names under ipt_mutex.  Interruptible lock
 * acquisition; the error-return and up(&ipt_mutex) lines are elided in
 * this excerpt.
 */
1839 static int ipt_get_tables(char *buffer, char **start, off_t offset, int length)
1842 unsigned int count = 0;
1844 if (down_interruptible(&ipt_mutex) != 0)
1847 LIST_FIND(&ipt_tables, print_name, void *,
1848 offset, buffer, length, &pos, &count);
1852 /* `start' hack - see fs/proc/generic.c line ~105 */
1853 *start=(char *)((unsigned long)count-offset);
/* /proc/net/ip_tables_targets read handler — as ipt_get_tables, but
 * iterates the target list via print_target (built-ins filtered). */
1857 static int ipt_get_targets(char *buffer, char **start, off_t offset, int length)
1860 unsigned int count = 0;
1862 if (down_interruptible(&ipt_mutex) != 0)
1865 LIST_FIND(&ipt_target, print_target, struct ipt_target *,
1866 offset, buffer, length, &pos, &count);
1870 *start = (char *)((unsigned long)count - offset);
/* /proc/net/ip_tables_matches read handler — as ipt_get_tables, over
 * the match list. */
1874 static int ipt_get_matches(char *buffer, char **start, off_t offset, int length)
1877 unsigned int count = 0;
1879 if (down_interruptible(&ipt_mutex) != 0)
1882 LIST_FIND(&ipt_match, print_name, void *,
1883 offset, buffer, length, &pos, &count);
1887 *start = (char *)((unsigned long)count - offset);
/* Table of /proc/net entries created by init() and removed by fini();
 * iteration presumably stops at a NULL-name sentinel (elided here). */
1891 static struct { char *name; get_info_t *get_info; } ipt_proc_entry[] =
1892 { { "ip_tables_names", ipt_get_tables },
1893 { "ip_tables_targets", ipt_get_targets },
1894 { "ip_tables_matches", ipt_get_matches },
1896 #endif /*CONFIG_PROC_FS*/
/*
 * init - module entry point.
 * Registers the built-in targets and matches on the global lists,
 * registers the iptables sockopt interface, and creates the /proc/net
 * listing files.  On proc failure it unwinds: removes already-created
 * entries and unregisters the sockopts (partial-unwind loop lines are
 * elided in this excerpt).
 */
1898 static int __init init(void)
1902 /* Noone else will be downing sem now, so we won't sleep */
1904 list_append(&ipt_target, &ipt_standard_target);
1905 list_append(&ipt_target, &ipt_error_target);
1906 list_append(&ipt_match, &tcp_matchstruct);
1907 list_append(&ipt_match, &udp_matchstruct);
1908 list_append(&ipt_match, &icmp_matchstruct);
1911 /* Register setsockopt */
1912 ret = nf_register_sockopt(&ipt_sockopts);
1914 duprintf("Unable to register sockopts.\n");
1918 #ifdef CONFIG_PROC_FS
1920 struct proc_dir_entry *proc;
1923 for (i = 0; ipt_proc_entry[i].name; i++) {
1924 proc = proc_net_create(ipt_proc_entry[i].name, 0,
1925 ipt_proc_entry[i].get_info);
/* On failure, remove the entries created so far and back out the
 * sockopt registration before returning an error. */
1928 proc_net_remove(ipt_proc_entry[i].name);
1929 nf_unregister_sockopt(&ipt_sockopts);
1932 proc->owner = THIS_MODULE;
1937 printk("ip_tables: (C) 2000-2002 Netfilter core team\n");
/*
 * fini - module exit point: unregister the sockopt interface and remove
 * every /proc/net entry created by init().
 */
1941 static void __exit fini(void)
1943 nf_unregister_sockopt(&ipt_sockopts);
1944 #ifdef CONFIG_PROC_FS
1947 for (i = 0; ipt_proc_entry[i].name; i++)
1948 proc_net_remove(ipt_proc_entry[i].name);
/* Public API exported to other netfilter modules (table registration,
 * match/target registration, packet traversal, and target lookup). */
1953 EXPORT_SYMBOL(ipt_register_table);
1954 EXPORT_SYMBOL(ipt_unregister_table);
1955 EXPORT_SYMBOL(ipt_register_match);
1956 EXPORT_SYMBOL(ipt_unregister_match);
1957 EXPORT_SYMBOL(ipt_do_table);
1958 EXPORT_SYMBOL(ipt_register_target);
1959 EXPORT_SYMBOL(ipt_unregister_target);
1960 EXPORT_SYMBOL(ipt_find_target);