2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
15 #include <linux/config.h>
16 #include <linux/cache.h>
17 #include <linux/skbuff.h>
18 #include <linux/kmod.h>
19 #include <linux/vmalloc.h>
20 #include <linux/netdevice.h>
21 #include <linux/module.h>
22 #include <linux/tcp.h>
23 #include <linux/udp.h>
24 #include <linux/icmp.h>
26 #include <asm/uaccess.h>
27 #include <asm/semaphore.h>
28 #include <linux/proc_fs.h>
29 #include <linux/err.h>
30 #include <linux/cpumask.h>
32 #include <linux/netfilter_ipv4/ip_tables.h>
/* Module metadata: GPL-licensed IPv4 packet filter by the Netfilter Core Team. */
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv4 packet filter");
/* Compile-time debug switches, all disabled by default. */
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
/* dprintf(): packet-path debug output, compiled away unless DEBUG_IP_FIREWALL.
 * (NOTE(review): the #else/#endif lines are missing from this excerpt.) */
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) printk(format , ## args)
45 #define dprintf(format, args...)
/* duprintf(): userspace-interface debug output (DEBUG_IP_FIREWALL_USER). */
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) printk(format , ## args)
51 #define duprintf(format, args...)
/* IP_NF_ASSERT(): logs function/file/line when the condition fails;
 * a no-op when CONFIG_NETFILTER_DEBUG is off. */
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) \
58 printk("IP_NF_ASSERT: %s:%s:%u\n", \
59 __FUNCTION__, __FILE__, __LINE__); \
62 #define IP_NF_ASSERT(x)
/* Round x up to the next SMP cache-line multiple (used to size per-CPU copies). */
64 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/* Single mutex serialising all user-context table/match/target registration. */
66 static DECLARE_MUTEX(ipt_mutex);
/* Lock assertions: holding ipt_mutex means a further down_trylock() must fail. */
69 #define ASSERT_READ_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
70 #define ASSERT_WRITE_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
71 #include <linux/netfilter_ipv4/listhelp.h>
74 /* All the better to debug you with... */
/* Locking design: per-CPU rule copies let the softirq path take only a
 * read lock when bumping counters; user context takes the write lock to
 * read counters or replace rules, which briefly stops packet traversal. */
80 We keep a set of rules for each CPU, so we can avoid write-locking
81 them in the softirq when updating the counters and therefore
82 only need to read-lock in the softirq; doing a write_lock_bh() in user
83 context stops packets coming through and allows user context to read
84 the counters or update the rules.
86 To be cache friendly on SMP, we arrange them like so:
88 ... cache-align padding ...
91 Hence the start of any table is given by get_table() below. */
93 /* The table itself */
/* struct ipt_table_info members (the struct's opening line is missing
 * from this excerpt): entry counts, per-hook offsets, and the per-CPU
 * entry blobs appended as a flexible trailing array. */
98 /* Number of entries: FIXME. --RR */
100 /* Initial number of entries. Needed for module usage count */
101 unsigned int initial_entries;
103 /* Entry points and underflows */
104 unsigned int hook_entry[NF_IP_NUMHOOKS];
105 unsigned int underflow[NF_IP_NUMHOOKS];
107 /* ipt_entry tables: one per CPU */
108 char entries[0] ____cacheline_aligned;
/* Global registries of targets, matches and tables, guarded by ipt_mutex. */
111 static LIST_HEAD(ipt_target);
112 static LIST_HEAD(ipt_match);
113 static LIST_HEAD(ipt_tables);
/* Accumulate byte (b) and packet (p) counts into counter c. */
114 #define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
/* Byte offset of CPU p's copy of the rules (0 on non-SMP builds). */
117 #define TABLE_OFFSET(t,p) (SMP_ALIGN((t)->size)*(p))
119 #define TABLE_OFFSET(t,p) 0
/* Debug wrappers that trace every semaphore down/up with its call site. */
123 #define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
124 #define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
125 #define up(x) do { printk("UP:%u:" #x "\n", __LINE__); up(x); } while(0)
128 /* Returns whether matches rule or not. */
/* ip_packet_match(): tests an IP header against a rule's ipt_ip spec —
 * masked src/dst addresses, in/out interface names, protocol, fragment
 * flag.  FWINV() XORs each test with its inversion bit from invflags.
 * (NOTE(review): several lines, including the return statements and part
 * of the parameter list, are missing from this excerpt.) */
130 ip_packet_match(const struct iphdr *ip,
133 const struct ipt_ip *ipinfo,
139 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
/* Masked source/destination address comparison. */
141 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
143 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
145 dprintf("Source or dest mismatch.\n");
147 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
149 NIPQUAD(ipinfo->smsk.s_addr),
150 NIPQUAD(ipinfo->src.s_addr),
151 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
152 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
154 NIPQUAD(ipinfo->dmsk.s_addr),
155 NIPQUAD(ipinfo->dst.s_addr),
156 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
160 /* Look for ifname matches; this should unroll nicely. */
/* Compare input interface name word-at-a-time under the iface mask;
 * any masked difference leaves ret non-zero. */
161 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
162 ret |= (((const unsigned long *)indev)[i]
163 ^ ((const unsigned long *)ipinfo->iniface)[i])
164 & ((const unsigned long *)ipinfo->iniface_mask)[i];
167 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
168 dprintf("VIA in mismatch (%s vs %s).%s\n",
169 indev, ipinfo->iniface,
170 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
/* Same word-wise masked comparison for the output interface. */
174 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
175 ret |= (((const unsigned long *)outdev)[i]
176 ^ ((const unsigned long *)ipinfo->outiface)[i])
177 & ((const unsigned long *)ipinfo->outiface_mask)[i];
180 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
181 dprintf("VIA out mismatch (%s vs %s).%s\n",
182 outdev, ipinfo->outiface,
183 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
187 /* Check specific protocol */
189 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
190 dprintf("Packet protocol %hi does not match %hi.%s\n",
191 ip->protocol, ipinfo->proto,
192 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
196 /* If we have a fragment rule but the packet is not a fragment
197 * then we return zero */
198 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
199 dprintf("Fragment rule but not fragment.%s\n",
200 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
/* ip_checkentry(): sanity-check a user-supplied ipt_ip — rejects any flag
 * or inversion-flag bits outside the known masks.  (Return statements are
 * missing from this excerpt.) */
208 ip_checkentry(const struct ipt_ip *ip)
210 if (ip->flags & ~IPT_F_MASK) {
211 duprintf("Unknown flag bits set: %08X\n",
212 ip->flags & ~IPT_F_MASK);
215 if (ip->invflags & ~IPT_INV_MASK) {
216 duprintf("Unknown invflag bits set: %08X\n",
217 ip->invflags & ~IPT_INV_MASK);
/* ipt_error(): target function for broken/unknown rules — logs the message
 * stored in targinfo.  (The return line is not visible in this excerpt;
 * presumably NF_DROP — TODO confirm against full source.) */
224 ipt_error(struct sk_buff **pskb,
225 const struct net_device *in,
226 const struct net_device *out,
227 unsigned int hooknum,
228 const void *targinfo,
232 printk("ip_tables: error: `%s'\n", (char *)targinfo);
/* do_match(): IPT_MATCH_ITERATE callback — invokes one match extension on
 * the packet; a non-zero iterator return stops the iteration, signalling
 * "this rule does not match". */
238 int do_match(struct ipt_entry_match *m,
239 const struct sk_buff *skb,
240 const struct net_device *in,
241 const struct net_device *out,
245 /* Stop iteration if it doesn't match */
246 if (!m->u.kernel.match->match(skb, in, out, m->data, offset, hotdrop))
/* get_entry(): resolve a byte offset within a table blob to its ipt_entry. */
252 static inline struct ipt_entry *
253 get_entry(void *base, unsigned int offset)
255 return (struct ipt_entry *)(base + offset);
258 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* ipt_do_table(): the packet-path rule walker.  Under the table read lock
 * it traverses this CPU's copy of the rules from the hook's entry point,
 * matching each rule, updating counters, and interpreting standard
 * verdicts (accept/drop, jump, RETURN via the back pointer) or calling
 * extension targets.  (NOTE(review): many lines — including the loop
 * head, verdict bookkeeping and final return — are missing from this
 * excerpt.) */
260 ipt_do_table(struct sk_buff **pskb,
262 const struct net_device *in,
263 const struct net_device *out,
264 struct ipt_table *table,
267 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
272 /* Initializing verdict to NF_DROP keeps gcc happy. */
273 unsigned int verdict = NF_DROP;
274 const char *indev, *outdev;
276 struct ipt_entry *e, *back;
279 ip = (*pskb)->nh.iph;
280 datalen = (*pskb)->len - ip->ihl * 4;
/* Absent devices match against an all-zero name. */
281 indev = in ? in->name : nulldevname;
282 outdev = out ? out->name : nulldevname;
283 /* We handle fragments by dealing with the first fragment as
284 * if it was a normal packet. All other fragments are treated
285 * normally, except that they will NEVER match rules that ask
286 * things we don't know, ie. tcp syn flag or ports). If the
287 * rule is also a fragment-specific rule, non-fragments won't
289 offset = ntohs(ip->frag_off) & IP_OFFSET;
291 read_lock_bh(&table->lock);
292 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
/* Start at this CPU's copy of the ruleset, at the hook's entry point. */
293 table_base = (void *)table->private->entries
294 + TABLE_OFFSET(table->private, smp_processor_id());
295 e = get_entry(table_base, table->private->hook_entry[hook]);
297 #ifdef CONFIG_NETFILTER_DEBUG
298 /* Check noone else using our table */
299 if (((struct ipt_entry *)table_base)->comefrom != 0xdead57ac
300 && ((struct ipt_entry *)table_base)->comefrom != 0xeeeeeeec) {
301 printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n",
304 &((struct ipt_entry *)table_base)->comefrom,
305 ((struct ipt_entry *)table_base)->comefrom);
307 ((struct ipt_entry *)table_base)->comefrom = 0x57acc001;
310 /* For return from builtin chain */
311 back = get_entry(table_base, table->private->underflow[hook]);
316 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
317 struct ipt_entry_target *t;
/* Any failing match extension skips this rule. */
319 if (IPT_MATCH_ITERATE(e, do_match,
321 offset, &hotdrop) != 0)
324 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
326 t = ipt_get_target(e);
327 IP_NF_ASSERT(t->u.kernel.target);
328 /* Standard target? */
329 if (!t->u.kernel.target->target) {
332 v = ((struct ipt_standard_target *)t)->verdict;
334 /* Pop from stack? */
335 if (v != IPT_RETURN) {
/* Negative verdicts encode NF_* results as -(verdict)-1. */
336 verdict = (unsigned)(-v) - 1;
340 back = get_entry(table_base,
/* Jump (not goto, not fall-through): remember where to return to. */
344 if (table_base + v != (void *)e + e->next_offset
345 && !(e->ip.flags & IPT_F_GOTO)) {
346 /* Save old back ptr in next entry */
347 struct ipt_entry *next
348 = (void *)e + e->next_offset;
350 = (void *)back - table_base;
351 /* set back pointer to next entry */
355 e = get_entry(table_base, v);
357 /* Targets which reenter must return
359 #ifdef CONFIG_NETFILTER_DEBUG
360 ((struct ipt_entry *)table_base)->comefrom
363 verdict = t->u.kernel.target->target(pskb,
369 #ifdef CONFIG_NETFILTER_DEBUG
370 if (((struct ipt_entry *)table_base)->comefrom
372 && verdict == IPT_CONTINUE) {
373 printk("Target %s reentered!\n",
374 t->u.kernel.target->name);
377 ((struct ipt_entry *)table_base)->comefrom
380 /* Target might have changed stuff. */
381 ip = (*pskb)->nh.iph;
382 datalen = (*pskb)->len - ip->ihl * 4;
384 if (verdict == IPT_CONTINUE)
385 e = (void *)e + e->next_offset;
393 e = (void *)e + e->next_offset;
397 #ifdef CONFIG_NETFILTER_DEBUG
398 ((struct ipt_entry *)table_base)->comefrom = 0xdead57ac;
400 read_unlock_bh(&table->lock);
402 #ifdef DEBUG_ALLOW_ALL
412 * These are weird, but module loading must not be done with mutex
413 * held (since they will register), and we have to have a single
414 * function to use try_then_request_module().
417 /* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
/* On success the caller holds ipt_mutex and a module reference on the
 * table; -EINTR if interrupted while waiting for the mutex.  (The found/
 * not-found return lines are missing from this excerpt.) */
418 static inline struct ipt_table *find_table_lock(const char *name)
422 if (down_interruptible(&ipt_mutex) != 0)
423 return ERR_PTR(-EINTR);
425 list_for_each_entry(t, &ipt_tables, list)
426 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
432 /* Find match, grabs ref. Returns ERR_PTR() on error. */
/* Looks up a match extension by name+revision under ipt_mutex, taking a
 * module reference on success; a name hit with the wrong revision yields
 * -EPROTOTYPE so callers can distinguish it from "not loaded". */
433 static inline struct ipt_match *find_match(const char *name, u8 revision)
438 if (down_interruptible(&ipt_mutex) != 0)
439 return ERR_PTR(-EINTR);
441 list_for_each_entry(m, &ipt_match, list) {
442 if (strcmp(m->name, name) == 0) {
443 if (m->revision == revision) {
444 if (try_module_get(m->me)) {
449 err = -EPROTOTYPE; /* Found something. */
456 /* Find target, grabs ref. Returns ERR_PTR() on error. */
/* Mirror of find_match() for target extensions: name+revision lookup under
 * ipt_mutex, module reference taken on success, -EPROTOTYPE when only the
 * revision differs. */
457 static inline struct ipt_target *find_target(const char *name, u8 revision)
459 struct ipt_target *t;
462 if (down_interruptible(&ipt_mutex) != 0)
463 return ERR_PTR(-EINTR);
465 list_for_each_entry(t, &ipt_target, list) {
466 if (strcmp(t->name, name) == 0) {
467 if (t->revision == revision) {
468 if (try_module_get(t->me)) {
473 err = -EPROTOTYPE; /* Found something. */
/* ipt_find_target(): public lookup that auto-loads the "ipt_<name>" module
 * when the target is not yet registered. */
480 struct ipt_target *ipt_find_target(const char *name, u8 revision)
482 struct ipt_target *target;
484 target = try_then_request_module(find_target(name, revision),
486 if (IS_ERR(target) || !target)
/* match_revfn(): scan registered matches named `name`, recording the highest
 * revision seen in *bestp; intended to report whether the exact requested
 * revision exists.  Caller must serialise against the registry. */
491 static int match_revfn(const char *name, u8 revision, int *bestp)
496 list_for_each_entry(m, &ipt_match, list) {
497 if (strcmp(m->name, name) == 0) {
498 if (m->revision > *bestp)
499 *bestp = m->revision;
500 if (m->revision == revision)
/* target_revfn(): same revision scan as match_revfn(), over the target list. */
507 static int target_revfn(const char *name, u8 revision, int *bestp)
509 struct ipt_target *t;
512 list_for_each_entry(t, &ipt_target, list) {
513 if (strcmp(t->name, name) == 0) {
514 if (t->revision > *bestp)
515 *bestp = t->revision;
516 if (t->revision == revision)
523 /* Returns true or false (if no such extension at all) */
/* find_revision(): run a revfn under ipt_mutex; reports the best available
 * revision through *err (-EPROTONOSUPPORT when the exact one is absent) and
 * returns 0 when nothing by that name exists, so the caller can try module
 * loading. */
524 static inline int find_revision(const char *name, u8 revision,
525 int (*revfn)(const char *, u8, int *),
528 int have_rev, best = -1;
530 if (down_interruptible(&ipt_mutex) != 0) {
534 have_rev = revfn(name, revision, &best);
537 /* Nothing at all? Return 0 to try loading module. */
545 *err = -EPROTONOSUPPORT;
550 /* All zeroes == unconditional rule. */
/* unconditional(): true when every 32-bit word of the ipt_ip spec is zero,
 * i.e. the rule matches all packets. */
552 unconditional(const struct ipt_ip *ip)
556 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
557 if (((__u32 *)ip)[i])
563 /* Figures out from what hook each rule can be called: returns 0 if
564 there are loops. Puts hook bitmask in comefrom. */
/* mark_source_chains(): iterative depth-first walk of the ruleset from each
 * valid hook entry.  e->comefrom accumulates the hooks reaching each rule
 * (bit NF_IP_NUMHOOKS marks "on current path" for loop detection), and
 * e->counters.pcnt is temporarily borrowed as the back-pointer stack.
 * (NOTE(review): several control-flow lines are missing from this excerpt.) */
566 mark_source_chains(struct ipt_table_info *newinfo, unsigned int valid_hooks)
570 /* No recursion; use packet counter to save back ptrs (reset
571 to 0 as we leave), and comefrom to save source hook bitmask */
572 for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
573 unsigned int pos = newinfo->hook_entry[hook];
575 = (struct ipt_entry *)(newinfo->entries + pos);
577 if (!(valid_hooks & (1 << hook)))
580 /* Set initial back pointer. */
581 e->counters.pcnt = pos;
584 struct ipt_standard_target *t
585 = (void *)ipt_get_target(e);
/* Revisiting a rule already on the current path == loop. */
587 if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
588 printk("iptables: loop hook %u pos %u %08X.\n",
589 hook, pos, e->comefrom);
593 |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
595 /* Unconditional return/END. */
596 if (e->target_offset == sizeof(struct ipt_entry)
597 && (strcmp(t->target.u.user.name,
598 IPT_STANDARD_TARGET) == 0)
600 && unconditional(&e->ip)) {
601 unsigned int oldpos, size;
603 /* Return: backtrack through the last
606 e->comefrom ^= (1<<NF_IP_NUMHOOKS);
607 #ifdef DEBUG_IP_FIREWALL_USER
609 & (1 << NF_IP_NUMHOOKS)) {
610 duprintf("Back unset "
/* Pop the saved back pointer and clear the borrowed counter. */
617 pos = e->counters.pcnt;
618 e->counters.pcnt = 0;
620 /* We're at the start. */
624 e = (struct ipt_entry *)
625 (newinfo->entries + pos);
626 } while (oldpos == pos + e->next_offset);
629 size = e->next_offset;
630 e = (struct ipt_entry *)
631 (newinfo->entries + pos + size);
632 e->counters.pcnt = pos;
635 int newpos = t->verdict;
637 if (strcmp(t->target.u.user.name,
638 IPT_STANDARD_TARGET) == 0
640 /* This a jump; chase it. */
641 duprintf("Jump rule %u -> %u\n",
644 /* ... this is a fallthru */
645 newpos = pos + e->next_offset;
647 e = (struct ipt_entry *)
648 (newinfo->entries + newpos);
649 e->counters.pcnt = pos;
654 duprintf("Finished chain %u\n", hook);
/* cleanup_match(): IPT_MATCH_ITERATE callback — run the match's destroy hook
 * and drop its module reference.  With a counter i, stops after (*i) matches
 * so partially-checked entries can be unwound. */
660 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
662 if (i && (*i)-- == 0)
665 if (m->u.kernel.match->destroy)
666 m->u.kernel.match->destroy(m->data,
667 m->u.match_size - sizeof(*m));
668 module_put(m->u.kernel.match->me);
/* standard_check(): validate a standard target — exact aligned size, and a
 * verdict that is either a sane in-table jump offset (when >= 0) or a known
 * negative NF_* verdict (not below -NF_MAX_VERDICT - 1). */
673 standard_check(const struct ipt_entry_target *t,
674 unsigned int max_offset)
676 struct ipt_standard_target *targ = (void *)t;
678 /* Check standard info. */
680 != IPT_ALIGN(sizeof(struct ipt_standard_target))) {
681 duprintf("standard_check: target size %u != %u\n",
683 IPT_ALIGN(sizeof(struct ipt_standard_target)));
687 if (targ->verdict >= 0
688 && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
689 duprintf("ipt_standard_check: bad verdict (%i)\n",
694 if (targ->verdict < -NF_MAX_VERDICT - 1) {
695 duprintf("ipt_standard_check: bad negative verdict (%i)\n",
/* check_match(): resolve one rule's match extension (auto-loading the
 * "ipt_<name>" module if needed), bind it into m->u.kernel.match, and run
 * its checkentry hook; drops the module reference again if that fails. */
703 check_match(struct ipt_entry_match *m,
705 const struct ipt_ip *ip,
706 unsigned int hookmask,
709 struct ipt_match *match;
711 match = try_then_request_module(find_match(m->u.user.name,
713 "ipt_%s", m->u.user.name);
714 if (IS_ERR(match) || !match) {
715 duprintf("check_match: `%s' not found\n", m->u.user.name);
716 return match ? PTR_ERR(match) : -ENOENT;
718 m->u.kernel.match = match;
720 if (m->u.kernel.match->checkentry
721 && !m->u.kernel.match->checkentry(name, ip, m->data,
722 m->u.match_size - sizeof(*m),
/* checkentry rejected the rule: release the reference we just took. */
724 module_put(m->u.kernel.match->me);
725 duprintf("ip_tables: check failed for `%s'.\n",
726 m->u.kernel.match->name);
734 static struct ipt_target ipt_standard_target;
/* check_entry(): validate a full rule — ipt_ip sanity, every match's
 * checkentry (via check_match), then resolve and verify the target
 * (standard_check for the built-in standard target, the extension's own
 * checkentry otherwise).  On any failure, already-bound matches are
 * unwound through cleanup_match using the running count j. */
737 check_entry(struct ipt_entry *e, const char *name, unsigned int size,
740 struct ipt_entry_target *t;
741 struct ipt_target *target;
745 if (!ip_checkentry(&e->ip)) {
746 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
751 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
753 goto cleanup_matches;
755 t = ipt_get_target(e);
756 target = try_then_request_module(find_target(t->u.user.name,
758 "ipt_%s", t->u.user.name);
759 if (IS_ERR(target) || !target) {
760 duprintf("check_entry: `%s' not found\n", t->u.user.name);
761 ret = target ? PTR_ERR(target) : -ENOENT;
762 goto cleanup_matches;
764 t->u.kernel.target = target;
766 if (t->u.kernel.target == &ipt_standard_target) {
767 if (!standard_check(t, size)) {
769 goto cleanup_matches;
771 } else if (t->u.kernel.target->checkentry
772 && !t->u.kernel.target->checkentry(name, e, t->data,
776 module_put(t->u.kernel.target->me);
777 duprintf("ip_tables: check failed for `%s'.\n",
778 t->u.kernel.target->name);
780 goto cleanup_matches;
/* Unwind the j matches bound so far. */
787 IPT_MATCH_ITERATE(e, cleanup_match, &j);
/* check_entry_size_and_hooks(): structural validation of one entry in the
 * user-supplied blob — alignment, bounds against `limit`, minimum size —
 * plus recording hook entry/underflow offsets that land exactly on this
 * entry.  Also zeroes the counters and (per the comment) comefrom. */
792 check_entry_size_and_hooks(struct ipt_entry *e,
793 struct ipt_table_info *newinfo,
795 unsigned char *limit,
796 const unsigned int *hook_entries,
797 const unsigned int *underflows,
802 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
803 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
804 duprintf("Bad offset %p\n", e);
809 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
810 duprintf("checking: element %p size %u\n",
815 /* Check hooks & underflows */
816 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
817 if ((unsigned char *)e - base == hook_entries[h])
818 newinfo->hook_entry[h] = hook_entries[h];
819 if ((unsigned char *)e - base == underflows[h])
820 newinfo->underflow[h] = underflows[h];
823 /* FIXME: underflows must be unconditional, standard verdicts
824 < 0 (not IPT_RETURN). --RR */
826 /* Clear counters and comefrom */
827 e->counters = ((struct ipt_counters) { 0, 0 });
/* cleanup_entry(): release one rule's resources — destroy all its matches,
 * then the target's destroy hook and module reference.  With a counter i,
 * stops after (*i) entries (used to unwind partial translation). */
835 cleanup_entry(struct ipt_entry *e, unsigned int *i)
837 struct ipt_entry_target *t;
839 if (i && (*i)-- == 0)
842 /* Cleanup all matches */
843 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
844 t = ipt_get_target(e);
845 if (t->u.kernel.target->destroy)
846 t->u.kernel.target->destroy(t->data,
847 t->u.target_size - sizeof(*t));
848 module_put(t->u.kernel.target->me);
852 /* Checks and translates the user-supplied table segment (held in
/* translate_table(): full validation pipeline for a new ruleset —
 * size/offset checks per entry, all hooks assigned, loop detection via
 * mark_source_chains(), per-entry semantic checks, and finally replication
 * of the verified blob into every other CPU's slot.  (Several lines,
 * including error-path cleanup, are missing from this excerpt.) */
855 translate_table(const char *name,
856 unsigned int valid_hooks,
857 struct ipt_table_info *newinfo,
860 const unsigned int *hook_entries,
861 const unsigned int *underflows)
866 newinfo->size = size;
867 newinfo->number = number;
869 /* Init all hooks to impossible value. */
870 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
871 newinfo->hook_entry[i] = 0xFFFFFFFF;
872 newinfo->underflow[i] = 0xFFFFFFFF;
875 duprintf("translate_table: size %u\n", newinfo->size);
877 /* Walk through entries, checking offsets. */
878 ret = IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size,
879 check_entry_size_and_hooks,
882 newinfo->entries + size,
883 hook_entries, underflows, &i);
888 duprintf("translate_table: %u not %u entries\n",
893 /* Check hooks all assigned */
894 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
895 /* Only hooks which are valid */
896 if (!(valid_hooks & (1 << i)))
898 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
899 duprintf("Invalid hook entry %u %u\n",
903 if (newinfo->underflow[i] == 0xFFFFFFFF) {
904 duprintf("Invalid underflow %u %u\n",
910 if (!mark_source_chains(newinfo, valid_hooks))
913 /* Finally, each sanity check must pass */
915 ret = IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size,
916 check_entry, name, size, &i);
/* On failure, unwind only the i entries that passed. */
919 IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size,
924 /* And one copy for every other CPU */
928 memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i,
930 SMP_ALIGN(newinfo->size));
/* replace_table(): atomically swap a table's ruleset under the write lock.
 * Fails (returning via *error; the unlock path's return line is missing
 * here) if num_counters no longer matches the live rule count, which
 * guards against a racing replace.  Returns the old info for the caller
 * to harvest counters from and free. */
936 static struct ipt_table_info *
937 replace_table(struct ipt_table *table,
938 unsigned int num_counters,
939 struct ipt_table_info *newinfo,
942 struct ipt_table_info *oldinfo;
944 #ifdef CONFIG_NETFILTER_DEBUG
946 struct ipt_entry *table_base;
/* Mark each per-CPU copy "unused" for the debug reentrancy checks. */
951 (void *)newinfo->entries
952 + TABLE_OFFSET(newinfo, i);
954 table_base->comefrom = 0xdead57ac;
959 /* Do the substitution. */
960 write_lock_bh(&table->lock);
961 /* Check inside lock: is the old number correct? */
962 if (num_counters != table->private->number) {
963 duprintf("num_counters != table->private->number (%u/%u)\n",
964 num_counters, table->private->number);
965 write_unlock_bh(&table->lock);
969 oldinfo = table->private;
970 table->private = newinfo;
971 newinfo->initial_entries = oldinfo->initial_entries;
972 write_unlock_bh(&table->lock);
/* add_entry_to_counter(): iterator callback — fold one entry's byte/packet
 * counts into total[*i] (the index presumably advances per entry; the
 * increment line is not visible here). */
979 add_entry_to_counter(const struct ipt_entry *e,
980 struct ipt_counters total[],
983 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* get_counters(): sum the per-CPU rule counters into one counters[] array
 * by iterating every CPU's copy of the table. */
990 get_counters(const struct ipt_table_info *t,
991 struct ipt_counters counters[])
998 IPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
1000 add_entry_to_counter,
/* copy_entries_to_user(): export the ruleset to userspace — snapshot the
 * summed counters under the write lock, bulk-copy CPU 0's entry blob, then
 * patch each entry's counters and rewrite kernel match/target pointers back
 * into their user-visible names. */
1007 copy_entries_to_user(unsigned int total_size,
1008 struct ipt_table *table,
1009 void __user *userptr)
1011 unsigned int off, num, countersize;
1012 struct ipt_entry *e;
1013 struct ipt_counters *counters;
1016 /* We need atomic snapshot of counters: rest doesn't change
1017 (other than comefrom, which userspace doesn't care
1019 countersize = sizeof(struct ipt_counters) * table->private->number;
1020 counters = vmalloc(countersize);
1022 if (counters == NULL)
1025 /* First, sum counters... */
1026 memset(counters, 0, countersize);
/* Write lock: blocks the packet path so the snapshot is consistent. */
1027 write_lock_bh(&table->lock);
1028 get_counters(table->private, counters);
1029 write_unlock_bh(&table->lock);
1031 /* ... then copy entire thing from CPU 0... */
1032 if (copy_to_user(userptr, table->private->entries, total_size) != 0) {
1037 /* FIXME: use iterator macros --RR */
1038 /* ... then go back and fix counters and names */
1039 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1041 struct ipt_entry_match *m;
1042 struct ipt_entry_target *t;
1044 e = (struct ipt_entry *)(table->private->entries + off);
1045 if (copy_to_user(userptr + off
1046 + offsetof(struct ipt_entry, counters),
1048 sizeof(counters[num])) != 0) {
/* Replace each kernel match pointer with the match's name string. */
1053 for (i = sizeof(struct ipt_entry);
1054 i < e->target_offset;
1055 i += m->u.match_size) {
1058 if (copy_to_user(userptr + off + i
1059 + offsetof(struct ipt_entry_match,
1061 m->u.kernel.match->name,
1062 strlen(m->u.kernel.match->name)+1)
1069 t = ipt_get_target(e);
1070 if (copy_to_user(userptr + off + e->target_offset
1071 + offsetof(struct ipt_entry_target,
1073 t->u.kernel.target->name,
1074 strlen(t->u.kernel.target->name)+1) != 0) {
/* get_entries(): IPT_SO_GET_ENTRIES worker — look up the named table
 * (taking mutex+ref), verify the caller's size expectation, and hand the
 * entries to copy_entries_to_user().  ERR_PTR/-ENOENT lookup failures are
 * converted to an errno return. */
1086 get_entries(const struct ipt_get_entries *entries,
1087 struct ipt_get_entries __user *uptr)
1090 struct ipt_table *t;
1092 t = find_table_lock(entries->name);
1093 if (t && !IS_ERR(t)) {
1094 duprintf("t->private->number = %u\n",
1095 t->private->number);
1096 if (entries->size == t->private->size)
1097 ret = copy_entries_to_user(t->private->size,
1098 t, uptr->entrytable);
1100 duprintf("get_entries: I've got %u not %u!\n",
1108 ret = t ? PTR_ERR(t) : -ENOENT;
/* do_replace(): IPT_SO_SET_REPLACE worker — copy the ipt_replace header and
 * new ruleset from userspace, validate/translate it, swap it into the named
 * table, adjust the module use count, then return the old ruleset's
 * counters to the caller and free the old info.  (NOTE(review): several
 * lines, including vfree cleanup and the module-count try_module_get/put
 * calls, are missing from this excerpt.) */
1114 do_replace(void __user *user, unsigned int len)
1117 struct ipt_replace tmp;
1118 struct ipt_table *t;
1119 struct ipt_table_info *newinfo, *oldinfo;
1120 struct ipt_counters *counters;
1122 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1125 /* Hack: Causes ipchains to give correct error msg --RR */
1126 if (len != sizeof(tmp) + tmp.size)
1127 return -ENOPROTOOPT;
1129 /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
1130 if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
/* One aligned copy of the blob per possible CPU. */
1133 newinfo = vmalloc(sizeof(struct ipt_table_info)
1134 + SMP_ALIGN(tmp.size) *
1135 (highest_possible_processor_id()+1));
1139 if (copy_from_user(newinfo->entries, user + sizeof(tmp),
1145 counters = vmalloc(tmp.num_counters * sizeof(struct ipt_counters));
1150 memset(counters, 0, tmp.num_counters * sizeof(struct ipt_counters));
1152 ret = translate_table(tmp.name, tmp.valid_hooks,
1153 newinfo, tmp.size, tmp.num_entries,
1154 tmp.hook_entry, tmp.underflow);
1156 goto free_newinfo_counters;
1158 duprintf("ip_tables: Translated table\n");
1160 t = try_then_request_module(find_table_lock(tmp.name),
1161 "iptable_%s", tmp.name);
1162 if (!t || IS_ERR(t)) {
1163 ret = t ? PTR_ERR(t) : -ENOENT;
1164 goto free_newinfo_counters_untrans;
1168 if (tmp.valid_hooks != t->valid_hooks) {
1169 duprintf("Valid hook crap: %08X vs %08X\n",
1170 tmp.valid_hooks, t->valid_hooks);
1175 oldinfo = replace_table(t, tmp.num_counters, newinfo, &ret);
1179 /* Update module usage count based on number of rules */
1180 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1181 oldinfo->number, oldinfo->initial_entries, newinfo->number);
/* Grew past / shrank back to the initial rule count: adjust the table
 * module's refcount so it can't unload while extra rules exist. */
1182 if ((oldinfo->number > oldinfo->initial_entries) ||
1183 (newinfo->number <= oldinfo->initial_entries))
1185 if ((oldinfo->number > oldinfo->initial_entries) &&
1186 (newinfo->number <= oldinfo->initial_entries))
1189 /* Get the old counters. */
1190 get_counters(oldinfo, counters);
1191 /* Decrease module usage counts and free resource */
1192 IPT_ENTRY_ITERATE(oldinfo->entries, oldinfo->size, cleanup_entry,NULL);
1194 if (copy_to_user(tmp.counters, counters,
1195 sizeof(struct ipt_counters) * tmp.num_counters) != 0)
1204 free_newinfo_counters_untrans:
1205 IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size, cleanup_entry,NULL);
1206 free_newinfo_counters:
1213 /* We're lazy, and add to the first CPU; overflow works its fey magic
1214 * and everything is OK. */
/* add_counter_to_entry(): iterator callback — add the user-supplied
 * addme[*i] byte/packet deltas onto one entry's counters. */
1216 add_counter_to_entry(struct ipt_entry *e,
1217 const struct ipt_counters addme[],
1221 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1223 (long unsigned int)e->counters.pcnt,
1224 (long unsigned int)e->counters.bcnt,
1225 (long unsigned int)addme[*i].pcnt,
1226 (long unsigned int)addme[*i].bcnt);
1229 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/* do_add_counters(): IPT_SO_SET_ADD_COUNTERS worker — copy the counter
 * array from userspace (validating len against num_counters), then under
 * the table write lock add each delta to the corresponding rule on the
 * first CPU's copy. */
1236 do_add_counters(void __user *user, unsigned int len)
1239 struct ipt_counters_info tmp, *paddc;
1240 struct ipt_table *t;
1243 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1246 if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct ipt_counters))
1249 paddc = vmalloc(len);
1253 if (copy_from_user(paddc, user, len) != 0) {
1258 t = find_table_lock(tmp.name);
1259 if (!t || IS_ERR(t)) {
1260 ret = t ? PTR_ERR(t) : -ENOENT;
1264 write_lock_bh(&t->lock);
/* Counter count must still match the live ruleset. */
1265 if (t->private->number != paddc->num_counters) {
1267 goto unlock_up_free;
1271 IPT_ENTRY_ITERATE(t->private->entries,
1273 add_counter_to_entry,
1277 write_unlock_bh(&t->lock);
/* do_ipt_set_ctl(): setsockopt entry point — requires CAP_NET_ADMIN, then
 * dispatches REPLACE and ADD_COUNTERS to their workers. */
1287 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1291 if (!capable(CAP_NET_ADMIN))
1295 case IPT_SO_SET_REPLACE:
1296 ret = do_replace(user, len);
1299 case IPT_SO_SET_ADD_COUNTERS:
1300 ret = do_add_counters(user, len);
1304 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
/* do_ipt_get_ctl(): getsockopt entry point (CAP_NET_ADMIN required) —
 * GET_INFO returns table metadata, GET_ENTRIES dumps the ruleset, and the
 * REVISION queries probe available match/target revisions. */
1312 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1316 if (!capable(CAP_NET_ADMIN))
1320 case IPT_SO_GET_INFO: {
1321 char name[IPT_TABLE_MAXNAMELEN];
1322 struct ipt_table *t;
1324 if (*len != sizeof(struct ipt_getinfo)) {
1325 duprintf("length %u != %u\n", *len,
1326 sizeof(struct ipt_getinfo));
1331 if (copy_from_user(name, user, sizeof(name)) != 0) {
/* Force NUL-termination of the user-supplied table name. */
1335 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1337 t = try_then_request_module(find_table_lock(name),
1338 "iptable_%s", name);
1339 if (t && !IS_ERR(t)) {
1340 struct ipt_getinfo info;
1342 info.valid_hooks = t->valid_hooks;
1343 memcpy(info.hook_entry, t->private->hook_entry,
1344 sizeof(info.hook_entry));
1345 memcpy(info.underflow, t->private->underflow,
1346 sizeof(info.underflow));
1347 info.num_entries = t->private->number;
1348 info.size = t->private->size;
1349 memcpy(info.name, name, sizeof(info.name));
1351 if (copy_to_user(user, &info, *len) != 0)
1358 ret = t ? PTR_ERR(t) : -ENOENT;
1362 case IPT_SO_GET_ENTRIES: {
1363 struct ipt_get_entries get;
1365 if (*len < sizeof(get)) {
1366 duprintf("get_entries: %u < %u\n", *len, sizeof(get));
1368 } else if (copy_from_user(&get, user, sizeof(get)) != 0) {
1370 } else if (*len != sizeof(struct ipt_get_entries) + get.size) {
1371 duprintf("get_entries: %u != %u\n", *len,
1372 sizeof(struct ipt_get_entries) + get.size);
1375 ret = get_entries(&get, user);
1379 case IPT_SO_GET_REVISION_MATCH:
1380 case IPT_SO_GET_REVISION_TARGET: {
1381 struct ipt_get_revision rev;
1382 int (*revfn)(const char *, u8, int *);
1384 if (*len != sizeof(rev)) {
1388 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1393 if (cmd == IPT_SO_GET_REVISION_TARGET)
1394 revfn = target_revfn;
1396 revfn = match_revfn;
1398 try_then_request_module(find_revision(rev.name, rev.revision,
1400 "ipt_%s", rev.name);
1405 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
1412 /* Registration hooks for targets. */
/* ipt_register_target()/ipt_unregister_target(): add/remove a target
 * extension on the global list, serialised by ipt_mutex (the up() calls
 * are not visible in this excerpt). */
1414 ipt_register_target(struct ipt_target *target)
1418 ret = down_interruptible(&ipt_mutex);
1421 list_add(&target->list, &ipt_target);
1427 ipt_unregister_target(struct ipt_target *target)
1430 LIST_DELETE(&ipt_target, target);
/* ipt_register_match()/ipt_unregister_match(): same pattern for match
 * extensions. */
1435 ipt_register_match(struct ipt_match *match)
1439 ret = down_interruptible(&ipt_mutex);
1443 list_add(&match->list, &ipt_match);
1450 ipt_unregister_match(struct ipt_match *match)
1453 LIST_DELETE(&ipt_match, match);
/* ipt_register_table(): register a table with its initial ruleset —
 * allocate per-CPU copies, translate/validate the replacement blob, then
 * under ipt_mutex reject duplicate names and install the ruleset via
 * replace_table() (seeded with an empty `bootstrap` info so replace_table
 * has something to swap out).  Records the initial entry count for later
 * module-refcount accounting. */
1460 struct ipt_table_info *newinfo;
1461 static struct ipt_table_info bootstrap
1462 = { 0, 0, 0, { 0 }, { 0 }, { } };
1464 newinfo = vmalloc(sizeof(struct ipt_table_info)
1465 + SMP_ALIGN(repl->size) *
1466 (highest_possible_processor_id()+1));
1470 memcpy(newinfo->entries, repl->entries, repl->size);
1472 ret = translate_table(table->name, table->valid_hooks,
1473 newinfo, repl->size,
1482 ret = down_interruptible(&ipt_mutex);
1488 /* Don't autoload: we'd eat our tail... */
1489 if (list_named_find(&ipt_tables, table->name)) {
1494 /* Simplifies replace_table code. */
1495 table->private = &bootstrap;
1496 if (!replace_table(table, 0, newinfo, &ret))
1499 duprintf("table->private->number = %u\n",
1500 table->private->number);
1502 /* save number of initial entries */
1503 table->private->initial_entries = table->private->number;
1505 rwlock_init(&table->lock);
1506 list_prepend(&ipt_tables, table);
/* ipt_unregister_table(): remove the table from the registry, release every
 * rule's matches/targets via cleanup_entry, and free the per-CPU info. */
1517 void ipt_unregister_table(struct ipt_table *table)
1520 LIST_DELETE(&ipt_tables, table);
1523 /* Decrease module usage counts and free resources */
1524 IPT_ENTRY_ITERATE(table->private->entries, table->private->size,
1525 cleanup_entry, NULL);
1526 vfree(table->private);
1529 /* Returns 1 if the port is matched by the range, 0 otherwise */
/* port_match(): inclusive [min,max] range test, XORed with the invert flag. */
1531 port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert)
1535 ret = (port >= min && port <= max) ^ invert;
/* tcp_find_option(): scan the TCP options area (read safely via
 * skb_header_pointer into a max-40-byte local buffer) for the given option
 * kind; result is XORed with `invert`.  One-byte options advance by 1,
 * others by their length field (`?: 1` guards a zero length). */
1540 tcp_find_option(u_int8_t option,
1541 const struct sk_buff *skb,
1542 unsigned int optlen,
1546 /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
1547 u_int8_t _opt[60 - sizeof(struct tcphdr)], *op;
1550 duprintf("tcp_match: finding option\n");
1555 /* If we don't have the whole header, drop packet. */
1556 op = skb_header_pointer(skb,
1557 skb->nh.iph->ihl*4 + sizeof(struct tcphdr),
1564 for (i = 0; i < optlen; ) {
1565 if (op[i] == option) return !invert;
1567 else i += op[i+1]?:1;
/* tcp_match(): the built-in TCP match — rejects evil offset-1 fragments,
 * reads the TCP header via skb_header_pointer (setting hotdrop on
 * truncation), then checks source/destination port ranges, flag mask/cmp,
 * and optionally a TCP option, each honouring its inversion bit. */
1574 tcp_match(const struct sk_buff *skb,
1575 const struct net_device *in,
1576 const struct net_device *out,
1577 const void *matchinfo,
1581 struct tcphdr _tcph, *th;
1582 const struct ipt_tcp *tcpinfo = matchinfo;
1587 Don't allow a fragment of TCP 8 bytes in. Nobody normal
1588 causes this. Its a cracker trying to break in by doing a
1589 flag overwrite to pass the direction checks.
1592 duprintf("Dropping evil TCP offset=1 frag.\n");
1595 /* Must not be a fragment. */
1599 #define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg))
1601 th = skb_header_pointer(skb, skb->nh.iph->ihl*4,
1602 sizeof(_tcph), &_tcph);
1604 /* We've been asked to examine this packet, and we
1605 can't. Hence, no choice but to drop. */
1606 duprintf("Dropping evil TCP offset=0 tinygram.\n");
1611 if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
1613 !!(tcpinfo->invflags & IPT_TCP_INV_SRCPT)))
1615 if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
1617 !!(tcpinfo->invflags & IPT_TCP_INV_DSTPT)))
/* Byte 13 of the TCP header holds the flag bits. */
1619 if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
1620 == tcpinfo->flg_cmp,
1623 if (tcpinfo->option) {
1624 if (th->doff * 4 < sizeof(_tcph)) {
1628 if (!tcp_find_option(tcpinfo->option, skb,
1629 th->doff*4 - sizeof(_tcph),
1630 tcpinfo->invflags & IPT_TCP_INV_OPTION,
1637 /* Called when user tries to insert an entry of this type. */
1639 tcp_checkentry(const char *tablename,
1640 const struct ipt_ip *ip,
1642 unsigned int matchsize,
1643 unsigned int hook_mask)
1645 const struct ipt_tcp *tcpinfo = matchinfo;
1647 /* Must specify proto == TCP, and no unknown invflags */
1648 return ip->proto == IPPROTO_TCP
1649 && !(ip->invflags & IPT_INV_PROTO)
1650 && matchsize == IPT_ALIGN(sizeof(struct ipt_tcp))
1651 && !(tcpinfo->invflags & ~IPT_TCP_INV_MASK);
1655 udp_match(const struct sk_buff *skb,
1656 const struct net_device *in,
1657 const struct net_device *out,
1658 const void *matchinfo,
1662 struct udphdr _udph, *uh;
1663 const struct ipt_udp *udpinfo = matchinfo;
1665 /* Must not be a fragment. */
1669 uh = skb_header_pointer(skb, skb->nh.iph->ihl*4,
1670 sizeof(_udph), &_udph);
1672 /* We've been asked to examine this packet, and we
1673 can't. Hence, no choice but to drop. */
1674 duprintf("Dropping evil UDP tinygram.\n");
1679 return port_match(udpinfo->spts[0], udpinfo->spts[1],
1681 !!(udpinfo->invflags & IPT_UDP_INV_SRCPT))
1682 && port_match(udpinfo->dpts[0], udpinfo->dpts[1],
1684 !!(udpinfo->invflags & IPT_UDP_INV_DSTPT));
1687 /* Called when user tries to insert an entry of this type. */
1689 udp_checkentry(const char *tablename,
1690 const struct ipt_ip *ip,
1692 unsigned int matchinfosize,
1693 unsigned int hook_mask)
1695 const struct ipt_udp *udpinfo = matchinfo;
1697 /* Must specify proto == UDP, and no unknown invflags */
1698 if (ip->proto != IPPROTO_UDP || (ip->invflags & IPT_INV_PROTO)) {
1699 duprintf("ipt_udp: Protocol %u != %u\n", ip->proto,
1703 if (matchinfosize != IPT_ALIGN(sizeof(struct ipt_udp))) {
1704 duprintf("ipt_udp: matchsize %u != %u\n",
1705 matchinfosize, IPT_ALIGN(sizeof(struct ipt_udp)));
1708 if (udpinfo->invflags & ~IPT_UDP_INV_MASK) {
1709 duprintf("ipt_udp: unknown flags %X\n",
/* Returns 1 if the type and code is matched by the range, 0 otherwise.
 * A test_type of 0xFF is the wildcard ("any ICMP type").  @invert must
 * be 0 or 1 and flips the result. */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
1728 icmp_match(const struct sk_buff *skb,
1729 const struct net_device *in,
1730 const struct net_device *out,
1731 const void *matchinfo,
1735 struct icmphdr _icmph, *ic;
1736 const struct ipt_icmp *icmpinfo = matchinfo;
1738 /* Must not be a fragment. */
1742 ic = skb_header_pointer(skb, skb->nh.iph->ihl*4,
1743 sizeof(_icmph), &_icmph);
1745 /* We've been asked to examine this packet, and we
1746 * can't. Hence, no choice but to drop.
1748 duprintf("Dropping evil ICMP tinygram.\n");
1753 return icmp_type_code_match(icmpinfo->type,
1757 !!(icmpinfo->invflags&IPT_ICMP_INV));
1760 /* Called when user tries to insert an entry of this type. */
1762 icmp_checkentry(const char *tablename,
1763 const struct ipt_ip *ip,
1765 unsigned int matchsize,
1766 unsigned int hook_mask)
1768 const struct ipt_icmp *icmpinfo = matchinfo;
1770 /* Must specify proto == ICMP, and no unknown invflags */
1771 return ip->proto == IPPROTO_ICMP
1772 && !(ip->invflags & IPT_INV_PROTO)
1773 && matchsize == IPT_ALIGN(sizeof(struct ipt_icmp))
1774 && !(icmpinfo->invflags & ~IPT_ICMP_INV);
1777 /* The built-in targets: standard (NULL) and error. */
1778 static struct ipt_target ipt_standard_target = {
1779 .name = IPT_STANDARD_TARGET,
1782 static struct ipt_target ipt_error_target = {
1783 .name = IPT_ERROR_TARGET,
1784 .target = ipt_error,
1787 static struct nf_sockopt_ops ipt_sockopts = {
1789 .set_optmin = IPT_BASE_CTL,
1790 .set_optmax = IPT_SO_SET_MAX+1,
1791 .set = do_ipt_set_ctl,
1792 .get_optmin = IPT_BASE_CTL,
1793 .get_optmax = IPT_SO_GET_MAX+1,
1794 .get = do_ipt_get_ctl,
1797 static struct ipt_match tcp_matchstruct = {
1799 .match = &tcp_match,
1800 .checkentry = &tcp_checkentry,
1803 static struct ipt_match udp_matchstruct = {
1805 .match = &udp_match,
1806 .checkentry = &udp_checkentry,
1809 static struct ipt_match icmp_matchstruct = {
1811 .match = &icmp_match,
1812 .checkentry = &icmp_checkentry,
1815 #ifdef CONFIG_PROC_FS
1816 static inline int print_name(const char *i,
1817 off_t start_offset, char *buffer, int length,
1818 off_t *pos, unsigned int *count)
1820 if ((*count)++ >= start_offset) {
1821 unsigned int namelen;
1823 namelen = sprintf(buffer + *pos, "%s\n",
1824 i + sizeof(struct list_head));
1825 if (*pos + namelen > length) {
1826 /* Stop iterating */
1834 static inline int print_target(const struct ipt_target *t,
1835 off_t start_offset, char *buffer, int length,
1836 off_t *pos, unsigned int *count)
1838 if (t == &ipt_standard_target || t == &ipt_error_target)
1840 return print_name((char *)t, start_offset, buffer, length, pos, count);
1843 static int ipt_get_tables(char *buffer, char **start, off_t offset, int length)
1846 unsigned int count = 0;
1848 if (down_interruptible(&ipt_mutex) != 0)
1851 LIST_FIND(&ipt_tables, print_name, void *,
1852 offset, buffer, length, &pos, &count);
1856 /* `start' hack - see fs/proc/generic.c line ~105 */
1857 *start=(char *)((unsigned long)count-offset);
1861 static int ipt_get_targets(char *buffer, char **start, off_t offset, int length)
1864 unsigned int count = 0;
1866 if (down_interruptible(&ipt_mutex) != 0)
1869 LIST_FIND(&ipt_target, print_target, struct ipt_target *,
1870 offset, buffer, length, &pos, &count);
1874 *start = (char *)((unsigned long)count - offset);
1878 static int ipt_get_matches(char *buffer, char **start, off_t offset, int length)
1881 unsigned int count = 0;
1883 if (down_interruptible(&ipt_mutex) != 0)
1886 LIST_FIND(&ipt_match, print_name, void *,
1887 offset, buffer, length, &pos, &count);
1891 *start = (char *)((unsigned long)count - offset);
1895 static struct { char *name; get_info_t *get_info; } ipt_proc_entry[] =
1896 { { "ip_tables_names", ipt_get_tables },
1897 { "ip_tables_targets", ipt_get_targets },
1898 { "ip_tables_matches", ipt_get_matches },
1900 #endif /*CONFIG_PROC_FS*/
1902 static int __init init(void)
1906 /* Noone else will be downing sem now, so we won't sleep */
1908 list_append(&ipt_target, &ipt_standard_target);
1909 list_append(&ipt_target, &ipt_error_target);
1910 list_append(&ipt_match, &tcp_matchstruct);
1911 list_append(&ipt_match, &udp_matchstruct);
1912 list_append(&ipt_match, &icmp_matchstruct);
1915 /* Register setsockopt */
1916 ret = nf_register_sockopt(&ipt_sockopts);
1918 duprintf("Unable to register sockopts.\n");
1922 #ifdef CONFIG_PROC_FS
1924 struct proc_dir_entry *proc;
1927 for (i = 0; ipt_proc_entry[i].name; i++) {
1928 proc = proc_net_create(ipt_proc_entry[i].name, 0,
1929 ipt_proc_entry[i].get_info);
1932 proc_net_remove(ipt_proc_entry[i].name);
1933 nf_unregister_sockopt(&ipt_sockopts);
1936 proc->owner = THIS_MODULE;
1941 printk("ip_tables: (C) 2000-2002 Netfilter core team\n");
1945 static void __exit fini(void)
1947 nf_unregister_sockopt(&ipt_sockopts);
1948 #ifdef CONFIG_PROC_FS
1951 for (i = 0; ipt_proc_entry[i].name; i++)
1952 proc_net_remove(ipt_proc_entry[i].name);
/* Public interface used by the per-table modules (iptable_filter etc.)
 * and by match/target extension modules. */
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_register_match);
EXPORT_SYMBOL(ipt_unregister_match);
EXPORT_SYMBOL(ipt_do_table);
EXPORT_SYMBOL(ipt_register_target);
EXPORT_SYMBOL(ipt_unregister_target);
EXPORT_SYMBOL(ipt_find_target);