/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
 *	- increase module usage count as soon as we have rules inside
 *	  a table
 */
#include <linux/config.h>
#include <linux/cache.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter_ipv4/ip_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
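/* Worked example: with SMP_CACHE_BYTES == 32, SMP_ALIGN(33) == 64 and
 * SMP_ALIGN(32) == 32; adding (alignment - 1) and masking off the low
 * bits is the usual power-of-two round-up idiom. */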
static DECLARE_MUTEX(ipt_mutex);

/* Must have mutex */
#define ASSERT_READ_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
#define ASSERT_WRITE_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
#include <linux/netfilter_ipv4/listhelp.h>
#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
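/*
 * To illustrate the pattern above, the packet path below does
 *
 *	read_lock_bh(&table->lock);
 *	... walk this CPU's copy of the rules, bump its counters ...
 *	read_unlock_bh(&table->lock);
 *
 * while rule replacement and counter reads take write_lock_bh(), so a
 * per-CPU copy is never rewritten concurrently with a traversal.
 */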
/* The table itself */
struct ipt_table_info
{
	/* Size per table */
	unsigned int size;
	/* Number of entries: FIXME. --RR */
	unsigned int number;
	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows */
	unsigned int hook_entry[NF_IP_NUMHOOKS];
	unsigned int underflow[NF_IP_NUMHOOKS];

	/* ipt_entry tables: one per CPU */
	void *entries[NR_CPUS];
};
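/* For instance, on a two-CPU machine entries[0] and entries[1] point at
 * two identical 'size'-byte copies of the rule blob; translate/replace
 * keep them in step, and only their counters diverge at runtime. */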
static LIST_HEAD(ipt_target);
static LIST_HEAD(ipt_match);
static LIST_HEAD(ipt_tables);
#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
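/* Usage example: ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1) credits a
 * rule with one more packet plus its byte length; the do/while(0) wrapper
 * keeps these multi-statement macros safe in unbraced if/else bodies. */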
#if 0
/* Debugging wrappers for the mutex primitives; normally compiled out. */
#define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
#define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
#define up(x) do { printk("UP:%u:" #x "\n", __LINE__); up(x); } while(0)
#endif
/* Returns whether matches rule or not. */
static inline int
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
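/* FWINV worked through: if a test fails (bool == 1) and the rule carries
 * the corresponding IPT_INV_* flag, 1 ^ 1 == 0 and the mismatch is
 * forgiven; without the flag, 1 ^ 0 == 1 and the rule does not match. */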
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return 0;
	}
	/* Look for ifname matches; this should unroll nicely. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}

	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}
	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return 0;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return 0;
	}

	return 1;
}
static inline int
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return 0;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return 0;
	}
	return 1;
}
static unsigned int
ipt_error(struct sk_buff **pskb,
	  const struct net_device *in,
	  const struct net_device *out,
	  unsigned int hooknum,
	  const void *targinfo,
	  void *userinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}
static inline
int do_match(struct ipt_entry_match *m,
	     const struct sk_buff *skb,
	     const struct net_device *in,
	     const struct net_device *out,
	     int offset,
	     int *hotdrop)
{
	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, in, out, m->data, offset, hotdrop))
		return 1;
	else
		return 0;
}
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
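/* Rules live in one contiguous blob of variable-sized ipt_entry records,
 * so all "links" between them are byte offsets from the blob base; for
 * example, the entry after e is get_entry(base, (void *)e - base
 * + e->next_offset). Every jump in the traversal below resolves this way. */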
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct ipt_table *table,
	     void *userdata)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	u_int16_t offset;
	struct iphdr *ip;
	u_int16_t datalen;
	int hotdrop = 0;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;

	/* Initialization */
	ip = (*pskb)->nh.iph;
	datalen = (*pskb)->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know (ie. the tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;
	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	table_base = (void *)table->private->entries[smp_processor_id()];
	e = get_entry(table_base, table->private->hook_entry[hook]);

#ifdef CONFIG_NETFILTER_DEBUG
	/* Check that no one else is using our table */
	if (((struct ipt_entry *)table_base)->comefrom != 0xdead57ac
	    && ((struct ipt_entry *)table_base)->comefrom != 0xeeeeeeec) {
		printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n",
		       smp_processor_id(),
		       table->name,
		       &((struct ipt_entry *)table_base)->comefrom,
		       ((struct ipt_entry *)table_base)->comefrom);
	}
	((struct ipt_entry *)table_base)->comefrom = 0x57acc001;
#endif

	/* For return from builtin chain */
	back = get_entry(table_base, table->private->underflow[hook]);
	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match,
					      *pskb, in, out,
					      offset, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
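						/* Userspace encodes an absolute
						   verdict V as -V - 1, so e.g. a
						   stored -2 decodes here to
						   (unsigned)2 - 1 == NF_ACCEPT. */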
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(pskb,
								     in, out,
								     hook,
								     t->data,
								     userdata);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = (*pskb)->nh.iph;
				datalen = (*pskb)->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);
#ifdef CONFIG_NETFILTER_DEBUG
	((struct ipt_entry *)table_base)->comefrom = 0xdead57ac;
#endif
	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else
		return verdict;
#endif
}
/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use try_then_request_module().
 */

/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
static inline struct ipt_table *find_table_lock(const char *name)
{
	struct ipt_table *t;

	if (down_interruptible(&ipt_mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &ipt_tables, list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;
	up(&ipt_mutex);
	return NULL;
}
/* Find match, grabs ref.  Returns ERR_PTR() on error. */
static inline struct ipt_match *find_match(const char *name, u8 revision)
{
	struct ipt_match *m;
	int err = 0;

	if (down_interruptible(&ipt_mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(m, &ipt_match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					up(&ipt_mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	up(&ipt_mutex);
	return ERR_PTR(err);
}
/* Find target, grabs ref.  Returns ERR_PTR() on error. */
static inline struct ipt_target *find_target(const char *name, u8 revision)
{
	struct ipt_target *t;
	int err = 0;

	if (down_interruptible(&ipt_mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &ipt_target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					up(&ipt_mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	up(&ipt_mutex);
	return ERR_PTR(err);
}
struct ipt_target *ipt_find_target(const char *name, u8 revision)
{
	struct ipt_target *target;

	target = try_then_request_module(find_target(name, revision),
					 "ipt_%s", name);
	if (IS_ERR(target) || !target)
		return NULL;
	return target;
}
static int match_revfn(const char *name, u8 revision, int *bestp)
{
	struct ipt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &ipt_match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}
	return have_rev;
}
static int target_revfn(const char *name, u8 revision, int *bestp)
{
	struct ipt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &ipt_target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}
	return have_rev;
}
/* Returns true or false (if no such extension at all) */
static inline int find_revision(const char *name, u8 revision,
				int (*revfn)(const char *, u8, int *),
				int *err)
{
	int have_rev, best = -1;

	if (down_interruptible(&ipt_mutex) != 0) {
		*err = -EINTR;
		return 1;
	}
	have_rev = revfn(name, revision, &best);
	up(&ipt_mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
}
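/* In other words, a rule whose ipt_ip part is entirely zero matches every
 * packet; mark_source_chains() below relies on this to recognize the
 * unconditional RETURN/policy entries that terminate each chain. */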
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct ipt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e
			= (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);

			if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_IP_NUMHOOKS));

			/* Unconditional return/END. */
			if (e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) {
				unsigned int oldpos, size;

				/* Return: backtrack through the last
				   big jumps. */
				do {
					e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_IP_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					/* This is a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
	next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->data,
					   m->u.match_size - sizeof(*m));
	module_put(m->u.kernel.match->me);
	return 0;
}
static inline int
standard_check(const struct ipt_entry_target *t,
	       unsigned int max_offset)
{
	struct ipt_standard_target *targ = (void *)t;

	/* Check standard info. */
	if (t->u.target_size
	    != IPT_ALIGN(sizeof(struct ipt_standard_target))) {
		duprintf("standard_check: target size %u != %u\n",
			 t->u.target_size,
			 IPT_ALIGN(sizeof(struct ipt_standard_target)));
		return 0;
	}

	if (targ->verdict >= 0
	    && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
		duprintf("ipt_standard_check: bad verdict (%i)\n",
			 targ->verdict);
		return 0;
	}

	if (targ->verdict < -NF_MAX_VERDICT - 1) {
		duprintf("ipt_standard_check: bad negative verdict (%i)\n",
			 targ->verdict);
		return 0;
	}
	return 1;
}
static inline int
check_match(struct ipt_entry_match *m,
	    const char *name,
	    const struct ipt_ip *ip,
	    unsigned int hookmask,
	    unsigned int *i)
{
	struct ipt_match *match;

	match = try_then_request_module(find_match(m->u.user.name,
						   m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	if (m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, m->data,
					      m->u.match_size - sizeof(*m),
					      hookmask)) {
		module_put(m->u.kernel.match->me);
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		return -EINVAL;
	}

	(*i)++;
	return 0;
}
static struct ipt_target ipt_standard_target;
static inline int
check_entry(struct ipt_entry *e, const char *name, unsigned int size,
	    unsigned int *i)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	int ret;
	unsigned int j;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	j = 0;
	ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(find_target(t->u.user.name,
						     t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	if (t->u.kernel.target == &ipt_standard_target) {
		if (!standard_check(t, size)) {
			ret = -EINVAL;
			goto cleanup_matches;
		}
	} else if (t->u.kernel.target->checkentry
		   && !t->u.kernel.target->checkentry(name, e, t->data,
						      t->u.target_size
						      - sizeof(*t),
						      e->comefrom)) {
		module_put(t->u.kernel.target->me);
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
		goto cleanup_matches;
	}

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct ipt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct ipt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->data,
					    t->u.target_size - sizeof(*t));
	module_put(t->u.kernel.target->me);
	return 0;
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct ipt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry, name, size, &i);
	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
static struct ipt_table_info *
replace_table(struct ipt_table *table,
	      unsigned int num_counters,
	      struct ipt_table_info *newinfo,
	      int *error)
{
	struct ipt_table_info *oldinfo;

#ifdef CONFIG_NETFILTER_DEBUG
	{
		int cpu;

		for_each_cpu(cpu) {
			struct ipt_entry *table_base = newinfo->entries[cpu];
			if (table_base)
				table_base->comefrom = 0xdead57ac;
		}
	}
#endif

	/* Do the substitution. */
	write_lock_bh(&table->lock);
	/* Check inside lock: is the old number correct? */
	if (num_counters != table->private->number) {
		duprintf("num_counters != table->private->number (%u/%u)\n",
			 num_counters, table->private->number);
		write_unlock_bh(&table->lock);
		*error = -EAGAIN;
		return NULL;
	}
	oldinfo = table->private;
	table->private = newinfo;
	newinfo->initial_entries = oldinfo->initial_entries;
	write_unlock_bh(&table->lock);

	return oldinfo;
}
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
static void
get_counters(const struct ipt_table_info *t,
	     struct ipt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by the 'current' CPU.
	 * We don't care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
	}
}
static int
copy_entries_to_user(unsigned int total_size,
		     struct ipt_table *table,
		     void __user *userptr)
{
	unsigned int off, num, countersize;
	struct ipt_entry *e;
	struct ipt_counters *counters;
	int ret = 0;
	void *loc_cpu_entry;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct ipt_counters) * table->private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return -ENOMEM;

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(table->private, counters);
	write_unlock_bh(&table->lock);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
static int
get_entries(const struct ipt_get_entries *entries,
	    struct ipt_get_entries __user *uptr)
{
	int ret;
	struct ipt_table *t;

	t = find_table_lock(entries->name);
	if (t && !IS_ERR(t)) {
		duprintf("t->private->number = %u\n",
			 t->private->number);
		if (entries->size == t->private->size)
			ret = copy_entries_to_user(t->private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 t->private->size, entries->size);
			ret = -EINVAL;
		}
		module_put(t->me);
		up(&ipt_mutex);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static void free_table_info(struct ipt_table_info *info)
{
	int cpu;

	for_each_cpu(cpu) {
		if (info->size <= PAGE_SIZE)
			kfree(info->entries[cpu]);
		else
			vfree(info->entries[cpu]);
	}
	kfree(info);
}
static struct ipt_table_info *alloc_table_info(unsigned int size)
{
	struct ipt_table_info *newinfo;
	int cpu;

	newinfo = kzalloc(sizeof(struct ipt_table_info), GFP_KERNEL);
	if (!newinfo)
		return NULL;

	newinfo->size = size;

	for_each_cpu(cpu) {
		if (size <= PAGE_SIZE)
			newinfo->entries[cpu] = kmalloc_node(size,
							     GFP_KERNEL,
							     cpu_to_node(cpu));
		else
			newinfo->entries[cpu] = vmalloc_node(size,
							     cpu_to_node(cpu));
		if (newinfo->entries[cpu] == NULL) {
			free_table_info(newinfo);
			return NULL;
		}
	}

	return newinfo;
}
static int
do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct ipt_table *t;
	struct ipt_table_info *newinfo, *oldinfo;
	struct ipt_counters *counters;
	void *loc_cpu_entry, *loc_cpu_old_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
		return -ENOMEM;

	newinfo = alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	counters = vmalloc(tmp.num_counters * sizeof(struct ipt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo_counters;

	duprintf("ip_tables: Translated table\n");

	t = try_then_request_module(find_table_lock(tmp.name),
				    "iptable_%s", tmp.name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (tmp.valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 tmp.valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = replace_table(t, tmp.num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, NULL);
	free_table_info(oldinfo);
	if (copy_to_user(tmp.counters, counters,
			 sizeof(struct ipt_counters) * tmp.num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	up(&ipt_mutex);
	return ret;

 put_module:
	module_put(t->me);
	up(&ipt_mutex);
 free_newinfo_counters_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo_counters:
	vfree(counters);
 free_newinfo:
	free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static inline int
add_counter_to_entry(struct ipt_entry *e,
		     const struct ipt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
static int
do_add_counters(void __user *user, unsigned int len)
{
	unsigned int i;
	struct ipt_counters_info tmp, *paddc;
	struct ipt_table *t;
	int ret = 0;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct ipt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user, len) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = find_table_lock(tmp.name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	if (t->private->number != paddc->num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = t->private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  t->private->size,
			  add_counter_to_entry,
			  paddc->counters,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	up(&ipt_mutex);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO: {
		char name[IPT_TABLE_MAXNAMELEN];
		struct ipt_table *t;

		if (*len != sizeof(struct ipt_getinfo)) {
			duprintf("length %u != %u\n", *len,
				 sizeof(struct ipt_getinfo));
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(name, user, sizeof(name)) != 0) {
			ret = -EFAULT;
			break;
		}
		name[IPT_TABLE_MAXNAMELEN-1] = '\0';

		t = try_then_request_module(find_table_lock(name),
					    "iptable_%s", name);
		if (t && !IS_ERR(t)) {
			struct ipt_getinfo info;

			info.valid_hooks = t->valid_hooks;
			memcpy(info.hook_entry, t->private->hook_entry,
			       sizeof(info.hook_entry));
			memcpy(info.underflow, t->private->underflow,
			       sizeof(info.underflow));
			info.num_entries = t->private->number;
			info.size = t->private->size;
			memcpy(info.name, name, sizeof(info.name));

			if (copy_to_user(user, &info, *len) != 0)
				ret = -EFAULT;
			else
				ret = 0;
			up(&ipt_mutex);
			module_put(t->me);
		} else
			ret = t ? PTR_ERR(t) : -ENOENT;
		break;
	}

	case IPT_SO_GET_ENTRIES: {
		struct ipt_get_entries get;

		if (*len < sizeof(get)) {
			duprintf("get_entries: %u < %u\n", *len, sizeof(get));
			ret = -EINVAL;
		} else if (copy_from_user(&get, user, sizeof(get)) != 0) {
			ret = -EFAULT;
		} else if (*len != sizeof(struct ipt_get_entries) + get.size) {
			duprintf("get_entries: %u != %u\n", *len,
				 sizeof(struct ipt_get_entries) + get.size);
			ret = -EINVAL;
		} else
			ret = get_entries(&get, user);
		break;
	}

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int (*revfn)(const char *, u8, int *);

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			revfn = target_revfn;
		else
			revfn = match_revfn;

		try_then_request_module(find_revision(rev.name, rev.revision,
						      revfn, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
/* Registration hooks for targets. */
int
ipt_register_target(struct ipt_target *target)
{
	int ret;

	ret = down_interruptible(&ipt_mutex);
	if (ret != 0)
		return ret;
	list_add(&target->list, &ipt_target);
	up(&ipt_mutex);
	return ret;
}

void
ipt_unregister_target(struct ipt_target *target)
{
	down(&ipt_mutex);
	LIST_DELETE(&ipt_target, target);
	up(&ipt_mutex);
}

int
ipt_register_match(struct ipt_match *match)
{
	int ret;

	ret = down_interruptible(&ipt_mutex);
	if (ret != 0)
		return ret;

	list_add(&match->list, &ipt_match);
	up(&ipt_mutex);

	return ret;
}

void
ipt_unregister_match(struct ipt_match *match)
{
	down(&ipt_mutex);
	LIST_DELETE(&ipt_match, match);
	up(&ipt_mutex);
}
int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl)
{
	int ret;
	struct ipt_table_info *newinfo;
	static struct ipt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;

	newinfo = alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy on our node/cpu
	 * but don't care about preemption
	 */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0) {
		free_table_info(newinfo);
		return ret;
	}

	ret = down_interruptible(&ipt_mutex);
	if (ret != 0) {
		free_table_info(newinfo);
		return ret;
	}

	/* Don't autoload: we'd eat our tail... */
	if (list_named_find(&ipt_tables, table->name)) {
		ret = -EEXIST;
		goto free_unlock;
	}

	/* Simplifies replace_table code. */
	table->private = &bootstrap;
	if (!replace_table(table, 0, newinfo, &ret))
		goto free_unlock;

	duprintf("table->private->number = %u\n",
		 table->private->number);

	/* save number of initial entries */
	table->private->initial_entries = table->private->number;

	rwlock_init(&table->lock);
	list_prepend(&ipt_tables, table);

	up(&ipt_mutex);
	return ret;

 free_unlock:
	up(&ipt_mutex);
	free_table_info(newinfo);
	return ret;
}
void ipt_unregister_table(struct ipt_table *table)
{
	void *loc_cpu_entry;

	down(&ipt_mutex);
	LIST_DELETE(&ipt_tables, table);
	up(&ipt_mutex);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, table->private->size,
			  cleanup_entry, NULL);
	free_table_info(table->private);
}
/* Returns 1 if the port is matched by the range, 0 otherwise */
static inline int
port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert)
{
	int ret;

	ret = (port >= min && port <= max) ^ invert;
	return ret;
}
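/* For instance, min = 1024, max = 65535, port = 80 gives 0 (no match),
 * while with invert == 1 the same range matches exactly the ports outside
 * it, so port 80 then gives 1. */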
static int
tcp_find_option(u_int8_t option,
		const struct sk_buff *skb,
		unsigned int optlen,
		int invert,
		int *hotdrop)
{
	/* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
	u_int8_t _opt[60 - sizeof(struct tcphdr)], *op;
	unsigned int i;

	duprintf("tcp_match: finding option\n");

	if (!optlen)
		return invert;

	/* If we don't have the whole header, drop packet. */
	op = skb_header_pointer(skb,
				skb->nh.iph->ihl*4 + sizeof(struct tcphdr),
				optlen, _opt);
	if (op == NULL) {
		*hotdrop = 1;
		return 0;
	}

	for (i = 0; i < optlen; ) {
		if (op[i] == option) return !invert;
		if (i < optlen - 1 && op[i] < 2) i++;
		else i += op[i+1]?:1;
	}

	return invert;
}
static int
tcp_match(const struct sk_buff *skb,
	  const struct net_device *in,
	  const struct net_device *out,
	  const void *matchinfo,
	  int offset,
	  int *hotdrop)
{
	struct tcphdr _tcph, *th;
	const struct ipt_tcp *tcpinfo = matchinfo;

	if (offset) {
		/* To quote Alan:

		   Don't allow a fragment of TCP 8 bytes in. Nobody normal
		   causes this. It's a cracker trying to break in by doing a
		   flag overwrite to pass the direction checks.
		*/
		if (offset == 1) {
			duprintf("Dropping evil TCP offset=1 frag.\n");
			*hotdrop = 1;
		}
		/* Must not be a fragment. */
		return 0;
	}

#define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg))

	th = skb_header_pointer(skb, skb->nh.iph->ihl*4,
				sizeof(_tcph), &_tcph);
	if (th == NULL) {
		/* We've been asked to examine this packet, and we
		   can't.  Hence, no choice but to drop. */
		duprintf("Dropping evil TCP offset=0 tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
			ntohs(th->source),
			!!(tcpinfo->invflags & IPT_TCP_INV_SRCPT)))
		return 0;
	if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
			ntohs(th->dest),
			!!(tcpinfo->invflags & IPT_TCP_INV_DSTPT)))
		return 0;
	if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
		      == tcpinfo->flg_cmp,
		      IPT_TCP_INV_FLAGS))
		return 0;
	if (tcpinfo->option) {
		if (th->doff * 4 < sizeof(_tcph)) {
			*hotdrop = 1;
			return 0;
		}
		if (!tcp_find_option(tcpinfo->option, skb,
				     th->doff*4 - sizeof(_tcph),
				     tcpinfo->invflags & IPT_TCP_INV_OPTION,
				     hotdrop))
			return 0;
	}
	return 1;
}
/* Called when user tries to insert an entry of this type. */
static int
tcp_checkentry(const char *tablename,
	       const struct ipt_ip *ip,
	       void *matchinfo,
	       unsigned int matchsize,
	       unsigned int hook_mask)
{
	const struct ipt_tcp *tcpinfo = matchinfo;

	/* Must specify proto == TCP, and no unknown invflags */
	return ip->proto == IPPROTO_TCP
		&& !(ip->invflags & IPT_INV_PROTO)
		&& matchsize == IPT_ALIGN(sizeof(struct ipt_tcp))
		&& !(tcpinfo->invflags & ~IPT_TCP_INV_MASK);
}
static int
udp_match(const struct sk_buff *skb,
	  const struct net_device *in,
	  const struct net_device *out,
	  const void *matchinfo,
	  int offset,
	  int *hotdrop)
{
	struct udphdr _udph, *uh;
	const struct ipt_udp *udpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return 0;

	uh = skb_header_pointer(skb, skb->nh.iph->ihl*4,
				sizeof(_udph), &_udph);
	if (uh == NULL) {
		/* We've been asked to examine this packet, and we
		   can't.  Hence, no choice but to drop. */
		duprintf("Dropping evil UDP tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	return port_match(udpinfo->spts[0], udpinfo->spts[1],
			  ntohs(uh->source),
			  !!(udpinfo->invflags & IPT_UDP_INV_SRCPT))
		&& port_match(udpinfo->dpts[0], udpinfo->dpts[1],
			      ntohs(uh->dest),
			      !!(udpinfo->invflags & IPT_UDP_INV_DSTPT));
}
/* Called when user tries to insert an entry of this type. */
static int
udp_checkentry(const char *tablename,
	       const struct ipt_ip *ip,
	       void *matchinfo,
	       unsigned int matchinfosize,
	       unsigned int hook_mask)
{
	const struct ipt_udp *udpinfo = matchinfo;

	/* Must specify proto == UDP, and no unknown invflags */
	if (ip->proto != IPPROTO_UDP || (ip->invflags & IPT_INV_PROTO)) {
		duprintf("ipt_udp: Protocol %u != %u\n", ip->proto,
			 IPPROTO_UDP);
		return 0;
	}
	if (matchinfosize != IPT_ALIGN(sizeof(struct ipt_udp))) {
		duprintf("ipt_udp: matchsize %u != %u\n",
			 matchinfosize, IPT_ALIGN(sizeof(struct ipt_udp)));
		return 0;
	}
	if (udpinfo->invflags & ~IPT_UDP_INV_MASK) {
		duprintf("ipt_udp: unknown flags %X\n",
			 udpinfo->invflags);
		return 0;
	}

	return 1;
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	return ((test_type == 0xFF)
		|| (type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
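/* Example: test_type = ICMP_DEST_UNREACH (3), min_code = 0, max_code = 3
 * matches destination-unreachable packets with codes 0..3, while
 * test_type = 0xFF is the wildcard that matches any ICMP type. */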
static int
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const void *matchinfo,
	   int offset,
	   int *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return 0;

	ic = skb_header_pointer(skb, skb->nh.iph->ihl*4,
				sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
/* Called when user tries to insert an entry of this type. */
static int
icmp_checkentry(const char *tablename,
		const struct ipt_ip *ip,
		void *matchinfo,
		unsigned int matchsize,
		unsigned int hook_mask)
{
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must specify proto == ICMP, and no unknown invflags */
	return ip->proto == IPPROTO_ICMP
		&& !(ip->invflags & IPT_INV_PROTO)
		&& matchsize == IPT_ALIGN(sizeof(struct ipt_icmp))
		&& !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
/* The built-in targets: standard (NULL) and error. */
static struct ipt_target ipt_standard_target = {
	.name		= IPT_STANDARD_TARGET,
};

static struct ipt_target ipt_error_target = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
};
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
};
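/*
 * For illustration, userspace reaches the handlers above through the
 * generic IP socket option interface; a minimal sketch (error handling
 * omitted) might look like:
 *
 *	struct ipt_getinfo info;
 *	socklen_t len = sizeof(info);
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *	strcpy(info.name, "filter");
 *	getsockopt(fd, IPPROTO_IP, IPT_SO_GET_INFO, &info, &len);
 *
 * which lands in do_ipt_get_ctl() with cmd == IPT_SO_GET_INFO.
 */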
static struct ipt_match tcp_matchstruct = {
	.name		= "tcp",
	.match		= &tcp_match,
	.checkentry	= &tcp_checkentry,
};

static struct ipt_match udp_matchstruct = {
	.name		= "udp",
	.match		= &udp_match,
	.checkentry	= &udp_checkentry,
};

static struct ipt_match icmp_matchstruct = {
	.name		= "icmp",
	.match		= &icmp_match,
	.checkentry	= &icmp_checkentry,
};
#ifdef CONFIG_PROC_FS
static inline int print_name(const char *i,
			     off_t start_offset, char *buffer, int length,
			     off_t *pos, unsigned int *count)
{
	if ((*count)++ >= start_offset) {
		unsigned int namelen;

		namelen = sprintf(buffer + *pos, "%s\n",
				  i + sizeof(struct list_head));
		if (*pos + namelen > length) {
			/* Stop iterating */
			buffer[*pos] = '\0';
			return 1;
		}
		*pos += namelen;
	}
	return 0;
}

static inline int print_target(const struct ipt_target *t,
			       off_t start_offset, char *buffer, int length,
			       off_t *pos, unsigned int *count)
{
	/* Don't show the built-in targets */
	if (t == &ipt_standard_target || t == &ipt_error_target)
		return 0;
	return print_name((char *)t, start_offset, buffer, length, pos, count);
}
static int ipt_get_tables(char *buffer, char **start, off_t offset, int length)
{
	off_t pos = 0;
	unsigned int count = 0;

	if (down_interruptible(&ipt_mutex) != 0)
		return 0;

	LIST_FIND(&ipt_tables, print_name, void *,
		  offset, buffer, length, &pos, &count);

	up(&ipt_mutex);

	/* `start' hack - see fs/proc/generic.c line ~105 */
	*start = (char *)((unsigned long)count - offset);
	return pos;
}

static int ipt_get_targets(char *buffer, char **start, off_t offset, int length)
{
	off_t pos = 0;
	unsigned int count = 0;

	if (down_interruptible(&ipt_mutex) != 0)
		return 0;

	LIST_FIND(&ipt_target, print_target, struct ipt_target *,
		  offset, buffer, length, &pos, &count);

	up(&ipt_mutex);

	*start = (char *)((unsigned long)count - offset);
	return pos;
}

static int ipt_get_matches(char *buffer, char **start, off_t offset, int length)
{
	off_t pos = 0;
	unsigned int count = 0;

	if (down_interruptible(&ipt_mutex) != 0)
		return 0;

	LIST_FIND(&ipt_match, print_name, void *,
		  offset, buffer, length, &pos, &count);

	up(&ipt_mutex);

	*start = (char *)((unsigned long)count - offset);
	return pos;
}
static const struct { char *name; get_info_t *get_info; } ipt_proc_entry[] =
{ { "ip_tables_names", ipt_get_tables },
  { "ip_tables_targets", ipt_get_targets },
  { "ip_tables_matches", ipt_get_matches },
  { NULL, NULL} };
#endif /*CONFIG_PROC_FS*/
static int __init init(void)
{
	int ret;

	/* No one else will be downing sem now, so we won't sleep */
	down(&ipt_mutex);
	list_append(&ipt_target, &ipt_standard_target);
	list_append(&ipt_target, &ipt_error_target);
	list_append(&ipt_match, &tcp_matchstruct);
	list_append(&ipt_match, &udp_matchstruct);
	list_append(&ipt_match, &icmp_matchstruct);
	up(&ipt_mutex);

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0) {
		duprintf("Unable to register sockopts.\n");
		return ret;
	}

#ifdef CONFIG_PROC_FS
	{
		struct proc_dir_entry *proc;
		int i;

		for (i = 0; ipt_proc_entry[i].name; i++) {
			proc = proc_net_create(ipt_proc_entry[i].name, 0,
					       ipt_proc_entry[i].get_info);
			if (!proc) {
				while (--i >= 0)
					proc_net_remove(ipt_proc_entry[i].name);
				nf_unregister_sockopt(&ipt_sockopts);
				return -ENOMEM;
			}
			proc->owner = THIS_MODULE;
		}
	}
#endif

	printk("ip_tables: (C) 2000-2002 Netfilter core team\n");
	return 0;
}
static void __exit fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);
#ifdef CONFIG_PROC_FS
	{
		int i;
		for (i = 0; ipt_proc_entry[i].name; i++)
			proc_net_remove(ipt_proc_entry[i].name);
	}
#endif
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_register_match);
EXPORT_SYMBOL(ipt_unregister_match);
EXPORT_SYMBOL(ipt_do_table);
EXPORT_SYMBOL(ipt_register_target);
EXPORT_SYMBOL(ipt_unregister_target);
EXPORT_SYMBOL(ipt_find_target);

module_init(init);
module_exit(fini);