/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
/* All the better to debug you with... */

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
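/*
 * NOTE (illustrative sketch, not in the original source): the locking
 * pattern described above boils down to readers in softirq context and
 * a writer in user context, roughly:
 *
 *	read_lock_bh(&table->lock);	// packet path: match, bump counters
 *	...walk this CPU's copy of the ruleset...
 *	read_unlock_bh(&table->lock);
 *
 *	write_lock_bh(&table->lock);	// user context: swap in new rules
 *	...replace table->private or snapshot counters...
 *	write_unlock_bh(&table->lock);
 */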
/* Returns whether matches rule or not. */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
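/*
 * NOTE (worked example, not in the original source): FWINV() XORs a raw
 * comparison with the state of the corresponding IPT_INV_* flag, e.g.
 *
 *	FWINV(mismatch, IPT_INV_SRCIP):
 *		mismatch  flag set  result
 *		    0        0        0   (match, no "!")
 *		    1        0        1   (no match)
 *		    0        1        1   ("! -s" given and it matched)
 *		    1        1        0   ("! -s" given and it didn't)
 *
 * so a true result always means "this rule does not apply".
 */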
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}
	/* Look for ifname matches; this should unroll nicely. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}
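	/*
	 * NOTE (illustrative, not in the original source): the loop above
	 * compares the name one unsigned long at a time.  XOR yields the
	 * differing bits; ANDing with the mask keeps only the bytes the
	 * rule cares about, so a wildcard rule such as "eth+" (whose mask
	 * covers just "eth") matches "eth0" and "eth1" alike, and ret can
	 * stay 0 only on a (masked) match.
	 */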
	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}
	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}
static unsigned int
ipt_error(struct sk_buff *skb,
	  const struct net_device *in,
	  const struct net_device *out,
	  unsigned int hooknum,
	  const struct xt_target *target,
	  const void *targinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}
static inline
bool do_match(struct ipt_entry_match *m,
	      const struct sk_buff *skb,
	      const struct net_device *in,
	      const struct net_device *out,
	      int offset,
	      bool *hotdrop)
{
	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
				      offset, ip_hdrlen(skb), hotdrop))
		return true;
	else
		return false;
}
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
}
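/*
 * NOTE (illustrative, not in the original source): a rule whose ipt_ip
 * part is all zeroes has no address, interface, protocol or flag
 * constraints, so it matches every packet -- this is what chain policies
 * and the implicit RETURN at the end of a user-defined chain look like.
 */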
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *hooknames[] = {
	[NF_INET_PRE_ROUTING]	= "PREROUTING",
	[NF_INET_LOCAL_IN]	= "INPUT",
	[NF_INET_FORWARD]	= "FORWARD",
	[NF_INET_LOCAL_OUT]	= "OUTPUT",
	[NF_INET_POST_ROUTING]	= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
static inline int
get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
		      char *hookname, char **chainname,
		      char **comment, unsigned int *rulenum)
{
	struct ipt_standard_target *t = (void *)ipt_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry)
		    && strcmp(t->target.u.kernel.target->name,
			      IPT_STANDARD_TARGET) == 0
		    && t->verdict < 0
		    && unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
				: (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ipt_entry *e)
{
	void *table_base;
	struct ipt_entry *root;
	char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	table_base = (void *)private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = (char *)hooknames[hook];
	comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];

	IPT_ENTRY_ITERATE(root,
			  private->size - private->hook_entry[hook],
			  get_chainname_rulenum,
			  e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
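/*
 * NOTE (illustrative example, not in the original source): with the
 * TRACE target set, a packet traversing the filter table's INPUT chain
 * and hitting its policy would log something like
 *
 *	TRACE: filter:INPUT:policy:2
 *
 * i.e. table, chain, rule/return/policy, and the 1-based rule number.
 */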
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	u_int16_t offset;
	struct iphdr *ip;
	u_int16_t datalen;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;

	/* Initialization */
	ip = ip_hdr(skb);
	datalen = skb->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. the tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;
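	/*
	 * NOTE (illustrative, not in the original source): IP_OFFSET masks
	 * out the fragment-offset field, so offset == 0 both for
	 * unfragmented packets and for the first fragment (which still
	 * carries the transport header); any later fragment has
	 * offset != 0 and is what the comment above calls "other
	 * fragments".
	 */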
	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match,
					      skb, in, out,
					      offset, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
			/* The packet is traced: log it */
			if (unlikely(skb->nf_trace))
				trace_packet(skb, hook, in, out,
					     table->name, private, e);
#endif
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
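						/*
						 * NOTE (illustrative, not in
						 * the original source):
						 * userspace stores absolute
						 * verdicts as -verdict - 1,
						 * so -j DROP (NF_DROP == 0)
						 * arrives as v == -1 and
						 * -j ACCEPT (NF_ACCEPT == 1)
						 * as v == -2; the line above
						 * inverts that mapping, e.g.
						 * (unsigned)(-(-2)) - 1 == 1
						 * == NF_ACCEPT.
						 */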
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(skb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = ip_hdr(skb);
				datalen = skb->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
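/*
 * NOTE (illustrative summary, not in the original source): the walk
 * below avoids recursion by abusing two scratch fields.
 * e->counters.pcnt temporarily holds the "back pointer" (offset of the
 * rule to resume at after a RETURN) and is reset to 0 on the way out;
 * e->comefrom collects one bit per hook that can reach the rule, plus
 * bit NF_INET_NUMHOOKS as an "on the current path" marker used to
 * detect loops.
 */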
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e
			= (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry)
			     && (strcmp(t->target.u.user.name,
					IPT_STANDARD_TARGET) == 0)
			     && t->verdict < 0
			     && unconditional(&e->ip)) || visited) {
				unsigned int oldpos, size;

				if (t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jumps. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->u.kernel.match, m->data);
	module_put(m->u.kernel.match->me);
	return 0;
}
static inline int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
		return -EINVAL;

	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static inline int check_match(struct ipt_entry_match *m, const char *name,
			      const struct ipt_ip *ip, unsigned int hookmask,
			      unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = m->u.kernel.match;
	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (!ret && m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
	}
	if (!ret)
		(*i)++;
	return ret;
}
static inline int
find_check_match(struct ipt_entry_match *m,
		 const char *name,
		 const struct ipt_ip *ip,
		 unsigned int hookmask,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, name, ip, hookmask, i);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static inline int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;

	t = ipt_get_target(e);
	target = t->u.kernel.target;
	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (!ret && t->u.kernel.target->checkentry
	    && !t->u.kernel.target->checkentry(name, e, target,
					       t->data, e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
	}
	return ret;
}
static inline int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
				e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data);
	module_put(t->u.kernel.target->me);
	return 0;
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 * We dont care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
	}
}
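/*
 * NOTE (illustrative, not in the original source): the first pass SETs
 * the snapshot from the current CPU's copy and every later pass ADDs the
 * other CPUs' copies, so the caller gets the sum across all per-CPU
 * rulesets without having to memset() the output array first.
 */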
static inline struct xt_counters *alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_delta {
	struct compat_delta *next;
	unsigned int offset;
	short delta;
};

static struct compat_delta *compat_offsets = NULL;

static int compat_add_offset(unsigned int offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->offset = offset;
	tmp->delta = delta;
	if (compat_offsets) {
		tmp->next = compat_offsets->next;
		compat_offsets->next = tmp;
	} else {
		compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}

static void compat_flush_offsets(void)
{
	struct compat_delta *tmp, *next;

	if (compat_offsets) {
		for (tmp = compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		compat_offsets = NULL;
	}
}
static short compat_calc_jump(unsigned int offset)
{
	struct compat_delta *tmp;
	short delta;

	for (tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}

static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += compat_calc_jump(v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= compat_calc_jump(cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
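/*
 * NOTE (worked example, not in the original source): positive standard
 * verdicts are byte offsets into the table, and the compat layout is
 * smaller than the native one.  If two earlier entries each shrink by 4
 * bytes, compat_calc_jump() returns 8 for any offset past them, so a
 * native jump verdict of 200 is written out as 192 for a 32-bit user
 * and converted back to 200 on the way in.
 */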
static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}

static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = 0;
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] && (e < (struct ipt_entry *)
				(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] && (e < (struct ipt_entry *)
				(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;
	int i;

	if (!newinfo || !info)
		return -EINVAL;

	memset(newinfo, 0, sizeof(struct xt_table_info));
	newinfo->size = info->size;
	newinfo->number = info->number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry,
				 newinfo);
}
#endif
static int get_info(void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %u\n", *len,
			 (unsigned int)sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			compat_flush_offsets();
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %d\n", *len,
			 (unsigned int)sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EINVAL;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			  NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
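/*
 * NOTE (illustrative, not in the original source): userspace counter
 * increments land only in the current CPU's copy of the ruleset;
 * get_counters() later sums every per-CPU copy, so adding to a single
 * copy is sufficient, and wraparound in one copy still produces the
 * right modular total.
 */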
static inline int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
static int
do_add_counters(void __user *user, unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};
static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
					    void __user **dstptr,
					    compat_uint_t *size)
{
	return xt_compat_match_to_user(m, dstptr, size);
}

static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  compat_uint_t *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
 out:
	return ret;
}
static inline int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size, int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}

static inline int
compat_release_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	module_put(m->u.kernel.match->me);
	return 0;
}
static inline int
compat_release_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, compat_release_match, NULL);
	t = ipt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}
static inline int
check_compat_entry_size_and_hooks(struct ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h, j;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	ret = check_entry(e, name);
	if (ret)
		return ret;

	off = 0;
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = IPT_MATCH_ITERATE(e, compat_find_calc_match, name, &e->ip,
				e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	e->counters = ((struct ipt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;

 out:
	module_put(t->u.kernel.target->me);
 release_matches:
	IPT_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}
static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
	void **dstptr, compat_uint_t *size, const char *name,
	const struct ipt_ip *ip, unsigned int hookmask)
{
	xt_compat_match_from_user(m, dstptr, size);
	return 0;
}

static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
				       unsigned int *size, const char *name,
				       struct xt_table_info *newinfo,
				       unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
				name, &de->ip, de->comefrom);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

static inline int compat_check_entry(struct ipt_entry *e, const char *name,
				     unsigned int *i)
{
	int j, ret;

	j = 0;
	ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				check_compat_entry_size_and_hooks,
				info, &size, entry0,
				entry0 + total_size,
				hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				compat_copy_entry_from_user, &pos, &size,
				name, newinfo, entry1);
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name, &i);
	if (ret) {
		j -= i;
		IPT_ENTRY_ITERATE_CONTINUE(entry1, newinfo->size, i,
					   compat_release_entry, &j);
		IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

 free_newinfo:
	xt_free_table_info(newinfo);
 out:
	IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
 out_unlock:
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries
{
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};

static int compat_copy_entries_to_user(unsigned int total_size,
				       struct xt_table *table,
				       void __user *userptr)
{
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user,
				&pos, &size, counters, &i);

	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %u\n",
			 *len, (unsigned int)sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n",
			 private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EINVAL;
		}
		compat_flush_offsets();
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	static struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy on our node/cpu
	 * but dont care of preemption
	 */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	return 0;
}
void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
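/*
 * NOTE (illustrative, not in the original source): userspace encodes
 * "--icmp-type any" as test_type 0xFF with the full code range, so the
 * first clause short-circuits and every ICMP packet matches (subject to
 * the invert flag).
 */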
static bool
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const struct xt_match *match,
	   const void *matchinfo,
	   int offset,
	   unsigned int protoff,
	   bool *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return false;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
/* Called when user tries to insert an entry of this type. */
static bool
icmp_checkentry(const char *tablename,
		const void *entry,
		const struct xt_match *match,
		void *matchinfo,
		unsigned int hook_mask)
{
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target __read_mostly = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target __read_mostly = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
	.checkentry	= icmp_checkentry,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = xt_proto_init(AF_INET);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	xt_proto_fini(AF_INET);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	xt_proto_fini(AF_INET);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);