/* Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
 * 	- increase module usage count as soon as we have rules inside
 * 	  a table
 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
 * 	- Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
 */
#include <linux/config.h>
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x) \
do { \
        if (!(x)) \
                printk("IP_NF_ASSERT: %s:%s:%u\n", \
                       __FUNCTION__, __FILE__, __LINE__); \
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
/* All the better to debug you with... */

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
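
/*
 * Illustrative sketch of the pattern described above (not part of the
 * real API; the helper names are hypothetical).  The packet path only
 * ever takes the read lock from softirq context, so taking the write
 * lock with BHs disabled is enough to drain all readers:
 */
#if 0
static unsigned int sketch_packet_path(struct ipt_table *table)
{
        unsigned int verdict = NF_ACCEPT;

        read_lock_bh(&table->lock);     /* softirq: many CPUs in parallel */
        /* ... walk this CPU's private copy of the ruleset ... */
        read_unlock_bh(&table->lock);
        return verdict;
}

static void sketch_user_context(struct ipt_table *table)
{
        write_lock_bh(&table->lock);    /* stops packets coming through */
        /* ... read counters or swap in a new ruleset ... */
        write_unlock_bh(&table->lock);
}
#endif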
/* Returns whether matches rule or not. */
static inline int
ip_packet_match(const struct iphdr *ip,
                const char *indev,
                const char *outdev,
                const struct ipt_ip *ipinfo,
                int isfrag)
{
        size_t i;
        unsigned long ret;

#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
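
        /* FWINV() folds the IPT_INV_* flags into a test: the raw result
         * of "bool" is XORed with whether the rule carries the given
         * inversion flag.  E.g. for "! -s 10.0.0.0/8" the address
         * comparison mismatches, invflags has IPT_INV_SRCIP set, and
         * 1 ^ 1 == 0 means "no mismatch", so the rule still matches. */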
        if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
                  IPT_INV_SRCIP)
            || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
                     IPT_INV_DSTIP)) {
                dprintf("Source or dest mismatch.\n");

                dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
                        NIPQUAD(ip->saddr),
                        NIPQUAD(ipinfo->smsk.s_addr),
                        NIPQUAD(ipinfo->src.s_addr),
                        ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
                dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
                        NIPQUAD(ip->daddr),
                        NIPQUAD(ipinfo->dmsk.s_addr),
                        NIPQUAD(ipinfo->dst.s_addr),
                        ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
                return 0;
        }

        /* Look for ifname matches; this should unroll nicely. */
        for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
                ret |= (((const unsigned long *)indev)[i]
                        ^ ((const unsigned long *)ipinfo->iniface)[i])
                        & ((const unsigned long *)ipinfo->iniface_mask)[i];
        }
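
        /* The mask encodes wildcards: an exact "eth0" rule must match
         * every byte up to and including the trailing NUL, while "eth+"
         * leaves the mask zero after the "eth" prefix so any suffix XORs
         * away.  An all-zero mask matches any interface. */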
        if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
                dprintf("VIA in mismatch (%s vs %s).%s\n",
                        indev, ipinfo->iniface,
                        ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
                return 0;
        }

        for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
                ret |= (((const unsigned long *)outdev)[i]
                        ^ ((const unsigned long *)ipinfo->outiface)[i])
                        & ((const unsigned long *)ipinfo->outiface_mask)[i];
        }

        if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
                dprintf("VIA out mismatch (%s vs %s).%s\n",
                        outdev, ipinfo->outiface,
                        ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
                return 0;
        }

        /* Check specific protocol */
        if (ipinfo->proto
            && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
                dprintf("Packet protocol %hi does not match %hi.%s\n",
                        ip->protocol, ipinfo->proto,
                        ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
                return 0;
        }

        /* If we have a fragment rule but the packet is not a fragment
         * then we return zero */
        if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
                dprintf("Fragment rule but not fragment.%s\n",
                        ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
                return 0;
        }

        return 1;
}
#undef FWINV

static inline int
ip_checkentry(const struct ipt_ip *ip)
{
        if (ip->flags & ~IPT_F_MASK) {
                duprintf("Unknown flag bits set: %08X\n",
                         ip->flags & ~IPT_F_MASK);
                return 0;
        }
        if (ip->invflags & ~IPT_INV_MASK) {
                duprintf("Unknown invflag bits set: %08X\n",
                         ip->invflags & ~IPT_INV_MASK);
                return 0;
        }
        return 1;
}

static unsigned int
ipt_error(struct sk_buff **pskb,
          const struct net_device *in,
          const struct net_device *out,
          unsigned int hooknum,
          const struct xt_target *target,
          const void *targinfo,
          void *userinfo)
{
        if (net_ratelimit())
                printk("ip_tables: error: `%s'\n", (char *)targinfo);

        return NF_DROP;
}

static inline
int do_match(struct ipt_entry_match *m,
             const struct sk_buff *skb,
             const struct net_device *in,
             const struct net_device *out,
             int offset,
             int *hotdrop)
{
        /* Stop iteration if it doesn't match */
        if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
                                      offset, skb->nh.iph->ihl*4, hotdrop))
                return 1;
        else
                return 0;
}

static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
        return (struct ipt_entry *)(base + offset);
}
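
/*
 * Entries are variable-sized and laid out back to back in one blob:
 * each struct ipt_entry is followed by its matches and then its target,
 * with e->target_offset and e->next_offset giving the byte offsets to
 * the target and to the following entry.  get_entry() above is how all
 * rule offsets (hook entry points, jump verdicts) are dereferenced.
 */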

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
             unsigned int hook,
             const struct net_device *in,
             const struct net_device *out,
             struct ipt_table *table,
             void *userdata)
{
        static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
        u_int16_t offset;
        struct iphdr *ip;
        u_int16_t datalen;
        int hotdrop = 0;
        /* Initializing verdict to NF_DROP keeps gcc happy. */
        unsigned int verdict = NF_DROP;
        const char *indev, *outdev;
        void *table_base;
        struct ipt_entry *e, *back;
        struct xt_table_info *private = table->private;

        /* Initialization */
        ip = (*pskb)->nh.iph;
        datalen = (*pskb)->len - ip->ihl * 4;
        indev = in ? in->name : nulldevname;
        outdev = out ? out->name : nulldevname;
        /* We handle fragments by dealing with the first fragment as
         * if it was a normal packet.  All other fragments are treated
         * normally, except that they will NEVER match rules that ask
         * things we don't know, ie. tcp syn flag or ports.  If the
         * rule is also a fragment-specific rule, non-fragments won't
         * match it. */
        offset = ntohs(ip->frag_off) & IP_OFFSET;

        read_lock_bh(&table->lock);
        IP_NF_ASSERT(table->valid_hooks & (1 << hook));
        table_base = (void *)private->entries[smp_processor_id()];
        e = get_entry(table_base, private->hook_entry[hook]);

        /* For return from builtin chain */
        back = get_entry(table_base, private->underflow[hook]);

        do {
                IP_NF_ASSERT(e);
                IP_NF_ASSERT(back);
                if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
                        struct ipt_entry_target *t;

                        if (IPT_MATCH_ITERATE(e, do_match,
                                              *pskb, in, out,
                                              offset, &hotdrop) != 0)
                                goto no_match;

                        ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

                        t = ipt_get_target(e);
                        IP_NF_ASSERT(t->u.kernel.target);
                        /* Standard target? */
                        if (!t->u.kernel.target->target) {
                                int v;

                                v = ((struct ipt_standard_target *)t)->verdict;
                                if (v < 0) {
                                        /* Pop from stack? */
                                        if (v != IPT_RETURN) {
                                                verdict = (unsigned)(-v) - 1;
                                                break;
                                        }
                                        e = back;
                                        back = get_entry(table_base,
                                                         back->comefrom);
                                        continue;
                                }
                                if (table_base + v != (void *)e + e->next_offset
                                    && !(e->ip.flags & IPT_F_GOTO)) {
                                        /* Save old back ptr in next entry */
                                        struct ipt_entry *next
                                                = (void *)e + e->next_offset;
                                        next->comefrom
                                                = (void *)back - table_base;
                                        /* set back pointer to next entry */
                                        back = next;
                                }

                                e = get_entry(table_base, v);
                        } else {
                                /* Targets which reenter must return
                                   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
                                ((struct ipt_entry *)table_base)->comefrom
                                        = 0xeeeeeeec;
#endif
                                verdict = t->u.kernel.target->target(pskb,
                                                                     in, out,
                                                                     hook,
                                                                     t->u.kernel.target,
                                                                     t->data,
                                                                     userdata);
#ifdef CONFIG_NETFILTER_DEBUG
                                if (((struct ipt_entry *)table_base)->comefrom
                                    != 0xeeeeeeec
                                    && verdict == IPT_CONTINUE) {
                                        printk("Target %s reentered!\n",
                                               t->u.kernel.target->name);
                                        verdict = NF_DROP;
                                }
                                ((struct ipt_entry *)table_base)->comefrom
                                        = 0x57acc001;
#endif
                                /* Target might have changed stuff. */
                                ip = (*pskb)->nh.iph;
                                datalen = (*pskb)->len - ip->ihl * 4;

                                if (verdict == IPT_CONTINUE)
                                        e = (void *)e + e->next_offset;
                                else
                                        /* Verdict */
                                        break;
                        }
                } else {

                no_match:
                        e = (void *)e + e->next_offset;
                }
        } while (!hotdrop);

        read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
        return NF_ACCEPT;
#else
        if (hotdrop)
                return NF_DROP;
        else return verdict;
#endif
}

/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
{
        unsigned int i;

        for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
                if (((__u32 *)ip)[i])
                        return 0;

        return 1;
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
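/*
 * Sketch of the walk: from each hook entry point we follow jumps
 * depth-first, using e->counters.pcnt as a saved back pointer (the
 * counters are still unused at this stage) and bit NF_IP_NUMHOOKS of
 * e->comefrom as a "currently on the path" marker.  Revisiting an
 * entry whose marker is still set means a rule loop, e.g. chain A
 * jumping to chain B while B jumps back to A.
 */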
static int
mark_source_chains(struct xt_table_info *newinfo,
                   unsigned int valid_hooks, void *entry0)
{
        unsigned int hook;

        /* No recursion; use packet counter to save back ptrs (reset
           to 0 as we leave), and comefrom to save source hook bitmask */
        for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
                unsigned int pos = newinfo->hook_entry[hook];
                struct ipt_entry *e
                        = (struct ipt_entry *)(entry0 + pos);

                if (!(valid_hooks & (1 << hook)))
                        continue;

                /* Set initial back pointer. */
                e->counters.pcnt = pos;

                for (;;) {
                        struct ipt_standard_target *t
                                = (void *)ipt_get_target(e);

                        if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
                                printk("iptables: loop hook %u pos %u %08X.\n",
                                       hook, pos, e->comefrom);
                                return 0;
                        }
                        e->comefrom
                                |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));

                        /* Unconditional return/END. */
                        if (e->target_offset == sizeof(struct ipt_entry)
                            && (strcmp(t->target.u.user.name,
                                       IPT_STANDARD_TARGET) == 0)
                            && t->verdict < 0
                            && unconditional(&e->ip)) {
                                unsigned int oldpos, size;

                                /* Return: backtrack through the last
                                   big jumps. */
                                do {
                                        e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
                                        if (e->comefrom
                                            & (1 << NF_IP_NUMHOOKS)) {
                                                duprintf("Back unset "
                                                         "on hook %u "
                                                         "rule %u\n",
                                                         hook, pos);
                                        }
#endif
                                        oldpos = pos;
                                        pos = e->counters.pcnt;
                                        e->counters.pcnt = 0;

                                        /* We're at the start. */
                                        if (pos == oldpos)
                                                goto next;

                                        e = (struct ipt_entry *)
                                                (entry0 + pos);
                                } while (oldpos == pos + e->next_offset);

                                /* Move along one */
                                size = e->next_offset;
                                e = (struct ipt_entry *)
                                        (entry0 + pos + size);
                                e->counters.pcnt = pos;
                                pos += size;
                        } else {
                                int newpos = t->verdict;

                                if (strcmp(t->target.u.user.name,
                                           IPT_STANDARD_TARGET) == 0
                                    && newpos >= 0) {
                                        /* This is a jump; chase it. */
                                        duprintf("Jump rule %u -> %u\n",
                                                 pos, newpos);
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
                                }
                                e = (struct ipt_entry *)
                                        (entry0 + newpos);
                                e->counters.pcnt = pos;
                                pos = newpos;
                        }
                }
                next:
                duprintf("Finished chain %u\n", hook);
        }
        return 1;
}

static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
        if (i && (*i)-- == 0)
                return 1;

        if (m->u.kernel.match->destroy)
                m->u.kernel.match->destroy(m->u.kernel.match, m->data,
                                           m->u.match_size - sizeof(*m));
        module_put(m->u.kernel.match->me);
        return 0;
}

static inline int
standard_check(const struct ipt_entry_target *t,
               unsigned int max_offset)
{
        struct ipt_standard_target *targ = (void *)t;

        /* Check standard info. */
        if (targ->verdict >= 0
            && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
                duprintf("ipt_standard_check: bad verdict (%i)\n",
                         targ->verdict);
                return 0;
        }
        if (targ->verdict < -NF_MAX_VERDICT - 1) {
                duprintf("ipt_standard_check: bad negative verdict (%i)\n",
                         targ->verdict);
                return 0;
        }
        return 1;
}

static inline int
check_match(struct ipt_entry_match *m,
            const char *name,
            const struct ipt_ip *ip,
            unsigned int hookmask,
            unsigned int *i)
{
        struct ipt_match *match;
        int ret;

        match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
                                                      m->u.user.revision),
                                        "ipt_%s", m->u.user.name);
        if (IS_ERR(match) || !match) {
                duprintf("check_match: `%s' not found\n", m->u.user.name);
                return match ? PTR_ERR(match) : -ENOENT;
        }
        m->u.kernel.match = match;

        ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
                             name, hookmask, ip->proto,
                             ip->invflags & IPT_INV_PROTO);
        if (ret)
                goto err;

        if (m->u.kernel.match->checkentry
            && !m->u.kernel.match->checkentry(name, ip, match, m->data,
                                              m->u.match_size - sizeof(*m),
                                              hookmask)) {
                duprintf("ip_tables: check failed for `%s'.\n",
                         m->u.kernel.match->name);
                ret = -EINVAL;
                goto err;
        }

        (*i)++;
        return 0;
err:
        module_put(m->u.kernel.match->me);
        return ret;
}

static struct ipt_target ipt_standard_target;

static int
check_entry(struct ipt_entry *e, const char *name, unsigned int size,
            unsigned int *i)
{
        struct ipt_entry_target *t;
        struct ipt_target *target;
        int ret;
        unsigned int j;

        if (!ip_checkentry(&e->ip)) {
                duprintf("ip_tables: ip check failed %p %s.\n", e, name);
                return -EINVAL;
        }

        j = 0;
        ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
        if (ret != 0)
                goto cleanup_matches;

        t = ipt_get_target(e);
        target = try_then_request_module(xt_find_target(AF_INET,
                                                        t->u.user.name,
                                                        t->u.user.revision),
                                         "ipt_%s", t->u.user.name);
        if (IS_ERR(target) || !target) {
                duprintf("check_entry: `%s' not found\n", t->u.user.name);
                ret = target ? PTR_ERR(target) : -ENOENT;
                goto cleanup_matches;
        }
        t->u.kernel.target = target;

        ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
                              name, e->comefrom, e->ip.proto,
                              e->ip.invflags & IPT_INV_PROTO);
        if (ret)
                goto err;

        if (t->u.kernel.target == &ipt_standard_target) {
                if (!standard_check(t, size)) {
                        ret = -EINVAL;
                        goto cleanup_matches;
                }
        } else if (t->u.kernel.target->checkentry
                   && !t->u.kernel.target->checkentry(name, e, target, t->data,
                                                      t->u.target_size
                                                      - sizeof(*t),
                                                      e->comefrom)) {
                duprintf("ip_tables: check failed for `%s'.\n",
                         t->u.kernel.target->name);
                ret = -EINVAL;
                goto err;
        }

        (*i)++;
        return 0;
err:
        module_put(t->u.kernel.target->me);
cleanup_matches:
        IPT_MATCH_ITERATE(e, cleanup_match, &j);
        return ret;
}

static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
                           struct xt_table_info *newinfo,
                           unsigned char *base,
                           unsigned char *limit,
                           const unsigned int *hook_entries,
                           const unsigned int *underflows,
                           unsigned int *i)
{
        unsigned int h;

        if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
            || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
                duprintf("Bad offset %p\n", e);
                return -EINVAL;
        }

        if (e->next_offset
            < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
                duprintf("checking: element %p size %u\n",
                         e, e->next_offset);
                return -EINVAL;
        }

        /* Check hooks & underflows */
        for (h = 0; h < NF_IP_NUMHOOKS; h++) {
                if ((unsigned char *)e - base == hook_entries[h])
                        newinfo->hook_entry[h] = hook_entries[h];
                if ((unsigned char *)e - base == underflows[h])
                        newinfo->underflow[h] = underflows[h];
        }

        /* FIXME: underflows must be unconditional, standard verdicts
           < 0 (not IPT_RETURN). --RR */

        /* Clear counters and comefrom */
        e->counters = ((struct xt_counters) { 0, 0 });
        e->comefrom = 0;

        (*i)++;
        return 0;
}

static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
        struct ipt_entry_target *t;

        if (i && (*i)-- == 0)
                return 1;

        /* Cleanup all matches */
        IPT_MATCH_ITERATE(e, cleanup_match, NULL);
        t = ipt_get_target(e);
        if (t->u.kernel.target->destroy)
                t->u.kernel.target->destroy(t->u.kernel.target, t->data,
                                            t->u.target_size - sizeof(*t));
        module_put(t->u.kernel.target->me);
        return 0;
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
                unsigned int valid_hooks,
                struct xt_table_info *newinfo,
                void *entry0,
                unsigned int size,
                unsigned int number,
                const unsigned int *hook_entries,
                const unsigned int *underflows)
{
        unsigned int i;
        int ret;

        newinfo->size = size;
        newinfo->number = number;

        /* Init all hooks to impossible value. */
        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = 0xFFFFFFFF;
                newinfo->underflow[i] = 0xFFFFFFFF;
        }

        duprintf("translate_table: size %u\n", newinfo->size);
        i = 0;
        /* Walk through entries, checking offsets. */
        ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                check_entry_size_and_hooks,
                                newinfo,
                                entry0,
                                entry0 + size,
                                hook_entries, underflows, &i);
        if (ret != 0)
                return ret;

        if (i != number) {
                duprintf("translate_table: %u not %u entries\n",
                         i, number);
                return -EINVAL;
        }

        /* Check hooks all assigned */
        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                /* Only hooks which are valid */
                if (!(valid_hooks & (1 << i)))
                        continue;
                if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
                        duprintf("Invalid hook entry %u %u\n",
                                 i, hook_entries[i]);
                        return -EINVAL;
                }
                if (newinfo->underflow[i] == 0xFFFFFFFF) {
                        duprintf("Invalid underflow %u %u\n",
                                 i, underflows[i]);
                        return -EINVAL;
                }
        }

        if (!mark_source_chains(newinfo, valid_hooks, entry0))
                return -ELOOP;

        /* Finally, each sanity check must pass */
        i = 0;
        ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                check_entry, name, size, &i);

        if (ret != 0) {
                IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                  cleanup_entry, &i);
                return ret;
        }

        /* And one copy for every other CPU */
        for_each_possible_cpu(i) {
                if (newinfo->entries[i] && newinfo->entries[i] != entry0)
                        memcpy(newinfo->entries[i], entry0, newinfo->size);
        }

        return ret;
}

/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
                     struct xt_counters total[],
                     unsigned int *i)
{
        ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

        (*i)++;
        return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
                     struct ipt_counters total[],
                     unsigned int *i)
{
        SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

        (*i)++;
        return 0;
}

static void
get_counters(const struct xt_table_info *t,
             struct xt_counters counters[])
{
        unsigned int cpu;
        unsigned int i;
        unsigned int curcpu;

        /* Instead of clearing (by a previous call to memset())
         * the counters and using adds, we set the counters
         * with data used by 'current' CPU.
         * We don't care about preemption here.
         */
        curcpu = raw_smp_processor_id();

        i = 0;
        IPT_ENTRY_ITERATE(t->entries[curcpu],
                          t->size,
                          set_entry_to_counter,
                          counters,
                          &i);

        for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
                        continue;
                i = 0;
                IPT_ENTRY_ITERATE(t->entries[cpu],
                                  t->size,
                                  add_entry_to_counter,
                                  counters,
                                  &i);
        }
}

static inline struct xt_counters *alloc_counters(struct ipt_table *table)
{
        unsigned int countersize;
        struct xt_counters *counters;
        struct xt_table_info *private = table->private;

        /* We need atomic snapshot of counters: rest doesn't change
           (other than comefrom, which userspace doesn't care
           about). */
        countersize = sizeof(struct xt_counters) * private->number;
        counters = vmalloc_node(countersize, numa_node_id());

        if (counters == NULL)
                return ERR_PTR(-ENOMEM);

        /* First, sum counters... */
        write_lock_bh(&table->lock);
        get_counters(private, counters);
        write_unlock_bh(&table->lock);

        return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
                     struct ipt_table *table,
                     void __user *userptr)
{
        unsigned int off, num;
        struct ipt_entry *e;
        struct xt_counters *counters;
        struct xt_table_info *private = table->private;
        int ret = 0;
        void *loc_cpu_entry;

        counters = alloc_counters(table);
        if (IS_ERR(counters))
                return PTR_ERR(counters);

        /* choose the copy that is on our node/cpu, ...
         * This choice is lazy (because current thread is
         * allowed to migrate to another cpu)
         */
        loc_cpu_entry = private->entries[raw_smp_processor_id()];
        /* ... then copy entire thing ... */
        if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
                ret = -EFAULT;
                goto free_counters;
        }

        /* FIXME: use iterator macros --RR */
        /* ... then go back and fix counters and names */
        for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
                unsigned int i;
                struct ipt_entry_match *m;
                struct ipt_entry_target *t;

                e = (struct ipt_entry *)(loc_cpu_entry + off);
                if (copy_to_user(userptr + off
                                 + offsetof(struct ipt_entry, counters),
                                 &counters[num],
                                 sizeof(counters[num])) != 0) {
                        ret = -EFAULT;
                        goto free_counters;
                }

                for (i = sizeof(struct ipt_entry);
                     i < e->target_offset;
                     i += m->u.match_size) {
                        m = (void *)e + i;

                        if (copy_to_user(userptr + off + i
                                         + offsetof(struct ipt_entry_match,
                                                    u.user.name),
                                         m->u.kernel.match->name,
                                         strlen(m->u.kernel.match->name)+1)
                            != 0) {
                                ret = -EFAULT;
                                goto free_counters;
                        }
                }

                t = ipt_get_target(e);
                if (copy_to_user(userptr + off + e->target_offset
                                 + offsetof(struct ipt_entry_target,
                                            u.user.name),
                                 t->u.kernel.target->name,
                                 strlen(t->u.kernel.target->name)+1) != 0) {
                        ret = -EFAULT;
                        goto free_counters;
                }
        }

free_counters:
        vfree(counters);
        return ret;
}

#ifdef CONFIG_COMPAT
struct compat_delta {
        struct compat_delta *next;
        u_int16_t offset;
        short delta;
};

static struct compat_delta *compat_offsets = NULL;

static int compat_add_offset(u_int16_t offset, short delta)
{
        struct compat_delta *tmp;

        tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;
        tmp->offset = offset;
        tmp->delta = delta;
        if (compat_offsets) {
                tmp->next = compat_offsets->next;
                compat_offsets->next = tmp;
        } else {
                compat_offsets = tmp;
                tmp->next = NULL;
        }
        return 0;
}

static void compat_flush_offsets(void)
{
        struct compat_delta *tmp, *next;

        if (compat_offsets) {
                for(tmp = compat_offsets; tmp; tmp = next) {
                        next = tmp->next;
                        kfree(tmp);
                }
                compat_offsets = NULL;
        }
}

static short compat_calc_jump(u_int16_t offset)
{
        struct compat_delta *tmp;
        short delta;

        for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
                if (tmp->offset < offset)
                        delta += tmp->delta;
        return delta;
}
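
/*
 * Worked example (hypothetical numbers): if the entries at native
 * offsets 100 and 200 each shrink by some delta in the compat layout,
 * compat_calc_jump(250) returns the sum of both deltas, so a verdict
 * jumping to native offset 250 is shifted back by exactly that much.
 */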

struct compat_ipt_standard_target
{
        struct compat_xt_entry_target target;
        compat_int_t verdict;
};

#define IPT_ST_OFFSET	(sizeof(struct ipt_standard_target) - \
				sizeof(struct compat_ipt_standard_target))
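
/* IPT_ST_OFFSET is the per-entry size difference between the native
 * and compat layouts of a standard target.  The macro itself is exact;
 * as a sketch of why it is nonzero: on a 64-bit kernel the native
 * struct is sized and padded around an 8-byte kernel pointer inside
 * the target union, while the compat struct keeps 32-bit sizing. */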

struct compat_ipt_standard
{
        struct compat_ipt_entry entry;
        struct compat_ipt_standard_target target;
};

static int compat_ipt_standard_fn(void *target,
                void **dstptr, int *size, int convert)
{
        struct compat_ipt_standard_target compat_st, *pcompat_st;
        struct ipt_standard_target st, *pst;
        int ret;

        ret = 0;
        switch (convert) {
        case COMPAT_TO_USER:
                pst = (struct ipt_standard_target *)target;
                memcpy(&compat_st.target, &pst->target,
                       sizeof(struct ipt_entry_target));
                compat_st.verdict = pst->verdict;
                if (compat_st.verdict > 0)
                        compat_st.verdict -=
                                compat_calc_jump(compat_st.verdict);
                compat_st.target.u.user.target_size =
                        sizeof(struct compat_ipt_standard_target);
                if (__copy_to_user(*dstptr, &compat_st,
                                   sizeof(struct compat_ipt_standard_target)))
                        ret = -EFAULT;
                *size -= IPT_ST_OFFSET;
                *dstptr += sizeof(struct compat_ipt_standard_target);
                break;
        case COMPAT_FROM_USER:
                pcompat_st =
                        (struct compat_ipt_standard_target *)target;
                memcpy(&st.target, &pcompat_st->target,
                       sizeof(struct ipt_entry_target));
                st.verdict = pcompat_st->verdict;
                if (st.verdict > 0)
                        st.verdict += compat_calc_jump(st.verdict);
                st.target.u.user.target_size =
                        sizeof(struct ipt_standard_target);
                memcpy(*dstptr, &st,
                       sizeof(struct ipt_standard_target));
                *size += IPT_ST_OFFSET;
                *dstptr += sizeof(struct ipt_standard_target);
                break;
        case COMPAT_CALC_SIZE:
                *size += IPT_ST_OFFSET;
                break;
        default:
                ret = -ENOPROTOOPT;
                break;
        }
        return ret;
}

static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
        if (m->u.kernel.match->compat)
                m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
        else
                xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
        return 0;
}

static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
                void *base, struct xt_table_info *newinfo)
{
        struct ipt_entry_target *t;
        u_int16_t entry_offset;
        int off, i, ret;

        off = 0;
        entry_offset = (void *)e - base;
        IPT_MATCH_ITERATE(e, compat_calc_match, &off);
        t = ipt_get_target(e);
        if (t->u.kernel.target->compat)
                t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
        else
                xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
        newinfo->size -= off;
        ret = compat_add_offset(entry_offset, off);
        if (ret)
                return ret;

        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                if (info->hook_entry[i] && (e < (struct ipt_entry *)
                                (base + info->hook_entry[i])))
                        newinfo->hook_entry[i] -= off;
                if (info->underflow[i] && (e < (struct ipt_entry *)
                                (base + info->underflow[i])))
                        newinfo->underflow[i] -= off;
        }
        return 0;
}

static int compat_table_info(struct xt_table_info *info,
                struct xt_table_info *newinfo)
{
        void *loc_cpu_entry;
        int i;

        if (!newinfo || !info)
                return -EINVAL;

        memset(newinfo, 0, sizeof(struct xt_table_info));
        newinfo->size = info->size;
        newinfo->number = info->number;
        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = info->hook_entry[i];
                newinfo->underflow[i] = info->underflow[i];
        }
        loc_cpu_entry = info->entries[raw_smp_processor_id()];
        return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
                        compat_calc_entry, info, loc_cpu_entry, newinfo);
}
#endif

static int get_info(void __user *user, int *len, int compat)
{
        char name[IPT_TABLE_MAXNAMELEN];
        struct ipt_table *t;
        int ret;

        if (*len != sizeof(struct ipt_getinfo)) {
                duprintf("length %u != %u\n", *len,
                         (unsigned int)sizeof(struct ipt_getinfo));
                return -EINVAL;
        }

        if (copy_from_user(name, user, sizeof(name)) != 0)
                return -EFAULT;

        name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
        if (compat)
                xt_compat_lock(AF_INET);
#endif
        t = try_then_request_module(xt_find_table_lock(AF_INET, name),
                                    "iptable_%s", name);
        if (t && !IS_ERR(t)) {
                struct ipt_getinfo info;
                struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
                if (compat) {
                        struct xt_table_info tmp;
                        ret = compat_table_info(private, &tmp);
                        compat_flush_offsets();
                        private = &tmp;
                }
#endif
                info.valid_hooks = t->valid_hooks;
                memcpy(info.hook_entry, private->hook_entry,
                       sizeof(info.hook_entry));
                memcpy(info.underflow, private->underflow,
                       sizeof(info.underflow));
                info.num_entries = private->number;
                info.size = private->size;
                strcpy(info.name, name);

                if (copy_to_user(user, &info, *len) != 0)
                        ret = -EFAULT;
                else
                        ret = 0;

                xt_table_unlock(t);
                module_put(t->me);
        } else
                ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
        if (compat)
                xt_compat_unlock(AF_INET);
#endif
        return ret;
}

static int
get_entries(struct ipt_get_entries __user *uptr, int *len)
{
        int ret;
        struct ipt_get_entries get;
        struct ipt_table *t;

        if (*len < sizeof(get)) {
                duprintf("get_entries: %u < %d\n", *len,
                         (unsigned int)sizeof(get));
                return -EINVAL;
        }
        if (copy_from_user(&get, uptr, sizeof(get)) != 0)
                return -EFAULT;
        if (*len != sizeof(struct ipt_get_entries) + get.size) {
                duprintf("get_entries: %u != %u\n", *len,
                         (unsigned int)(sizeof(struct ipt_get_entries) +
                                        get.size));
                return -EINVAL;
        }

        t = xt_find_table_lock(AF_INET, get.name);
        if (t && !IS_ERR(t)) {
                struct xt_table_info *private = t->private;
                duprintf("t->private->number = %u\n",
                         private->number);
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
                else {
                        duprintf("get_entries: I've got %u not %u!\n",
                                 private->size, get.size);
                        ret = -EINVAL;
                }
                module_put(t->me);
                xt_table_unlock(t);
        } else
                ret = t ? PTR_ERR(t) : -ENOENT;

        return ret;
}

static int
__do_replace(const char *name, unsigned int valid_hooks,
             struct xt_table_info *newinfo, unsigned int num_counters,
             void __user *counters_ptr)
{
        int ret;
        struct ipt_table *t;
        struct xt_table_info *oldinfo;
        struct xt_counters *counters;
        void *loc_cpu_old_entry;

        ret = 0;
        counters = vmalloc(num_counters * sizeof(struct xt_counters));
        if (!counters) {
                ret = -ENOMEM;
                goto out;
        }

        t = try_then_request_module(xt_find_table_lock(AF_INET, name),
                                    "iptable_%s", name);
        if (!t || IS_ERR(t)) {
                ret = t ? PTR_ERR(t) : -ENOENT;
                goto free_newinfo_counters_untrans;
        }

        /* You lied! */
        if (valid_hooks != t->valid_hooks) {
                duprintf("Valid hook crap: %08X vs %08X\n",
                         valid_hooks, t->valid_hooks);
                ret = -EINVAL;
                goto put_module;
        }

        oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
        if (!oldinfo)
                goto put_module;

        /* Update module usage count based on number of rules */
        duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
                 oldinfo->number, oldinfo->initial_entries, newinfo->number);
        if ((oldinfo->number > oldinfo->initial_entries) ||
            (newinfo->number <= oldinfo->initial_entries))
                module_put(t->me);
        if ((oldinfo->number > oldinfo->initial_entries) &&
            (newinfo->number <= oldinfo->initial_entries))
                module_put(t->me);

        /* Get the old counters. */
        get_counters(oldinfo, counters);
        /* Decrease module usage counts and free resource */
        loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
        IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, NULL);
        xt_free_table_info(oldinfo);
        if (copy_to_user(counters_ptr, counters,
                         sizeof(struct xt_counters) * num_counters) != 0)
                ret = -EFAULT;
        vfree(counters);
        xt_table_unlock(t);
        return ret;

 put_module:
        module_put(t->me);
        xt_table_unlock(t);
 free_newinfo_counters_untrans:
        vfree(counters);
 out:
        return ret;
}

static int
do_replace(void __user *user, unsigned int len)
{
        int ret;
        struct ipt_replace tmp;
        struct xt_table_info *newinfo;
        void *loc_cpu_entry;

        if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
                return -EFAULT;

        /* Hack: Causes ipchains to give correct error msg --RR */
        if (len != sizeof(tmp) + tmp.size)
                return -ENOPROTOOPT;

        /* overflow check */
        if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
                        SMP_CACHE_BYTES)
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;

        newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
                return -ENOMEM;

        /* choose the copy that is our node/cpu */
        loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
        if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
                           tmp.size) != 0) {
                ret = -EFAULT;
                goto free_newinfo;
        }

        ret = translate_table(tmp.name, tmp.valid_hooks,
                              newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
                              tmp.hook_entry, tmp.underflow);
        if (ret != 0)
                goto free_newinfo;

        duprintf("ip_tables: Translated table\n");

        ret = __do_replace(tmp.name, tmp.valid_hooks,
                           newinfo, tmp.num_counters,
                           tmp.counters);
        if (ret)
                goto free_newinfo_untrans;
        return 0;

 free_newinfo_untrans:
        IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
        xt_free_table_info(newinfo);
        return ret;
}

/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
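/* "Fey magic": counters are unsigned 64-bit and are summed across all
 * per-CPU copies at read time (see get_counters() above), so crediting
 * the whole user-supplied delta to a single CPU's copy yields the same
 * totals, and any wraparound behaves like ordinary modular addition. */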
static inline int
add_counter_to_entry(struct ipt_entry *e,
                     const struct xt_counters addme[],
                     unsigned int *i)
{
#if 0
        duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
                 *i,
                 (long unsigned int)e->counters.pcnt,
                 (long unsigned int)e->counters.bcnt,
                 (long unsigned int)addme[*i].pcnt,
                 (long unsigned int)addme[*i].bcnt);
#endif

        ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

        (*i)++;
        return 0;
}

static int
do_add_counters(void __user *user, unsigned int len, int compat)
{
        unsigned int i;
        struct xt_counters_info tmp;
        struct xt_counters *paddc;
        unsigned int num_counters;
        char *name;
        int size;
        void *ptmp;
        struct ipt_table *t;
        struct xt_table_info *private;
        int ret = 0;
        void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
        struct compat_xt_counters_info compat_tmp;

        if (compat) {
                ptmp = &compat_tmp;
                size = sizeof(struct compat_xt_counters_info);
        } else
#endif
        {
                ptmp = &tmp;
                size = sizeof(struct xt_counters_info);
        }

        if (copy_from_user(ptmp, user, size) != 0)
                return -EFAULT;

#ifdef CONFIG_COMPAT
        if (compat) {
                num_counters = compat_tmp.num_counters;
                name = compat_tmp.name;
        } else
#endif
        {
                num_counters = tmp.num_counters;
                name = tmp.name;
        }

        if (len != size + num_counters * sizeof(struct xt_counters))
                return -EINVAL;

        paddc = vmalloc_node(len - size, numa_node_id());
        if (!paddc)
                return -ENOMEM;

        if (copy_from_user(paddc, user + size, len - size) != 0) {
                ret = -EFAULT;
                goto free;
        }

        t = xt_find_table_lock(AF_INET, name);
        if (!t || IS_ERR(t)) {
                ret = t ? PTR_ERR(t) : -ENOENT;
                goto free;
        }

        write_lock_bh(&t->lock);
        private = t->private;
        if (private->number != num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
        }

        i = 0;
        /* Choose the copy that is on our node */
        loc_cpu_entry = private->entries[raw_smp_processor_id()];
        IPT_ENTRY_ITERATE(loc_cpu_entry,
                          private->size,
                          add_counter_to_entry,
                          paddc,
                          &i);
 unlock_up_free:
        write_unlock_bh(&t->lock);
        xt_table_unlock(t);
        module_put(t->me);
 free:
        vfree(paddc);

        return ret;
}

#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
        char			name[IPT_TABLE_MAXNAMELEN];
        u32			valid_hooks;
        u32			num_entries;
        u32			size;
        u32			hook_entry[NF_IP_NUMHOOKS];
        u32			underflow[NF_IP_NUMHOOKS];
        u32			num_counters;
        compat_uptr_t		counters;	/* struct ipt_counters * */
        struct compat_ipt_entry	entries[0];
};

static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
                void __user **dstptr, compat_uint_t *size)
{
        if (m->u.kernel.match->compat)
                return m->u.kernel.match->compat(m, dstptr, size,
                                COMPAT_TO_USER);
        else
                return xt_compat_match(m, dstptr, size, COMPAT_TO_USER);
}

static int compat_copy_entry_to_user(struct ipt_entry *e,
                void __user **dstptr, compat_uint_t *size)
{
        struct ipt_entry_target __user *t;
        struct compat_ipt_entry __user *ce;
        u_int16_t target_offset, next_offset;
        compat_uint_t origsize;
        int ret;

        ret = -EFAULT;
        origsize = *size;
        ce = (struct compat_ipt_entry __user *)*dstptr;
        if (__copy_to_user(ce, e, sizeof(struct ipt_entry)))
                goto out;

        *dstptr += sizeof(struct compat_ipt_entry);
        ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
        target_offset = e->target_offset - (origsize - *size);
        if (ret)
                goto out;
        t = ipt_get_target(e);
        if (t->u.kernel.target->compat)
                ret = t->u.kernel.target->compat(t, dstptr, size,
                                COMPAT_TO_USER);
        else
                ret = xt_compat_target(t, dstptr, size, COMPAT_TO_USER);
        if (ret)
                goto out;
        ret = -EFAULT;
        next_offset = e->next_offset - (origsize - *size);
        if (__put_user(target_offset, &ce->target_offset))
                goto out;
        if (__put_user(next_offset, &ce->next_offset))
                goto out;
        return 0;
out:
        return ret;
}

static inline int
compat_check_calc_match(struct ipt_entry_match *m,
            const char *name,
            const struct ipt_ip *ip,
            unsigned int hookmask,
            int *size, int *i)
{
        struct ipt_match *match;

        match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
                                                      m->u.user.revision),
                                        "ipt_%s", m->u.user.name);
        if (IS_ERR(match) || !match) {
                duprintf("compat_check_calc_match: `%s' not found\n",
                         m->u.user.name);
                return match ? PTR_ERR(match) : -ENOENT;
        }
        m->u.kernel.match = match;

        if (m->u.kernel.match->compat)
                m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
        else
                xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);

        (*i)++;
        return 0;
}

static inline int
check_compat_entry_size_and_hooks(struct ipt_entry *e,
                           struct xt_table_info *newinfo,
                           unsigned int *size,
                           unsigned char *base,
                           unsigned char *limit,
                           unsigned int *hook_entries,
                           unsigned int *underflows,
                           unsigned int *i,
                           const char *name)
{
        struct ipt_entry_target *t;
        struct ipt_target *target;
        u_int16_t entry_offset;
        int ret, off, h, j;

        duprintf("check_compat_entry_size_and_hooks %p\n", e);
        if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
            || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
                duprintf("Bad offset %p, limit = %p\n", e, limit);
                return -EINVAL;
        }

        if (e->next_offset < sizeof(struct compat_ipt_entry) +
                        sizeof(struct compat_xt_entry_target)) {
                duprintf("checking: element %p size %u\n",
                         e, e->next_offset);
                return -EINVAL;
        }

        if (!ip_checkentry(&e->ip)) {
                duprintf("ip_tables: ip check failed %p %s.\n", e, name);
                return -EINVAL;
        }

        off = 0;
        entry_offset = (void *)e - (void *)base;
        j = 0;
        ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
                                e->comefrom, &off, &j);
        if (ret != 0)
                goto cleanup_matches;

        t = ipt_get_target(e);
        target = try_then_request_module(xt_find_target(AF_INET,
                                                        t->u.user.name,
                                                        t->u.user.revision),
                                         "ipt_%s", t->u.user.name);
        if (IS_ERR(target) || !target) {
                duprintf("check_entry: `%s' not found\n", t->u.user.name);
                ret = target ? PTR_ERR(target) : -ENOENT;
                goto cleanup_matches;
        }
        t->u.kernel.target = target;

        if (t->u.kernel.target->compat)
                t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
        else
                xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
        *size += off;
        ret = compat_add_offset(entry_offset, off);
        if (ret)
                goto out;

        /* Check hooks & underflows */
        for (h = 0; h < NF_IP_NUMHOOKS; h++) {
                if ((unsigned char *)e - base == hook_entries[h])
                        newinfo->hook_entry[h] = hook_entries[h];
                if ((unsigned char *)e - base == underflows[h])
                        newinfo->underflow[h] = underflows[h];
        }

        /* Clear counters and comefrom */
        e->counters = ((struct ipt_counters) { 0, 0 });
        e->comefrom = 0;

        (*i)++;
        return 0;
out:
        module_put(t->u.kernel.target->me);
cleanup_matches:
        IPT_MATCH_ITERATE(e, cleanup_match, &j);
        return ret;
}

static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
        void **dstptr, compat_uint_t *size, const char *name,
        const struct ipt_ip *ip, unsigned int hookmask)
{
        struct ipt_entry_match *dm;
        struct ipt_match *match;
        int ret;

        dm = (struct ipt_entry_match *)*dstptr;
        match = m->u.kernel.match;
        if (match->compat)
                match->compat(m, dstptr, size, COMPAT_FROM_USER);
        else
                xt_compat_match(m, dstptr, size, COMPAT_FROM_USER);

        ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
                             name, hookmask, ip->proto,
                             ip->invflags & IPT_INV_PROTO);
        if (ret)
                return ret;

        if (m->u.kernel.match->checkentry
            && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
                                              dm->u.match_size - sizeof(*dm),
                                              hookmask)) {
                duprintf("ip_tables: check failed for `%s'.\n",
                         m->u.kernel.match->name);
                return -EINVAL;
        }
        return 0;
}

static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
        unsigned int *size, const char *name,
        struct xt_table_info *newinfo, unsigned char *base)
{
        struct ipt_entry_target *t;
        struct ipt_target *target;
        struct ipt_entry *de;
        unsigned int origsize;
        int ret, h;

        ret = 0;
        origsize = *size;
        de = (struct ipt_entry *)*dstptr;
        memcpy(de, e, sizeof(struct ipt_entry));

        *dstptr += sizeof(struct compat_ipt_entry);
        ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
                                name, &de->ip, de->comefrom);
        if (ret)
                goto err;
        de->target_offset = e->target_offset - (origsize - *size);
        t = ipt_get_target(e);
        target = t->u.kernel.target;
        if (target->compat)
                target->compat(t, dstptr, size, COMPAT_FROM_USER);
        else
                xt_compat_target(t, dstptr, size, COMPAT_FROM_USER);

        de->next_offset = e->next_offset - (origsize - *size);
        for (h = 0; h < NF_IP_NUMHOOKS; h++) {
                if ((unsigned char *)de - base < newinfo->hook_entry[h])
                        newinfo->hook_entry[h] -= origsize - *size;
                if ((unsigned char *)de - base < newinfo->underflow[h])
                        newinfo->underflow[h] -= origsize - *size;
        }

        t = ipt_get_target(de);
        target = t->u.kernel.target;
        ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
                              name, e->comefrom, e->ip.proto,
                              e->ip.invflags & IPT_INV_PROTO);
        if (ret)
                goto err;

        ret = -EINVAL;
        if (t->u.kernel.target == &ipt_standard_target) {
                if (!standard_check(t, *size))
                        goto err;
        } else if (t->u.kernel.target->checkentry
                   && !t->u.kernel.target->checkentry(name, de, target,
                                t->data, t->u.target_size - sizeof(*t),
                                de->comefrom)) {
                duprintf("ip_tables: compat: check failed for `%s'.\n",
                         t->u.kernel.target->name);
                goto err;
        }
        ret = 0;
err:
        return ret;
}

static int
translate_compat_table(const char *name,
                unsigned int valid_hooks,
                struct xt_table_info **pinfo,
                void **pentry0,
                unsigned int total_size,
                unsigned int number,
                unsigned int *hook_entries,
                unsigned int *underflows)
{
        unsigned int i;
        struct xt_table_info *newinfo, *info;
        void *pos, *entry0, *entry1;
        unsigned int size;
        int ret;

        info = *pinfo;
        entry0 = *pentry0;
        size = total_size;
        info->number = number;

        /* Init all hooks to impossible value. */
        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                info->hook_entry[i] = 0xFFFFFFFF;
                info->underflow[i] = 0xFFFFFFFF;
        }

        duprintf("translate_compat_table: size %u\n", info->size);
        i = 0;
        xt_compat_lock(AF_INET);
        /* Walk through entries, checking offsets. */
        ret = IPT_ENTRY_ITERATE(entry0, total_size,
                                check_compat_entry_size_and_hooks,
                                info, &size, entry0,
                                entry0 + total_size,
                                hook_entries, underflows, &i, name);
        if (ret != 0)
                goto out_unlock;

        ret = -EINVAL;
        if (i != number) {
                duprintf("translate_compat_table: %u not %u entries\n",
                         i, number);
                goto out_unlock;
        }

        /* Check hooks all assigned */
        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                /* Only hooks which are valid */
                if (!(valid_hooks & (1 << i)))
                        continue;
                if (info->hook_entry[i] == 0xFFFFFFFF) {
                        duprintf("Invalid hook entry %u %u\n",
                                 i, hook_entries[i]);
                        goto out_unlock;
                }
                if (info->underflow[i] == 0xFFFFFFFF) {
                        duprintf("Invalid underflow %u %u\n",
                                 i, underflows[i]);
                        goto out_unlock;
                }
        }

        ret = -ENOMEM;
        newinfo = xt_alloc_table_info(size);
        if (!newinfo)
                goto out_unlock;

        newinfo->number = number;
        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = info->hook_entry[i];
                newinfo->underflow[i] = info->underflow[i];
        }
        entry1 = newinfo->entries[raw_smp_processor_id()];
        pos = entry1;
        size = total_size;
        ret = IPT_ENTRY_ITERATE(entry0, total_size,
                                compat_copy_entry_from_user, &pos, &size,
                                name, newinfo, entry1);
        compat_flush_offsets();
        xt_compat_unlock(AF_INET);
        if (ret)
                goto free_newinfo;

        ret = -ELOOP;
        if (!mark_source_chains(newinfo, valid_hooks, entry1))
                goto free_newinfo;

        /* And one copy for every other CPU */
        for_each_possible_cpu(i)
                if (newinfo->entries[i] && newinfo->entries[i] != entry1)
                        memcpy(newinfo->entries[i], entry1, newinfo->size);

        *pinfo = newinfo;
        *pentry0 = entry1;
        xt_free_table_info(info);
        return 0;

free_newinfo:
        xt_free_table_info(newinfo);
out:
        return ret;
out_unlock:
        compat_flush_offsets();
        xt_compat_unlock(AF_INET);
        goto out;
}

static int
compat_do_replace(void __user *user, unsigned int len)
{
        int ret;
        struct compat_ipt_replace tmp;
        struct xt_table_info *newinfo;
        void *loc_cpu_entry;

        if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
                return -EFAULT;

        /* Hack: Causes ipchains to give correct error msg --RR */
        if (len != sizeof(tmp) + tmp.size)
                return -ENOPROTOOPT;

        /* overflow check */
        if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
                        SMP_CACHE_BYTES)
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;

        newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
                return -ENOMEM;

        /* choose the copy that is our node/cpu */
        loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
        if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
                           tmp.size) != 0) {
                ret = -EFAULT;
                goto free_newinfo;
        }

        ret = translate_compat_table(tmp.name, tmp.valid_hooks,
                                     &newinfo, &loc_cpu_entry, tmp.size,
                                     tmp.num_entries, tmp.hook_entry, tmp.underflow);
        if (ret != 0)
                goto free_newinfo;

        duprintf("compat_do_replace: Translated table\n");

        ret = __do_replace(tmp.name, tmp.valid_hooks,
                           newinfo, tmp.num_counters,
                           compat_ptr(tmp.counters));
        if (ret)
                goto free_newinfo_untrans;
        return 0;

 free_newinfo_untrans:
        IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
        xt_free_table_info(newinfo);
        return ret;
}

static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
                      unsigned int len)
{
        int ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        switch (cmd) {
        case IPT_SO_SET_REPLACE:
                ret = compat_do_replace(user, len);
                break;

        case IPT_SO_SET_ADD_COUNTERS:
                ret = do_add_counters(user, len, 1);
                break;

        default:
                duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
                ret = -EINVAL;
        }

        return ret;
}

struct compat_ipt_get_entries
{
        char name[IPT_TABLE_MAXNAMELEN];
        compat_uint_t size;
        struct compat_ipt_entry entrytable[0];
};

static int compat_copy_entries_to_user(unsigned int total_size,
                struct ipt_table *table, void __user *userptr)
{
        unsigned int off, num;
        struct compat_ipt_entry e;
        struct xt_counters *counters;
        struct xt_table_info *private = table->private;
        void __user *pos;
        unsigned int size;
        int ret = 0;
        void *loc_cpu_entry;

        counters = alloc_counters(table);
        if (IS_ERR(counters))
                return PTR_ERR(counters);

        /* choose the copy that is on our node/cpu, ...
         * This choice is lazy (because current thread is
         * allowed to migrate to another cpu)
         */
        loc_cpu_entry = private->entries[raw_smp_processor_id()];
        pos = userptr;
        size = total_size;
        ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
                                compat_copy_entry_to_user, &pos, &size);
        if (ret)
                goto free_counters;

        /* ... then go back and fix counters and names */
        for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
                unsigned int i;
                struct ipt_entry_match m;
                struct ipt_entry_target t;

                ret = -EFAULT;
                if (copy_from_user(&e, userptr + off,
                                   sizeof(struct compat_ipt_entry)))
                        goto free_counters;
                if (copy_to_user(userptr + off +
                                 offsetof(struct compat_ipt_entry, counters),
                                 &counters[num], sizeof(counters[num])))
                        goto free_counters;

                for (i = sizeof(struct compat_ipt_entry);
                     i < e.target_offset; i += m.u.match_size) {
                        if (copy_from_user(&m, userptr + off + i,
                                           sizeof(struct ipt_entry_match)))
                                goto free_counters;
                        if (copy_to_user(userptr + off + i +
                                offsetof(struct ipt_entry_match, u.user.name),
                                m.u.kernel.match->name,
                                strlen(m.u.kernel.match->name) + 1))
                                goto free_counters;
                }

                if (copy_from_user(&t, userptr + off + e.target_offset,
                                   sizeof(struct ipt_entry_target)))
                        goto free_counters;
                if (copy_to_user(userptr + off + e.target_offset +
                                 offsetof(struct ipt_entry_target, u.user.name),
                                 t.u.kernel.target->name,
                                 strlen(t.u.kernel.target->name) + 1))
                        goto free_counters;
        }
        ret = 0;
free_counters:
        vfree(counters);
        return ret;
}

static int
compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
        int ret;
        struct compat_ipt_get_entries get;
        struct ipt_table *t;

        if (*len < sizeof(get)) {
                duprintf("compat_get_entries: %u < %u\n",
                         *len, (unsigned int)sizeof(get));
                return -EINVAL;
        }

        if (copy_from_user(&get, uptr, sizeof(get)) != 0)
                return -EFAULT;

        if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
                duprintf("compat_get_entries: %u != %u\n", *len,
                         (unsigned int)(sizeof(struct compat_ipt_get_entries) +
                                        get.size));
                return -EINVAL;
        }

        xt_compat_lock(AF_INET);
        t = xt_find_table_lock(AF_INET, get.name);
        if (t && !IS_ERR(t)) {
                struct xt_table_info *private = t->private;
                struct xt_table_info info;
                duprintf("t->private->number = %u\n",
                         private->number);
                ret = compat_table_info(private, &info);
                if (!ret && get.size == info.size) {
                        ret = compat_copy_entries_to_user(private->size,
                                                          t, uptr->entrytable);
                } else if (!ret) {
                        duprintf("compat_get_entries: I've got %u not %u!\n",
                                 private->size, get.size);
                        ret = -EINVAL;
                }
                compat_flush_offsets();
                module_put(t->me);
                xt_table_unlock(t);
        } else
                ret = t ? PTR_ERR(t) : -ENOENT;

        xt_compat_unlock(AF_INET);
        return ret;
}

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
        int ret;

        switch (cmd) {
        case IPT_SO_GET_INFO:
                ret = get_info(user, len, 1);
                break;
        case IPT_SO_GET_ENTRIES:
                ret = compat_get_entries(user, len);
                break;
        default:
                duprintf("compat_do_ipt_get_ctl: unknown request %i\n", cmd);
                ret = -EINVAL;
        }
        return ret;
}
#endif

static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
        int ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        switch (cmd) {
        case IPT_SO_SET_REPLACE:
                ret = do_replace(user, len);
                break;

        case IPT_SO_SET_ADD_COUNTERS:
                ret = do_add_counters(user, len, 0);
                break;

        default:
                duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
                ret = -EINVAL;
        }

        return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
        int ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        switch (cmd) {
        case IPT_SO_GET_INFO:
                ret = get_info(user, len, 0);
                break;

        case IPT_SO_GET_ENTRIES:
                ret = get_entries(user, len);
                break;

        case IPT_SO_GET_REVISION_MATCH:
        case IPT_SO_GET_REVISION_TARGET: {
                struct ipt_get_revision rev;
                int target;

                if (*len != sizeof(rev)) {
                        ret = -EINVAL;
                        break;
                }
                if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
                        ret = -EFAULT;
                        break;
                }

                if (cmd == IPT_SO_GET_REVISION_TARGET)
                        target = 1;
                else
                        target = 0;

                ret = try_then_request_module(xt_find_revision(AF_INET, rev.name,
                                                               rev.revision,
                                                               target, &ret),
                                              "ipt_%s", rev.name);
                break;
        }

        default:
                duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
                ret = -EINVAL;
        }

        return ret;
}

int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
        int ret;
        struct xt_table_info *newinfo;
        static struct xt_table_info bootstrap
                = { 0, 0, 0, { 0 }, { 0 }, { } };
        void *loc_cpu_entry;

        newinfo = xt_alloc_table_info(repl->size);
        if (!newinfo)
                return -ENOMEM;

        /* choose the copy on our node/cpu
         * but don't care about preemption
         */
        loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
        memcpy(loc_cpu_entry, repl->entries, repl->size);

        ret = translate_table(table->name, table->valid_hooks,
                              newinfo, loc_cpu_entry, repl->size,
                              repl->num_entries,
                              repl->hook_entry,
                              repl->underflow);
        if (ret != 0) {
                xt_free_table_info(newinfo);
                return ret;
        }

        if (xt_register_table(table, &bootstrap, newinfo) != 0) {
                xt_free_table_info(newinfo);
                return ret;
        }

        return 0;
}

void ipt_unregister_table(struct ipt_table *table)
{
        struct xt_table_info *private;
        void *loc_cpu_entry;

        private = xt_unregister_table(table);

        /* Decrease module usage counts and free resources */
        loc_cpu_entry = private->entries[raw_smp_processor_id()];
        IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
        xt_free_table_info(private);
}

/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
                     u_int8_t type, u_int8_t code,
                     int invert)
{
        return ((test_type == 0xFF) ||
                (type == test_type && code >= min_code && code <= max_code))
                ^ invert;
}
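
/* Example: "--icmp-type echo-request" compiles to test_type 8 with a
 * code range of 0..0xFF, so any echo request matches; test_type 0xFF
 * is the wildcard that matches every ICMP type.  The invert argument
 * XORs the final result, giving "! --icmp-type" semantics. */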

static int
icmp_match(const struct sk_buff *skb,
           const struct net_device *in,
           const struct net_device *out,
           const struct xt_match *match,
           const void *matchinfo,
           int offset,
           unsigned int protoff,
           int *hotdrop)
{
        struct icmphdr _icmph, *ic;
        const struct ipt_icmp *icmpinfo = matchinfo;

        /* Must not be a fragment. */
        if (offset)
                return 0;

        ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
        if (ic == NULL) {
                /* We've been asked to examine this packet, and we
                 * can't.  Hence, no choice but to drop.
                 */
                duprintf("Dropping evil ICMP tinygram.\n");
                *hotdrop = 1;
                return 0;
        }

        return icmp_type_code_match(icmpinfo->type,
                                    icmpinfo->code[0],
                                    icmpinfo->code[1],
                                    ic->type, ic->code,
                                    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

/* Called when user tries to insert an entry of this type. */
static int
icmp_checkentry(const char *tablename,
                const void *info,
                const struct xt_match *match,
                void *matchinfo,
                unsigned int matchsize,
                unsigned int hook_mask)
{
        const struct ipt_icmp *icmpinfo = matchinfo;

        /* Must specify no unknown invflags */
        return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}

/* The built-in targets: standard (NULL) and error. */
static struct ipt_target ipt_standard_target = {
        .name		= IPT_STANDARD_TARGET,
        .targetsize	= sizeof(int),
        .family		= AF_INET,
#ifdef CONFIG_COMPAT
        .compat		= &compat_ipt_standard_fn,
#endif
};

static struct ipt_target ipt_error_target = {
        .name		= IPT_ERROR_TARGET,
        .target		= ipt_error,
        .targetsize	= IPT_FUNCTION_MAXNAMELEN,
        .family		= AF_INET,
};

static struct nf_sockopt_ops ipt_sockopts = {
        .pf		= PF_INET,
        .set_optmin	= IPT_BASE_CTL,
        .set_optmax	= IPT_SO_SET_MAX+1,
        .set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
        .compat_set	= compat_do_ipt_set_ctl,
#endif
        .get_optmin	= IPT_BASE_CTL,
        .get_optmax	= IPT_SO_GET_MAX+1,
        .get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
        .compat_get	= compat_do_ipt_get_ctl,
#endif
};

static struct ipt_match icmp_matchstruct = {
        .name		= "icmp",
        .match		= icmp_match,
        .matchsize	= sizeof(struct ipt_icmp),
        .proto		= IPPROTO_ICMP,
        .family		= AF_INET,
        .checkentry	= icmp_checkentry,
};
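
/*
 * Extension modules register their matches the same way the built-in
 * ICMP match is registered in ip_tables_init() below.  A minimal sketch
 * with hypothetical names and an abbreviated match struct:
 */
#if 0
static struct ipt_match example_match = {
        .name		= "example",	/* looked up via "-m example" */
        .match		= example_match_fn,
        .matchsize	= sizeof(struct example_info),
        .family		= AF_INET,
};

static int __init example_init(void)
{
        return xt_register_match(&example_match);
}
#endif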

static int __init ip_tables_init(void)
{
        int ret;

        xt_proto_init(AF_INET);

        /* No one else will be downing sem now, so we won't sleep */
        xt_register_target(&ipt_standard_target);
        xt_register_target(&ipt_error_target);
        xt_register_match(&icmp_matchstruct);

        /* Register setsockopt */
        ret = nf_register_sockopt(&ipt_sockopts);
        if (ret < 0) {
                duprintf("Unable to register sockopts.\n");
                return ret;
        }

        printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
        return 0;
}

static void __exit ip_tables_fini(void)
{
        nf_unregister_sockopt(&ipt_sockopts);

        xt_unregister_match(&icmp_matchstruct);
        xt_unregister_target(&ipt_error_target);
        xt_unregister_target(&ipt_standard_target);

        xt_proto_fini(AF_INET);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);