2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
14 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
15 * - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
17 #include <linux/cache.h>
18 #include <linux/capability.h>
19 #include <linux/skbuff.h>
20 #include <linux/kmod.h>
21 #include <linux/vmalloc.h>
22 #include <linux/netdevice.h>
23 #include <linux/module.h>
24 #include <linux/icmp.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
33 #include <linux/netfilter/x_tables.h>
34 #include <linux/netfilter_ipv4/ip_tables.h>
36 MODULE_LICENSE("GPL");
37 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
38 MODULE_DESCRIPTION("IPv4 packet filter");
40 /*#define DEBUG_IP_FIREWALL*/
41 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
42 /*#define DEBUG_IP_FIREWALL_USER*/
44 #ifdef DEBUG_IP_FIREWALL
45 #define dprintf(format, args...) printk(format , ## args)
47 #define dprintf(format, args...)
50 #ifdef DEBUG_IP_FIREWALL_USER
51 #define duprintf(format, args...) printk(format , ## args)
53 #define duprintf(format, args...)
56 #ifdef CONFIG_NETFILTER_DEBUG
57 #define IP_NF_ASSERT(x) \
60 printk("IP_NF_ASSERT: %s:%s:%u\n", \
61 __FUNCTION__, __FILE__, __LINE__); \
64 #define IP_NF_ASSERT(x)
68 /* All the better to debug you with... */
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
80 Hence the start of any table is given by get_table() below. */
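/* Rough sketch of the pattern described above (illustrative only; see
 * ipt_do_table() and alloc_counters() below for the real users of
 * table->lock):
 *
 *   packet path (softirq)              user context (counter dump)
 *     read_lock_bh(&table->lock);        write_lock_bh(&table->lock);
 *     ...bump this CPU's copy...         ...sum every CPU's copy...
 *     read_unlock_bh(&table->lock);      write_unlock_bh(&table->lock);
 */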
82 /* Returns whether the packet matches the rule or not. */
84 ip_packet_match(const struct iphdr *ip,
87 const struct ipt_ip *ipinfo,
93 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
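/* Illustrative: FWINV() XORs a raw mismatch test with the rule's
 * invert flag. With bool == "source address differs", for example:
 *   invflag clear: FWINV(1, IPT_INV_SRCIP) == 1 -> rule fails
 *   invflag set:   FWINV(1, IPT_INV_SRCIP) == 0 -> inverted rule matches
 */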
95 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
96 IPT_INV_SRCIP)
97 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
98 IPT_INV_DSTIP)) {
99 dprintf("Source or dest mismatch.\n");
101 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
102 NIPQUAD(ip->saddr),
103 NIPQUAD(ipinfo->smsk.s_addr),
104 NIPQUAD(ipinfo->src.s_addr),
105 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
106 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
107 NIPQUAD(ip->daddr),
108 NIPQUAD(ipinfo->dmsk.s_addr),
109 NIPQUAD(ipinfo->dst.s_addr),
110 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
114 /* Look for ifname matches; this should unroll nicely. */
115 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
116 ret |= (((const unsigned long *)indev)[i]
117 ^ ((const unsigned long *)ipinfo->iniface)[i])
118 & ((const unsigned long *)ipinfo->iniface_mask)[i];
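/* Illustrative: names are XORed one long at a time and the mask keeps
 * only the bytes the rule cares about, so a wildcard such as
 * "-i eth+" (mask covering just "eth") matches eth0, eth1, ...
 * This assumes IFNAMSIZ-sized, long-aligned buffers, which is how the
 * iptables user-space tool lays them out.
 */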
121 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
122 dprintf("VIA in mismatch (%s vs %s).%s\n",
123 indev, ipinfo->iniface,
124 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
128 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
129 ret |= (((const unsigned long *)outdev)[i]
130 ^ ((const unsigned long *)ipinfo->outiface)[i])
131 & ((const unsigned long *)ipinfo->outiface_mask)[i];
134 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
135 dprintf("VIA out mismatch (%s vs %s).%s\n",
136 outdev, ipinfo->outiface,
137 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
141 /* Check specific protocol */
143 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
144 dprintf("Packet protocol %hi does not match %hi.%s\n",
145 ip->protocol, ipinfo->proto,
146 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
150 /* If we have a fragment rule but the packet is not a fragment
151 * then we return zero */
152 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
153 dprintf("Fragment rule but not fragment.%s\n",
154 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
162 ip_checkentry(const struct ipt_ip *ip)
164 if (ip->flags & ~IPT_F_MASK) {
165 duprintf("Unknown flag bits set: %08X\n",
166 ip->flags & ~IPT_F_MASK);
169 if (ip->invflags & ~IPT_INV_MASK) {
170 duprintf("Unknown invflag bits set: %08X\n",
171 ip->invflags & ~IPT_INV_MASK);
178 ipt_error(struct sk_buff **pskb,
179 const struct net_device *in,
180 const struct net_device *out,
181 unsigned int hooknum,
182 const struct xt_target *target,
183 const void *targinfo)
186 printk("ip_tables: error: `%s'\n", (char *)targinfo);
192 int do_match(struct ipt_entry_match *m,
193 const struct sk_buff *skb,
194 const struct net_device *in,
195 const struct net_device *out,
199 /* Stop iteration if it doesn't match */
200 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
201 offset, skb->nh.iph->ihl*4, hotdrop))
207 static inline struct ipt_entry *
208 get_entry(void *base, unsigned int offset)
210 return (struct ipt_entry *)(base + offset);
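/* Illustrative: rules live in one flat blob and every "pointer" is a
 * byte offset from its start, e.g.
 *   e = get_entry(base, off + e->next_offset);  - step to the next rule
 *   e = get_entry(base, v);                     - follow a jump verdict v
 */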
213 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
215 ipt_do_table(struct sk_buff **pskb,
217 const struct net_device *in,
218 const struct net_device *out,
219 struct ipt_table *table)
221 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
226 /* Initializing verdict to NF_DROP keeps gcc happy. */
227 unsigned int verdict = NF_DROP;
228 const char *indev, *outdev;
230 struct ipt_entry *e, *back;
231 struct xt_table_info *private;
234 ip = (*pskb)->nh.iph;
235 datalen = (*pskb)->len - ip->ihl * 4;
236 indev = in ? in->name : nulldevname;
237 outdev = out ? out->name : nulldevname;
238 /* We handle fragments by dealing with the first fragment as
239 * if it was a normal packet. All other fragments are treated
240 * normally, except that they will NEVER match rules that ask
241 * things we don't know (ie. the tcp syn flag or ports). If the
242 * rule is also a fragment-specific rule, non-fragments won't
243 * match it. */
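/* Illustrative: a rule like "-p tcp --dport 25" can therefore never
 * match a non-first fragment (there is no TCP header to inspect),
 * while an "-f" rule (IPT_F_FRAG) matches only packets whose fragment
 * offset is non-zero.
 */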
244 offset = ntohs(ip->frag_off) & IP_OFFSET;
246 read_lock_bh(&table->lock);
247 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
248 private = table->private;
249 table_base = (void *)private->entries[smp_processor_id()];
250 e = get_entry(table_base, private->hook_entry[hook]);
252 /* For return from builtin chain */
253 back = get_entry(table_base, private->underflow[hook]);
258 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
259 struct ipt_entry_target *t;
261 if (IPT_MATCH_ITERATE(e, do_match,
262 *pskb, in, out,
263 offset, &hotdrop) != 0)
266 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
268 t = ipt_get_target(e);
269 IP_NF_ASSERT(t->u.kernel.target);
270 /* Standard target? */
271 if (!t->u.kernel.target->target) {
274 v = ((struct ipt_standard_target *)t)->verdict;
276 /* Pop from stack? */
277 if (v != IPT_RETURN) {
278 verdict = (unsigned)(-v) - 1;
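/* Worked example: userspace encodes verdicts as -verdict - 1, so
 * NF_ACCEPT (1) arrives here as v == -2 and
 * (unsigned)(-(-2)) - 1 == 1 == NF_ACCEPT. Non-negative values of v
 * are jump offsets and are handled below instead.
 */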
282 back = get_entry(table_base,
283 back->comefrom);
286 if (table_base + v != (void *)e + e->next_offset
287 && !(e->ip.flags & IPT_F_GOTO)) {
288 /* Save old back ptr in next entry */
289 struct ipt_entry *next
290 = (void *)e + e->next_offset;
291 next->comefrom
292 = (void *)back - table_base;
293 /* set back pointer to next entry */
294 back = next;
297 e = get_entry(table_base, v);
299 /* Targets which reenter must return
300 abs. verdicts */
301 #ifdef CONFIG_NETFILTER_DEBUG
302 ((struct ipt_entry *)table_base)->comefrom
305 verdict = t->u.kernel.target->target(pskb,
311 #ifdef CONFIG_NETFILTER_DEBUG
312 if (((struct ipt_entry *)table_base)->comefrom
314 && verdict == IPT_CONTINUE) {
315 printk("Target %s reentered!\n",
316 t->u.kernel.target->name);
319 ((struct ipt_entry *)table_base)->comefrom
322 /* Target might have changed stuff. */
323 ip = (*pskb)->nh.iph;
324 datalen = (*pskb)->len - ip->ihl * 4;
326 if (verdict == IPT_CONTINUE)
327 e = (void *)e + e->next_offset;
335 e = (void *)e + e->next_offset;
339 read_unlock_bh(&table->lock);
341 #ifdef DEBUG_ALLOW_ALL
350 /* All zeroes == unconditional rule. */
352 unconditional(const struct ipt_ip *ip)
356 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
357 if (((__u32 *)ip)[i])
363 /* Figures out from what hook each rule can be called: returns 0 if
364 there are loops. Puts hook bitmask in comefrom. */
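/* Illustrative: on a successful walk, comefrom ends up with bit h set
 * for every hook h that can reach the rule. Bit NF_IP_NUMHOOKS is a
 * transient "currently on this walk" marker; meeting a rule that
 * already carries it means we walked in a circle, i.e. a loop.
 */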
366 mark_source_chains(struct xt_table_info *newinfo,
367 unsigned int valid_hooks, void *entry0)
371 /* No recursion; use packet counter to save back ptrs (reset
372 to 0 as we leave), and comefrom to save source hook bitmask */
373 for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
374 unsigned int pos = newinfo->hook_entry[hook];
376 = (struct ipt_entry *)(entry0 + pos);
378 if (!(valid_hooks & (1 << hook)))
381 /* Set initial back pointer. */
382 e->counters.pcnt = pos;
385 struct ipt_standard_target *t
386 = (void *)ipt_get_target(e);
388 if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
389 printk("iptables: loop hook %u pos %u %08X.\n",
390 hook, pos, e->comefrom);
394 |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
396 /* Unconditional return/END. */
397 if (e->target_offset == sizeof(struct ipt_entry)
398 && (strcmp(t->target.u.user.name,
399 IPT_STANDARD_TARGET) == 0)
401 && unconditional(&e->ip)) {
402 unsigned int oldpos, size;
404 /* Return: backtrack through the last
405 big jumps. */
407 e->comefrom ^= (1<<NF_IP_NUMHOOKS);
408 #ifdef DEBUG_IP_FIREWALL_USER
410 & (1 << NF_IP_NUMHOOKS)) {
411 duprintf("Back unset "
418 pos = e->counters.pcnt;
419 e->counters.pcnt = 0;
421 /* We're at the start. */
425 e = (struct ipt_entry *)
427 } while (oldpos == pos + e->next_offset);
430 size = e->next_offset;
431 e = (struct ipt_entry *)
432 (entry0 + pos + size);
433 e->counters.pcnt = pos;
436 int newpos = t->verdict;
438 if (strcmp(t->target.u.user.name,
439 IPT_STANDARD_TARGET) == 0
441 /* This is a jump; chase it. */
442 duprintf("Jump rule %u -> %u\n",
445 /* ... this is a fallthru */
446 newpos = pos + e->next_offset;
448 e = (struct ipt_entry *)
450 e->counters.pcnt = pos;
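/* Illustrative: only a standard target with a non-negative verdict is
 * a real jump (e.g. verdict 0x150 moves the walk to the user-defined
 * chain starting at offset 0x150); everything else falls through to
 * pos + e->next_offset.
 */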
455 duprintf("Finished chain %u\n", hook);
461 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
463 if (i && (*i)-- == 0)
466 if (m->u.kernel.match->destroy)
467 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
468 module_put(m->u.kernel.match->me);
473 standard_check(const struct ipt_entry_target *t,
474 unsigned int max_offset)
476 struct ipt_standard_target *targ = (void *)t;
478 /* Check standard info. */
479 if (targ->verdict >= 0
480 && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
481 duprintf("ipt_standard_check: bad verdict (%i)\n",
485 if (targ->verdict < -NF_MAX_VERDICT - 1) {
486 duprintf("ipt_standard_check: bad negative verdict (%i)\n",
494 check_match(struct ipt_entry_match *m,
496 const struct ipt_ip *ip,
497 unsigned int hookmask,
500 struct ipt_match *match;
503 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
505 "ipt_%s", m->u.user.name);
506 if (IS_ERR(match) || !match) {
507 duprintf("check_match: `%s' not found\n", m->u.user.name);
508 return match ? PTR_ERR(match) : -ENOENT;
510 m->u.kernel.match = match;
512 ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
513 name, hookmask, ip->proto,
514 ip->invflags & IPT_INV_PROTO);
518 if (m->u.kernel.match->checkentry
519 && !m->u.kernel.match->checkentry(name, ip, match, m->data,
521 duprintf("ip_tables: check failed for `%s'.\n",
522 m->u.kernel.match->name);
530 module_put(m->u.kernel.match->me);
534 static struct ipt_target ipt_standard_target;
537 check_entry(struct ipt_entry *e, const char *name, unsigned int size,
540 struct ipt_entry_target *t;
541 struct ipt_target *target;
545 if (!ip_checkentry(&e->ip)) {
546 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
551 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
553 goto cleanup_matches;
555 t = ipt_get_target(e);
556 target = try_then_request_module(xt_find_target(AF_INET,
559 "ipt_%s", t->u.user.name);
560 if (IS_ERR(target) || !target) {
561 duprintf("check_entry: `%s' not found\n", t->u.user.name);
562 ret = target ? PTR_ERR(target) : -ENOENT;
563 goto cleanup_matches;
565 t->u.kernel.target = target;
567 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
568 name, e->comefrom, e->ip.proto,
569 e->ip.invflags & IPT_INV_PROTO);
573 if (t->u.kernel.target == &ipt_standard_target) {
574 if (!standard_check(t, size)) {
578 } else if (t->u.kernel.target->checkentry
579 && !t->u.kernel.target->checkentry(name, e, target, t->data,
581 duprintf("ip_tables: check failed for `%s'.\n",
582 t->u.kernel.target->name);
590 module_put(t->u.kernel.target->me);
592 IPT_MATCH_ITERATE(e, cleanup_match, &j);
597 check_entry_size_and_hooks(struct ipt_entry *e,
598 struct xt_table_info *newinfo,
600 unsigned char *limit,
601 const unsigned int *hook_entries,
602 const unsigned int *underflows,
607 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
608 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
609 duprintf("Bad offset %p\n", e);
614 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
615 duprintf("checking: element %p size %u\n",
620 /* Check hooks & underflows */
621 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
622 if ((unsigned char *)e - base == hook_entries[h])
623 newinfo->hook_entry[h] = hook_entries[h];
624 if ((unsigned char *)e - base == underflows[h])
625 newinfo->underflow[h] = underflows[h];
628 /* FIXME: underflows must be unconditional, standard verdicts
629 < 0 (not IPT_RETURN). --RR */
631 /* Clear counters and comefrom */
632 e->counters = ((struct xt_counters) { 0, 0 });
640 cleanup_entry(struct ipt_entry *e, unsigned int *i)
642 struct ipt_entry_target *t;
644 if (i && (*i)-- == 0)
647 /* Cleanup all matches */
648 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
649 t = ipt_get_target(e);
650 if (t->u.kernel.target->destroy)
651 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
652 module_put(t->u.kernel.target->me);
656 /* Checks and translates the user-supplied table segment (held in
657 newinfo) */
658 static int
659 translate_table(const char *name,
660 unsigned int valid_hooks,
661 struct xt_table_info *newinfo,
665 const unsigned int *hook_entries,
666 const unsigned int *underflows)
671 newinfo->size = size;
672 newinfo->number = number;
674 /* Init all hooks to impossible value. */
675 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
676 newinfo->hook_entry[i] = 0xFFFFFFFF;
677 newinfo->underflow[i] = 0xFFFFFFFF;
680 duprintf("translate_table: size %u\n", newinfo->size);
682 /* Walk through entries, checking offsets. */
683 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
684 check_entry_size_and_hooks,
688 hook_entries, underflows, &i);
693 duprintf("translate_table: %u not %u entries\n",
698 /* Check hooks all assigned */
699 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
700 /* Only hooks which are valid */
701 if (!(valid_hooks & (1 << i)))
703 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
704 duprintf("Invalid hook entry %u %u\n",
708 if (newinfo->underflow[i] == 0xFFFFFFFF) {
709 duprintf("Invalid underflow %u %u\n",
715 if (!mark_source_chains(newinfo, valid_hooks, entry0))
718 /* Finally, each sanity check must pass */
720 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
721 check_entry, name, size, &i);
724 IPT_ENTRY_ITERATE(entry0, newinfo->size,
729 /* And one copy for every other CPU */
730 for_each_possible_cpu(i) {
731 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
732 memcpy(newinfo->entries[i], entry0, newinfo->size);
740 add_entry_to_counter(const struct ipt_entry *e,
741 struct xt_counters total[],
744 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
751 set_entry_to_counter(const struct ipt_entry *e,
752 struct ipt_counters total[],
755 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
762 get_counters(const struct xt_table_info *t,
763 struct xt_counters counters[])
769 /* Instead of clearing (by a previous call to memset())
770 * the counters and using adds, we set the counters
771 * with data used by 'current' CPU.
772 * We don't care about preemption here.
773 */
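/* Sketch of the effect on a two-CPU box: SET_COUNTER() seeds total[]
 * from the local CPU's copy, then one ADD pass per remaining CPU
 * folds the other copies in; the caller holds write_lock_bh(), so the
 * packet path cannot race with the summation.
 */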
774 curcpu = raw_smp_processor_id();
777 IPT_ENTRY_ITERATE(t->entries[curcpu],
779 set_entry_to_counter,
783 for_each_possible_cpu(cpu) {
787 IPT_ENTRY_ITERATE(t->entries[cpu],
789 add_entry_to_counter,
795 static inline struct xt_counters * alloc_counters(struct ipt_table *table)
797 unsigned int countersize;
798 struct xt_counters *counters;
799 struct xt_table_info *private = table->private;
801 /* We need an atomic snapshot of the counters: the rest doesn't change
802 (other than comefrom, which userspace doesn't care about). */
804 countersize = sizeof(struct xt_counters) * private->number;
805 counters = vmalloc_node(countersize, numa_node_id());
807 if (counters == NULL)
808 return ERR_PTR(-ENOMEM);
810 /* First, sum counters... */
811 write_lock_bh(&table->lock);
812 get_counters(private, counters);
813 write_unlock_bh(&table->lock);
819 copy_entries_to_user(unsigned int total_size,
820 struct ipt_table *table,
821 void __user *userptr)
823 unsigned int off, num;
825 struct xt_counters *counters;
826 struct xt_table_info *private = table->private;
830 counters = alloc_counters(table);
831 if (IS_ERR(counters))
832 return PTR_ERR(counters);
834 /* choose the copy that is on our node/cpu, ...
835 * This choice is lazy (because current thread is
836 * allowed to migrate to another cpu)
838 loc_cpu_entry = private->entries[raw_smp_processor_id()];
839 /* ... then copy entire thing ... */
840 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
845 /* FIXME: use iterator macros --RR */
846 /* ... then go back and fix counters and names */
847 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
849 struct ipt_entry_match *m;
850 struct ipt_entry_target *t;
852 e = (struct ipt_entry *)(loc_cpu_entry + off);
853 if (copy_to_user(userptr + off
854 + offsetof(struct ipt_entry, counters),
856 sizeof(counters[num])) != 0) {
861 for (i = sizeof(struct ipt_entry);
862 i < e->target_offset;
863 i += m->u.match_size) {
866 if (copy_to_user(userptr + off + i
867 + offsetof(struct ipt_entry_match,
869 m->u.kernel.match->name,
870 strlen(m->u.kernel.match->name)+1)
877 t = ipt_get_target(e);
878 if (copy_to_user(userptr + off + e->target_offset
879 + offsetof(struct ipt_entry_target,
881 t->u.kernel.target->name,
882 strlen(t->u.kernel.target->name)+1) != 0) {
894 struct compat_delta {
895 struct compat_delta *next;
900 static struct compat_delta *compat_offsets = NULL;
902 static int compat_add_offset(u_int16_t offset, short delta)
904 struct compat_delta *tmp;
906 tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
909 tmp->offset = offset;
911 if (compat_offsets) {
912 tmp->next = compat_offsets->next;
913 compat_offsets->next = tmp;
915 compat_offsets = tmp;
921 static void compat_flush_offsets(void)
923 struct compat_delta *tmp, *next;
925 if (compat_offsets) {
926 for (tmp = compat_offsets; tmp; tmp = next) {
930 compat_offsets = NULL;
934 static short compat_calc_jump(u_int16_t offset)
936 struct compat_delta *tmp;
939 for (tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
940 if (tmp->offset < offset)
945 static void compat_standard_from_user(void *dst, void *src)
947 int v = *(compat_int_t *)src;
950 v += compat_calc_jump(v);
951 memcpy(dst, &v, sizeof(v));
954 static int compat_standard_to_user(void __user *dst, void *src)
956 compat_int_t cv = *(int *)src;
959 cv -= compat_calc_jump(cv);
960 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
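/* Illustrative: entries shrink when converted to the 32-bit layout,
 * so jump offsets must shrink with them. compat_calc_jump(o) returns
 * the accumulated size delta of all entries placed before offset o;
 * hence "-" on the way out to a 32-bit user and "+" on the way in.
 */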
964 compat_calc_match(struct ipt_entry_match *m, int * size)
966 *size += xt_compat_match_offset(m->u.kernel.match);
970 static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
971 void *base, struct xt_table_info *newinfo)
973 struct ipt_entry_target *t;
974 u_int16_t entry_offset;
978 entry_offset = (void *)e - base;
979 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
980 t = ipt_get_target(e);
981 off += xt_compat_target_offset(t->u.kernel.target);
982 newinfo->size -= off;
983 ret = compat_add_offset(entry_offset, off);
987 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
988 if (info->hook_entry[i] && (e < (struct ipt_entry *)
989 (base + info->hook_entry[i])))
990 newinfo->hook_entry[i] -= off;
991 if (info->underflow[i] && (e < (struct ipt_entry *)
992 (base + info->underflow[i])))
993 newinfo->underflow[i] -= off;
998 static int compat_table_info(struct xt_table_info *info,
999 struct xt_table_info *newinfo)
1001 void *loc_cpu_entry;
1004 if (!newinfo || !info)
1007 memset(newinfo, 0, sizeof(struct xt_table_info));
1008 newinfo->size = info->size;
1009 newinfo->number = info->number;
1010 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1011 newinfo->hook_entry[i] = info->hook_entry[i];
1012 newinfo->underflow[i] = info->underflow[i];
1014 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1015 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1016 compat_calc_entry, info, loc_cpu_entry, newinfo);
1020 static int get_info(void __user *user, int *len, int compat)
1022 char name[IPT_TABLE_MAXNAMELEN];
1023 struct ipt_table *t;
1026 if (*len != sizeof(struct ipt_getinfo)) {
1027 duprintf("length %u != %u\n", *len,
1028 (unsigned int)sizeof(struct ipt_getinfo));
1032 if (copy_from_user(name, user, sizeof(name)) != 0)
1035 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1036 #ifdef CONFIG_COMPAT
1038 xt_compat_lock(AF_INET);
1040 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1041 "iptable_%s", name);
1042 if (t && !IS_ERR(t)) {
1043 struct ipt_getinfo info;
1044 struct xt_table_info *private = t->private;
1046 #ifdef CONFIG_COMPAT
1048 struct xt_table_info tmp;
1049 ret = compat_table_info(private, &tmp);
1050 compat_flush_offsets();
1054 info.valid_hooks = t->valid_hooks;
1055 memcpy(info.hook_entry, private->hook_entry,
1056 sizeof(info.hook_entry));
1057 memcpy(info.underflow, private->underflow,
1058 sizeof(info.underflow));
1059 info.num_entries = private->number;
1060 info.size = private->size;
1061 strcpy(info.name, name);
1063 if (copy_to_user(user, &info, *len) != 0)
1071 ret = t ? PTR_ERR(t) : -ENOENT;
1072 #ifdef CONFIG_COMPAT
1074 xt_compat_unlock(AF_INET);
1080 get_entries(struct ipt_get_entries __user *uptr, int *len)
1083 struct ipt_get_entries get;
1084 struct ipt_table *t;
1086 if (*len < sizeof(get)) {
1087 duprintf("get_entries: %u < %d\n", *len,
1088 (unsigned int)sizeof(get));
1091 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1093 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1094 duprintf("get_entries: %u != %u\n", *len,
1095 (unsigned int)(sizeof(struct ipt_get_entries) +
1100 t = xt_find_table_lock(AF_INET, get.name);
1101 if (t && !IS_ERR(t)) {
1102 struct xt_table_info *private = t->private;
1103 duprintf("t->private->number = %u\n",
1105 if (get.size == private->size)
1106 ret = copy_entries_to_user(private->size,
1107 t, uptr->entrytable);
1109 duprintf("get_entries: I've got %u not %u!\n",
1117 ret = t ? PTR_ERR(t) : -ENOENT;
1123 __do_replace(const char *name, unsigned int valid_hooks,
1124 struct xt_table_info *newinfo, unsigned int num_counters,
1125 void __user *counters_ptr)
1128 struct ipt_table *t;
1129 struct xt_table_info *oldinfo;
1130 struct xt_counters *counters;
1131 void *loc_cpu_old_entry;
1134 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1140 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1141 "iptable_%s", name);
1142 if (!t || IS_ERR(t)) {
1143 ret = t ? PTR_ERR(t) : -ENOENT;
1144 goto free_newinfo_counters_untrans;
1148 if (valid_hooks != t->valid_hooks) {
1149 duprintf("Valid hook crap: %08X vs %08X\n",
1150 valid_hooks, t->valid_hooks);
1155 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1159 /* Update module usage count based on number of rules */
1160 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1161 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1162 if ((oldinfo->number > oldinfo->initial_entries) ||
1163 (newinfo->number <= oldinfo->initial_entries))
1165 if ((oldinfo->number > oldinfo->initial_entries) &&
1166 (newinfo->number <= oldinfo->initial_entries))
1169 /* Get the old counters. */
1170 get_counters(oldinfo, counters);
1171 /* Decrease module usage counts and free resource */
1172 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1173 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, NULL);
1174 xt_free_table_info(oldinfo);
1175 if (copy_to_user(counters_ptr, counters,
1176 sizeof(struct xt_counters) * num_counters) != 0)
1185 free_newinfo_counters_untrans:
1192 do_replace(void __user *user, unsigned int len)
1195 struct ipt_replace tmp;
1196 struct xt_table_info *newinfo;
1197 void *loc_cpu_entry;
1199 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1202 /* Hack: Causes ipchains to give correct error msg --RR */
1203 if (len != sizeof(tmp) + tmp.size)
1204 return -ENOPROTOOPT;
1206 /* overflow check */
1207 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1210 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
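/* Illustrative: xt_alloc_table_info() keeps one copy of the blob per
 * possible CPU, so the requested size is effectively multiplied by
 * NR_CPUS; the checks above reject sizes whose multiplication could
 * overflow before any allocation is attempted.
 */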
1213 newinfo = xt_alloc_table_info(tmp.size);
1217 /* choose the copy that is our node/cpu */
1218 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1219 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1225 ret = translate_table(tmp.name, tmp.valid_hooks,
1226 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1227 tmp.hook_entry, tmp.underflow);
1231 duprintf("ip_tables: Translated table\n");
1233 ret = __do_replace(tmp.name, tmp.valid_hooks,
1234 newinfo, tmp.num_counters,
1237 goto free_newinfo_untrans;
1240 free_newinfo_untrans:
1241 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1243 xt_free_table_info(newinfo);
1247 /* We're lazy, and add to the first CPU; overflow works its fey magic
1248 * and everything is OK. */
1250 add_counter_to_entry(struct ipt_entry *e,
1251 const struct xt_counters addme[],
1255 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1257 (long unsigned int)e->counters.pcnt,
1258 (long unsigned int)e->counters.bcnt,
1259 (long unsigned int)addme[*i].pcnt,
1260 (long unsigned int)addme[*i].bcnt);
1263 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1270 do_add_counters(void __user *user, unsigned int len, int compat)
1273 struct xt_counters_info tmp;
1274 struct xt_counters *paddc;
1275 unsigned int num_counters;
1279 struct ipt_table *t;
1280 struct xt_table_info *private;
1282 void *loc_cpu_entry;
1283 #ifdef CONFIG_COMPAT
1284 struct compat_xt_counters_info compat_tmp;
1288 size = sizeof(struct compat_xt_counters_info);
1293 size = sizeof(struct xt_counters_info);
1296 if (copy_from_user(ptmp, user, size) != 0)
1299 #ifdef CONFIG_COMPAT
1301 num_counters = compat_tmp.num_counters;
1302 name = compat_tmp.name;
1306 num_counters = tmp.num_counters;
1310 if (len != size + num_counters * sizeof(struct xt_counters))
1313 paddc = vmalloc_node(len - size, numa_node_id());
1317 if (copy_from_user(paddc, user + size, len - size) != 0) {
1322 t = xt_find_table_lock(AF_INET, name);
1323 if (!t || IS_ERR(t)) {
1324 ret = t ? PTR_ERR(t) : -ENOENT;
1328 write_lock_bh(&t->lock);
1329 private = t->private;
1330 if (private->number != num_counters) {
1332 goto unlock_up_free;
1336 /* Choose the copy that is on our node */
1337 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1338 IPT_ENTRY_ITERATE(loc_cpu_entry,
1340 add_counter_to_entry,
1344 write_unlock_bh(&t->lock);
1353 #ifdef CONFIG_COMPAT
1354 struct compat_ipt_replace {
1355 char name[IPT_TABLE_MAXNAMELEN];
1359 u32 hook_entry[NF_IP_NUMHOOKS];
1360 u32 underflow[NF_IP_NUMHOOKS];
1362 compat_uptr_t counters; /* struct ipt_counters * */
1363 struct compat_ipt_entry entries[0];
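/* Illustrative: same shape as struct ipt_replace, but with 32-bit
 * (compat_uint_t/compat_uptr_t) fields, so a 32-bit iptables binary
 * can drive a 64-bit kernel through the same setsockopt interface.
 */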
1366 static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
1367 void __user **dstptr, compat_uint_t *size)
1369 return xt_compat_match_to_user(m, dstptr, size);
1372 static int compat_copy_entry_to_user(struct ipt_entry *e,
1373 void __user **dstptr, compat_uint_t *size)
1375 struct ipt_entry_target *t;
1376 struct compat_ipt_entry __user *ce;
1377 u_int16_t target_offset, next_offset;
1378 compat_uint_t origsize;
1383 ce = (struct compat_ipt_entry __user *)*dstptr;
1384 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1387 *dstptr += sizeof(struct compat_ipt_entry);
1388 ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
1389 target_offset = e->target_offset - (origsize - *size);
1392 t = ipt_get_target(e);
1393 ret = xt_compat_target_to_user(t, dstptr, size);
1397 next_offset = e->next_offset - (origsize - *size);
1398 if (put_user(target_offset, &ce->target_offset))
1400 if (put_user(next_offset, &ce->next_offset))
1408 compat_check_calc_match(struct ipt_entry_match *m,
1410 const struct ipt_ip *ip,
1411 unsigned int hookmask,
1414 struct ipt_match *match;
1416 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1417 m->u.user.revision),
1418 "ipt_%s", m->u.user.name);
1419 if (IS_ERR(match) || !match) {
1420 duprintf("compat_check_calc_match: `%s' not found\n",
1422 return match ? PTR_ERR(match) : -ENOENT;
1424 m->u.kernel.match = match;
1425 *size += xt_compat_match_offset(match);
1432 check_compat_entry_size_and_hooks(struct ipt_entry *e,
1433 struct xt_table_info *newinfo,
1435 unsigned char *base,
1436 unsigned char *limit,
1437 unsigned int *hook_entries,
1438 unsigned int *underflows,
1442 struct ipt_entry_target *t;
1443 struct ipt_target *target;
1444 u_int16_t entry_offset;
1447 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1448 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1449 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1450 duprintf("Bad offset %p, limit = %p\n", e, limit);
1454 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1455 sizeof(struct compat_xt_entry_target)) {
1456 duprintf("checking: element %p size %u\n",
1461 if (!ip_checkentry(&e->ip)) {
1462 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
1467 entry_offset = (void *)e - (void *)base;
1469 ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
1470 e->comefrom, &off, &j);
1472 goto cleanup_matches;
1474 t = ipt_get_target(e);
1475 target = try_then_request_module(xt_find_target(AF_INET,
1477 t->u.user.revision),
1478 "ipt_%s", t->u.user.name);
1479 if (IS_ERR(target) || !target) {
1480 duprintf("check_entry: `%s' not found\n", t->u.user.name);
1481 ret = target ? PTR_ERR(target) : -ENOENT;
1482 goto cleanup_matches;
1484 t->u.kernel.target = target;
1486 off += xt_compat_target_offset(target);
1488 ret = compat_add_offset(entry_offset, off);
1492 /* Check hooks & underflows */
1493 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1494 if ((unsigned char *)e - base == hook_entries[h])
1495 newinfo->hook_entry[h] = hook_entries[h];
1496 if ((unsigned char *)e - base == underflows[h])
1497 newinfo->underflow[h] = underflows[h];
1500 /* Clear counters and comefrom */
1501 e->counters = ((struct ipt_counters) { 0, 0 });
1508 module_put(t->u.kernel.target->me);
1510 IPT_MATCH_ITERATE(e, cleanup_match, &j);
1514 static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
1515 void **dstptr, compat_uint_t *size, const char *name,
1516 const struct ipt_ip *ip, unsigned int hookmask, int *i)
1518 struct ipt_entry_match *dm;
1519 struct ipt_match *match;
1522 dm = (struct ipt_entry_match *)*dstptr;
1523 match = m->u.kernel.match;
1524 xt_compat_match_from_user(m, dstptr, size);
1526 ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
1527 name, hookmask, ip->proto,
1528 ip->invflags & IPT_INV_PROTO);
1532 if (m->u.kernel.match->checkentry
1533 && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
1535 duprintf("ip_tables: check failed for `%s'.\n",
1536 m->u.kernel.match->name);
1544 module_put(m->u.kernel.match->me);
1548 static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1549 unsigned int *size, const char *name,
1550 struct xt_table_info *newinfo, unsigned char *base)
1552 struct ipt_entry_target *t;
1553 struct ipt_target *target;
1554 struct ipt_entry *de;
1555 unsigned int origsize;
1560 de = (struct ipt_entry *)*dstptr;
1561 memcpy(de, e, sizeof(struct ipt_entry));
1564 *dstptr += sizeof(struct compat_ipt_entry);
1565 ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
1566 name, &de->ip, de->comefrom, &j);
1568 goto cleanup_matches;
1569 de->target_offset = e->target_offset - (origsize - *size);
1570 t = ipt_get_target(e);
1571 target = t->u.kernel.target;
1572 xt_compat_target_from_user(t, dstptr, size);
1574 de->next_offset = e->next_offset - (origsize - *size);
1575 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1576 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1577 newinfo->hook_entry[h] -= origsize - *size;
1578 if ((unsigned char *)de - base < newinfo->underflow[h])
1579 newinfo->underflow[h] -= origsize - *size;
1582 t = ipt_get_target(de);
1583 target = t->u.kernel.target;
1584 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
1585 name, e->comefrom, e->ip.proto,
1586 e->ip.invflags & IPT_INV_PROTO);
1591 if (t->u.kernel.target == &ipt_standard_target) {
1592 if (!standard_check(t, *size))
1594 } else if (t->u.kernel.target->checkentry
1595 && !t->u.kernel.target->checkentry(name, de, target,
1596 t->data, de->comefrom)) {
1597 duprintf("ip_tables: compat: check failed for `%s'.\n",
1598 t->u.kernel.target->name);
1605 module_put(t->u.kernel.target->me);
1607 IPT_MATCH_ITERATE(e, cleanup_match, &j);
1612 translate_compat_table(const char *name,
1613 unsigned int valid_hooks,
1614 struct xt_table_info **pinfo,
1616 unsigned int total_size,
1617 unsigned int number,
1618 unsigned int *hook_entries,
1619 unsigned int *underflows)
1622 struct xt_table_info *newinfo, *info;
1623 void *pos, *entry0, *entry1;
1630 info->number = number;
1632 /* Init all hooks to impossible value. */
1633 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1634 info->hook_entry[i] = 0xFFFFFFFF;
1635 info->underflow[i] = 0xFFFFFFFF;
1638 duprintf("translate_compat_table: size %u\n", info->size);
1640 xt_compat_lock(AF_INET);
1641 /* Walk through entries, checking offsets. */
1642 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1643 check_compat_entry_size_and_hooks,
1644 info, &size, entry0,
1645 entry0 + total_size,
1646 hook_entries, underflows, &i, name);
1652 duprintf("translate_compat_table: %u not %u entries\n",
1657 /* Check hooks all assigned */
1658 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1659 /* Only hooks which are valid */
1660 if (!(valid_hooks & (1 << i)))
1662 if (info->hook_entry[i] == 0xFFFFFFFF) {
1663 duprintf("Invalid hook entry %u %u\n",
1664 i, hook_entries[i]);
1667 if (info->underflow[i] == 0xFFFFFFFF) {
1668 duprintf("Invalid underflow %u %u\n",
1675 newinfo = xt_alloc_table_info(size);
1679 newinfo->number = number;
1680 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1681 newinfo->hook_entry[i] = info->hook_entry[i];
1682 newinfo->underflow[i] = info->underflow[i];
1684 entry1 = newinfo->entries[raw_smp_processor_id()];
1687 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1688 compat_copy_entry_from_user, &pos, &size,
1689 name, newinfo, entry1);
1690 compat_flush_offsets();
1691 xt_compat_unlock(AF_INET);
1696 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1699 /* And one copy for every other CPU */
1700 for_each_possible_cpu(i)
1701 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1702 memcpy(newinfo->entries[i], entry1, newinfo->size);
1706 xt_free_table_info(info);
1710 xt_free_table_info(newinfo);
1714 xt_compat_unlock(AF_INET);
1719 compat_do_replace(void __user *user, unsigned int len)
1722 struct compat_ipt_replace tmp;
1723 struct xt_table_info *newinfo;
1724 void *loc_cpu_entry;
1726 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1729 /* Hack: Causes ipchains to give correct error msg --RR */
1730 if (len != sizeof(tmp) + tmp.size)
1731 return -ENOPROTOOPT;
1733 /* overflow check */
1734 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1737 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1740 newinfo = xt_alloc_table_info(tmp.size);
1744 /* choose the copy that is our node/cpu */
1745 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1746 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1752 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1753 &newinfo, &loc_cpu_entry, tmp.size,
1754 tmp.num_entries, tmp.hook_entry, tmp.underflow);
1758 duprintf("compat_do_replace: Translated table\n");
1760 ret = __do_replace(tmp.name, tmp.valid_hooks,
1761 newinfo, tmp.num_counters,
1762 compat_ptr(tmp.counters));
1764 goto free_newinfo_untrans;
1767 free_newinfo_untrans:
1768 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1770 xt_free_table_info(newinfo);
1775 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1780 if (!capable(CAP_NET_ADMIN))
1784 case IPT_SO_SET_REPLACE:
1785 ret = compat_do_replace(user, len);
1788 case IPT_SO_SET_ADD_COUNTERS:
1789 ret = do_add_counters(user, len, 1);
1793 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1800 struct compat_ipt_get_entries
1802 char name[IPT_TABLE_MAXNAMELEN];
1804 struct compat_ipt_entry entrytable[0];
1807 static int compat_copy_entries_to_user(unsigned int total_size,
1808 struct ipt_table *table, void __user *userptr)
1810 unsigned int off, num;
1811 struct compat_ipt_entry e;
1812 struct xt_counters *counters;
1813 struct xt_table_info *private = table->private;
1817 void *loc_cpu_entry;
1819 counters = alloc_counters(table);
1820 if (IS_ERR(counters))
1821 return PTR_ERR(counters);
1823 /* choose the copy that is on our node/cpu, ...
1824 * This choice is lazy (because current thread is
1825 * allowed to migrate to another cpu)
1827 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1830 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1831 compat_copy_entry_to_user, &pos, &size);
1835 /* ... then go back and fix counters and names */
1836 for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
1838 struct ipt_entry_match m;
1839 struct ipt_entry_target t;
1842 if (copy_from_user(&e, userptr + off,
1843 sizeof(struct compat_ipt_entry)))
1845 if (copy_to_user(userptr + off +
1846 offsetof(struct compat_ipt_entry, counters),
1847 &counters[num], sizeof(counters[num])))
1850 for (i = sizeof(struct compat_ipt_entry);
1851 i < e.target_offset; i += m.u.match_size) {
1852 if (copy_from_user(&m, userptr + off + i,
1853 sizeof(struct ipt_entry_match)))
1855 if (copy_to_user(userptr + off + i +
1856 offsetof(struct ipt_entry_match, u.user.name),
1857 m.u.kernel.match->name,
1858 strlen(m.u.kernel.match->name) + 1))
1862 if (copy_from_user(&t, userptr + off + e.target_offset,
1863 sizeof(struct ipt_entry_target)))
1865 if (copy_to_user(userptr + off + e.target_offset +
1866 offsetof(struct ipt_entry_target, u.user.name),
1867 t.u.kernel.target->name,
1868 strlen(t.u.kernel.target->name) + 1))
1878 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1881 struct compat_ipt_get_entries get;
1882 struct ipt_table *t;
1885 if (*len < sizeof(get)) {
1886 duprintf("compat_get_entries: %u < %u\n",
1887 *len, (unsigned int)sizeof(get));
1891 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1894 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1895 duprintf("compat_get_entries: %u != %u\n", *len,
1896 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1901 xt_compat_lock(AF_INET);
1902 t = xt_find_table_lock(AF_INET, get.name);
1903 if (t && !IS_ERR(t)) {
1904 struct xt_table_info *private = t->private;
1905 struct xt_table_info info;
1906 duprintf("t->private->number = %u\n",
1908 ret = compat_table_info(private, &info);
1909 if (!ret && get.size == info.size) {
1910 ret = compat_copy_entries_to_user(private->size,
1911 t, uptr->entrytable);
1913 duprintf("compat_get_entries: I've got %u not %u!\n",
1918 compat_flush_offsets();
1922 ret = t ? PTR_ERR(t) : -ENOENT;
1924 xt_compat_unlock(AF_INET);
1928 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1931 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1936 case IPT_SO_GET_INFO:
1937 ret = get_info(user, len, 1);
1939 case IPT_SO_GET_ENTRIES:
1940 ret = compat_get_entries(user, len);
1943 ret = do_ipt_get_ctl(sk, cmd, user, len);
1950 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1954 if (!capable(CAP_NET_ADMIN))
1958 case IPT_SO_SET_REPLACE:
1959 ret = do_replace(user, len);
1962 case IPT_SO_SET_ADD_COUNTERS:
1963 ret = do_add_counters(user, len, 0);
1967 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1975 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1979 if (!capable(CAP_NET_ADMIN))
1983 case IPT_SO_GET_INFO:
1984 ret = get_info(user, len, 0);
1987 case IPT_SO_GET_ENTRIES:
1988 ret = get_entries(user, len);
1991 case IPT_SO_GET_REVISION_MATCH:
1992 case IPT_SO_GET_REVISION_TARGET: {
1993 struct ipt_get_revision rev;
1996 if (*len != sizeof(rev)) {
2000 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2005 if (cmd == IPT_SO_GET_REVISION_TARGET)
2010 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2013 "ipt_%s", rev.name);
2018 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2025 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2028 struct xt_table_info *newinfo;
2029 static struct xt_table_info bootstrap
2030 = { 0, 0, 0, { 0 }, { 0 }, { } };
2031 void *loc_cpu_entry;
2033 newinfo = xt_alloc_table_info(repl->size);
2037 /* choose the copy on our node/cpu
2038 * but dont care of preemption
2040 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2041 memcpy(loc_cpu_entry, repl->entries, repl->size);
2043 ret = translate_table(table->name, table->valid_hooks,
2044 newinfo, loc_cpu_entry, repl->size,
2049 xt_free_table_info(newinfo);
2053 ret = xt_register_table(table, &bootstrap, newinfo);
2055 xt_free_table_info(newinfo);
2062 void ipt_unregister_table(struct ipt_table *table)
2064 struct xt_table_info *private;
2065 void *loc_cpu_entry;
2067 private = xt_unregister_table(table);
2069 /* Decrease module usage counts and free resources */
2070 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2071 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2072 xt_free_table_info(private);
2075 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2077 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2078 u_int8_t type, u_int8_t code,
2081 return ((test_type == 0xFF) ||
2082 (type == test_type && code >= min_code && code <= max_code))
2083 ^ invert;
2086 icmp_match(const struct sk_buff *skb,
2087 const struct net_device *in,
2088 const struct net_device *out,
2089 const struct xt_match *match,
2090 const void *matchinfo,
2092 unsigned int protoff,
2095 struct icmphdr _icmph, *ic;
2096 const struct ipt_icmp *icmpinfo = matchinfo;
2098 /* Must not be a fragment. */
2102 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2104 /* We've been asked to examine this packet, and we
2105 * can't. Hence, no choice but to drop.
2107 duprintf("Dropping evil ICMP tinygram.\n");
2112 return icmp_type_code_match(icmpinfo->type,
2116 !!(icmpinfo->invflags&IPT_ICMP_INV));
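/* Illustrative: test_type == 0xFF acts as a wildcard matching any
 * ICMP type; a rule such as "--icmp-type echo-request" typically
 * arrives here as type == 8 with a full 0..0xFF code range.
 */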
2119 /* Called when user tries to insert an entry of this type. */
2121 icmp_checkentry(const char *tablename,
2123 const struct xt_match *match,
2125 unsigned int hook_mask)
2127 const struct ipt_icmp *icmpinfo = matchinfo;
2129 /* Must specify no unknown invflags */
2130 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2133 /* The built-in targets: standard (NULL) and error. */
2134 static struct ipt_target ipt_standard_target = {
2135 .name = IPT_STANDARD_TARGET,
2136 .targetsize = sizeof(int),
2138 #ifdef CONFIG_COMPAT
2139 .compatsize = sizeof(compat_int_t),
2140 .compat_from_user = compat_standard_from_user,
2141 .compat_to_user = compat_standard_to_user,
2145 static struct ipt_target ipt_error_target = {
2146 .name = IPT_ERROR_TARGET,
2147 .target = ipt_error,
2148 .targetsize = IPT_FUNCTION_MAXNAMELEN,
2152 static struct nf_sockopt_ops ipt_sockopts = {
2154 .set_optmin = IPT_BASE_CTL,
2155 .set_optmax = IPT_SO_SET_MAX+1,
2156 .set = do_ipt_set_ctl,
2157 #ifdef CONFIG_COMPAT
2158 .compat_set = compat_do_ipt_set_ctl,
2160 .get_optmin = IPT_BASE_CTL,
2161 .get_optmax = IPT_SO_GET_MAX+1,
2162 .get = do_ipt_get_ctl,
2163 #ifdef CONFIG_COMPAT
2164 .compat_get = compat_do_ipt_get_ctl,
2168 static struct ipt_match icmp_matchstruct = {
2170 .match = icmp_match,
2171 .matchsize = sizeof(struct ipt_icmp),
2172 .proto = IPPROTO_ICMP,
2174 .checkentry = icmp_checkentry,
2177 static int __init ip_tables_init(void)
2181 ret = xt_proto_init(AF_INET);
2185 /* No one else will be downing the sem now, so we won't sleep */
2186 ret = xt_register_target(&ipt_standard_target);
2189 ret = xt_register_target(&ipt_error_target);
2192 ret = xt_register_match(&icmp_matchstruct);
2196 /* Register setsockopt */
2197 ret = nf_register_sockopt(&ipt_sockopts);
2201 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2205 xt_unregister_match(&icmp_matchstruct);
2207 xt_unregister_target(&ipt_error_target);
2209 xt_unregister_target(&ipt_standard_target);
2211 xt_proto_fini(AF_INET);
2216 static void __exit ip_tables_fini(void)
2218 nf_unregister_sockopt(&ipt_sockopts);
2220 xt_unregister_match(&icmp_matchstruct);
2221 xt_unregister_target(&ipt_error_target);
2222 xt_unregister_target(&ipt_standard_target);
2224 xt_proto_fini(AF_INET);
2227 EXPORT_SYMBOL(ipt_register_table);
2228 EXPORT_SYMBOL(ipt_unregister_table);
2229 EXPORT_SYMBOL(ipt_do_table);
2230 module_init(ip_tables_init);
2231 module_exit(ip_tables_fini);