/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
 * 	- increase module usage count as soon as we have rules inside
 * 	  a table
 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
 * 	- Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
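
/*
 * Illustrative sketch of the locking discipline described above; the
 * helper names are hypothetical and the block is compiled out.
 */
#if 0
static unsigned int example_packet_path(struct xt_table *table)
{
	unsigned int verdict;

	read_lock_bh(&table->lock);	/* softirq: readers may run concurrently */
	verdict = NF_ACCEPT;		/* ... traverse this CPU's rule copy ... */
	read_unlock_bh(&table->lock);
	return verdict;
}

static void example_user_path(struct xt_table *table)
{
	write_lock_bh(&table->lock);	/* user context: packets are held off */
	/* ... read counters or swap in a new rule set ... */
	write_unlock_bh(&table->lock);
}
#endif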
/* Returns whether matches rule or not. */
static inline int
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
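
/*
 * FWINV worked example: with IPT_INV_SRCIP set in ipinfo->invflags,
 * FWINV(mismatch, IPT_INV_SRCIP) evaluates to mismatch ^ 1, so a rule
 * written as "! -s 10.0.0.0/8" matches exactly the packets that the
 * plain "-s 10.0.0.0/8" test would reject.
 */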
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return 0;
	}
	/* Look for ifname matches; this should unroll nicely. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}

	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}
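
	/* The two loops above compare names IFNAMSIZ bytes at a time in
	 * unsigned-long chunks: XOR exposes differing bytes and the
	 * per-byte interface mask selects which of them matter.  A
	 * wildcard such as "-i eth+" carries a mask covering only the
	 * "eth" prefix, so both eth0 and eth1 leave ret == 0. */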
	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return 0;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return 0;
	}

	return 1;
}
static inline int
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return 0;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return 0;
	}
	return 1;
}
static unsigned int
ipt_error(struct sk_buff **pskb,
	  const struct net_device *in,
	  const struct net_device *out,
	  unsigned int hooknum,
	  const struct xt_target *target,
	  const void *targinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}

static inline
int do_match(struct ipt_entry_match *m,
	     const struct sk_buff *skb,
	     const struct net_device *in,
	     const struct net_device *out,
	     int offset,
	     int *hotdrop)
{
	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
				      offset, skb->nh.iph->ihl*4, hotdrop))
		return 1;
	else
		return 0;
}
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
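
/*
 * Usage note: rule blobs are flat arrays addressed by byte offsets,
 * so get_entry(table_base, private->hook_entry[hook]) yields the
 * first rule of a hook's chain, as ipt_do_table() does below.
 */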
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	u_int16_t offset;
	struct iphdr *ip;
	u_int16_t datalen;
	int hotdrop = 0;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;

	/* Initialization */
	ip = (*pskb)->nh.iph;
	datalen = (*pskb)->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know (i.e. the TCP SYN flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;
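
	/* Worked example of the line above: for a datagram split at 1480
	 * bytes, the first fragment has frag_off 0x2000 (MF set, offset
	 * 0), so offset == 0 and it is matched like a normal packet; the
	 * second has frag_off 0x00B9 (185 * 8 = 1480 bytes), so offset is
	 * non-zero and port/flag matches are skipped. */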
	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match,
					      *pskb, in, out,
					      offset, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
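
					/* Encoding note: standard targets
					 * store netfilter verdicts as
					 * -verdict - 1, leaving values >= 0
					 * free to mean jump offsets; e.g.
					 * NF_ACCEPT (1) is stored as -2 and
					 * decoded above as (unsigned)2 - 1. */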
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(pskb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = (*pskb)->nh.iph;
				datalen = (*pskb)->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
}
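
/*
 * Example: the policy rule terminating a built-in chain has an all-zero
 * ipt_ip (no addresses, interfaces, protocol or flags), so it is
 * "unconditional"; mark_source_chains() below relies on this to
 * recognize chain ends.
 */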
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e
			= (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry)
			     && (strcmp(t->target.u.user.name,
					IPT_STANDARD_TARGET) == 0)
			     && t->verdict < 0
			     && unconditional(&e->ip)) || visited) {
				unsigned int oldpos, size;

				if (t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						 "negative verdict (%i)\n",
						 t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jumps. */
				do {
					e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_IP_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
							 newpos);
						return 0;
					}
					/* This is a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
	next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
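
/*
 * Loop-detection example: if user chain A jumps to chain B and B jumps
 * back to A, the walk from a hook sets the (1 << NF_IP_NUMHOOKS) bit in
 * each visited entry's comefrom; re-entering A while that bit is still
 * set triggers the "iptables: loop hook" message above and the ruleset
 * is rejected.
 */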
static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->u.kernel.match, m->data);
	module_put(m->u.kernel.match->me);
	return 0;
}

static inline int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
		return -EINVAL;

	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static inline int check_match(struct ipt_entry_match *m, const char *name,
			      const struct ipt_ip *ip, unsigned int hookmask)
{
	struct xt_match *match;
	int ret;

	match = m->u.kernel.match;
	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (!ret && m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
	}
	return ret;
}
static inline int
find_check_match(struct ipt_entry_match *m,
		 const char *name,
		 const struct ipt_ip *ip,
		 unsigned int hookmask,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, name, ip, hookmask);
	if (ret)
		goto err;

	(*i)++;
	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
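
/*
 * Autoloading note: try_then_request_module() evaluates the lookup and,
 * on failure, asks modprobe for "ipt_<name>" before retrying.  E.g. a
 * rule using "-m state" resolves via xt_find_match(AF_INET, "state", ...)
 * and, if the extension is absent, request_module("ipt_state") is issued.
 */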
static inline int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;

	t = ipt_get_target(e);
	target = t->u.kernel.target;
	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (!ret && t->u.kernel.target->checkentry
	    && !t->u.kernel.target->checkentry(name, e, target,
					       t->data, e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
	}
	return ret;
}
static inline int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
				e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data);
	module_put(t->u.kernel.target->me);
	return 0;
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;
	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;
	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 * We don't care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
	}
}
static inline struct xt_counters * alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_delta {
	struct compat_delta *next;
	unsigned int offset;
	short delta;
};

static struct compat_delta *compat_offsets = NULL;

static int compat_add_offset(unsigned int offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->offset = offset;
	tmp->delta = delta;
	if (compat_offsets) {
		tmp->next = compat_offsets->next;
		compat_offsets->next = tmp;
	} else {
		compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}

static void compat_flush_offsets(void)
{
	struct compat_delta *tmp, *next;

	if (compat_offsets) {
		for (tmp = compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		compat_offsets = NULL;
	}
}
static short compat_calc_jump(unsigned int offset)
{
	struct compat_delta *tmp;
	short delta;

	for (tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}

static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += compat_calc_jump(v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= compat_calc_jump(cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
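
/*
 * Worked example (illustrative numbers): 64-bit entries shrink in the
 * 32-bit compat layout, so jump offsets shrink too.  If the first two
 * entries each lose 8 bytes, a compat jump to byte offset 288
 * corresponds to 288 + 16 = 304 in the kernel layout;
 * compat_calc_jump() sums the recorded deltas of all entries starting
 * before the offset, and the two helpers above apply it in each
 * direction.
 */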
static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}

static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = 0;
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		if (info->hook_entry[i] && (e < (struct ipt_entry *)
				(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] && (e < (struct ipt_entry *)
				(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;
	int i;

	if (!newinfo || !info)
		return -EINVAL;

	memset(newinfo, 0, sizeof(struct xt_table_info));
	newinfo->size = info->size;
	newinfo->number = info->number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry,
				 newinfo);
}
#endif /* CONFIG_COMPAT */
static int get_info(void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %u\n", *len,
			 (unsigned int)sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			compat_flush_offsets();
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %d\n", *len,
			 (unsigned int)sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EINVAL;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
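
	/* Rationale for the bounds above (sketch): xt_alloc_table_info()
	 * allocates one cache-aligned copy of the rule blob per possible
	 * CPU, so the checks keep NR_CPUS * (size + padding) plus the
	 * xt_table_info header, and the counter allocation, below INT_MAX
	 * before any multiplication can overflow. */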
	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static inline int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
static int
do_add_counters(void __user *user, unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_IP_NUMHOOKS];
	u32			underflow[NF_IP_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};

static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
		void __user **dstptr, compat_uint_t *size)
{
	return xt_compat_match_to_user(m, dstptr, size);
}
static int compat_copy_entry_to_user(struct ipt_entry *e,
		void __user **dstptr, compat_uint_t *size)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;
	return 0;
out:
	return ret;
}
static inline int
compat_check_calc_match(struct ipt_entry_match *m,
		const char *name,
		const struct ipt_ip *ip,
		unsigned int hookmask,
		int *size, int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}
static inline int
check_compat_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned int *size,
			   unsigned char *base,
			   unsigned char *limit,
			   unsigned int *hook_entries,
			   unsigned int *underflows,
			   unsigned int *i,
			   const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h, j;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	ret = check_entry(e, name);
	if (ret)
		return ret;

	off = 0;
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
				e->comefrom, &off, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	e->counters = ((struct ipt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
	void **dstptr, compat_uint_t *size, const char *name,
	const struct ipt_ip *ip, unsigned int hookmask)
{
	xt_compat_match_from_user(m, dstptr, size);
	return 0;
}
static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
	unsigned int *size, const char *name,
	struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
				name, &de->ip, de->comefrom);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static inline int compat_check_entry(struct ipt_entry *e, const char *name)
{
	int ret;

	ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom);
	if (ret)
		return ret;

	return check_target(e, name);
}
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				check_compat_entry_size_and_hooks,
				info, &size, entry0,
				entry0 + total_size,
				hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;
	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				compat_copy_entry_from_user, &pos, &size,
				name, newinfo, entry1);
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name);
	if (ret)
		goto free_newinfo;

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	IPT_ENTRY_ITERATE(entry0, total_size, cleanup_entry, &j);
	return ret;
out_unlock:
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries
{
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int compat_copy_entries_to_user(unsigned int total_size,
		struct xt_table *table, void __user *userptr)
{
	unsigned int off, num;
	struct compat_ipt_entry e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user, &pos, &size);
	if (ret)
		goto free_counters;

	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
		unsigned int i;
		struct ipt_entry_match m;
		struct ipt_entry_target t;

		ret = -EFAULT;
		if (copy_from_user(&e, userptr + off,
				   sizeof(struct compat_ipt_entry)))
			goto free_counters;
		if (copy_to_user(userptr + off +
				 offsetof(struct compat_ipt_entry, counters),
				 &counters[num], sizeof(counters[num])))
			goto free_counters;

		for (i = sizeof(struct compat_ipt_entry);
		     i < e.target_offset; i += m.u.match_size) {
			if (copy_from_user(&m, userptr + off + i,
					   sizeof(struct ipt_entry_match)))
				goto free_counters;
			if (copy_to_user(userptr + off + i +
					 offsetof(struct ipt_entry_match, u.user.name),
					 m.u.kernel.match->name,
					 strlen(m.u.kernel.match->name) + 1))
				goto free_counters;
		}

		if (copy_from_user(&t, userptr + off + e.target_offset,
				   sizeof(struct ipt_entry_target)))
			goto free_counters;
		if (copy_to_user(userptr + off + e.target_offset +
				 offsetof(struct ipt_entry_target, u.user.name),
				 t.u.kernel.target->name,
				 strlen(t.u.kernel.target->name) + 1))
			goto free_counters;
	}
	ret = 0;
free_counters:
	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %u\n",
			 *len, (unsigned int)sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n",
			 private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EINVAL;
		}
		compat_flush_offsets();
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif /* CONFIG_COMPAT */

static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		ret = try_then_request_module(xt_find_revision(AF_INET,
							       rev.name,
							       rev.revision,
							       target, &ret),
					      "ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	static struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy on our node/cpu,
	 * but don't care about preemption
	 */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	return 0;
}
void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	xt_free_table_info(private);
}

/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
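
/*
 * Worked example: a rule for echo requests typically arrives as
 * test_type 8 with a code range of 0-255, so an ICMP header with
 * type 8, code 0 matches; test_type 0xFF acts as a wildcard for any
 * type/code.  The final XOR with 'invert' flips the result for
 * negated rules.
 */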
static int
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const struct xt_match *match,
	   const void *matchinfo,
	   int offset,
	   unsigned int protoff,
	   int *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return 0;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

/* Called when user tries to insert an entry of this type. */
static int
icmp_checkentry(const char *tablename,
		const void *info,
		const struct xt_match *match,
		void *matchinfo,
		unsigned int hook_mask)
{
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
};

static struct xt_match icmp_matchstruct = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
	.checkentry	= icmp_checkentry,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = xt_proto_init(AF_INET);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	xt_proto_fini(AF_INET);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	xt_proto_fini(AF_INET);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);