/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
/* Returns whether matches rule or not. */
static inline int
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return 0;
	}
	/* Look for ifname matches; this should unroll nicely. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}
	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return 0;
	}
	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return 0;
	}

	return 1;
}
static inline int
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return 0;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return 0;
	}
	return 1;
}
static unsigned int
ipt_error(struct sk_buff **pskb,
	  const struct net_device *in,
	  const struct net_device *out,
	  unsigned int hooknum,
	  const struct xt_target *target,
	  const void *targinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}
static inline
int do_match(struct ipt_entry_match *m,
	     const struct sk_buff *skb,
	     const struct net_device *in,
	     const struct net_device *out,
	     int offset,
	     int *hotdrop)
{
	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
				      offset, ip_hdrlen(skb), hotdrop))
		return 1;
	else
		return 0;
}
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	u_int16_t offset;
	struct iphdr *ip;
	u_int16_t datalen;
	int hotdrop = 0;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;

	/* Initialization */
	ip = ip_hdr(*pskb);
	datalen = (*pskb)->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;

	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match,
					      *pskb, in, out,
					      offset, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(pskb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = ip_hdr(*pskb);
				datalen = (*pskb)->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
}
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e
			= (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_IP_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) || visited) {
				unsigned int oldpos, size;

				if (t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_IP_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
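
/* Release a match bound by find_check_match(): run its destroy hook and
 * drop the module reference.  With a non-NULL counter this stops after
 * *i matches, so a partially set-up entry can be unwound. */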
static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->u.kernel.match, m->data);
	module_put(m->u.kernel.match->me);
	return 0;
}
static inline int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
		return -EINVAL;

	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static inline int check_match(struct ipt_entry_match *m, const char *name,
				const struct ipt_ip *ip, unsigned int hookmask)
{
	struct xt_match *match;
	int ret;

	match = m->u.kernel.match;
	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (!ret && m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
	}
	return ret;
}
static inline int
find_check_match(struct ipt_entry_match *m,
		 const char *name,
		 const struct ipt_ip *ip,
		 unsigned int hookmask,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, name, ip, hookmask);
	if (ret)
		goto err;

	(*i)++;
	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static inline int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;

	t = ipt_get_target(e);
	target = t->u.kernel.target;
	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (!ret && t->u.kernel.target->checkentry
	    && !t->u.kernel.target->checkentry(name, e, target,
					       t->data, e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
	}
	return ret;
}
static inline int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
				e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
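
/* Validate one entry's alignment, bounds and minimum size while walking
 * the blob, and record where the configured hook entry points and
 * underflows land. */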
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data);
	module_put(t->u.kernel.target->me);
	return 0;
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
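
/* Fold the per-CPU counter copies into a single snapshot for userspace:
 * the current CPU's values are written first, the others added on top. */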
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 * We don't care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
	}
}
static inline struct xt_counters * alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
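
/* 32-bit compat support: a rule's kernel layout is larger than its
 * compat layout, so jump offsets into the blob shift as entries are
 * converted.  The per-entry size deltas are kept in the compat_offsets
 * list below and summed by compat_calc_jump(). */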
#ifdef CONFIG_COMPAT
struct compat_delta {
	struct compat_delta *next;
	u_int16_t offset;
	short delta;
};

static struct compat_delta *compat_offsets = NULL;

static int compat_add_offset(unsigned int offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->offset = offset;
	tmp->delta = delta;
	if (compat_offsets) {
		tmp->next = compat_offsets->next;
		compat_offsets->next = tmp;
	} else {
		compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}
static void compat_flush_offsets(void)
{
	struct compat_delta *tmp, *next;

	if (compat_offsets) {
		for(tmp = compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		compat_offsets = NULL;
	}
}
static short compat_calc_jump(unsigned int offset)
{
	struct compat_delta *tmp;
	short delta;

	for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}
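
/* Standard-target verdicts that are positive are offsets into the blob,
 * so they must be shifted by the accumulated delta when crossing the
 * compat boundary; negative verdicts (NF_ACCEPT etc.) pass through. */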
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += compat_calc_jump(v);
	memcpy(dst, &v, sizeof(v));
}
static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= compat_calc_jump(cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
static inline int
compat_calc_match(struct ipt_entry_match *m, int * size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}
static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
		void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = 0;
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i< NF_IP_NUMHOOKS; i++) {
		if (info->hook_entry[i] && (e < (struct ipt_entry *)
				(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] && (e < (struct ipt_entry *)
				(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;
	int i;

	if (!newinfo || !info)
		return -EINVAL;

	memset(newinfo, 0, sizeof(struct xt_table_info));
	newinfo->size = info->size;
	newinfo->number = info->number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
			compat_calc_entry, info, loc_cpu_entry, newinfo);
}
#endif /* CONFIG_COMPAT */
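
/* IPT_SO_GET_INFO handler, shared by the native and compat paths; for
 * compat callers the table info is first recomputed in compat sizes. */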
static int get_info(void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %u\n", *len,
			(unsigned int)sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
			"iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			compat_flush_offsets();
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
				sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
				sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %d\n", *len,
			 (unsigned int)sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size,
				 get.size);
			ret = -EINVAL;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
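
/* Common tail of table replacement for the native and compat paths:
 * swap the new table in under the xt lock, pull the old counters out,
 * and free the old entries. */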
static int
__do_replace(const char *name, unsigned int valid_hooks,
		struct xt_table_info *newinfo, unsigned int num_counters,
		void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static inline int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
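
/* IPT_SO_SET_ADD_COUNTERS handler, shared by the native and compat
 * paths; only the size of the header struct differs between the two. */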
static int
do_add_counters(void __user *user, unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_IP_NUMHOOKS];
	u32			underflow[NF_IP_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};
static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
		void __user **dstptr, compat_uint_t *size)
{
	return xt_compat_match_to_user(m, dstptr, size);
}
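
/* Convert one kernel entry into its compat layout, shrinking the
 * target and next-entry offsets as the matches and target shrink. */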
static int compat_copy_entry_to_user(struct ipt_entry *e,
		void __user **dstptr, compat_uint_t *size)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;
	return 0;
out:
	return ret;
}
static inline int
compat_check_calc_match(struct ipt_entry_match *m,
	    const char *name,
	    const struct ipt_ip *ip,
	    unsigned int hookmask,
	    int *size, int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}
static inline int
check_compat_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned int *size,
			   unsigned char *base,
			   unsigned char *limit,
			   unsigned int *hook_entries,
			   unsigned int *underflows,
			   unsigned int *i,
			   const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h, j;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	ret = check_entry(e, name);
	if (ret)
		return ret;

	off = 0;
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
				e->comefrom, &off, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	e->counters = ((struct ipt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
	void **dstptr, compat_uint_t *size, const char *name,
	const struct ipt_ip *ip, unsigned int hookmask)
{
	xt_compat_match_from_user(m, dstptr, size);
	return 0;
}
static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
	unsigned int *size, const char *name,
	struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
			name, &de->ip, de->comefrom);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static inline int compat_check_entry(struct ipt_entry *e, const char *name)
{
	int ret;

	ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom);
	if (ret)
		return ret;

	return check_target(e, name);
}
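
/* The compat counterpart of translate_table(): size-check the compat
 * blob, expand it into a freshly allocated native-layout table, then
 * run the usual loop detection and per-entry checks on the result. */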
static int
translate_compat_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info **pinfo,
		void **pentry0,
		unsigned int total_size,
		unsigned int number,
		unsigned int *hook_entries,
		unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				check_compat_entry_size_and_hooks,
				info, &size, entry0,
				entry0 + total_size,
				hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				compat_copy_entry_from_user, &pos, &size,
				name, newinfo, entry1);
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name);
	if (ret)
		goto free_newinfo;

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	IPT_ENTRY_ITERATE(entry0, total_size, cleanup_entry, &j);
	return ret;
out_unlock:
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries
{
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int compat_copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table, void __user *userptr)
{
	unsigned int off, num;
	struct compat_ipt_entry e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user, &pos, &size);
	if (ret)
		goto free_counters;

	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
		unsigned int i;
		struct ipt_entry_match m;
		struct ipt_entry_target t;

		ret = -EFAULT;
		if (copy_from_user(&e, userptr + off,
				sizeof(struct compat_ipt_entry)))
			goto free_counters;
		if (copy_to_user(userptr + off +
			offsetof(struct compat_ipt_entry, counters),
			&counters[num], sizeof(counters[num])))
			goto free_counters;

		for (i = sizeof(struct compat_ipt_entry);
				i < e.target_offset; i += m.u.match_size) {
			if (copy_from_user(&m, userptr + off + i,
					sizeof(struct ipt_entry_match)))
				goto free_counters;
			if (copy_to_user(userptr + off + i +
				offsetof(struct ipt_entry_match, u.user.name),
				m.u.kernel.match->name,
				strlen(m.u.kernel.match->name) + 1))
				goto free_counters;
		}

		if (copy_from_user(&t, userptr + off + e.target_offset,
				sizeof(struct ipt_entry_target)))
			goto free_counters;
		if (copy_to_user(userptr + off + e.target_offset +
			offsetof(struct ipt_entry_target, u.user.name),
			t.u.kernel.target->name,
			strlen(t.u.kernel.target->name) + 1))
			goto free_counters;
	}
	ret = 0;
free_counters:
	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %u\n",
			 *len, (unsigned int)sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n",
			 private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size,
				 get.size);
			ret = -EINVAL;
		}
		compat_flush_offsets();
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
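
/* The compat get path handles only INFO and ENTRIES itself; revision
 * queries carry no pointers and are layout-compatible, so they fall
 * through to the native handler. */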
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	static struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy on our node/cpu
	 * but don't care about preemption
	 */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	return 0;
}
void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
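
/* The one built-in match: ICMP type/code.  Uses skb_header_pointer() so
 * a truncated ICMP header is detected and the packet hot-dropped. */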
static int
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const struct xt_match *match,
	   const void *matchinfo,
	   int offset,
	   unsigned int protoff,
	   int *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return 0;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
/* Called when user tries to insert an entry of this type. */
static int
icmp_checkentry(const char *tablename,
	   const void *info,
	   const struct xt_match *match,
	   void *matchinfo,
	   unsigned int hook_mask)
{
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};
static struct xt_target ipt_error_target = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
};
static struct xt_match icmp_matchstruct = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
	.checkentry	= icmp_checkentry,
};
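
/* Module init: register the two built-in targets, the ICMP match and
 * the sockopt interface with x_tables; unwind in reverse on failure. */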
static int __init ip_tables_init(void)
{
	int ret;

	ret = xt_proto_init(AF_INET);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	xt_proto_fini(AF_INET);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	xt_proto_fini(AF_INET);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);