/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
/* All the better to debug you with... */

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_entry() on the per-CPU
   copy below. */
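/*
 * Illustrative sketch of that pattern as it appears below (added for
 * clarity, not from the original file): the packet path in ip6t_do_table()
 * only read-locks,
 *
 *	read_lock_bh(&table->lock);
 *	... walk private->entries[smp_processor_id()] ...
 *	read_unlock_bh(&table->lock);
 *
 * while user context, e.g. alloc_counters() and do_add_counters(), takes
 * write_lock_bh(&table->lock) so it can read or update the per-CPU copies
 * without packets racing through.
 */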
/* Check for an extension */
int
ip6t_ext_hdr(u8 nexthdr)
{
	return ( (nexthdr == IPPROTO_HOPOPTS)   ||
		 (nexthdr == IPPROTO_ROUTING)   ||
		 (nexthdr == IPPROTO_FRAGMENT)  ||
		 (nexthdr == IPPROTO_ESP)       ||
		 (nexthdr == IPPROTO_AH)        ||
		 (nexthdr == IPPROTO_NONE)      ||
		 (nexthdr == IPPROTO_DSTOPTS) );
}
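/*
 * Typical (illustrative) use: ip6t_ext_hdr(ipv6_hdr(skb)->nexthdr) tells
 * whether the header chain of an IPv6 packet starts with an extension
 * header rather than an upper-layer protocol.
 */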
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
95 ip6_packet_match(const struct sk_buff *skb,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
104 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
106 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
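	/*
	 * FWINV(cond, flag) XORs a test result with the rule's inversion
	 * flag: FWINV(mismatch, IP6T_INV_SRCIP) is true when the source
	 * address test fails and '!' was not given, or succeeds and '!'
	 * was given, i.e. exactly when the rule should not match.
	 */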
108 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
109 &ip6info->src), IP6T_INV_SRCIP)
110 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
111 &ip6info->dst), IP6T_INV_DSTIP)) {
112 dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
123 /* Look for ifname matches; this should unroll nicely. */
124 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
125 ret |= (((const unsigned long *)indev)[i]
126 ^ ((const unsigned long *)ip6info->iniface)[i])
127 & ((const unsigned long *)ip6info->iniface_mask)[i];
130 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
131 dprintf("VIA in mismatch (%s vs %s).%s\n",
132 indev, ip6info->iniface,
133 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
137 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
138 ret |= (((const unsigned long *)outdev)[i]
139 ^ ((const unsigned long *)ip6info->outiface)[i])
140 & ((const unsigned long *)ip6info->outiface_mask)[i];
143 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
144 dprintf("VIA out mismatch (%s vs %s).%s\n",
145 outdev, ip6info->outiface,
146 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
150 /* ... might want to do something with class and flowlabel here ... */
152 /* look for the desired protocol header */
153 if((ip6info->flags & IP6T_F_PROTO)) {
155 unsigned short _frag_off;
157 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
163 *fragoff = _frag_off;
165 dprintf("Packet protocol %hi ?= %s%hi.\n",
167 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
170 if (ip6info->proto == protohdr) {
171 if(ip6info->invflags & IP6T_INV_PROTO) {
	/* We need a match for the '-p all' case, too! */
178 if ((ip6info->proto != 0) &&
179 !(ip6info->invflags & IP6T_INV_PROTO))
185 /* should be ip6 safe */
187 ip6_checkentry(const struct ip6t_ip6 *ipv6)
189 if (ipv6->flags & ~IP6T_F_MASK) {
190 duprintf("Unknown flag bits set: %08X\n",
191 ipv6->flags & ~IP6T_F_MASK);
194 if (ipv6->invflags & ~IP6T_INV_MASK) {
195 duprintf("Unknown invflag bits set: %08X\n",
196 ipv6->invflags & ~IP6T_INV_MASK);
203 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
206 printk("ip6_tables: error: `%s'\n",
207 (const char *)par->targinfo);
212 /* Performance critical - called for every packet */
214 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
215 struct xt_match_param *par)
217 par->match = m->u.kernel.match;
218 par->matchinfo = m->data;
220 /* Stop iteration if it doesn't match */
221 if (!m->u.kernel.match->match(skb, par))
227 static inline struct ip6t_entry *
228 get_entry(void *base, unsigned int offset)
230 return (struct ip6t_entry *)(base + offset);
233 /* All zeroes == unconditional rule. */
234 /* Mildly perf critical (only if packet tracing is on) */
236 unconditional(const struct ip6t_ip6 *ipv6)
240 for (i = 0; i < sizeof(*ipv6); i++)
241 if (((char *)ipv6)[i])
244 return (i == sizeof(*ipv6));
247 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
248 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
249 /* This cries for unification! */
250 static const char *const hooknames[] = {
251 [NF_INET_PRE_ROUTING] = "PREROUTING",
252 [NF_INET_LOCAL_IN] = "INPUT",
253 [NF_INET_FORWARD] = "FORWARD",
254 [NF_INET_LOCAL_OUT] = "OUTPUT",
255 [NF_INET_POST_ROUTING] = "POSTROUTING",
258 enum nf_ip_trace_comments {
259 NF_IP6_TRACE_COMMENT_RULE,
260 NF_IP6_TRACE_COMMENT_RETURN,
261 NF_IP6_TRACE_COMMENT_POLICY,
264 static const char *const comments[] = {
265 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
266 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
267 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
270 static struct nf_loginfo trace_loginfo = {
271 .type = NF_LOG_TYPE_LOG,
275 .logflags = NF_LOG_MASK,
280 /* Mildly perf critical (only if packet tracing is on) */
282 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
283 char *hookname, char **chainname,
284 char **comment, unsigned int *rulenum)
286 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
288 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
289 /* Head of user chain: ERROR target with chainname */
290 *chainname = t->target.data;
295 if (s->target_offset == sizeof(struct ip6t_entry)
296 && strcmp(t->target.u.kernel.target->name,
297 IP6T_STANDARD_TARGET) == 0
299 && unconditional(&s->ipv6)) {
300 /* Tail of chains: STANDARD target (return/policy) */
301 *comment = *chainname == hookname
302 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
303 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
312 static void trace_packet(struct sk_buff *skb,
314 const struct net_device *in,
315 const struct net_device *out,
316 const char *tablename,
317 struct xt_table_info *private,
318 struct ip6t_entry *e)
321 const struct ip6t_entry *root;
322 char *hookname, *chainname, *comment;
323 unsigned int rulenum = 0;
325 table_base = (void *)private->entries[smp_processor_id()];
326 root = get_entry(table_base, private->hook_entry[hook]);
328 hookname = chainname = (char *)hooknames[hook];
329 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
331 IP6T_ENTRY_ITERATE(root,
332 private->size - private->hook_entry[hook],
333 get_chainname_rulenum,
334 e, hookname, &chainname, &comment, &rulenum);
336 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
337 "TRACE: %s:%s:%s:%u ",
338 tablename, chainname, comment, rulenum);
342 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
344 ip6t_do_table(struct sk_buff *skb,
346 const struct net_device *in,
347 const struct net_device *out,
348 struct xt_table *table)
350 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
351 bool hotdrop = false;
352 /* Initializing verdict to NF_DROP keeps gcc happy. */
353 unsigned int verdict = NF_DROP;
354 const char *indev, *outdev;
356 struct ip6t_entry *e, *back;
357 struct xt_table_info *private;
358 struct xt_match_param mtpar;
359 struct xt_target_param tgpar;
362 indev = in ? in->name : nulldevname;
363 outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports.  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
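	/* For example, icmp6_match() below refuses to look at non-first
	 * fragments by checking par->fragoff != 0. */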
370 mtpar.hotdrop = &hotdrop;
371 mtpar.in = tgpar.in = in;
372 mtpar.out = tgpar.out = out;
373 mtpar.family = tgpar.family = NFPROTO_IPV6;
374 tgpar.hooknum = hook;
376 read_lock_bh(&table->lock);
377 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
378 private = table->private;
379 table_base = (void *)private->entries[smp_processor_id()];
380 e = get_entry(table_base, private->hook_entry[hook]);
382 /* For return from builtin chain */
383 back = get_entry(table_base, private->underflow[hook]);
388 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
389 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
390 struct ip6t_entry_target *t;
392 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
395 ADD_COUNTER(e->counters,
396 ntohs(ipv6_hdr(skb)->payload_len) +
397 sizeof(struct ipv6hdr), 1);
399 t = ip6t_get_target(e);
400 IP_NF_ASSERT(t->u.kernel.target);
402 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
403 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
404 /* The packet is traced: log it */
405 if (unlikely(skb->nf_trace))
406 trace_packet(skb, hook, in, out,
407 table->name, private, e);
409 /* Standard target? */
410 if (!t->u.kernel.target->target) {
413 v = ((struct ip6t_standard_target *)t)->verdict;
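			/*
			 * Standard-target verdict encoding: IP6T_RETURN pops
			 * the saved back pointer; other negative values are
			 * NF_* verdicts stored as -(NF_xxx) - 1 (hence the
			 * -v - 1 below); a non-negative value is the offset
			 * of the rule to jump to.
			 */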
415 /* Pop from stack? */
416 if (v != IP6T_RETURN) {
417 verdict = (unsigned)(-v) - 1;
421 back = get_entry(table_base,
425 if (table_base + v != (void *)e + e->next_offset
426 && !(e->ipv6.flags & IP6T_F_GOTO)) {
427 /* Save old back ptr in next entry */
428 struct ip6t_entry *next
429 = (void *)e + e->next_offset;
431 = (void *)back - table_base;
432 /* set back pointer to next entry */
436 e = get_entry(table_base, v);
438 /* Targets which reenter must return
440 tgpar.target = t->u.kernel.target;
441 tgpar.targinfo = t->data;
443 #ifdef CONFIG_NETFILTER_DEBUG
444 ((struct ip6t_entry *)table_base)->comefrom
447 verdict = t->u.kernel.target->target(skb,
450 #ifdef CONFIG_NETFILTER_DEBUG
451 if (((struct ip6t_entry *)table_base)->comefrom
453 && verdict == IP6T_CONTINUE) {
454 printk("Target %s reentered!\n",
455 t->u.kernel.target->name);
458 ((struct ip6t_entry *)table_base)->comefrom
461 if (verdict == IP6T_CONTINUE)
462 e = (void *)e + e->next_offset;
470 e = (void *)e + e->next_offset;
474 #ifdef CONFIG_NETFILTER_DEBUG
475 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
477 read_unlock_bh(&table->lock);
479 #ifdef DEBUG_ALLOW_ALL
488 /* Figures out from what hook each rule can be called: returns 0 if
489 there are loops. Puts hook bitmask in comefrom. */
491 mark_source_chains(struct xt_table_info *newinfo,
492 unsigned int valid_hooks, void *entry0)
496 /* No recursion; use packet counter to save back ptrs (reset
497 to 0 as we leave), and comefrom to save source hook bitmask */
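	/* While walking, e->counters.pcnt temporarily holds the position we
	 * came from (reset to 0 on the way back), and bit NF_INET_NUMHOOKS of
	 * e->comefrom marks entries on the current path so that revisiting
	 * one of them is reported as a loop. */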
498 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
499 unsigned int pos = newinfo->hook_entry[hook];
500 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
502 if (!(valid_hooks & (1 << hook)))
505 /* Set initial back pointer. */
506 e->counters.pcnt = pos;
509 struct ip6t_standard_target *t
510 = (void *)ip6t_get_target(e);
511 int visited = e->comefrom & (1 << hook);
513 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("ip6_tables: loop hook %u pos %u %08X.\n",
515 hook, pos, e->comefrom);
518 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
520 /* Unconditional return/END. */
521 if ((e->target_offset == sizeof(struct ip6t_entry)
522 && (strcmp(t->target.u.user.name,
523 IP6T_STANDARD_TARGET) == 0)
525 && unconditional(&e->ipv6)) || visited) {
526 unsigned int oldpos, size;
528 if (t->verdict < -NF_MAX_VERDICT - 1) {
529 duprintf("mark_source_chains: bad "
530 "negative verdict (%i)\n",
				/* Return: backtrack through the last big jump. */
538 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
539 #ifdef DEBUG_IP_FIREWALL_USER
541 & (1 << NF_INET_NUMHOOKS)) {
542 duprintf("Back unset "
549 pos = e->counters.pcnt;
550 e->counters.pcnt = 0;
552 /* We're at the start. */
556 e = (struct ip6t_entry *)
558 } while (oldpos == pos + e->next_offset);
561 size = e->next_offset;
562 e = (struct ip6t_entry *)
563 (entry0 + pos + size);
564 e->counters.pcnt = pos;
567 int newpos = t->verdict;
569 if (strcmp(t->target.u.user.name,
570 IP6T_STANDARD_TARGET) == 0
572 if (newpos > newinfo->size -
573 sizeof(struct ip6t_entry)) {
574 duprintf("mark_source_chains: "
575 "bad verdict (%i)\n",
579 /* This a jump; chase it. */
580 duprintf("Jump rule %u -> %u\n",
583 /* ... this is a fallthru */
584 newpos = pos + e->next_offset;
586 e = (struct ip6t_entry *)
588 e->counters.pcnt = pos;
593 duprintf("Finished chain %u\n", hook);
599 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
601 struct xt_mtdtor_param par;
603 if (i && (*i)-- == 0)
606 par.match = m->u.kernel.match;
607 par.matchinfo = m->data;
608 par.family = NFPROTO_IPV6;
609 if (par.match->destroy != NULL)
610 par.match->destroy(&par);
611 module_put(par.match->me);
616 check_entry(struct ip6t_entry *e, const char *name)
618 struct ip6t_entry_target *t;
620 if (!ip6_checkentry(&e->ipv6)) {
		duprintf("ip6_tables: ip6 check failed %p %s.\n", e, name);
625 if (e->target_offset + sizeof(struct ip6t_entry_target) >
629 t = ip6t_get_target(e);
630 if (e->target_offset + t->u.target_size > e->next_offset)
636 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
639 const struct ip6t_ip6 *ipv6 = par->entryinfo;
642 par->match = m->u.kernel.match;
643 par->matchinfo = m->data;
645 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
646 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
		duprintf("ip6_tables: check failed for `%s'.\n",
657 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
660 struct xt_match *match;
663 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
665 "ip6t_%s", m->u.user.name);
666 if (IS_ERR(match) || !match) {
667 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
668 return match ? PTR_ERR(match) : -ENOENT;
670 m->u.kernel.match = match;
672 ret = check_match(m, par, i);
678 module_put(m->u.kernel.match->me);
682 static int check_target(struct ip6t_entry *e, const char *name)
684 struct ip6t_entry_target *t = ip6t_get_target(e);
685 struct xt_tgchk_param par = {
688 .target = t->u.kernel.target,
690 .hook_mask = e->comefrom,
691 .family = NFPROTO_IPV6,
695 t = ip6t_get_target(e);
696 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
697 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
		duprintf("ip6_tables: check failed for `%s'.\n",
700 t->u.kernel.target->name);
707 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
710 struct ip6t_entry_target *t;
711 struct xt_target *target;
714 struct xt_mtchk_param mtpar;
716 ret = check_entry(e, name);
722 mtpar.entryinfo = &e->ipv6;
723 mtpar.hook_mask = e->comefrom;
724 mtpar.family = NFPROTO_IPV6;
725 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
727 goto cleanup_matches;
729 t = ip6t_get_target(e);
730 target = try_then_request_module(xt_find_target(AF_INET6,
733 "ip6t_%s", t->u.user.name);
734 if (IS_ERR(target) || !target) {
735 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
736 ret = target ? PTR_ERR(target) : -ENOENT;
737 goto cleanup_matches;
739 t->u.kernel.target = target;
741 ret = check_target(e, name);
748 module_put(t->u.kernel.target->me);
750 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
755 check_entry_size_and_hooks(struct ip6t_entry *e,
756 struct xt_table_info *newinfo,
758 unsigned char *limit,
759 const unsigned int *hook_entries,
760 const unsigned int *underflows,
765 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
766 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
767 duprintf("Bad offset %p\n", e);
772 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
773 duprintf("checking: element %p size %u\n",
778 /* Check hooks & underflows */
779 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
780 if ((unsigned char *)e - base == hook_entries[h])
781 newinfo->hook_entry[h] = hook_entries[h];
782 if ((unsigned char *)e - base == underflows[h])
783 newinfo->underflow[h] = underflows[h];
786 /* FIXME: underflows must be unconditional, standard verdicts
787 < 0 (not IP6T_RETURN). --RR */
789 /* Clear counters and comefrom */
790 e->counters = ((struct xt_counters) { 0, 0 });
798 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
800 struct xt_tgdtor_param par;
801 struct ip6t_entry_target *t;
803 if (i && (*i)-- == 0)
806 /* Cleanup all matches */
807 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
808 t = ip6t_get_target(e);
810 par.target = t->u.kernel.target;
811 par.targinfo = t->data;
812 par.family = NFPROTO_IPV6;
813 if (par.target->destroy != NULL)
814 par.target->destroy(&par);
815 module_put(par.target->me);
/* Checks and translates the user-supplied table segment (held in newinfo). */
822 translate_table(const char *name,
823 unsigned int valid_hooks,
824 struct xt_table_info *newinfo,
828 const unsigned int *hook_entries,
829 const unsigned int *underflows)
834 newinfo->size = size;
835 newinfo->number = number;
837 /* Init all hooks to impossible value. */
838 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
839 newinfo->hook_entry[i] = 0xFFFFFFFF;
840 newinfo->underflow[i] = 0xFFFFFFFF;
843 duprintf("translate_table: size %u\n", newinfo->size);
845 /* Walk through entries, checking offsets. */
846 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
847 check_entry_size_and_hooks,
851 hook_entries, underflows, &i);
856 duprintf("translate_table: %u not %u entries\n",
861 /* Check hooks all assigned */
862 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
863 /* Only hooks which are valid */
864 if (!(valid_hooks & (1 << i)))
866 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
867 duprintf("Invalid hook entry %u %u\n",
871 if (newinfo->underflow[i] == 0xFFFFFFFF) {
872 duprintf("Invalid underflow %u %u\n",
878 if (!mark_source_chains(newinfo, valid_hooks, entry0))
881 /* Finally, each sanity check must pass */
883 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
884 find_check_entry, name, size, &i);
887 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
892 /* And one copy for every other CPU */
893 for_each_possible_cpu(i) {
894 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
895 memcpy(newinfo->entries[i], entry0, newinfo->size);
903 add_entry_to_counter(const struct ip6t_entry *e,
904 struct xt_counters total[],
907 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
914 set_entry_to_counter(const struct ip6t_entry *e,
915 struct ip6t_counters total[],
918 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
925 get_counters(const struct xt_table_info *t,
926 struct xt_counters counters[])
	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by the 'current' CPU.
	 * We don't care about preemption here.
	 */
937 curcpu = raw_smp_processor_id();
940 IP6T_ENTRY_ITERATE(t->entries[curcpu],
942 set_entry_to_counter,
946 for_each_possible_cpu(cpu) {
950 IP6T_ENTRY_ITERATE(t->entries[cpu],
952 add_entry_to_counter,
958 static struct xt_counters *alloc_counters(struct xt_table *table)
960 unsigned int countersize;
961 struct xt_counters *counters;
962 const struct xt_table_info *private = table->private;
	/* We need an atomic snapshot of the counters: the rest doesn't change
	   (other than comefrom, which userspace doesn't care about). */
967 countersize = sizeof(struct xt_counters) * private->number;
968 counters = vmalloc_node(countersize, numa_node_id());
970 if (counters == NULL)
971 return ERR_PTR(-ENOMEM);
973 /* First, sum counters... */
974 write_lock_bh(&table->lock);
975 get_counters(private, counters);
976 write_unlock_bh(&table->lock);
982 copy_entries_to_user(unsigned int total_size,
983 struct xt_table *table,
984 void __user *userptr)
986 unsigned int off, num;
987 struct ip6t_entry *e;
988 struct xt_counters *counters;
989 const struct xt_table_info *private = table->private;
991 const void *loc_cpu_entry;
993 counters = alloc_counters(table);
994 if (IS_ERR(counters))
995 return PTR_ERR(counters);
	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because the current thread is
	 * allowed to migrate to another cpu)
	 */
1001 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1002 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1007 /* FIXME: use iterator macros --RR */
1008 /* ... then go back and fix counters and names */
1009 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1011 const struct ip6t_entry_match *m;
1012 const struct ip6t_entry_target *t;
1014 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1015 if (copy_to_user(userptr + off
1016 + offsetof(struct ip6t_entry, counters),
1018 sizeof(counters[num])) != 0) {
1023 for (i = sizeof(struct ip6t_entry);
1024 i < e->target_offset;
1025 i += m->u.match_size) {
1028 if (copy_to_user(userptr + off + i
1029 + offsetof(struct ip6t_entry_match,
1031 m->u.kernel.match->name,
1032 strlen(m->u.kernel.match->name)+1)
1039 t = ip6t_get_target(e);
1040 if (copy_to_user(userptr + off + e->target_offset
1041 + offsetof(struct ip6t_entry_target,
1043 t->u.kernel.target->name,
1044 strlen(t->u.kernel.target->name)+1) != 0) {
1055 #ifdef CONFIG_COMPAT
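/*
 * Sketch of the compat translation issue handled below (comment added for
 * clarity): 32-bit userspace lays rules out as struct compat_ip6t_entry,
 * which is smaller than struct ip6t_entry, so jump offsets carried in
 * standard-target verdicts shift during conversion.  xt_compat_calc_jump()
 * supplies the accumulated size delta for a given offset.
 */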
1056 static void compat_standard_from_user(void *dst, void *src)
1058 int v = *(compat_int_t *)src;
1061 v += xt_compat_calc_jump(AF_INET6, v);
1062 memcpy(dst, &v, sizeof(v));
1065 static int compat_standard_to_user(void __user *dst, void *src)
1067 compat_int_t cv = *(int *)src;
1070 cv -= xt_compat_calc_jump(AF_INET6, cv);
1071 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1075 compat_calc_match(struct ip6t_entry_match *m, int *size)
1077 *size += xt_compat_match_offset(m->u.kernel.match);
1081 static int compat_calc_entry(struct ip6t_entry *e,
1082 const struct xt_table_info *info,
1083 void *base, struct xt_table_info *newinfo)
1085 struct ip6t_entry_target *t;
1086 unsigned int entry_offset;
1089 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1090 entry_offset = (void *)e - base;
1091 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1092 t = ip6t_get_target(e);
1093 off += xt_compat_target_offset(t->u.kernel.target);
1094 newinfo->size -= off;
1095 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1099 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1100 if (info->hook_entry[i] &&
1101 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1102 newinfo->hook_entry[i] -= off;
1103 if (info->underflow[i] &&
1104 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1105 newinfo->underflow[i] -= off;
1110 static int compat_table_info(const struct xt_table_info *info,
1111 struct xt_table_info *newinfo)
1113 void *loc_cpu_entry;
1115 if (!newinfo || !info)
	/* we don't care about newinfo->entries[] */
1119 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1120 newinfo->initial_entries = 0;
1121 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1122 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1123 compat_calc_entry, info, loc_cpu_entry,
1128 static int get_info(struct net *net, void __user *user, int *len, int compat)
1130 char name[IP6T_TABLE_MAXNAMELEN];
1134 if (*len != sizeof(struct ip6t_getinfo)) {
1135 duprintf("length %u != %zu\n", *len,
1136 sizeof(struct ip6t_getinfo));
1140 if (copy_from_user(name, user, sizeof(name)) != 0)
1143 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1144 #ifdef CONFIG_COMPAT
1146 xt_compat_lock(AF_INET6);
1148 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1149 "ip6table_%s", name);
1150 if (t && !IS_ERR(t)) {
1151 struct ip6t_getinfo info;
1152 const struct xt_table_info *private = t->private;
1154 #ifdef CONFIG_COMPAT
1156 struct xt_table_info tmp;
1157 ret = compat_table_info(private, &tmp);
1158 xt_compat_flush_offsets(AF_INET6);
1162 info.valid_hooks = t->valid_hooks;
1163 memcpy(info.hook_entry, private->hook_entry,
1164 sizeof(info.hook_entry));
1165 memcpy(info.underflow, private->underflow,
1166 sizeof(info.underflow));
1167 info.num_entries = private->number;
1168 info.size = private->size;
1169 strcpy(info.name, name);
1171 if (copy_to_user(user, &info, *len) != 0)
1179 ret = t ? PTR_ERR(t) : -ENOENT;
1180 #ifdef CONFIG_COMPAT
1182 xt_compat_unlock(AF_INET6);
1188 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1191 struct ip6t_get_entries get;
1194 if (*len < sizeof(get)) {
1195 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1198 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1200 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1201 duprintf("get_entries: %u != %zu\n",
1202 *len, sizeof(get) + get.size);
1206 t = xt_find_table_lock(net, AF_INET6, get.name);
1207 if (t && !IS_ERR(t)) {
1208 struct xt_table_info *private = t->private;
1209 duprintf("t->private->number = %u\n", private->number);
1210 if (get.size == private->size)
1211 ret = copy_entries_to_user(private->size,
1212 t, uptr->entrytable);
1214 duprintf("get_entries: I've got %u not %u!\n",
1215 private->size, get.size);
1221 ret = t ? PTR_ERR(t) : -ENOENT;
1227 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1228 struct xt_table_info *newinfo, unsigned int num_counters,
1229 void __user *counters_ptr)
1233 struct xt_table_info *oldinfo;
1234 struct xt_counters *counters;
1235 const void *loc_cpu_old_entry;
1238 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1245 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1246 "ip6table_%s", name);
1247 if (!t || IS_ERR(t)) {
1248 ret = t ? PTR_ERR(t) : -ENOENT;
1249 goto free_newinfo_counters_untrans;
1253 if (valid_hooks != t->valid_hooks) {
1254 duprintf("Valid hook crap: %08X vs %08X\n",
1255 valid_hooks, t->valid_hooks);
1260 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1264 /* Update module usage count based on number of rules */
1265 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1266 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1267 if ((oldinfo->number > oldinfo->initial_entries) ||
1268 (newinfo->number <= oldinfo->initial_entries))
1270 if ((oldinfo->number > oldinfo->initial_entries) &&
1271 (newinfo->number <= oldinfo->initial_entries))
1274 /* Get the old counters. */
1275 get_counters(oldinfo, counters);
1276 /* Decrease module usage counts and free resource */
1277 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1278 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1280 xt_free_table_info(oldinfo);
1281 if (copy_to_user(counters_ptr, counters,
1282 sizeof(struct xt_counters) * num_counters) != 0)
1291 free_newinfo_counters_untrans:
1298 do_replace(struct net *net, void __user *user, unsigned int len)
1301 struct ip6t_replace tmp;
1302 struct xt_table_info *newinfo;
1303 void *loc_cpu_entry;
1305 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1308 /* overflow check */
1309 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1312 newinfo = xt_alloc_table_info(tmp.size);
1316 /* choose the copy that is on our node/cpu */
1317 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1318 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1324 ret = translate_table(tmp.name, tmp.valid_hooks,
1325 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1326 tmp.hook_entry, tmp.underflow);
	duprintf("ip6_tables: Translated table\n");
1332 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1333 tmp.num_counters, tmp.counters);
1335 goto free_newinfo_untrans;
1338 free_newinfo_untrans:
1339 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1341 xt_free_table_info(newinfo);
1345 /* We're lazy, and add to the first CPU; overflow works its fey magic
1346 * and everything is OK. */
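/*
 * In other words (clarifying note): the user-supplied deltas are added only
 * to the copy owned by the CPU running this code; get_counters() later sums
 * all per-CPU copies, so the totals still come out right.
 */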
1348 add_counter_to_entry(struct ip6t_entry *e,
1349 const struct xt_counters addme[],
1353 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1355 (long unsigned int)e->counters.pcnt,
1356 (long unsigned int)e->counters.bcnt,
1357 (long unsigned int)addme[*i].pcnt,
1358 (long unsigned int)addme[*i].bcnt);
1361 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1368 do_add_counters(struct net *net, void __user *user, unsigned int len,
1372 struct xt_counters_info tmp;
1373 struct xt_counters *paddc;
1374 unsigned int num_counters;
1379 const struct xt_table_info *private;
1381 const void *loc_cpu_entry;
1382 #ifdef CONFIG_COMPAT
1383 struct compat_xt_counters_info compat_tmp;
1387 size = sizeof(struct compat_xt_counters_info);
1392 size = sizeof(struct xt_counters_info);
1395 if (copy_from_user(ptmp, user, size) != 0)
1398 #ifdef CONFIG_COMPAT
1400 num_counters = compat_tmp.num_counters;
1401 name = compat_tmp.name;
1405 num_counters = tmp.num_counters;
1409 if (len != size + num_counters * sizeof(struct xt_counters))
1412 paddc = vmalloc_node(len - size, numa_node_id());
1416 if (copy_from_user(paddc, user + size, len - size) != 0) {
1421 t = xt_find_table_lock(net, AF_INET6, name);
1422 if (!t || IS_ERR(t)) {
1423 ret = t ? PTR_ERR(t) : -ENOENT;
1427 write_lock_bh(&t->lock);
1428 private = t->private;
1429 if (private->number != num_counters) {
1431 goto unlock_up_free;
1435 /* Choose the copy that is on our node */
1436 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1437 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1439 add_counter_to_entry,
1443 write_unlock_bh(&t->lock);
1452 #ifdef CONFIG_COMPAT
1453 struct compat_ip6t_replace {
1454 char name[IP6T_TABLE_MAXNAMELEN];
1458 u32 hook_entry[NF_INET_NUMHOOKS];
1459 u32 underflow[NF_INET_NUMHOOKS];
1461 compat_uptr_t counters; /* struct ip6t_counters * */
1462 struct compat_ip6t_entry entries[0];
1466 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1467 unsigned int *size, struct xt_counters *counters,
1470 struct ip6t_entry_target *t;
1471 struct compat_ip6t_entry __user *ce;
1472 u_int16_t target_offset, next_offset;
1473 compat_uint_t origsize;
1478 ce = (struct compat_ip6t_entry __user *)*dstptr;
1479 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1482 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1485 *dstptr += sizeof(struct compat_ip6t_entry);
1486 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1488 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1489 target_offset = e->target_offset - (origsize - *size);
1492 t = ip6t_get_target(e);
1493 ret = xt_compat_target_to_user(t, dstptr, size);
1497 next_offset = e->next_offset - (origsize - *size);
1498 if (put_user(target_offset, &ce->target_offset))
1500 if (put_user(next_offset, &ce->next_offset))
1510 compat_find_calc_match(struct ip6t_entry_match *m,
1512 const struct ip6t_ip6 *ipv6,
1513 unsigned int hookmask,
1514 int *size, unsigned int *i)
1516 struct xt_match *match;
1518 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1519 m->u.user.revision),
1520 "ip6t_%s", m->u.user.name);
1521 if (IS_ERR(match) || !match) {
		duprintf("compat_find_calc_match: `%s' not found\n",
1524 return match ? PTR_ERR(match) : -ENOENT;
1526 m->u.kernel.match = match;
1527 *size += xt_compat_match_offset(match);
1534 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1536 if (i && (*i)-- == 0)
1539 module_put(m->u.kernel.match->me);
1544 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1546 struct ip6t_entry_target *t;
1548 if (i && (*i)-- == 0)
1551 /* Cleanup all matches */
1552 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1553 t = compat_ip6t_get_target(e);
1554 module_put(t->u.kernel.target->me);
1559 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1560 struct xt_table_info *newinfo,
1562 unsigned char *base,
1563 unsigned char *limit,
1564 unsigned int *hook_entries,
1565 unsigned int *underflows,
1569 struct ip6t_entry_target *t;
1570 struct xt_target *target;
1571 unsigned int entry_offset;
1575 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1576 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1577 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1578 duprintf("Bad offset %p, limit = %p\n", e, limit);
1582 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1583 sizeof(struct compat_xt_entry_target)) {
1584 duprintf("checking: element %p size %u\n",
1589 /* For purposes of check_entry casting the compat entry is fine */
1590 ret = check_entry((struct ip6t_entry *)e, name);
1594 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1595 entry_offset = (void *)e - (void *)base;
1597 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1598 &e->ipv6, e->comefrom, &off, &j);
1600 goto release_matches;
1602 t = compat_ip6t_get_target(e);
1603 target = try_then_request_module(xt_find_target(AF_INET6,
1605 t->u.user.revision),
1606 "ip6t_%s", t->u.user.name);
1607 if (IS_ERR(target) || !target) {
1608 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1610 ret = target ? PTR_ERR(target) : -ENOENT;
1611 goto release_matches;
1613 t->u.kernel.target = target;
1615 off += xt_compat_target_offset(target);
1617 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1621 /* Check hooks & underflows */
1622 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1623 if ((unsigned char *)e - base == hook_entries[h])
1624 newinfo->hook_entry[h] = hook_entries[h];
1625 if ((unsigned char *)e - base == underflows[h])
1626 newinfo->underflow[h] = underflows[h];
1629 /* Clear counters and comefrom */
1630 memset(&e->counters, 0, sizeof(e->counters));
1637 module_put(t->u.kernel.target->me);
1639 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
1644 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1645 unsigned int *size, const char *name,
1646 struct xt_table_info *newinfo, unsigned char *base)
1648 struct ip6t_entry_target *t;
1649 struct xt_target *target;
1650 struct ip6t_entry *de;
1651 unsigned int origsize;
1656 de = (struct ip6t_entry *)*dstptr;
1657 memcpy(de, e, sizeof(struct ip6t_entry));
1658 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1660 *dstptr += sizeof(struct ip6t_entry);
1661 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1663 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1667 de->target_offset = e->target_offset - (origsize - *size);
1668 t = compat_ip6t_get_target(e);
1669 target = t->u.kernel.target;
1670 xt_compat_target_from_user(t, dstptr, size);
1672 de->next_offset = e->next_offset - (origsize - *size);
1673 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1674 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1675 newinfo->hook_entry[h] -= origsize - *size;
1676 if ((unsigned char *)de - base < newinfo->underflow[h])
1677 newinfo->underflow[h] -= origsize - *size;
1682 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1687 struct xt_mtchk_param mtpar;
1691 mtpar.entryinfo = &e->ipv6;
1692 mtpar.hook_mask = e->comefrom;
1693 mtpar.family = NFPROTO_IPV6;
1694 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1696 goto cleanup_matches;
1698 ret = check_target(e, name);
1700 goto cleanup_matches;
1706 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
1711 translate_compat_table(const char *name,
1712 unsigned int valid_hooks,
1713 struct xt_table_info **pinfo,
1715 unsigned int total_size,
1716 unsigned int number,
1717 unsigned int *hook_entries,
1718 unsigned int *underflows)
1721 struct xt_table_info *newinfo, *info;
1722 void *pos, *entry0, *entry1;
1729 info->number = number;
1731 /* Init all hooks to impossible value. */
1732 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1733 info->hook_entry[i] = 0xFFFFFFFF;
1734 info->underflow[i] = 0xFFFFFFFF;
1737 duprintf("translate_compat_table: size %u\n", info->size);
1739 xt_compat_lock(AF_INET6);
1740 /* Walk through entries, checking offsets. */
1741 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1742 check_compat_entry_size_and_hooks,
1743 info, &size, entry0,
1744 entry0 + total_size,
1745 hook_entries, underflows, &j, name);
1751 duprintf("translate_compat_table: %u not %u entries\n",
1756 /* Check hooks all assigned */
1757 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1758 /* Only hooks which are valid */
1759 if (!(valid_hooks & (1 << i)))
1761 if (info->hook_entry[i] == 0xFFFFFFFF) {
1762 duprintf("Invalid hook entry %u %u\n",
1763 i, hook_entries[i]);
1766 if (info->underflow[i] == 0xFFFFFFFF) {
1767 duprintf("Invalid underflow %u %u\n",
1774 newinfo = xt_alloc_table_info(size);
1778 newinfo->number = number;
1779 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1780 newinfo->hook_entry[i] = info->hook_entry[i];
1781 newinfo->underflow[i] = info->underflow[i];
1783 entry1 = newinfo->entries[raw_smp_processor_id()];
1786 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1787 compat_copy_entry_from_user,
1788 &pos, &size, name, newinfo, entry1);
1789 xt_compat_flush_offsets(AF_INET6);
1790 xt_compat_unlock(AF_INET6);
1795 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1799 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1803 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1804 compat_release_entry, &j);
1805 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1806 xt_free_table_info(newinfo);
1810 /* And one copy for every other CPU */
1811 for_each_possible_cpu(i)
1812 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1813 memcpy(newinfo->entries[i], entry1, newinfo->size);
1817 xt_free_table_info(info);
1821 xt_free_table_info(newinfo);
1823 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1826 xt_compat_flush_offsets(AF_INET6);
1827 xt_compat_unlock(AF_INET6);
1832 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1835 struct compat_ip6t_replace tmp;
1836 struct xt_table_info *newinfo;
1837 void *loc_cpu_entry;
1839 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1842 /* overflow check */
1843 if (tmp.size >= INT_MAX / num_possible_cpus())
1845 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1848 newinfo = xt_alloc_table_info(tmp.size);
1852 /* choose the copy that is on our node/cpu */
1853 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1854 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1860 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1861 &newinfo, &loc_cpu_entry, tmp.size,
1862 tmp.num_entries, tmp.hook_entry,
1867 duprintf("compat_do_replace: Translated table\n");
1869 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1870 tmp.num_counters, compat_ptr(tmp.counters));
1872 goto free_newinfo_untrans;
1875 free_newinfo_untrans:
1876 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1878 xt_free_table_info(newinfo);
1883 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1888 if (!capable(CAP_NET_ADMIN))
1892 case IP6T_SO_SET_REPLACE:
1893 ret = compat_do_replace(sock_net(sk), user, len);
1896 case IP6T_SO_SET_ADD_COUNTERS:
1897 ret = do_add_counters(sock_net(sk), user, len, 1);
1901 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1908 struct compat_ip6t_get_entries {
1909 char name[IP6T_TABLE_MAXNAMELEN];
1911 struct compat_ip6t_entry entrytable[0];
1915 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1916 void __user *userptr)
1918 struct xt_counters *counters;
1919 const struct xt_table_info *private = table->private;
1923 const void *loc_cpu_entry;
1926 counters = alloc_counters(table);
1927 if (IS_ERR(counters))
1928 return PTR_ERR(counters);
	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because the current thread is
	 * allowed to migrate to another cpu)
	 */
1934 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1937 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1938 compat_copy_entry_to_user,
1939 &pos, &size, counters, &i);
1946 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1950 struct compat_ip6t_get_entries get;
1953 if (*len < sizeof(get)) {
1954 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1958 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1961 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1962 duprintf("compat_get_entries: %u != %zu\n",
1963 *len, sizeof(get) + get.size);
1967 xt_compat_lock(AF_INET6);
1968 t = xt_find_table_lock(net, AF_INET6, get.name);
1969 if (t && !IS_ERR(t)) {
1970 const struct xt_table_info *private = t->private;
1971 struct xt_table_info info;
1972 duprintf("t->private->number = %u\n", private->number);
1973 ret = compat_table_info(private, &info);
1974 if (!ret && get.size == info.size) {
1975 ret = compat_copy_entries_to_user(private->size,
1976 t, uptr->entrytable);
1978 duprintf("compat_get_entries: I've got %u not %u!\n",
1979 private->size, get.size);
1982 xt_compat_flush_offsets(AF_INET6);
1986 ret = t ? PTR_ERR(t) : -ENOENT;
1988 xt_compat_unlock(AF_INET6);
1992 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1995 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1999 if (!capable(CAP_NET_ADMIN))
2003 case IP6T_SO_GET_INFO:
2004 ret = get_info(sock_net(sk), user, len, 1);
2006 case IP6T_SO_GET_ENTRIES:
2007 ret = compat_get_entries(sock_net(sk), user, len);
2010 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2017 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2021 if (!capable(CAP_NET_ADMIN))
2025 case IP6T_SO_SET_REPLACE:
2026 ret = do_replace(sock_net(sk), user, len);
2029 case IP6T_SO_SET_ADD_COUNTERS:
2030 ret = do_add_counters(sock_net(sk), user, len, 0);
2034 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2042 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2046 if (!capable(CAP_NET_ADMIN))
2050 case IP6T_SO_GET_INFO:
2051 ret = get_info(sock_net(sk), user, len, 0);
2054 case IP6T_SO_GET_ENTRIES:
2055 ret = get_entries(sock_net(sk), user, len);
2058 case IP6T_SO_GET_REVISION_MATCH:
2059 case IP6T_SO_GET_REVISION_TARGET: {
2060 struct ip6t_get_revision rev;
2063 if (*len != sizeof(rev)) {
2067 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2072 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2077 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2080 "ip6t_%s", rev.name);
2085 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2092 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2093 const struct ip6t_replace *repl)
2096 struct xt_table_info *newinfo;
2097 struct xt_table_info bootstrap
2098 = { 0, 0, 0, { 0 }, { 0 }, { } };
2099 void *loc_cpu_entry;
2100 struct xt_table *new_table;
2102 newinfo = xt_alloc_table_info(repl->size);
2108 /* choose the copy on our node/cpu, but dont care about preemption */
2109 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2110 memcpy(loc_cpu_entry, repl->entries, repl->size);
2112 ret = translate_table(table->name, table->valid_hooks,
2113 newinfo, loc_cpu_entry, repl->size,
2120 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2121 if (IS_ERR(new_table)) {
2122 ret = PTR_ERR(new_table);
2128 xt_free_table_info(newinfo);
2130 return ERR_PTR(ret);
2133 void ip6t_unregister_table(struct xt_table *table)
2135 struct xt_table_info *private;
2136 void *loc_cpu_entry;
2137 struct module *table_owner = table->me;
2139 private = xt_unregister_table(table);
2141 /* Decrease module usage counts and free resources */
2142 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2143 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2144 if (private->number > private->initial_entries)
2145 module_put(table_owner);
2146 xt_free_table_info(private);
/* Returns 1 if the type and code are matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code, bool invert)
{
	return (type == test_type && code >= min_code && code <= max_code)
		^ invert;
}
2160 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2162 const struct icmp6hdr *ic;
2163 struct icmp6hdr _icmph;
2164 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2166 /* Must not be a fragment. */
2167 if (par->fragoff != 0)
	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}
2180 return icmp6_type_code_match(icmpinfo->type,
2183 ic->icmp6_type, ic->icmp6_code,
2184 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2187 /* Called when user tries to insert an entry of this type. */
2188 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2190 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2192 /* Must specify no unknown invflags */
2193 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2196 /* The built-in targets: standard (NULL) and error. */
2197 static struct xt_target ip6t_standard_target __read_mostly = {
2198 .name = IP6T_STANDARD_TARGET,
2199 .targetsize = sizeof(int),
2201 #ifdef CONFIG_COMPAT
2202 .compatsize = sizeof(compat_int_t),
2203 .compat_from_user = compat_standard_from_user,
2204 .compat_to_user = compat_standard_to_user,
2208 static struct xt_target ip6t_error_target __read_mostly = {
2209 .name = IP6T_ERROR_TARGET,
2210 .target = ip6t_error,
2211 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2215 static struct nf_sockopt_ops ip6t_sockopts = {
2217 .set_optmin = IP6T_BASE_CTL,
2218 .set_optmax = IP6T_SO_SET_MAX+1,
2219 .set = do_ip6t_set_ctl,
2220 #ifdef CONFIG_COMPAT
2221 .compat_set = compat_do_ip6t_set_ctl,
2223 .get_optmin = IP6T_BASE_CTL,
2224 .get_optmax = IP6T_SO_GET_MAX+1,
2225 .get = do_ip6t_get_ctl,
2226 #ifdef CONFIG_COMPAT
2227 .compat_get = compat_do_ip6t_get_ctl,
2229 .owner = THIS_MODULE,
2232 static struct xt_match icmp6_matchstruct __read_mostly = {
2234 .match = icmp6_match,
2235 .matchsize = sizeof(struct ip6t_icmp),
2236 .checkentry = icmp6_checkentry,
2237 .proto = IPPROTO_ICMPV6,
2241 static int __net_init ip6_tables_net_init(struct net *net)
2243 return xt_proto_init(net, AF_INET6);
2246 static void __net_exit ip6_tables_net_exit(struct net *net)
2248 xt_proto_fini(net, AF_INET6);
2251 static struct pernet_operations ip6_tables_net_ops = {
2252 .init = ip6_tables_net_init,
2253 .exit = ip6_tables_net_exit,
2256 static int __init ip6_tables_init(void)
2260 ret = register_pernet_subsys(&ip6_tables_net_ops);
	/* No one else will be downing the sem now, so we won't sleep */
2265 ret = xt_register_target(&ip6t_standard_target);
2268 ret = xt_register_target(&ip6t_error_target);
2271 ret = xt_register_match(&icmp6_matchstruct);
2275 /* Register setsockopt */
2276 ret = nf_register_sockopt(&ip6t_sockopts);
2280 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2284 xt_unregister_match(&icmp6_matchstruct);
2286 xt_unregister_target(&ip6t_error_target);
2288 xt_unregister_target(&ip6t_standard_target);
2290 unregister_pernet_subsys(&ip6_tables_net_ops);
2295 static void __exit ip6_tables_fini(void)
2297 nf_unregister_sockopt(&ip6t_sockopts);
2299 xt_unregister_match(&icmp6_matchstruct);
2300 xt_unregister_target(&ip6t_error_target);
2301 xt_unregister_target(&ip6t_standard_target);
2303 unregister_pernet_subsys(&ip6_tables_net_ops);
/*
 * find the offset to specified header or the protocol number of last header
 * if target < 0. "last header" is transport protocol header, ESP, or
 * "No next header".
 *
 * If target header is found, its offset is set in *offset and return protocol
 * number. Otherwise, return -1.
 *
 * If the first fragment doesn't contain the final protocol header or
 * NEXTHDR_NONE it is considered invalid.
 *
 * Note that non-1st fragment is a special case in which "the protocol number
 * of last header" is the "next header" field in the Fragment header. In this
 * case, *offset is meaningless and the fragment offset is stored in *fragoff
 * if fragoff is not NULL.
 */
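/*
 * Illustrative usage (not part of the original file): locating the TCP
 * header of an IPv6 packet, assuming "skb" is a valid IPv6 sk_buff:
 *
 *	unsigned int thoff = 0;
 *	unsigned short frag_off = 0;
 *	int proto = ipv6_find_hdr(skb, &thoff, IPPROTO_TCP, &frag_off);
 *
 *	if (proto == IPPROTO_TCP) {
 *		struct tcphdr _tcph;
 *		const struct tcphdr *th;
 *
 *		th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *		if (th != NULL)
 *			... inspect th->source, th->dest ...
 *	}
 */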
2323 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2324 int target, unsigned short *fragoff)
2326 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2327 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2328 unsigned int len = skb->len - start;
2333 while (nexthdr != target) {
2334 struct ipv6_opt_hdr _hdr, *hp;
2335 unsigned int hdrlen;
2337 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2343 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2346 if (nexthdr == NEXTHDR_FRAGMENT) {
2347 unsigned short _frag_off;
2349 fp = skb_header_pointer(skb,
2350 start+offsetof(struct frag_hdr,
2357 _frag_off = ntohs(*fp) & ~0x7;
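			/*
			 * The low 3 bits of the fragment-offset field hold
			 * the reserved bits and the M (more fragments) flag;
			 * masking them off leaves the offset in units of
			 * 8 octets (still shifted left by 3), so _frag_off
			 * is zero only for the first fragment.
			 */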
2360 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2361 hp->nexthdr == NEXTHDR_NONE)) {
2363 *fragoff = _frag_off;
2369 } else if (nexthdr == NEXTHDR_AUTH)
2370 hdrlen = (hp->hdrlen + 2) << 2;
2372 hdrlen = ipv6_optlen(hp);
2374 nexthdr = hp->nexthdr;
2383 EXPORT_SYMBOL(ip6t_register_table);
2384 EXPORT_SYMBOL(ip6t_unregister_table);
2385 EXPORT_SYMBOL(ip6t_do_table);
2386 EXPORT_SYMBOL(ip6t_ext_hdr);
2387 EXPORT_SYMBOL(ipv6_find_hdr);
2389 module_init(ip6_tables_init);
2390 module_exit(ip6_tables_fini);