2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
/*
 * Debug helpers: dprintf/duprintf expand to printk only when the matching
 * DEBUG_* macro is defined, otherwise to nothing; IP_NF_ASSERT reports the
 * failing location under CONFIG_NETFILTER_DEBUG.
 * NOTE(review): the #else/#endif lines are missing from this extract
 * (original numbering gaps); code left byte-identical.
 */
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
50 #define duprintf(format, args...)
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
61 #define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
77 Hence the start of any table is given by get_table() below. */
79 /* Check for an extension */
/*
 * ip6t_ext_hdr - true if @nexthdr is one of the IPv6 extension-header
 * protocol numbers (hop-by-hop, routing, fragment, ESP, AH, none, dstopts).
 * NOTE(review): return type, braces and some lines are missing from this
 * extract; code left byte-identical.
 */
81 ip6t_ext_hdr(u8 nexthdr)
83 	return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
/*
 * ip6_packet_match - test one packet against the address/interface/protocol
 * part of a rule (@ip6info).  FWINV applies the rule's inversion flags.
 * On success *protoff/*fragoff describe the located transport header.
 * NOTE(review): many lines (returns, closing braces, some arguments) are
 * missing from this extract; code left byte-identical, comments only.
 */
95 ip6_packet_match(const struct sk_buff *skb,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
103 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
105 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
/* Source/destination address match (masked compare, possibly inverted). */
107 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
108 &ip6info->src), IP6T_INV_SRCIP)
109 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
110 &ip6info->dst), IP6T_INV_DSTIP)) {
111 dprintf("Source or dest mismatch.\n");
/* The dprintf lines below appear to be inside a commented-out region in the
 * original (note the trailing "*" + "/" two lines down) — TODO confirm. */
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
114 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
115 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
116 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
117 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
118 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* Input interface name match. */
122 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
124 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
125 dprintf("VIA in mismatch (%s vs %s).%s\n",
126 indev, ip6info->iniface,
127 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
/* Output interface name match. */
131 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
133 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
134 dprintf("VIA out mismatch (%s vs %s).%s\n",
135 outdev, ip6info->outiface,
136 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
140 /* ... might want to do something with class and flowlabel here ... */
142 /* look for the desired protocol header */
143 if((ip6info->flags & IP6T_F_PROTO)) {
145 unsigned short _frag_off;
/* Walk the extension-header chain to find the upper-layer protocol. */
147 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
153 *fragoff = _frag_off;
155 dprintf("Packet protocol %hi ?= %s%hi.\n",
157 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
160 if (ip6info->proto == protohdr) {
161 if(ip6info->invflags & IP6T_INV_PROTO) {
167 /* We need match for the '-p all', too! */
168 if ((ip6info->proto != 0) &&
169 !(ip6info->invflags & IP6T_INV_PROTO))
175 /* should be ip6 safe */
/*
 * ip6_checkentry - sanity-check the flag/invflag bits of a rule's IPv6 part;
 * rejects (with a duprintf diagnostic) any bits outside the known masks.
 * NOTE(review): return statements/braces missing from this extract.
 */
177 ip6_checkentry(const struct ip6t_ip6 *ipv6)
179 if (ipv6->flags & ~IP6T_F_MASK) {
180 duprintf("Unknown flag bits set: %08X\n",
181 ipv6->flags & ~IP6T_F_MASK);
184 if (ipv6->invflags & ~IP6T_INV_MASK) {
185 duprintf("Unknown invflag bits set: %08X\n",
186 ipv6->invflags & ~IP6T_INV_MASK);
/*
 * ip6t_error - target handler for the built-in ERROR target: logs the
 * error-name string carried in targinfo.  The return value (presumably
 * NF_DROP — TODO confirm) is missing from this extract.
 */
193 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
196 printk("ip6_tables: error: `%s'\n",
197 (const char *)par->targinfo);
202 /* Performance critical - called for every packet */
/*
 * do_match - invoke one match extension for the packet; fills @par with the
 * match's kernel object and per-rule data before calling its ->match hook.
 * Non-zero return stops the IP6T_MATCH_ITERATE iteration.
 * NOTE(review): return lines/braces missing from this extract.
 */
204 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par)
207 par->match = m->u.kernel.match;
208 par->matchinfo = m->data;
210 /* Stop iteration if it doesn't match */
211 if (!m->u.kernel.match->match(skb, par))
/* get_entry - translate a byte offset within a table blob into an entry
 * pointer; offsets come from hook_entry[]/underflow[]/verdicts. */
217 static inline struct ip6t_entry *
218 get_entry(void *base, unsigned int offset)
220 return (struct ip6t_entry *)(base + offset);
223 /* All zeroes == unconditional rule. */
224 /* Mildly perf critical (only if packet tracing is on) */
/* unconditional - true iff every byte of the rule's ip6t_ip6 part is zero,
 * i.e. the rule matches any packet.  Return type/braces missing here. */
226 unconditional(const struct ip6t_ip6 *ipv6)
230 for (i = 0; i < sizeof(*ipv6); i++)
231 if (((char *)ipv6)[i])
234 return (i == sizeof(*ipv6));
/*
 * Packet-tracing support (TRACE target): hook-name and comment string
 * tables plus the nf_log parameters used by trace_packet() below.
 * NOTE(review): closing braces/#endif and some initializer lines are
 * missing from this extract; code left byte-identical.
 */
237 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
238 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
239 /* This cries for unification! */
240 static const char *const hooknames[] = {
241 [NF_INET_PRE_ROUTING] = "PREROUTING",
242 [NF_INET_LOCAL_IN] = "INPUT",
243 [NF_INET_FORWARD] = "FORWARD",
244 [NF_INET_LOCAL_OUT] = "OUTPUT",
245 [NF_INET_POST_ROUTING] = "POSTROUTING",
248 enum nf_ip_trace_comments {
249 NF_IP6_TRACE_COMMENT_RULE,
250 NF_IP6_TRACE_COMMENT_RETURN,
251 NF_IP6_TRACE_COMMENT_POLICY,
254 static const char *const comments[] = {
255 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
256 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
257 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
260 static struct nf_loginfo trace_loginfo = {
261 .type = NF_LOG_TYPE_LOG,
265 .logflags = NF_LOG_MASK,
270 /* Mildly perf critical (only if packet tracing is on) */
/*
 * get_chainname_rulenum - iterator callback that, walking entries from the
 * hook start toward the matched entry @e, tracks the current user-chain name
 * (ERROR target heads a chain), the rule number, and whether the matched
 * rule is a "rule", "return" or "policy" for trace logging.
 * NOTE(review): several lines (rulenum updates, returns, braces) are missing
 * from this extract; code left byte-identical.
 */
272 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
273 char *hookname, char **chainname,
274 char **comment, unsigned int *rulenum)
276 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
278 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
279 /* Head of user chain: ERROR target with chainname */
280 *chainname = t->target.data;
285 if (s->target_offset == sizeof(struct ip6t_entry)
286 && strcmp(t->target.u.kernel.target->name,
287 IP6T_STANDARD_TARGET) == 0
289 && unconditional(&s->ipv6)) {
290 /* Tail of chains: STANDARD target (return/policy) */
291 *comment = *chainname == hookname
292 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
293 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
/*
 * trace_packet - emit a "TRACE: table:chain:comment:rulenum" log line for a
 * packet that hit a rule while skb->nf_trace is set.  Walks the per-CPU
 * table copy from the hook's start entry to locate chain name and rule
 * number via get_chainname_rulenum().
 * NOTE(review): some declarations/braces missing from this extract.
 */
302 static void trace_packet(struct sk_buff *skb,
304 const struct net_device *in,
305 const struct net_device *out,
306 const char *tablename,
307 struct xt_table_info *private,
308 struct ip6t_entry *e)
311 const struct ip6t_entry *root;
312 char *hookname, *chainname, *comment;
313 unsigned int rulenum = 0;
/* Use this CPU's copy of the entries blob. */
315 table_base = (void *)private->entries[smp_processor_id()];
316 root = get_entry(table_base, private->hook_entry[hook]);
318 hookname = chainname = (char *)hooknames[hook];
319 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
321 IP6T_ENTRY_ITERATE(root,
322 private->size - private->hook_entry[hook],
323 get_chainname_rulenum,
324 e, hookname, &chainname, &comment, &rulenum);
326 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
327 "TRACE: %s:%s:%s:%u ",
328 tablename, chainname, comment, rulenum);
332 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * ip6t_do_table - main per-packet rule evaluation loop.  Walks this CPU's
 * copy of @table starting at the current hook's entry point; for each rule,
 * ip6_packet_match() plus the match extensions decide whether the target
 * fires.  Standard targets implement verdicts, RETURN (pop the back
 * pointer), and jumps/gotos; other targets are invoked via their ->target
 * hook.  Counters are bumped on every matched rule.
 * NOTE(review): a large number of lines (locals, lock calls, branch bodies,
 * returns, braces) are missing from this extract; code left byte-identical,
 * comments only.
 */
334 ip6t_do_table(struct sk_buff *skb,
336 const struct net_device *in,
337 const struct net_device *out,
338 struct xt_table *table)
340 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
341 bool hotdrop = false;
342 /* Initializing verdict to NF_DROP keeps gcc happy. */
343 unsigned int verdict = NF_DROP;
344 const char *indev, *outdev;
346 struct ip6t_entry *e, *back;
347 struct xt_table_info *private;
348 struct xt_match_param mtpar;
349 struct xt_target_param tgpar;
352 indev = in ? in->name : nulldevname;
353 outdev = out ? out->name : nulldevname;
354 /* We handle fragments by dealing with the first fragment as
355 * if it was a normal packet. All other fragments are treated
356 * normally, except that they will NEVER match rules that ask
357 * things we don't know, ie. tcp syn flag or ports). If the
358 * rule is also a fragment-specific rule, non-fragments won't
/* Shared parameter blocks passed to every match/target invocation. */
360 mtpar.hotdrop = &hotdrop;
361 mtpar.in = tgpar.in = in;
362 mtpar.out = tgpar.out = out;
363 mtpar.family = tgpar.family = NFPROTO_IPV6;
364 tgpar.hooknum = hook;
366 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
369 private = table->private;
370 table_base = private->entries[smp_processor_id()];
372 e = get_entry(table_base, private->hook_entry[hook]);
374 /* For return from builtin chain */
375 back = get_entry(table_base, private->underflow[hook]);
380 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
381 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
382 struct ip6t_entry_target *t;
/* All matches must succeed; a non-zero iterate result means "no match". */
384 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
387 ADD_COUNTER(e->counters,
388 ntohs(ipv6_hdr(skb)->payload_len) +
389 sizeof(struct ipv6hdr), 1);
391 t = ip6t_get_target(e);
392 IP_NF_ASSERT(t->u.kernel.target);
394 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
395 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
396 /* The packet is traced: log it */
397 if (unlikely(skb->nf_trace))
398 trace_packet(skb, hook, in, out,
399 table->name, private, e);
401 /* Standard target? */
402 if (!t->u.kernel.target->target) {
405 v = ((struct ip6t_standard_target *)t)->verdict;
407 /* Pop from stack? */
408 if (v != IP6T_RETURN) {
/* Negative verdicts encode NF_* values as -(verdict)-1. */
409 verdict = (unsigned)(-v) - 1;
413 back = get_entry(table_base,
/* Jump (not fall-through, not GOTO): push current back pointer. */
417 if (table_base + v != (void *)e + e->next_offset
418 && !(e->ipv6.flags & IP6T_F_GOTO)) {
419 /* Save old back ptr in next entry */
420 struct ip6t_entry *next
421 = (void *)e + e->next_offset;
423 = (void *)back - table_base;
424 /* set back pointer to next entry */
428 e = get_entry(table_base, v);
430 /* Targets which reenter must return
432 tgpar.target = t->u.kernel.target;
433 tgpar.targinfo = t->data;
435 #ifdef CONFIG_NETFILTER_DEBUG
/* comefrom poisoning detects targets that illegally re-enter the table. */
436 ((struct ip6t_entry *)table_base)->comefrom
439 verdict = t->u.kernel.target->target(skb,
442 #ifdef CONFIG_NETFILTER_DEBUG
443 if (((struct ip6t_entry *)table_base)->comefrom
445 && verdict == IP6T_CONTINUE) {
446 printk("Target %s reentered!\n",
447 t->u.kernel.target->name);
450 ((struct ip6t_entry *)table_base)->comefrom
453 if (verdict == IP6T_CONTINUE)
454 e = (void *)e + e->next_offset;
462 e = (void *)e + e->next_offset;
466 #ifdef CONFIG_NETFILTER_DEBUG
467 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
469 xt_info_rdunlock_bh();
471 #ifdef DEBUG_ALLOW_ALL
480 /* Figures out from what hook each rule can be called: returns 0 if
481 there are loops. Puts hook bitmask in comefrom. */
/*
 * mark_source_chains - depth-first walk over the ruleset from each valid
 * hook entry, recording in each entry's comefrom which hooks can reach it
 * and detecting chain loops.  Uses counters.pcnt as a temporary back-pointer
 * stack (restored to 0 on the way out) to avoid recursion.
 * NOTE(review): many lines (returns, #endif, loop headers, braces) are
 * missing from this extract; code left byte-identical.
 */
483 mark_source_chains(struct xt_table_info *newinfo,
484 unsigned int valid_hooks, void *entry0)
488 /* No recursion; use packet counter to save back ptrs (reset
489 to 0 as we leave), and comefrom to save source hook bitmask */
490 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
491 unsigned int pos = newinfo->hook_entry[hook];
492 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
494 if (!(valid_hooks & (1 << hook)))
497 /* Set initial back pointer. */
498 e->counters.pcnt = pos;
501 struct ip6t_standard_target *t
502 = (void *)ip6t_get_target(e);
503 int visited = e->comefrom & (1 << hook);
/* Bit NF_INET_NUMHOOKS marks "currently on the walk stack": seeing it
 * again means a loop. */
505 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
506 printk("iptables: loop hook %u pos %u %08X.\n",
507 hook, pos, e->comefrom);
510 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
512 /* Unconditional return/END. */
513 if ((e->target_offset == sizeof(struct ip6t_entry)
514 && (strcmp(t->target.u.user.name,
515 IP6T_STANDARD_TARGET) == 0)
517 && unconditional(&e->ipv6)) || visited) {
518 unsigned int oldpos, size;
520 if ((strcmp(t->target.u.user.name,
521 IP6T_STANDARD_TARGET) == 0) &&
522 t->verdict < -NF_MAX_VERDICT - 1) {
523 duprintf("mark_source_chains: bad "
524 "negative verdict (%i)\n",
529 /* Return: backtrack through the last
532 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
533 #ifdef DEBUG_IP_FIREWALL_USER
535 & (1 << NF_INET_NUMHOOKS)) {
536 duprintf("Back unset "
/* Pop the saved back-pointer and clear the temporary. */
543 pos = e->counters.pcnt;
544 e->counters.pcnt = 0;
546 /* We're at the start. */
550 e = (struct ip6t_entry *)
552 } while (oldpos == pos + e->next_offset);
555 size = e->next_offset;
556 e = (struct ip6t_entry *)
557 (entry0 + pos + size);
558 e->counters.pcnt = pos;
561 int newpos = t->verdict;
563 if (strcmp(t->target.u.user.name,
564 IP6T_STANDARD_TARGET) == 0
566 if (newpos > newinfo->size -
567 sizeof(struct ip6t_entry)) {
568 duprintf("mark_source_chains: "
569 "bad verdict (%i)\n",
573 /* This a jump; chase it. */
574 duprintf("Jump rule %u -> %u\n",
577 /* ... this is a fallthru */
578 newpos = pos + e->next_offset;
580 e = (struct ip6t_entry *)
582 e->counters.pcnt = pos;
587 duprintf("Finished chain %u\n", hook);
/*
 * cleanup_match - iterator callback that destroys one match extension:
 * calls its ->destroy hook (if any) and drops the module reference.
 * With @i non-NULL, stops after *i entries (used to unwind partial setup).
 * NOTE(review): return lines/braces missing from this extract.
 */
593 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
595 struct xt_mtdtor_param par;
597 if (i && (*i)-- == 0)
600 par.match = m->u.kernel.match;
601 par.matchinfo = m->data;
602 par.family = NFPROTO_IPV6;
603 if (par.match->destroy != NULL)
604 par.match->destroy(&par);
605 module_put(par.match->me);
/*
 * check_entry - basic structural validation of one rule: the ip6t_ip6 part
 * must pass ip6_checkentry() and the target must fit between target_offset
 * and next_offset.  NOTE(review): error returns/braces missing here.
 */
610 check_entry(struct ip6t_entry *e, const char *name)
612 struct ip6t_entry_target *t;
614 if (!ip6_checkentry(&e->ipv6)) {
615 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
619 if (e->target_offset + sizeof(struct ip6t_entry_target) >
623 t = ip6t_get_target(e);
624 if (e->target_offset + t->u.target_size > e->next_offset)
/*
 * check_match - run xt_check_match() on one match with the rule's protocol
 * context; logs and (presumably) propagates a failure — the return path is
 * missing from this extract.
 */
630 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
633 const struct ip6t_ip6 *ipv6 = par->entryinfo;
636 par->match = m->u.kernel.match;
637 par->matchinfo = m->data;
639 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
640 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
642 duprintf("ip_tables: check failed for `%s'.\n",
/*
 * find_check_match - look up (auto-loading "ip6t_<name>" if needed) the
 * match extension named by the rule, take a module reference, then validate
 * it via check_match(); the module_put() on the error path undoes the ref.
 * NOTE(review): labels/returns missing from this extract.
 */
651 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
654 struct xt_match *match;
657 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
659 "ip6t_%s", m->u.user.name);
660 if (IS_ERR(match) || !match) {
661 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
662 return match ? PTR_ERR(match) : -ENOENT;
664 m->u.kernel.match = match;
666 ret = check_match(m, par, i);
672 module_put(m->u.kernel.match->me);
/*
 * check_target - validate the rule's target via xt_check_target() with the
 * rule's hook mask and protocol context.
 * NOTE(review): some initializer lines and the return are missing here.
 */
676 static int check_target(struct ip6t_entry *e, const char *name)
678 struct ip6t_entry_target *t = ip6t_get_target(e);
679 struct xt_tgchk_param par = {
682 .target = t->u.kernel.target,
684 .hook_mask = e->comefrom,
685 .family = NFPROTO_IPV6,
689 t = ip6t_get_target(e);
690 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
691 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
693 duprintf("ip_tables: check failed for `%s'.\n",
694 t->u.kernel.target->name);
/*
 * find_check_entry - fully validate one rule: structural check, then look
 * up and check every match, then look up and check the target.  On any
 * failure, already-initialized matches are unwound via cleanup_match with
 * counter @j.  NOTE(review): several lines (j init, returns, labels) are
 * missing from this extract.
 */
701 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
704 struct ip6t_entry_target *t;
705 struct xt_target *target;
708 struct xt_mtchk_param mtpar;
710 ret = check_entry(e, name);
716 mtpar.entryinfo = &e->ipv6;
717 mtpar.hook_mask = e->comefrom;
718 mtpar.family = NFPROTO_IPV6;
719 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
721 goto cleanup_matches;
723 t = ip6t_get_target(e);
724 target = try_then_request_module(xt_find_target(AF_INET6,
727 "ip6t_%s", t->u.user.name);
728 if (IS_ERR(target) || !target) {
729 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
730 ret = target ? PTR_ERR(target) : -ENOENT;
731 goto cleanup_matches;
733 t->u.kernel.target = target;
735 ret = check_target(e, name);
742 module_put(t->u.kernel.target->me);
744 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * check_entry_size_and_hooks - offset/alignment validation pass for one
 * entry during table translation; records which entries sit exactly at the
 * userland-supplied hook_entries[]/underflows[] offsets.
 * NOTE(review): error returns and some lines missing from this extract.
 */
749 check_entry_size_and_hooks(struct ip6t_entry *e,
750 struct xt_table_info *newinfo,
752 unsigned char *limit,
753 const unsigned int *hook_entries,
754 const unsigned int *underflows,
759 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
760 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
761 duprintf("Bad offset %p\n", e);
766 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
767 duprintf("checking: element %p size %u\n",
772 /* Check hooks & underflows */
773 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
774 if ((unsigned char *)e - base == hook_entries[h])
775 newinfo->hook_entry[h] = hook_entries[h];
776 if ((unsigned char *)e - base == underflows[h])
777 newinfo->underflow[h] = underflows[h];
780 /* FIXME: underflows must be unconditional, standard verdicts
781 < 0 (not IP6T_RETURN). --RR */
783 /* Clear counters and comefrom */
784 e->counters = ((struct xt_counters) { 0, 0 });
/*
 * cleanup_entry - tear down one rule: destroy all its matches, then call
 * the target's ->destroy hook and drop the target module reference.
 * With @i non-NULL, stops after *i entries (partial unwind).
 * NOTE(review): return lines/braces missing from this extract.
 */
792 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
794 struct xt_tgdtor_param par;
795 struct ip6t_entry_target *t;
797 if (i && (*i)-- == 0)
800 /* Cleanup all matches */
801 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
802 t = ip6t_get_target(e);
804 par.target = t->u.kernel.target;
805 par.targinfo = t->data;
806 par.family = NFPROTO_IPV6;
807 if (par.target->destroy != NULL)
808 par.target->destroy(&par);
809 module_put(par.target->me);
813 /* Checks and translates the user-supplied table segment (held in
/*
 * translate_table - full validation pipeline for a userspace-supplied
 * ruleset blob: offset checks, hook/underflow assignment, loop detection
 * (mark_source_chains), per-entry match/target checks, and finally
 * replication of the validated blob to every other CPU's copy.
 * NOTE(review): many lines (entry counting, error paths, returns) are
 * missing from this extract; code left byte-identical.
 */
816 translate_table(const char *name,
817 unsigned int valid_hooks,
818 struct xt_table_info *newinfo,
822 const unsigned int *hook_entries,
823 const unsigned int *underflows)
828 newinfo->size = size;
829 newinfo->number = number;
831 /* Init all hooks to impossible value. */
832 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
833 newinfo->hook_entry[i] = 0xFFFFFFFF;
834 newinfo->underflow[i] = 0xFFFFFFFF;
837 duprintf("translate_table: size %u\n", newinfo->size);
839 /* Walk through entries, checking offsets. */
840 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
841 check_entry_size_and_hooks,
845 hook_entries, underflows, &i);
850 duprintf("translate_table: %u not %u entries\n",
855 /* Check hooks all assigned */
856 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
857 /* Only hooks which are valid */
858 if (!(valid_hooks & (1 << i)))
860 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
861 duprintf("Invalid hook entry %u %u\n",
865 if (newinfo->underflow[i] == 0xFFFFFFFF) {
866 duprintf("Invalid underflow %u %u\n",
872 if (!mark_source_chains(newinfo, valid_hooks, entry0))
875 /* Finally, each sanity check must pass */
877 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
878 find_check_entry, name, size, &i);
/* On failure, unwind the i entries already checked. */
881 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
886 /* And one copy for every other CPU */
887 for_each_possible_cpu(i) {
888 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
889 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* Iterator callbacks used by get_counters(): add (accumulate) or set
 * (initialize) one entry's byte/packet counters into total[*i].
 * NOTE(review): the (*i)++ / return lines are missing from this extract. */
897 add_entry_to_counter(const struct ip6t_entry *e,
898 struct xt_counters total[],
901 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
908 set_entry_to_counter(const struct ip6t_entry *e,
909 struct ip6t_counters total[],
912 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/*
 * get_counters - snapshot per-entry counters across all CPUs: SET from the
 * current CPU's copy, then ADD every other CPU's copy under its write lock.
 * NOTE(review): local declarations, lock/unlock pairs and braces are
 * missing from this extract; code left byte-identical.
 */
919 get_counters(const struct xt_table_info *t,
920 struct xt_counters counters[])
926 /* Instead of clearing (by a previous call to memset())
927 * the counters and using adds, we set the counters
928 * with data used by 'current' CPU
930 * Bottom half has to be disabled to prevent deadlock
931 * if new softirq were to run and call ipt_do_table
934 curcpu = smp_processor_id();
937 IP6T_ENTRY_ITERATE(t->entries[curcpu],
939 set_entry_to_counter,
943 for_each_possible_cpu(cpu) {
948 IP6T_ENTRY_ITERATE(t->entries[cpu],
950 add_entry_to_counter,
953 xt_info_wrunlock(cpu);
/*
 * alloc_counters - vmalloc a counters array sized for the table and fill it
 * via get_counters(); returns ERR_PTR(-ENOMEM) on allocation failure.
 * NOTE(review): the final return of @counters is missing from this extract.
 */
958 static struct xt_counters *alloc_counters(struct xt_table *table)
960 unsigned int countersize;
961 struct xt_counters *counters;
962 struct xt_table_info *private = table->private;
964 /* We need atomic snapshot of counters: rest doesn't change
965 (other than comefrom, which userspace doesn't care
967 countersize = sizeof(struct xt_counters) * private->number;
968 counters = vmalloc_node(countersize, numa_node_id());
970 if (counters == NULL)
971 return ERR_PTR(-ENOMEM);
973 get_counters(private, counters);
/*
 * copy_entries_to_user - export the ruleset to userspace: bulk-copy this
 * CPU's entries blob, then patch in the snapshotted counters and rewrite
 * the kernel-internal match/target pointers as user-visible names.
 * NOTE(review): error paths, loop-internal assignments and the final
 * return/free are missing from this extract; code left byte-identical.
 */
979 copy_entries_to_user(unsigned int total_size,
980 struct xt_table *table,
981 void __user *userptr)
983 unsigned int off, num;
984 struct ip6t_entry *e;
985 struct xt_counters *counters;
986 const struct xt_table_info *private = table->private;
988 const void *loc_cpu_entry;
990 counters = alloc_counters(table);
991 if (IS_ERR(counters))
992 return PTR_ERR(counters);
994 /* choose the copy that is on our node/cpu, ...
995 * This choice is lazy (because current thread is
996 * allowed to migrate to another cpu)
998 loc_cpu_entry = private->entries[raw_smp_processor_id()];
999 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1004 /* FIXME: use iterator macros --RR */
1005 /* ... then go back and fix counters and names */
1006 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1008 const struct ip6t_entry_match *m;
1009 const struct ip6t_entry_target *t;
1011 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1012 if (copy_to_user(userptr + off
1013 + offsetof(struct ip6t_entry, counters),
1015 sizeof(counters[num])) != 0) {
1020 for (i = sizeof(struct ip6t_entry);
1021 i < e->target_offset;
1022 i += m->u.match_size) {
1025 if (copy_to_user(userptr + off + i
1026 + offsetof(struct ip6t_entry_match,
1028 m->u.kernel.match->name,
1029 strlen(m->u.kernel.match->name)+1)
1036 t = ip6t_get_target(e);
1037 if (copy_to_user(userptr + off + e->target_offset
1038 + offsetof(struct ip6t_entry_target,
1040 t->u.kernel.target->name,
1041 strlen(t->u.kernel.target->name)+1) != 0) {
1052 #ifdef CONFIG_COMPAT
/* Convert standard-target verdicts between native and compat layouts:
 * positive verdicts are jump offsets and must be adjusted by the compat
 * offset delta computed during translation. */
1053 static void compat_standard_from_user(void *dst, void *src)
1055 int v = *(compat_int_t *)src;
/* Positive v is a jump offset — translate compat offset to native.
 * (The "if (v > 0)" guard line appears to be missing from this extract.) */
1058 v += xt_compat_calc_jump(AF_INET6, v);
1059 memcpy(dst, &v, sizeof(v));
1062 static int compat_standard_to_user(void __user *dst, void *src)
1064 compat_int_t cv = *(int *)src;
1067 cv -= xt_compat_calc_jump(AF_INET6, cv);
1068 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* compat_calc_match - accumulate the native-vs-compat size delta of one
 * match into *size.  NOTE(review): return line missing from this extract. */
1072 compat_calc_match(struct ip6t_entry_match *m, int *size)
1074 *size += xt_compat_match_offset(m->u.kernel.match);
/*
 * compat_calc_entry - compute one entry's total compat size delta (entry
 * struct + matches + target), register it with the xt compat offset table,
 * and shrink newinfo's size/hook/underflow offsets accordingly.
 * NOTE(review): error return and final return missing from this extract.
 */
1078 static int compat_calc_entry(struct ip6t_entry *e,
1079 const struct xt_table_info *info,
1080 void *base, struct xt_table_info *newinfo)
1082 struct ip6t_entry_target *t;
1083 unsigned int entry_offset;
1086 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1087 entry_offset = (void *)e - base;
1088 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1089 t = ip6t_get_target(e);
1090 off += xt_compat_target_offset(t->u.kernel.target);
1091 newinfo->size -= off;
1092 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
/* Shrink hook/underflow offsets of hooks that start after this entry. */
1096 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1097 if (info->hook_entry[i] &&
1098 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1099 newinfo->hook_entry[i] -= off;
1100 if (info->underflow[i] &&
1101 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1102 newinfo->underflow[i] -= off;
/*
 * compat_table_info - build a compat-layout xt_table_info (sizes/offsets
 * only; the entries[] pointers are not copied) by running compat_calc_entry
 * over this CPU's copy of the ruleset.
 */
1107 static int compat_table_info(const struct xt_table_info *info,
1108 struct xt_table_info *newinfo)
1110 void *loc_cpu_entry;
1112 if (!newinfo || !info)
1115 /* we dont care about newinfo->entries[] */
1116 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1117 newinfo->initial_entries = 0;
1118 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1119 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1120 compat_calc_entry, info, loc_cpu_entry,
/*
 * get_info - IP6T_SO_GET_INFO handler: look up the named table (auto-loading
 * "ip6table_<name>" if needed) and copy its hook offsets, entry count and
 * size to userspace; in @compat mode, sizes are first recomputed via
 * compat_table_info().  NOTE(review): several lines (length checks, error
 * assignments, unlock calls, returns) are missing from this extract.
 */
1125 static int get_info(struct net *net, void __user *user, int *len, int compat)
1127 char name[IP6T_TABLE_MAXNAMELEN];
1131 if (*len != sizeof(struct ip6t_getinfo)) {
1132 duprintf("length %u != %zu\n", *len,
1133 sizeof(struct ip6t_getinfo));
1137 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Ensure NUL-termination of the userspace-supplied table name. */
1140 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1141 #ifdef CONFIG_COMPAT
1143 xt_compat_lock(AF_INET6);
1145 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1146 "ip6table_%s", name);
1147 if (t && !IS_ERR(t)) {
1148 struct ip6t_getinfo info;
1149 const struct xt_table_info *private = t->private;
1151 #ifdef CONFIG_COMPAT
1153 struct xt_table_info tmp;
1154 ret = compat_table_info(private, &tmp);
1155 xt_compat_flush_offsets(AF_INET6);
1159 info.valid_hooks = t->valid_hooks;
1160 memcpy(info.hook_entry, private->hook_entry,
1161 sizeof(info.hook_entry));
1162 memcpy(info.underflow, private->underflow,
1163 sizeof(info.underflow));
1164 info.num_entries = private->number;
1165 info.size = private->size;
1166 strcpy(info.name, name);
1168 if (copy_to_user(user, &info, *len) != 0)
1176 ret = t ? PTR_ERR(t) : -ENOENT;
1177 #ifdef CONFIG_COMPAT
1179 xt_compat_unlock(AF_INET6);
/*
 * get_entries - IP6T_SO_GET_ENTRIES handler: validate the requested size
 * against the live table, then dump the ruleset via copy_entries_to_user().
 * NOTE(review): error assignments, table unlock and returns are missing
 * from this extract; code left byte-identical.
 */
1185 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1188 struct ip6t_get_entries get;
1191 if (*len < sizeof(get)) {
1192 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1195 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1197 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1198 duprintf("get_entries: %u != %zu\n",
1199 *len, sizeof(get) + get.size);
1203 t = xt_find_table_lock(net, AF_INET6, get.name);
1204 if (t && !IS_ERR(t)) {
1205 struct xt_table_info *private = t->private;
1206 duprintf("t->private->number = %u\n", private->number);
1207 if (get.size == private->size)
1208 ret = copy_entries_to_user(private->size,
1209 t, uptr->entrytable);
1211 duprintf("get_entries: I've got %u not %u!\n",
1212 private->size, get.size);
1218 ret = t ? PTR_ERR(t) : -ENOENT;
/*
 * __do_replace - atomically swap a validated ruleset into the named table
 * (xt_replace_table), adjust module refcounts, snapshot the old table's
 * counters for userspace, and free the old table.
 * NOTE(review): allocation checks, module_get/put calls, labels and
 * returns are missing from this extract; code left byte-identical.
 */
1224 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1225 struct xt_table_info *newinfo, unsigned int num_counters,
1226 void __user *counters_ptr)
1230 struct xt_table_info *oldinfo;
1231 struct xt_counters *counters;
1232 const void *loc_cpu_old_entry;
1235 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1242 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1243 "ip6table_%s", name);
1244 if (!t || IS_ERR(t)) {
1245 ret = t ? PTR_ERR(t) : -ENOENT;
1246 goto free_newinfo_counters_untrans;
1250 if (valid_hooks != t->valid_hooks) {
1251 duprintf("Valid hook crap: %08X vs %08X\n",
1252 valid_hooks, t->valid_hooks);
1257 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1261 /* Update module usage count based on number of rules */
1262 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1263 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1264 if ((oldinfo->number > oldinfo->initial_entries) ||
1265 (newinfo->number <= oldinfo->initial_entries))
1267 if ((oldinfo->number > oldinfo->initial_entries) &&
1268 (newinfo->number <= oldinfo->initial_entries))
1271 /* Get the old counters, and synchronize with replace */
1272 get_counters(oldinfo, counters);
1274 /* Decrease module usage counts and free resource */
1275 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1276 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1278 xt_free_table_info(oldinfo);
1279 if (copy_to_user(counters_ptr, counters,
1280 sizeof(struct xt_counters) * num_counters) != 0)
1289 free_newinfo_counters_untrans:
1289 free_newinfo_counters_untrans:
1296 do_replace(struct net *net, void __user *user, unsigned int len)
1299 struct ip6t_replace tmp;
1300 struct xt_table_info *newinfo;
1301 void *loc_cpu_entry;
1303 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1306 /* overflow check */
1307 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1310 newinfo = xt_alloc_table_info(tmp.size);
1314 /* choose the copy that is on our node/cpu */
1315 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1316 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1322 ret = translate_table(tmp.name, tmp.valid_hooks,
1323 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1324 tmp.hook_entry, tmp.underflow);
1328 duprintf("ip_tables: Translated table\n");
1330 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1331 tmp.num_counters, tmp.counters);
1333 goto free_newinfo_untrans;
1336 free_newinfo_untrans:
1337 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1339 xt_free_table_info(newinfo);
1343 /* We're lazy, and add to the first CPU; overflow works its fey magic
1344 * and everything is OK. */
/* Iterator callback for do_add_counters(): add userspace-supplied deltas
 * to one entry's counters.  (*i)++/return lines missing from this extract. */
1346 add_counter_to_entry(struct ip6t_entry *e,
1347 const struct xt_counters addme[],
1350 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/*
 * do_add_counters - IP6T_SO_SET_ADD_COUNTERS handler: read a counters blob
 * (native or compat layout), validate its length against the table's entry
 * count, and add the deltas to the current CPU's entry counters under the
 * per-CPU write lock.  NOTE(review): compat branch setup, error labels and
 * returns are missing from this extract; code left byte-identical.
 */
1357 do_add_counters(struct net *net, void __user *user, unsigned int len,
1360 unsigned int i, curcpu;
1361 struct xt_counters_info tmp;
1362 struct xt_counters *paddc;
1363 unsigned int num_counters;
1368 const struct xt_table_info *private;
1370 const void *loc_cpu_entry;
1371 #ifdef CONFIG_COMPAT
1372 struct compat_xt_counters_info compat_tmp;
1376 size = sizeof(struct compat_xt_counters_info);
1381 size = sizeof(struct xt_counters_info);
1384 if (copy_from_user(ptmp, user, size) != 0)
1387 #ifdef CONFIG_COMPAT
1389 num_counters = compat_tmp.num_counters;
1390 name = compat_tmp.name;
1394 num_counters = tmp.num_counters;
1398 if (len != size + num_counters * sizeof(struct xt_counters))
1401 paddc = vmalloc_node(len - size, numa_node_id());
1405 if (copy_from_user(paddc, user + size, len - size) != 0) {
1410 t = xt_find_table_lock(net, AF_INET6, name);
1411 if (!t || IS_ERR(t)) {
1412 ret = t ? PTR_ERR(t) : -ENOENT;
1418 private = t->private;
1419 if (private->number != num_counters) {
1421 goto unlock_up_free;
1425 /* Choose the copy that is on our node */
1426 curcpu = smp_processor_id();
1427 xt_info_wrlock(curcpu);
1428 loc_cpu_entry = private->entries[curcpu];
1429 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1431 add_counter_to_entry,
1434 xt_info_wrunlock(curcpu);
1446 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace; pointers become
 * compat_uptr_t.  NOTE(review): several member lines are missing from
 * this extract. */
1447 struct compat_ip6t_replace {
1448 char name[IP6T_TABLE_MAXNAMELEN];
1452 u32 hook_entry[NF_INET_NUMHOOKS];
1453 u32 underflow[NF_INET_NUMHOOKS];
1455 compat_uptr_t counters; /* struct ip6t_counters * */
1456 struct compat_ip6t_entry entries[0];
/*
 * compat_copy_entry_to_user - serialize one native entry into the 32-bit
 * compat layout at *dstptr, fixing up target_offset/next_offset by the
 * accumulated size shrinkage and patching in the counters snapshot.
 * NOTE(review): error returns and the final return are missing from this
 * extract; code left byte-identical.
 */
1460 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1461 unsigned int *size, struct xt_counters *counters,
1464 struct ip6t_entry_target *t;
1465 struct compat_ip6t_entry __user *ce;
1466 u_int16_t target_offset, next_offset;
1467 compat_uint_t origsize;
1472 ce = (struct compat_ip6t_entry __user *)*dstptr;
1473 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1476 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1479 *dstptr += sizeof(struct compat_ip6t_entry);
1480 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1482 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
/* Offsets shrink by however much the compat serialization saved so far. */
1483 target_offset = e->target_offset - (origsize - *size);
1486 t = ip6t_get_target(e);
1487 ret = xt_compat_target_to_user(t, dstptr, size);
1491 next_offset = e->next_offset - (origsize - *size);
1492 if (put_user(target_offset, &ce->target_offset))
1494 if (put_user(next_offset, &ce->next_offset))
1504 compat_find_calc_match(struct ip6t_entry_match *m,
1506 const struct ip6t_ip6 *ipv6,
1507 unsigned int hookmask,
1508 int *size, unsigned int *i)
1510 struct xt_match *match;
1512 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1513 m->u.user.revision),
1514 "ip6t_%s", m->u.user.name);
1515 if (IS_ERR(match) || !match) {
1516 duprintf("compat_check_calc_match: `%s' not found\n",
1518 return match ? PTR_ERR(match) : -ENOENT;
1520 m->u.kernel.match = match;
1521 *size += xt_compat_match_offset(match);
/*
 * Drop the module reference taken on a match.  When a countdown
 * pointer i is supplied, stop (without releasing) once it hits zero —
 * used to unwind only the matches that were successfully set up.
 */
1528 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1530 if (i && (*i)-- == 0)
1533 module_put(m->u.kernel.match->me);
/*
 * Release all module references held by one compat entry: every match,
 * then the target.  The optional countdown *i limits how many entries
 * are released when unwinding a partially-processed table.
 */
1538 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1540 struct ip6t_entry_target *t;
1542 if (i && (*i)-- == 0)
1545 /* Cleanup all matches */
1546 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1547 t = compat_ip6t_get_target(e);
1548 module_put(t->u.kernel.target->me);
/*
 * Validate one compat entry during translate_compat_table()'s first
 * pass: alignment and bounds against [base, limit), minimum size,
 * generic entry sanity, then resolve every match and the target
 * (taking module references) while accumulating the compat->native
 * size delta in 'off'.  The per-entry offset is registered with
 * xt_compat_add_offset() so later offset fixups can be computed, and
 * any hook entry/underflow positions that land on this entry are
 * recorded in newinfo.  On failure, already-grabbed matches/target are
 * released via the labels at the bottom.
 * NOTE(review): several error-branch lines are elided in this extract.
 */
1553 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1554 struct xt_table_info *newinfo,
1556 unsigned char *base,
1557 unsigned char *limit,
1558 unsigned int *hook_entries,
1559 unsigned int *underflows,
1563 struct ip6t_entry_target *t;
1564 struct xt_target *target;
1565 unsigned int entry_offset;
1569 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries and entries that would overrun the blob. */
1570 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1571 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1572 duprintf("Bad offset %p, limit = %p\n", e, limit);
1576 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1577 sizeof(struct compat_xt_entry_target)) {
1578 duprintf("checking: element %p size %u\n",
1583 /* For purposes of check_entry casting the compat entry is fine */
1584 ret = check_entry((struct ip6t_entry *)e, name);
1588 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1589 entry_offset = (void *)e - (void *)base;
/* Resolve each match, counting successes in j for partial unwind. */
1591 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1592 &e->ipv6, e->comefrom, &off, &j);
1594 goto release_matches;
1596 t = compat_ip6t_get_target(e);
1597 target = try_then_request_module(xt_find_target(AF_INET6,
1599 t->u.user.revision),
1600 "ip6t_%s", t->u.user.name);
1601 if (IS_ERR(target) || !target) {
1602 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1604 ret = target ? PTR_ERR(target) : -ENOENT;
1605 goto release_matches;
1607 t->u.kernel.target = target;
1609 off += xt_compat_target_offset(target);
/* Remember this entry's size delta for the second (copy) pass. */
1611 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1615 /* Check hooks & underflows */
1616 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1617 if ((unsigned char *)e - base == hook_entries[h])
1618 newinfo->hook_entry[h] = hook_entries[h];
1619 if ((unsigned char *)e - base == underflows[h])
1620 newinfo->underflow[h] = underflows[h];
1623 /* Clear counters and comefrom */
1624 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop target ref, then the j matches acquired above. */
1631 module_put(t->u.kernel.target->me);
1633 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * Second pass of translate_compat_table(): expand one compat entry at
 * e into the native-layout buffer at *dstptr.  Copies the header and
 * counters, converts each match and the target in place, rewrites
 * target_offset/next_offset by the accumulated growth, and shifts any
 * hook_entry/underflow offsets that lie past this entry.
 * *dstptr advances and *size grows as the entry expands.
 */
1638 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1639 unsigned int *size, const char *name,
1640 struct xt_table_info *newinfo, unsigned char *base)
1642 struct ip6t_entry_target *t;
1643 struct xt_target *target;
1644 struct ip6t_entry *de;
1645 unsigned int origsize;
1650 de = (struct ip6t_entry *)*dstptr;
1651 memcpy(de, e, sizeof(struct ip6t_entry));
1652 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1654 *dstptr += sizeof(struct ip6t_entry);
1655 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1657 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
/* Offsets shrink-adjust: (origsize - *size) is negative growth here. */
1661 de->target_offset = e->target_offset - (origsize - *size);
1662 t = compat_ip6t_get_target(e);
1663 target = t->u.kernel.target;
1664 xt_compat_target_from_user(t, dstptr, size);
1666 de->next_offset = e->next_offset - (origsize - *size);
/* Push hook/underflow markers forward for entries located after de. */
1667 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1668 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1669 newinfo->hook_entry[h] -= origsize - *size;
1670 if ((unsigned char *)de - base < newinfo->underflow[h])
1671 newinfo->underflow[h] -= origsize - *size;
/*
 * Final checkentry pass over a translated (now native-layout) entry:
 * run each match's checkentry via check_match with an xt_mtchk_param
 * built from this entry, then validate the target.  On any failure the
 * j matches already checked are cleaned up before returning.
 */
1676 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1681 struct xt_mtchk_param mtpar;
1685 mtpar.entryinfo = &e->ipv6;
1686 mtpar.hook_mask = e->comefrom;
1687 mtpar.family = NFPROTO_IPV6;
1688 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1690 goto cleanup_matches;
1692 ret = check_target(e, name);
1694 goto cleanup_matches;
/* Unwind the j matches whose checkentry already succeeded. */
1700 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * Convert a 32-bit userspace ruleset blob into a native xt_table_info.
 * Two passes under xt_compat_lock(AF_INET6):
 *   1) check_compat_entry_size_and_hooks over the compat blob —
 *      validates every entry, grabs match/target modules, records
 *      per-entry size offsets and hook positions;
 *   2) compat_copy_entry_from_user into a freshly allocated native
 *      table, then offset bookkeeping is flushed and the lock dropped.
 * Afterwards mark_source_chains() verifies loop-freedom and
 * compat_check_entry() runs the real checkentry hooks.  On success the
 * native table replaces *pinfo and the ruleset is replicated to every
 * possible CPU's copy.  All error paths release module references via
 * compat_release_entry()/cleanup_entry() before freeing.
 * NOTE(review): several error-label and assignment lines are elided in
 * this extract.
 */
1705 translate_compat_table(const char *name,
1706 unsigned int valid_hooks,
1707 struct xt_table_info **pinfo,
1709 unsigned int total_size,
1710 unsigned int number,
1711 unsigned int *hook_entries,
1712 unsigned int *underflows)
1715 struct xt_table_info *newinfo, *info;
1716 void *pos, *entry0, *entry1;
1723 info->number = number;
1725 /* Init all hooks to impossible value. */
1726 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1727 info->hook_entry[i] = 0xFFFFFFFF;
1728 info->underflow[i] = 0xFFFFFFFF;
1731 duprintf("translate_compat_table: size %u\n", info->size);
1733 xt_compat_lock(AF_INET6);
1734 /* Walk through entries, checking offsets. */
1735 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1736 check_compat_entry_size_and_hooks,
1737 info, &size, entry0,
1738 entry0 + total_size,
1739 hook_entries, underflows, &j, name);
1745 duprintf("translate_compat_table: %u not %u entries\n",
1750 /* Check hooks all assigned */
1751 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1752 /* Only hooks which are valid */
1753 if (!(valid_hooks & (1 << i)))
1755 if (info->hook_entry[i] == 0xFFFFFFFF) {
1756 duprintf("Invalid hook entry %u %u\n",
1757 i, hook_entries[i]);
1760 if (info->underflow[i] == 0xFFFFFFFF) {
1761 duprintf("Invalid underflow %u %u\n",
/* Allocate the native-size table and carry over the hook offsets. */
1768 newinfo = xt_alloc_table_info(size);
1772 newinfo->number = number;
1773 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1774 newinfo->hook_entry[i] = info->hook_entry[i];
1775 newinfo->underflow[i] = info->underflow[i];
1777 entry1 = newinfo->entries[raw_smp_processor_id()];
1780 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1781 compat_copy_entry_from_user,
1782 &pos, &size, name, newinfo, entry1);
1783 xt_compat_flush_offsets(AF_INET6);
1784 xt_compat_unlock(AF_INET6);
1789 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1793 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* Mixed unwind: release compat refs past i, clean checked entries. */
1797 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1798 compat_release_entry, &j);
1799 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1800 xt_free_table_info(newinfo);
1804 /* And one copy for every other CPU */
1805 for_each_possible_cpu(i)
1806 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1807 memcpy(newinfo->entries[i], entry1, newinfo->size);
1811 xt_free_table_info(info);
1815 xt_free_table_info(newinfo);
1817 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1820 xt_compat_flush_offsets(AF_INET6);
1821 xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_SET_REPLACE handler for 32-bit callers: copy in the compat
 * replace header, bounds-check the advertised sizes against integer
 * overflow, pull the entry blob into a per-CPU buffer, translate it to
 * native layout, and hand the result to __do_replace().  On failure
 * after translation, every entry is cleaned up before freeing.
 */
1826 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1829 struct compat_ip6t_replace tmp;
1830 struct xt_table_info *newinfo;
1831 void *loc_cpu_entry;
1833 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1836 /* overflow check */
1837 if (tmp.size >= INT_MAX / num_possible_cpus())
1839 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1842 newinfo = xt_alloc_table_info(tmp.size)
1846 /* choose the copy that is on our node/cpu */
1847 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1848 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1854 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1855 &newinfo, &loc_cpu_entry, tmp.size,
1856 tmp.num_entries, tmp.hook_entry,
1861 duprintf("compat_do_replace: Translated table\n");
1863 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1864 tmp.num_counters, compat_ptr(tmp.counters));
1866 goto free_newinfo_untrans;
1869 free_newinfo_untrans:
1870 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1872 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: requires CAP_NET_ADMIN, then routes
 * REPLACE to the compat path and ADD_COUNTERS to do_add_counters()
 * with compat=1.  Unknown commands are logged via duprintf.
 */
1877 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1882 if (!capable(CAP_NET_ADMIN))
1886 case IP6T_SO_SET_REPLACE:
1887 ret = compat_do_replace(sock_net(sk), user, len);
1890 case IP6T_SO_SET_ADD_COUNTERS:
1891 ret = do_add_counters(sock_net(sk), user, len, 1);
1895 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit layout of the IP6T_SO_GET_ENTRIES request/reply: table name,
 * (elided) size field, then the entry table in compat form.
 */
1902 struct compat_ip6t_get_entries {
1903 char name[IP6T_TABLE_MAXNAMELEN];
1905 struct compat_ip6t_entry entrytable[0];
/*
 * Copy the whole ruleset to a 32-bit caller: snapshot counters with
 * alloc_counters(), then walk the current CPU's entry copy and emit
 * each entry via compat_copy_entry_to_user().
 */
1909 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1910 void __user *userptr)
1912 struct xt_counters *counters;
1913 const struct xt_table_info *private = table->private;
1917 const void *loc_cpu_entry;
1920 counters = alloc_counters(table);
1921 if (IS_ERR(counters))
1922 return PTR_ERR(counters);
1924 /* choose the copy that is on our node/cpu, ...
1925 * This choice is lazy (because current thread is
1926 * allowed to migrate to another cpu)
1928 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1931 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1932 compat_copy_entry_to_user,
1933 &pos, &size, counters, &i);
/*
 * IP6T_SO_GET_ENTRIES for 32-bit callers: validate the user-supplied
 * length against the compat header plus the advertised table size,
 * look up the table under xt_compat_lock, verify the compat-computed
 * size matches, and stream the entries out.  Returns -ENOENT when the
 * table does not exist, -EINVAL on length mismatch (paths partly
 * elided here).
 */
1940 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1944 struct compat_ip6t_get_entries get;
1947 if (*len < sizeof(get)) {
1948 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1952 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1955 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1956 duprintf("compat_get_entries: %u != %zu\n",
1957 *len, sizeof(get) + get.size);
1961 xt_compat_lock(AF_INET6);
1962 t = xt_find_table_lock(net, AF_INET6, get.name);
1963 if (t && !IS_ERR(t)) {
1964 const struct xt_table_info *private = t->private;
1965 struct xt_table_info info;
1966 duprintf("t->private->number = %u\n", private->number);
/* Recompute the compat view of the table and compare sizes. */
1967 ret = compat_table_info(private, &info);
1968 if (!ret && get.size == info.size) {
1969 ret = compat_copy_entries_to_user(private->size,
1970 t, uptr->entrytable);
1972 duprintf("compat_get_entries: I've got %u not %u!\n",
1973 private->size, get.size);
1976 xt_compat_flush_offsets(AF_INET6);
1980 ret = t ? PTR_ERR(t) : -ENOENT;
1982 xt_compat_unlock(AF_INET6);
1986 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: CAP_NET_ADMIN required; GET_INFO uses
 * the shared get_info() with compat=1, GET_ENTRIES takes the compat
 * path, everything else falls through to the native handler.
 */
1989 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1993 if (!capable(CAP_NET_ADMIN))
1997 case IP6T_SO_GET_INFO:
1998 ret = get_info(sock_net(sk), user, len, 1);
2000 case IP6T_SO_GET_ENTRIES:
2001 ret = compat_get_entries(sock_net(sk), user, len);
2004 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: CAP_NET_ADMIN required; REPLACE goes
 * to do_replace(), ADD_COUNTERS to do_add_counters() with compat=0.
 */
2011 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2015 if (!capable(CAP_NET_ADMIN))
2019 case IP6T_SO_SET_REPLACE:
2020 ret = do_replace(sock_net(sk), user, len);
2023 case IP6T_SO_SET_ADD_COUNTERS:
2024 ret = do_add_counters(sock_net(sk), user, len, 0);
2028 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: CAP_NET_ADMIN required.  Handles
 * GET_INFO, GET_ENTRIES, and match/target revision queries (the
 * latter copy in an ip6t_get_revision and probe via xt_find_revision,
 * auto-loading "ip6t_%s" modules as needed).
 */
2036 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2040 if (!capable(CAP_NET_ADMIN))
2044 case IP6T_SO_GET_INFO:
2045 ret = get_info(sock_net(sk), user, len, 0);
2048 case IP6T_SO_GET_ENTRIES:
2049 ret = get_entries(sock_net(sk), user, len);
2052 case IP6T_SO_GET_REVISION_MATCH:
2053 case IP6T_SO_GET_REVISION_TARGET: {
2054 struct ip6t_get_revision rev;
2057 if (*len != sizeof(rev)) {
2061 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2066 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2071 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2074 "ip6t_%s", rev.name);
2079 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register a built-in table: allocate an xt_table_info sized for the
 * initial ruleset, copy the template entries into the local CPU's
 * slot, run them through translate_table(), and register the result
 * with x_tables using an empty bootstrap info.  Returns the new
 * xt_table or ERR_PTR on failure (newinfo freed on the error path).
 */
2086 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2087 const struct ip6t_replace *repl)
2090 struct xt_table_info *newinfo;
2091 struct xt_table_info bootstrap
2092 = { 0, 0, 0, { 0 }, { 0 }, { } };
2093 void *loc_cpu_entry;
2094 struct xt_table *new_table;
2096 newinfo = xt_alloc_table_info(repl->size);
2102 /* choose the copy on our node/cpu, but dont care about preemption */
2103 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2104 memcpy(loc_cpu_entry, repl->entries, repl->size);
2106 ret = translate_table(table->name, table->valid_hooks,
2107 newinfo, loc_cpu_entry, repl->size,
2114 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2115 if (IS_ERR(new_table)) {
2116 ret = PTR_ERR(new_table);
2122 xt_free_table_info(newinfo);
2124 return ERR_PTR(ret);
/*
 * Tear down a table: detach it from x_tables, run cleanup_entry over
 * the local CPU's ruleset copy to drop match/target module refs, drop
 * the table owner's module ref if user rules were loaded (number >
 * initial_entries), and free the table info.
 */
2127 void ip6t_unregister_table(struct xt_table *table)
2129 struct xt_table_info *private;
2130 void *loc_cpu_entry;
2131 struct module *table_owner = table->me;
2133 private = xt_unregister_table(table);
2135 /* Decrease module usage counts and free resources */
2136 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2137 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2138 if (private->number > private->initial_entries)
2139 module_put(table_owner);
2140 xt_free_table_info(private);
2143 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2145 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2146 u_int8_t type, u_int8_t code,
/* True when type matches and code falls in [min_code, max_code];
 * NOTE(review): the trailing invert-xor line is elided in this view. */
2149 return (type == test_type && code >= min_code && code <= max_code)
/*
 * Match callback for the built-in "icmp6" match: never matches on
 * fragments, pulls the ICMPv6 header at the transport offset, and
 * drops the packet (hotdrop) if the header is truncated.  Otherwise
 * compares type/code against the configured range, honoring
 * IP6T_ICMP_INV.
 */
2154 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2156 const struct icmp6hdr *ic;
2157 struct icmp6hdr _icmph;
2158 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2160 /* Must not be a fragment. */
2161 if (par->fragoff != 0)
2164 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2166 /* We've been asked to examine this packet, and we
2167 * can't. Hence, no choice but to drop.
2169 duprintf("Dropping evil ICMP tinygram.\n");
2170 *par->hotdrop = true;
2174 return icmp6_type_code_match(icmpinfo->type,
2177 ic->icmp6_type, ic->icmp6_code,
2178 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2181 /* Called when user tries to insert an entry of this type. */
2182 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2184 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2186 /* Must specify no unknown invflags */
2187 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2190 /* The built-in targets: standard (NULL) and error. */
/* Built-in standard target: its int-sized targetinfo holds a verdict;
 * compat hooks translate that int for 32-bit userspace. */
2191 static struct xt_target ip6t_standard_target __read_mostly = {
2192 .name = IP6T_STANDARD_TARGET,
2193 .targetsize = sizeof(int),
2195 #ifdef CONFIG_COMPAT
2196 .compatsize = sizeof(compat_int_t),
2197 .compat_from_user = compat_standard_from_user,
2198 .compat_to_user = compat_standard_to_user,
/* Built-in error target used to mark broken rules/chain ends. */
2202 static struct xt_target ip6t_error_target __read_mostly = {
2203 .name = IP6T_ERROR_TARGET,
2204 .target = ip6t_error,
2205 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
/* Socket-option registration: wires the get/set (and compat) control
 * handlers above into the netfilter sockopt range for ip6_tables. */
2209 static struct nf_sockopt_ops ip6t_sockopts = {
2211 .set_optmin = IP6T_BASE_CTL,
2212 .set_optmax = IP6T_SO_SET_MAX+1,
2213 .set = do_ip6t_set_ctl,
2214 #ifdef CONFIG_COMPAT
2215 .compat_set = compat_do_ip6t_set_ctl,
2217 .get_optmin = IP6T_BASE_CTL,
2218 .get_optmax = IP6T_SO_GET_MAX+1,
2219 .get = do_ip6t_get_ctl,
2220 #ifdef CONFIG_COMPAT
2221 .compat_get = compat_do_ip6t_get_ctl,
2223 .owner = THIS_MODULE,
/* Registration record for the built-in "icmp6" match defined above. */
2226 static struct xt_match icmp6_matchstruct __read_mostly = {
2228 .match = icmp6_match,
2229 .matchsize = sizeof(struct ip6t_icmp),
2230 .checkentry = icmp6_checkentry,
2231 .proto = IPPROTO_ICMPV6,
/* Per-netns setup: create the AF_INET6 x_tables proc entries. */
2235 static int __net_init ip6_tables_net_init(struct net *net)
2237 return xt_proto_init(net, AF_INET6);
/* Per-netns teardown: remove the AF_INET6 x_tables proc entries. */
2240 static void __net_exit ip6_tables_net_exit(struct net *net)
2242 xt_proto_fini(net, AF_INET6);
/* Hooks the per-netns init/exit above into the pernet subsystem. */
2245 static struct pernet_operations ip6_tables_net_ops = {
2246 .init = ip6_tables_net_init,
2247 .exit = ip6_tables_net_exit,
/*
 * Module init: register the pernet subsystem, the two built-in
 * targets, the icmp6 match, and finally the sockopt interface —
 * unwinding each earlier registration on failure (unwind labels at
 * the bottom; goto lines elided in this extract).
 */
2250 static int __init ip6_tables_init(void)
2254 ret = register_pernet_subsys(&ip6_tables_net_ops);
2258 /* Noone else will be downing sem now, so we won't sleep */
2259 ret = xt_register_target(&ip6t_standard_target);
2262 ret = xt_register_target(&ip6t_error_target);
2265 ret = xt_register_match(&icmp6_matchstruct);
2269 /* Register setsockopt */
2270 ret = nf_register_sockopt(&ip6t_sockopts);
2274 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwind, in reverse order of registration: */
2278 xt_unregister_match(&icmp6_matchstruct);
2280 xt_unregister_target(&ip6t_error_target);
2282 xt_unregister_target(&ip6t_standard_target);
2284 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in reverse of ip6_tables_init(). */
2289 static void __exit ip6_tables_fini(void)
2291 nf_unregister_sockopt(&ip6t_sockopts);
2293 xt_unregister_match(&icmp6_matchstruct);
2294 xt_unregister_target(&ip6t_error_target);
2295 xt_unregister_target(&ip6t_standard_target);
2297 unregister_pernet_subsys(&ip6_tables_net_ops);
2301 * find the offset to specified header or the protocol number of last header
2302 * if target < 0. "last header" is transport protocol header, ESP, or
2305 * If target header is found, its offset is set in *offset and return protocol
2306 * number. Otherwise, return -1.
2308 * If the first fragment doesn't contain the final protocol header or
2309 * NEXTHDR_NONE it is considered invalid.
2311 * Note that non-1st fragment is special case that "the protocol number
2312 * of last header" is "next header" field in Fragment header. In this case,
2313 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/* See the block comment above for the full contract: walks the IPv6
 * extension-header chain from just past the fixed header looking for
 * 'target', returning the protocol number and setting *offset, or -1
 * on failure.  NOTE(review): return statements and some bounds checks
 * are elided in this extract. */
2317 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2318 int target, unsigned short *fragoff)
2320 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2321 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2322 unsigned int len = skb->len - start;
2327 while (nexthdr != target) {
2328 struct ipv6_opt_hdr _hdr, *hp;
2329 unsigned int hdrlen;
/* Stop at any non-extension header or the explicit "no next header". */
2331 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2337 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2340 if (nexthdr == NEXTHDR_FRAGMENT) {
2341 unsigned short _frag_off;
2343 fp = skb_header_pointer(skb,
2344 start+offsetof(struct frag_hdr,
/* Mask off the M-flag and reserved bits; keep the fragment offset. */
2351 _frag_off = ntohs(*fp) & ~0x7;
2354 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2355 hp->nexthdr == NEXTHDR_NONE)) {
2357 *fragoff = _frag_off;
/* AUTH header length is in 32-bit words; others use ipv6_optlen(). */
2363 } else if (nexthdr == NEXTHDR_AUTH)
2364 hdrlen = (hp->hdrlen + 2) << 2;
2366 hdrlen = ipv6_optlen(hp);
2368 nexthdr = hp->nexthdr;
/* Public API exported to other modules (e.g. the filter/mangle/raw
 * table modules and header-parsing users), plus module entry points. */
2377 EXPORT_SYMBOL(ip6t_register_table);
2378 EXPORT_SYMBOL(ip6t_unregister_table);
2379 EXPORT_SYMBOL(ip6t_do_table);
2380 EXPORT_SYMBOL(ip6t_ext_hdr);
2381 EXPORT_SYMBOL(ipv6_find_hdr);
2383 module_init(ip6_tables_init);
2384 module_exit(ip6_tables_fini);