2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
50 #define duprintf(format, args...)
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
61 #define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
77 Hence the start of any table is given by get_table() below. */
79 /* Check for an extension */
/* Return true when nexthdr is one of the IPv6 extension-header protocol
 * numbers (hop-by-hop, routing, fragment, ESP, AH, no-next-header, dstopts).
 * NOTE(review): opening lines of this definition are missing from this
 * extract — signature/return type not visible here. */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
/* Checks the IPv6 rule head (ip6t_ip6) against a packet: source/dest address
 * under mask, in/out interface names, then the transport protocol found by
 * walking the extension-header chain.  FWINV() xors each test with the
 * corresponding IP6T_INV_* invert flag.
 * NOTE(review): several lines (function braces, some returns, and the start
 * of the dprintf block that ends with the stray "*\/" below) are missing from
 * this extract. */
95 ip6_packet_match(const struct sk_buff *skb,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
103 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
105 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
/* Address match: compare saddr/daddr under the rule's masks, honouring
 * the SRCIP/DSTIP invert flags. */
107 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
108 &ip6info->src), IP6T_INV_SRCIP)
109 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
110 &ip6info->dst), IP6T_INV_DSTIP)) {
111 dprintf("Source or dest mismatch.\n");
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
114 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
115 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
116 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
117 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
118 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* Interface match: masked name comparison against the rule's iniface. */
122 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
124 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
125 dprintf("VIA in mismatch (%s vs %s).%s\n",
126 indev, ip6info->iniface,
127 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
131 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
133 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
134 dprintf("VIA out mismatch (%s vs %s).%s\n",
135 outdev, ip6info->outiface,
136 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
140 /* ... might want to do something with class and flowlabel here ... */
142 /* look for the desired protocol header */
143 if((ip6info->flags & IP6T_F_PROTO)) {
145 unsigned short _frag_off;
/* ipv6_find_hdr() walks the ext-header chain; it fills *protoff with the
 * transport-header offset and _frag_off with the fragment offset. */
147 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
153 *fragoff = _frag_off;
155 dprintf("Packet protocol %hi ?= %s%hi.\n",
157 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
160 if (ip6info->proto == protohdr) {
161 if(ip6info->invflags & IP6T_INV_PROTO) {
167 /* We need match for the '-p all', too! */
168 if ((ip6info->proto != 0) &&
169 !(ip6info->invflags & IP6T_INV_PROTO))
175 /* should be ip6 safe */
/* Sanity-check the flags/invflags of a user-supplied rule head: reject any
 * bit outside IP6T_F_MASK / IP6T_INV_MASK.  NOTE(review): the return
 * statements are among the lines missing from this extract. */
177 ip6_checkentry(const struct ip6t_ip6 *ipv6)
179 if (ipv6->flags & ~IP6T_F_MASK) {
180 duprintf("Unknown flag bits set: %08X\n",
181 ipv6->flags & ~IP6T_F_MASK);
184 if (ipv6->invflags & ~IP6T_INV_MASK) {
185 duprintf("Unknown invflag bits set: %08X\n",
186 ipv6->invflags & ~IP6T_INV_MASK);
/* Target handler for the built-in ERROR target: logs the chain/error name
 * stored in targinfo.  Hit only on a broken ruleset. */
193 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
196 printk("ip6_tables: error: `%s'\n",
197 (const char *)par->targinfo);
202 /* Performance critical - called for every packet */
/* Run one match extension against the packet; fills par->match/matchinfo
 * then invokes the extension's ->match().  Non-zero return (from the missing
 * lines) stops IP6T_MATCH_ITERATE early when the match fails. */
204 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par)
207 par->match = m->u.kernel.match;
208 par->matchinfo = m->data;
210 /* Stop iteration if it doesn't match */
211 if (!m->u.kernel.match->match(skb, par))
/* Translate a byte offset from the table blob base into an entry pointer. */
217 static inline struct ip6t_entry *
218 get_entry(void *base, unsigned int offset)
220 return (struct ip6t_entry *)(base + offset);
223 /* All zeroes == unconditional rule. */
224 /* Mildly perf critical (only if packet tracing is on) */
/* True when every byte of the rule head is zero, i.e. the rule matches any
 * packet (used to recognise chain policies/returns). */
226 unconditional(const struct ip6t_ip6 *ipv6)
230 for (i = 0; i < sizeof(*ipv6); i++)
231 if (((char *)ipv6)[i])
234 return (i == sizeof(*ipv6));
237 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
238 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
239 /* This cries for unification! */
/* Static lookup tables used only by the TRACE target support below:
 * hook index -> chain name, and comment strings for trace output. */
240 static const char *const hooknames[] = {
241 [NF_INET_PRE_ROUTING] = "PREROUTING",
242 [NF_INET_LOCAL_IN] = "INPUT",
243 [NF_INET_FORWARD] = "FORWARD",
244 [NF_INET_LOCAL_OUT] = "OUTPUT",
245 [NF_INET_POST_ROUTING] = "POSTROUTING",
248 enum nf_ip_trace_comments {
249 NF_IP6_TRACE_COMMENT_RULE,
250 NF_IP6_TRACE_COMMENT_RETURN,
251 NF_IP6_TRACE_COMMENT_POLICY,
254 static const char *const comments[] = {
255 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
256 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
257 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
/* Log settings handed to nf_log_packet() for TRACE output. */
260 static struct nf_loginfo trace_loginfo = {
261 .type = NF_LOG_TYPE_LOG,
265 .logflags = NF_LOG_MASK,
270 /* Mildly perf critical (only if packet tracing is on) */
/* IP6T_ENTRY_ITERATE callback: walk entries from the hook start towards the
 * matched entry 'e', tracking the current (user-)chain name, rule number and
 * whether the hit is a rule, a return or the chain policy.
 * An ERROR target marks the head of a user-defined chain; an unconditional
 * STANDARD target at target_offset == sizeof(entry) marks a chain tail. */
272 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
273 char *hookname, char **chainname,
274 char **comment, unsigned int *rulenum)
276 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
278 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
279 /* Head of user chain: ERROR target with chainname */
280 *chainname = t->target.data;
285 if (s->target_offset == sizeof(struct ip6t_entry)
286 && strcmp(t->target.u.kernel.target->name,
287 IP6T_STANDARD_TARGET) == 0
289 && unconditional(&s->ipv6)) {
290 /* Tail of chains: STANDARD target (return/policy) */
/* Still in the entry hook's own chain -> "policy"; otherwise "return". */
291 *comment = *chainname == hookname
292 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
293 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
/* Emit a "TRACE: table:chain:comment:rulenum" log line for a traced packet,
 * locating the chain/rule by iterating from the hook entry point to 'e'
 * with get_chainname_rulenum(). */
302 static void trace_packet(struct sk_buff *skb,
304 const struct net_device *in,
305 const struct net_device *out,
306 const char *tablename,
307 struct xt_table_info *private,
308 struct ip6t_entry *e)
311 const struct ip6t_entry *root;
312 char *hookname, *chainname, *comment;
313 unsigned int rulenum = 0;
/* Per-CPU copy of the ruleset for this CPU. */
315 table_base = (void *)private->entries[smp_processor_id()];
316 root = get_entry(table_base, private->hook_entry[hook]);
318 hookname = chainname = (char *)hooknames[hook];
319 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
321 IP6T_ENTRY_ITERATE(root,
322 private->size - private->hook_entry[hook],
323 get_chainname_rulenum,
324 e, hookname, &chainname, &comment, &rulenum);
326 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
327 "TRACE: %s:%s:%s:%u ",
328 tablename, chainname, comment, rulenum);
332 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main per-packet traversal: walk the table's per-CPU rule blob starting at
 * the hook's entry point, matching each rule with ip6_packet_match() plus its
 * match extensions, bumping byte/packet counters on a hit, then dispatching
 * the rule's target.  STANDARD targets are handled inline as absolute
 * verdicts or jumps (with 'back' acting as a one-deep return pointer saved
 * in the next entry's comefrom); other targets are invoked via ->target().
 * NOTE(review): many lines (the traversal loop header, #else branches,
 * returns, the hotdrop handling) are missing from this extract. */
334 ip6t_do_table(struct sk_buff *skb,
336 const struct net_device *in,
337 const struct net_device *out,
338 struct xt_table *table)
340 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
341 bool hotdrop = false;
342 /* Initializing verdict to NF_DROP keeps gcc happy. */
343 unsigned int verdict = NF_DROP;
344 const char *indev, *outdev;
346 struct ip6t_entry *e, *back;
347 struct xt_table_info *private;
348 struct xt_match_param mtpar;
349 struct xt_target_param tgpar;
352 indev = in ? in->name : nulldevname;
353 outdev = out ? out->name : nulldevname;
354 /* We handle fragments by dealing with the first fragment as
355 * if it was a normal packet. All other fragments are treated
356 * normally, except that they will NEVER match rules that ask
357 * things we don't know, ie. tcp syn flag or ports). If the
358 * rule is also a fragment-specific rule, non-fragments won't
360 mtpar.hotdrop = &hotdrop;
361 mtpar.in = tgpar.in = in;
362 mtpar.out = tgpar.out = out;
363 mtpar.family = tgpar.family = NFPROTO_IPV6;
364 tgpar.hooknum = hook;
366 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
/* RCU-protected access to the live ruleset (lockless fast path). */
369 private = rcu_dereference(table->private);
370 table_base = rcu_dereference(private->entries[smp_processor_id()]);
372 e = get_entry(table_base, private->hook_entry[hook]);
374 /* For return from builtin chain */
375 back = get_entry(table_base, private->underflow[hook]);
380 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
381 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
382 struct ip6t_entry_target *t;
384 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
/* Rule hit: count the full IPv6 packet length (payload + header). */
387 ADD_COUNTER(e->counters,
388 ntohs(ipv6_hdr(skb)->payload_len) +
389 sizeof(struct ipv6hdr), 1);
391 t = ip6t_get_target(e);
392 IP_NF_ASSERT(t->u.kernel.target);
394 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
395 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
396 /* The packet is traced: log it */
397 if (unlikely(skb->nf_trace))
398 trace_packet(skb, hook, in, out,
399 table->name, private, e);
401 /* Standard target? */
402 if (!t->u.kernel.target->target) {
405 v = ((struct ip6t_standard_target *)t)->verdict;
407 /* Pop from stack? */
408 if (v != IP6T_RETURN) {
/* Negative verdicts encode NF_* verdicts as -(verdict)-1. */
409 verdict = (unsigned)(-v) - 1;
413 back = get_entry(table_base,
/* Non-RETURN positive verdict: jump (or fall through) to offset v. */
417 if (table_base + v != (void *)e + e->next_offset
418 && !(e->ipv6.flags & IP6T_F_GOTO)) {
419 /* Save old back ptr in next entry */
420 struct ip6t_entry *next
421 = (void *)e + e->next_offset;
423 = (void *)back - table_base;
424 /* set back pointer to next entry */
428 e = get_entry(table_base, v);
430 /* Targets which reenter must return
432 tgpar.target = t->u.kernel.target;
433 tgpar.targinfo = t->data;
435 #ifdef CONFIG_NETFILTER_DEBUG
436 ((struct ip6t_entry *)table_base)->comefrom
439 verdict = t->u.kernel.target->target(skb,
442 #ifdef CONFIG_NETFILTER_DEBUG
443 if (((struct ip6t_entry *)table_base)->comefrom
445 && verdict == IP6T_CONTINUE) {
446 printk("Target %s reentered!\n",
447 t->u.kernel.target->name);
450 ((struct ip6t_entry *)table_base)->comefrom
453 if (verdict == IP6T_CONTINUE)
454 e = (void *)e + e->next_offset;
462 e = (void *)e + e->next_offset;
466 #ifdef CONFIG_NETFILTER_DEBUG
467 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
469 rcu_read_unlock_bh();
471 #ifdef DEBUG_ALLOW_ALL
480 /* Figures out from what hook each rule can be called: returns 0 if
481 there are loops. Puts hook bitmask in comefrom. */
/* Depth-first walk of every chain reachable from each valid hook, without
 * recursion: the per-entry pcnt counter is borrowed as a saved back-pointer
 * (restored to 0 on the way out) and comefrom accumulates the hook bitmask.
 * Bit NF_INET_NUMHOOKS in comefrom marks "currently on the walk stack", so
 * re-seeing it means a rule loop.  Also validates STANDARD verdicts.
 * NOTE(review): loop headers, returns and several statements are missing
 * from this extract. */
483 mark_source_chains(struct xt_table_info *newinfo,
484 unsigned int valid_hooks, void *entry0)
488 /* No recursion; use packet counter to save back ptrs (reset
489 to 0 as we leave), and comefrom to save source hook bitmask */
490 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
491 unsigned int pos = newinfo->hook_entry[hook];
492 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
494 if (!(valid_hooks & (1 << hook)))
497 /* Set initial back pointer. */
498 e->counters.pcnt = pos;
501 struct ip6t_standard_target *t
502 = (void *)ip6t_get_target(e);
503 int visited = e->comefrom & (1 << hook);
/* Already on the walk stack -> rule loop; reject the table. */
505 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
506 printk("iptables: loop hook %u pos %u %08X.\n",
507 hook, pos, e->comefrom);
510 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
512 /* Unconditional return/END. */
513 if ((e->target_offset == sizeof(struct ip6t_entry)
514 && (strcmp(t->target.u.user.name,
515 IP6T_STANDARD_TARGET) == 0)
517 && unconditional(&e->ipv6)) || visited) {
518 unsigned int oldpos, size;
520 if ((strcmp(t->target.u.user.name,
521 IP6T_STANDARD_TARGET) == 0) &&
522 t->verdict < -NF_MAX_VERDICT - 1) {
523 duprintf("mark_source_chains: bad "
524 "negative verdict (%i)\n",
529 /* Return: backtrack through the last
/* Pop the walk stack: clear the on-stack bit, restore pcnt. */
532 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
533 #ifdef DEBUG_IP_FIREWALL_USER
535 & (1 << NF_INET_NUMHOOKS)) {
536 duprintf("Back unset "
543 pos = e->counters.pcnt;
544 e->counters.pcnt = 0;
546 /* We're at the start. */
550 e = (struct ip6t_entry *)
552 } while (oldpos == pos + e->next_offset);
555 size = e->next_offset;
556 e = (struct ip6t_entry *)
557 (entry0 + pos + size);
558 e->counters.pcnt = pos;
561 int newpos = t->verdict;
563 if (strcmp(t->target.u.user.name,
564 IP6T_STANDARD_TARGET) == 0
/* Jump target must land inside the blob, before the tail entry. */
566 if (newpos > newinfo->size -
567 sizeof(struct ip6t_entry)) {
568 duprintf("mark_source_chains: "
569 "bad verdict (%i)\n",
573 /* This a jump; chase it. */
574 duprintf("Jump rule %u -> %u\n",
577 /* ... this is a fallthru */
578 newpos = pos + e->next_offset;
580 e = (struct ip6t_entry *)
582 e->counters.pcnt = pos;
587 duprintf("Finished chain %u\n", hook);
/* Iterator callback: destroy one match extension and drop its module ref.
 * When 'i' is non-NULL it is a countdown limiting cleanup to the first *i
 * matches (used to unwind a partially-checked entry). */
593 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
595 struct xt_mtdtor_param par;
597 if (i && (*i)-- == 0)
600 par.match = m->u.kernel.match;
601 par.matchinfo = m->data;
602 par.family = NFPROTO_IPV6;
603 if (par.match->destroy != NULL)
604 par.match->destroy(&par);
605 module_put(par.match->me);
/* Basic structural validation of one user-supplied entry: rule-head flag
 * check plus target_offset/next_offset bounds so the target record fits
 * inside the entry. */
610 check_entry(struct ip6t_entry *e, const char *name)
612 struct ip6t_entry_target *t;
614 if (!ip6_checkentry(&e->ipv6)) {
615 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
619 if (e->target_offset + sizeof(struct ip6t_entry_target) >
623 t = ip6t_get_target(e);
624 if (e->target_offset + t->u.target_size > e->next_offset)
/* Run xt_check_match() on one already-looked-up match extension, passing
 * the rule's protocol and proto-invert flag for revision/proto checks. */
630 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
633 const struct ip6t_ip6 *ipv6 = par->entryinfo;
636 par->match = m->u.kernel.match;
637 par->matchinfo = m->data;
639 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
640 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
642 duprintf("ip_tables: check failed for `%s'.\n",
/* Look up (auto-loading "ip6t_<name>" if needed) the match extension named
 * in the user blob, attach it to the entry, then validate it with
 * check_match(); drops the module ref on failure (err path partly missing
 * from this extract). */
651 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
654 struct xt_match *match;
657 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
659 "ip6t_%s", m->u.user.name);
660 if (IS_ERR(match) || !match) {
661 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
662 return match ? PTR_ERR(match) : -ENOENT;
664 m->u.kernel.match = match;
666 ret = check_match(m, par, i);
672 module_put(m->u.kernel.match->me);
/* Validate the (already-resolved) target of an entry via xt_check_target(),
 * passing the hook mask accumulated in e->comefrom by mark_source_chains(). */
676 static int check_target(struct ip6t_entry *e, const char *name)
678 struct ip6t_entry_target *t = ip6t_get_target(e);
679 struct xt_tgchk_param par = {
682 .target = t->u.kernel.target,
684 .hook_mask = e->comefrom,
685 .family = NFPROTO_IPV6,
689 t = ip6t_get_target(e);
690 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
691 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
693 duprintf("ip_tables: check failed for `%s'.\n",
694 t->u.kernel.target->name);
/* Full per-entry validation: structural check, then look up + check every
 * match extension ('j' counts successes for unwinding), then look up + check
 * the target.  On failure, cleanup_match() releases the first j matches. */
701 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
704 struct ip6t_entry_target *t;
705 struct xt_target *target;
708 struct xt_mtchk_param mtpar;
710 ret = check_entry(e, name);
716 mtpar.entryinfo = &e->ipv6;
717 mtpar.hook_mask = e->comefrom;
718 mtpar.family = NFPROTO_IPV6;
719 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
721 goto cleanup_matches;
723 t = ip6t_get_target(e);
/* Auto-load "ip6t_<target>" module if the target isn't registered yet. */
724 target = try_then_request_module(xt_find_target(AF_INET6,
727 "ip6t_%s", t->u.user.name);
728 if (IS_ERR(target) || !target) {
729 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
730 ret = target ? PTR_ERR(target) : -ENOENT;
731 goto cleanup_matches;
733 t->u.kernel.target = target;
735 ret = check_target(e, name);
742 module_put(t->u.kernel.target->me);
744 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/* First-pass iterator over the raw user blob: verify alignment and that the
 * entry (plus a minimal target) fits before 'limit', record which entries
 * sit exactly at the user-declared hook entry/underflow offsets, and reset
 * counters/comefrom for the later passes. */
749 check_entry_size_and_hooks(struct ip6t_entry *e,
750 struct xt_table_info *newinfo,
752 unsigned char *limit,
753 const unsigned int *hook_entries,
754 const unsigned int *underflows,
759 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
760 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
761 duprintf("Bad offset %p\n", e);
766 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
767 duprintf("checking: element %p size %u\n",
772 /* Check hooks & underflows */
773 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
774 if ((unsigned char *)e - base == hook_entries[h])
775 newinfo->hook_entry[h] = hook_entries[h];
776 if ((unsigned char *)e - base == underflows[h])
777 newinfo->underflow[h] = underflows[h];
780 /* FIXME: underflows must be unconditional, standard verdicts
781 < 0 (not IP6T_RETURN). --RR */
783 /* Clear counters and comefrom */
784 e->counters = ((struct xt_counters) { 0, 0 });
/* Tear down one fully-initialised entry: destroy all of its matches, then
 * destroy its target and drop the target module ref.  'i' is the usual
 * countdown limiter for partial unwinds. */
792 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
794 struct xt_tgdtor_param par;
795 struct ip6t_entry_target *t;
797 if (i && (*i)-- == 0)
800 /* Cleanup all matches */
801 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
802 t = ip6t_get_target(e);
804 par.target = t->u.kernel.target;
805 par.targinfo = t->data;
806 par.family = NFPROTO_IPV6;
807 if (par.target->destroy != NULL)
808 par.target->destroy(&par);
809 module_put(par.target->me);
813 /* Checks and translates the user-supplied table segment (held in
/* Multi-pass validation of a replacement table: size/hook pass
 * (check_entry_size_and_hooks), hook completeness check, loop detection
 * (mark_source_chains), full per-entry check (find_check_entry), then copy
 * the validated blob to every other possible CPU's per-CPU area.
 * NOTE(review): entry-count check and error unwinding lines are partly
 * missing from this extract. */
816 translate_table(const char *name,
817 unsigned int valid_hooks,
818 struct xt_table_info *newinfo,
822 const unsigned int *hook_entries,
823 const unsigned int *underflows)
828 newinfo->size = size;
829 newinfo->number = number;
831 /* Init all hooks to impossible value. */
832 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
833 newinfo->hook_entry[i] = 0xFFFFFFFF;
834 newinfo->underflow[i] = 0xFFFFFFFF;
837 duprintf("translate_table: size %u\n", newinfo->size);
839 /* Walk through entries, checking offsets. */
840 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
841 check_entry_size_and_hooks,
845 hook_entries, underflows, &i);
850 duprintf("translate_table: %u not %u entries\n",
855 /* Check hooks all assigned */
856 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
857 /* Only hooks which are valid */
858 if (!(valid_hooks & (1 << i)))
860 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
861 duprintf("Invalid hook entry %u %u\n",
865 if (newinfo->underflow[i] == 0xFFFFFFFF) {
866 duprintf("Invalid underflow %u %u\n",
872 if (!mark_source_chains(newinfo, valid_hooks, entry0))
875 /* Finally, each sanity check must pass */
877 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
878 find_check_entry, name, size, &i);
/* On failure, unwind the 'i' entries already initialised. */
881 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
886 /* And one copy for every other CPU */
887 for_each_possible_cpu(i) {
888 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
889 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* Counter-gathering callback: add one entry's byte/packet counts into
 * total[*i] (the *i increment is among the lines missing here). */
897 add_entry_to_counter(const struct ip6t_entry *e,
898 struct xt_counters total[],
901 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* Like add_entry_to_counter() but overwrites total[*i] — used for the first
 * CPU so callers can skip a prior memset of the totals array. */
908 set_entry_to_counter(const struct ip6t_entry *e,
909 struct ip6t_counters total[],
912 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* Sum per-CPU rule counters into 'counters': SET from the current CPU's
 * copy first (avoids a memset), then ADD every other possible CPU's copy. */
919 get_counters(const struct xt_table_info *t,
920 struct xt_counters counters[])
926 /* Instead of clearing (by a previous call to memset())
927 * the counters and using adds, we set the counters
928 * with data used by 'current' CPU
929 * We dont care about preemption here.
931 curcpu = raw_smp_processor_id();
934 IP6T_ENTRY_ITERATE(t->entries[curcpu],
936 set_entry_to_counter,
940 for_each_possible_cpu(cpu) {
944 IP6T_ENTRY_ITERATE(t->entries[cpu],
946 add_entry_to_counter,
952 /* We're lazy, and add to the first CPU; overflow works its fey magic
953 * and everything is OK. */
/* Inverse of add_entry_to_counter(): fold the user-supplied addme[*i]
 * deltas back into a live entry's counters. */
955 add_counter_to_entry(struct ip6t_entry *e,
956 const struct xt_counters addme[],
959 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
965 /* Take values from counters and add them back onto the current cpu */
/* Used after the RCU table swap in alloc_counters(): re-credit the snapshot
 * totals onto the current CPU's entry counters so no counts are lost. */
966 static void put_counters(struct xt_table_info *t,
967 const struct xt_counters counters[])
972 cpu = smp_processor_id();
974 IP6T_ENTRY_ITERATE(t->entries[cpu],
976 add_counter_to_entry,
/* Iterator callback: reset one entry's byte/packet counters to zero. */
983 zero_entry_counter(struct ip6t_entry *e, void *arg)
985 e->counters.bcnt = 0;
986 e->counters.pcnt = 0;
/* Build a scratch copy of 'info' (header fields plus a per-CPU duplicate of
 * this CPU's entry blob) with every counter zeroed — the table copy that is
 * RCU-swapped in while counters are read. */
991 clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
994 const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
996 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
997 for_each_possible_cpu(cpu) {
998 memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
999 IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
1000 zero_entry_counter, NULL);
1004 static struct xt_counters *alloc_counters(struct xt_table *table)
1006 unsigned int countersize;
1007 struct xt_counters *counters;
1008 struct xt_table_info *private = table->private;
1009 struct xt_table_info *info;
1011 /* We need atomic snapshot of counters: rest doesn't change
1012 (other than comefrom, which userspace doesn't care
1014 countersize = sizeof(struct xt_counters) * private->number;
1015 counters = vmalloc_node(countersize, numa_node_id());
1017 if (counters == NULL)
1020 info = xt_alloc_table_info(private->size);
1024 clone_counters(info, private);
1026 mutex_lock(&table->lock);
1027 xt_table_entry_swap_rcu(private, info);
1028 synchronize_net(); /* Wait until smoke has cleared */
1030 get_counters(info, counters);
1031 put_counters(private, counters);
1032 mutex_unlock(&table->lock);
1034 xt_free_table_info(info);
1041 return ERR_PTR(-ENOMEM);
/* Copy the whole ruleset to userspace: bulk-copy this CPU's blob, then walk
 * it again patching each entry's counters (from the alloc_counters()
 * snapshot) and replacing kernel match/target pointers with the extension
 * names userspace expects.  NOTE(review): error-path/free lines are missing
 * from this extract. */
1045 copy_entries_to_user(unsigned int total_size,
1046 struct xt_table *table,
1047 void __user *userptr)
1049 unsigned int off, num;
1050 struct ip6t_entry *e;
1051 struct xt_counters *counters;
1052 const struct xt_table_info *private = table->private;
1054 const void *loc_cpu_entry;
1056 counters = alloc_counters(table);
1057 if (IS_ERR(counters))
1058 return PTR_ERR(counters);
1060 /* choose the copy that is on our node/cpu, ...
1061 * This choice is lazy (because current thread is
1062 * allowed to migrate to another cpu)
1064 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1065 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1070 /* FIXME: use iterator macros --RR */
1071 /* ... then go back and fix counters and names */
1072 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1074 const struct ip6t_entry_match *m;
1075 const struct ip6t_entry_target *t;
1077 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1078 if (copy_to_user(userptr + off
1079 + offsetof(struct ip6t_entry, counters),
1081 sizeof(counters[num])) != 0) {
/* Walk the match records between the entry head and its target. */
1086 for (i = sizeof(struct ip6t_entry);
1087 i < e->target_offset;
1088 i += m->u.match_size) {
1091 if (copy_to_user(userptr + off + i
1092 + offsetof(struct ip6t_entry_match,
1094 m->u.kernel.match->name,
1095 strlen(m->u.kernel.match->name)+1)
1102 t = ip6t_get_target(e);
1103 if (copy_to_user(userptr + off + e->target_offset
1104 + offsetof(struct ip6t_entry_target,
1106 t->u.kernel.target->name,
1107 strlen(t->u.kernel.target->name)+1) != 0) {
1118 #ifdef CONFIG_COMPAT
/* COMPAT: convert a 32-bit userland standard verdict to native, adjusting
 * positive (jump-offset) verdicts by the recorded compat offset delta. */
1119 static void compat_standard_from_user(void *dst, void *src)
1121 int v = *(compat_int_t *)src;
1124 v += xt_compat_calc_jump(AF_INET6, v);
1125 memcpy(dst, &v, sizeof(v));
/* COMPAT: inverse of compat_standard_from_user() — subtract the offset
 * delta and copy the 32-bit verdict out to userspace. */
1128 static int compat_standard_to_user(void __user *dst, void *src)
1130 compat_int_t cv = *(int *)src;
1133 cv -= xt_compat_calc_jump(AF_INET6, cv);
1134 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* Accumulate the native-vs-compat size difference of one match record. */
1138 compat_calc_match(struct ip6t_entry_match *m, int *size)
1140 *size += xt_compat_match_offset(m->u.kernel.match);
/* COMPAT sizing pass for one entry: total the native/compat size delta for
 * the entry head, each match and the target, register it with
 * xt_compat_add_offset(), shrink newinfo->size accordingly, and pull back
 * any hook_entry/underflow offsets that lie beyond this entry. */
1144 static int compat_calc_entry(struct ip6t_entry *e,
1145 const struct xt_table_info *info,
1146 void *base, struct xt_table_info *newinfo)
1148 struct ip6t_entry_target *t;
1149 unsigned int entry_offset;
1152 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1153 entry_offset = (void *)e - base;
1154 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1155 t = ip6t_get_target(e);
1156 off += xt_compat_target_offset(t->u.kernel.target);
1157 newinfo->size -= off;
1158 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1162 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1163 if (info->hook_entry[i] &&
1164 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1165 newinfo->hook_entry[i] -= off;
1166 if (info->underflow[i] &&
1167 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1168 newinfo->underflow[i] -= off;
/* Build compat-sized table metadata from a native table by running
 * compat_calc_entry() over this CPU's entry blob; entries[] contents are
 * deliberately not copied. */
1173 static int compat_table_info(const struct xt_table_info *info,
1174 struct xt_table_info *newinfo)
1176 void *loc_cpu_entry;
1178 if (!newinfo || !info)
1181 /* we dont care about newinfo->entries[] */
1182 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1183 newinfo->initial_entries = 0;
1184 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1185 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1186 compat_calc_entry, info, loc_cpu_entry,
/* IP6T_SO_GET_INFO handler: copy the named table's hook entry points,
 * underflows, entry count and size out to userspace.  When 'compat' is set
 * the sizes are recomputed via compat_table_info() under the compat lock. */
1191 static int get_info(struct net *net, void __user *user, int *len, int compat)
1193 char name[IP6T_TABLE_MAXNAMELEN];
1197 if (*len != sizeof(struct ip6t_getinfo)) {
1198 duprintf("length %u != %zu\n", *len,
1199 sizeof(struct ip6t_getinfo));
1203 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Force NUL termination of the user-supplied table name. */
1206 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1207 #ifdef CONFIG_COMPAT
1209 xt_compat_lock(AF_INET6);
1211 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1212 "ip6table_%s", name);
1213 if (t && !IS_ERR(t)) {
1214 struct ip6t_getinfo info;
1215 const struct xt_table_info *private = t->private;
1217 #ifdef CONFIG_COMPAT
1219 struct xt_table_info tmp;
1220 ret = compat_table_info(private, &tmp);
1221 xt_compat_flush_offsets(AF_INET6);
1225 info.valid_hooks = t->valid_hooks;
1226 memcpy(info.hook_entry, private->hook_entry,
1227 sizeof(info.hook_entry));
1228 memcpy(info.underflow, private->underflow,
1229 sizeof(info.underflow));
1230 info.num_entries = private->number;
1231 info.size = private->size;
1232 strcpy(info.name, name);
1234 if (copy_to_user(user, &info, *len) != 0)
1242 ret = t ? PTR_ERR(t) : -ENOENT;
1243 #ifdef CONFIG_COMPAT
1245 xt_compat_unlock(AF_INET6);
/* IP6T_SO_GET_ENTRIES handler: validate the user-declared size against the
 * live table, then dump the ruleset with copy_entries_to_user(). */
1251 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1254 struct ip6t_get_entries get;
1257 if (*len < sizeof(get)) {
1258 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1261 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1263 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1264 duprintf("get_entries: %u != %zu\n",
1265 *len, sizeof(get) + get.size);
1269 t = xt_find_table_lock(net, AF_INET6, get.name);
1270 if (t && !IS_ERR(t)) {
1271 struct xt_table_info *private = t->private;
1272 duprintf("t->private->number = %u\n", private->number);
1273 if (get.size == private->size)
1274 ret = copy_entries_to_user(private->size,
1275 t, uptr->entrytable);
1277 duprintf("get_entries: I've got %u not %u!\n",
1278 private->size, get.size);
1284 ret = t ? PTR_ERR(t) : -ENOENT;
/* Core of table replacement (shared by native and compat paths): find the
 * table (auto-loading its module), verify valid_hooks match, swap in the
 * new xt_table_info, adjust module refcounts, harvest the old table's
 * counters for userspace, and free the old table.
 * NOTE(review): several error-path and module_put lines are missing from
 * this extract. */
1290 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1291 struct xt_table_info *newinfo, unsigned int num_counters,
1292 void __user *counters_ptr)
1296 struct xt_table_info *oldinfo;
1297 struct xt_counters *counters;
1298 const void *loc_cpu_old_entry;
1301 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1308 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1309 "ip6table_%s", name);
1310 if (!t || IS_ERR(t)) {
1311 ret = t ? PTR_ERR(t) : -ENOENT;
1312 goto free_newinfo_counters_untrans;
1316 if (valid_hooks != t->valid_hooks) {
1317 duprintf("Valid hook crap: %08X vs %08X\n",
1318 valid_hooks, t->valid_hooks);
1323 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1327 /* Update module usage count based on number of rules */
1328 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1329 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1330 if ((oldinfo->number > oldinfo->initial_entries) ||
1331 (newinfo->number <= oldinfo->initial_entries))
1333 if ((oldinfo->number > oldinfo->initial_entries) &&
1334 (newinfo->number <= oldinfo->initial_entries))
1337 /* Get the old counters. */
1338 get_counters(oldinfo, counters);
1339 /* Decrease module usage counts and free resource */
1340 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1341 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1343 xt_free_table_info(oldinfo);
1344 if (copy_to_user(counters_ptr, counters,
1345 sizeof(struct xt_counters) * num_counters) != 0)
1354 free_newinfo_counters_untrans:
/* IP6T_SO_SET_REPLACE handler: copy the ip6t_replace header and rule blob
 * from userspace, validate/translate it (translate_table), then install it
 * via __do_replace(); on failure the translated entries are cleaned up and
 * the table info freed. */
1361 do_replace(struct net *net, void __user *user, unsigned int len)
1364 struct ip6t_replace tmp;
1365 struct xt_table_info *newinfo;
1366 void *loc_cpu_entry;
1368 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1371 /* overflow check */
1372 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1375 newinfo = xt_alloc_table_info(tmp.size);
1379 /* choose the copy that is on our node/cpu */
1380 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1381 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1387 ret = translate_table(tmp.name, tmp.valid_hooks,
1388 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1389 tmp.hook_entry, tmp.underflow);
1393 duprintf("ip_tables: Translated table\n");
1395 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1396 tmp.num_counters, tmp.counters);
1398 goto free_newinfo_untrans;
1401 free_newinfo_untrans:
1402 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1404 xt_free_table_info(newinfo);
/* IP6T_SO_SET_ADD_COUNTERS handler: read the counters header (native or
 * 32-bit compat layout), validate the total length, vmalloc and copy the
 * counter deltas, then fold them into the live table's entries on this CPU
 * under t->lock via add_counter_to_entry(). */
1409 do_add_counters(struct net *net, void __user *user, unsigned int len,
1413 struct xt_counters_info tmp;
1414 struct xt_counters *paddc;
1415 unsigned int num_counters;
1420 const struct xt_table_info *private;
1422 const void *loc_cpu_entry;
1423 #ifdef CONFIG_COMPAT
1424 struct compat_xt_counters_info compat_tmp;
1428 size = sizeof(struct compat_xt_counters_info);
1433 size = sizeof(struct xt_counters_info);
1436 if (copy_from_user(ptmp, user, size) != 0)
1439 #ifdef CONFIG_COMPAT
1441 num_counters = compat_tmp.num_counters;
1442 name = compat_tmp.name;
1446 num_counters = tmp.num_counters;
1450 if (len != size + num_counters * sizeof(struct xt_counters))
1453 paddc = vmalloc_node(len - size, numa_node_id());
1457 if (copy_from_user(paddc, user + size, len - size) != 0) {
1462 t = xt_find_table_lock(net, AF_INET6, name);
1463 if (!t || IS_ERR(t)) {
1464 ret = t ? PTR_ERR(t) : -ENOENT;
1468 mutex_lock(&t->lock);
1469 private = t->private;
/* Userspace must supply exactly one delta per rule. */
1470 if (private->number != num_counters) {
1472 goto unlock_up_free;
1477 /* Choose the copy that is on our node */
1478 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1479 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1481 add_counter_to_entry,
1486 mutex_unlock(&t->lock);
1495 #ifdef CONFIG_COMPAT
/* 32-bit userland layout of struct ip6t_replace: pointers become
 * compat_uptr_t and the trailing entries use the compat entry layout. */
1496 struct compat_ip6t_replace {
1497 char name[IP6T_TABLE_MAXNAMELEN];
1501 u32 hook_entry[NF_INET_NUMHOOKS];
1502 u32 underflow[NF_INET_NUMHOOKS];
1504 compat_uptr_t counters; /* struct ip6t_counters * */
1505 struct compat_ip6t_entry entries[0];
/* COMPAT dump: write one native entry to userspace in compat layout —
 * entry head, snapshot counters, converted matches and target — then patch
 * the compat entry's target/next offsets by the accumulated size shrink
 * (origsize - *size).  Advances *dstptr / *size / *i as it goes. */
1509 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1510 unsigned int *size, struct xt_counters *counters,
1513 struct ip6t_entry_target *t;
1514 struct compat_ip6t_entry __user *ce;
1515 u_int16_t target_offset, next_offset;
1516 compat_uint_t origsize;
1521 ce = (struct compat_ip6t_entry __user *)*dstptr;
1522 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1525 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1528 *dstptr += sizeof(struct compat_ip6t_entry);
1529 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1531 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1532 target_offset = e->target_offset - (origsize - *size);
1535 t = ip6t_get_target(e);
1536 ret = xt_compat_target_to_user(t, dstptr, size);
1540 next_offset = e->next_offset - (origsize - *size);
1541 if (put_user(target_offset, &ce->target_offset))
1543 if (put_user(next_offset, &ce->next_offset))
/*
 * Look up (auto-loading "ip6t_<name>" if needed) the xt_match named in
 * a compat entry's match record, bind it to m->u.kernel.match, and add
 * its native-vs-compat size delta to *size.  *i counts successfully
 * bound matches so the caller can release them on error.
 */
1553 compat_find_calc_match(struct ip6t_entry_match *m,
1555 const struct ip6t_ip6 *ipv6,
1556 unsigned int hookmask,
1557 int *size, unsigned int *i)
1559 struct xt_match *match;
1561 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1562 m->u.user.revision),
1563 "ip6t_%s", m->u.user.name);
1564 if (IS_ERR(match) || !match) {
1565 duprintf("compat_check_calc_match: `%s' not found\n",
1566 return match ? PTR_ERR(match) : -ENOENT;
/* Takes a module reference; paired with compat_release_match(). */
1569 m->u.kernel.match = match;
1570 *size += xt_compat_match_offset(match);
/*
 * Drop the module reference taken by compat_find_calc_match().  When a
 * counter is supplied, stop once *i matches have been released (the
 * "(*i)-- == 0" idiom used throughout this file for partial cleanup).
 */
1577 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1579 if (i && (*i)-- == 0)
1582 module_put(m->u.kernel.match->me);
/*
 * Release all module references held by one compat entry: every match
 * plus the target.  The optional counter *i limits how many entries are
 * processed (partial cleanup after a mid-table failure).
 */
1587 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1589 struct ip6t_entry_target *t;
1591 if (i && (*i)-- == 0)
1594 /* Cleanup all matches */
1595 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1596 t = compat_ip6t_get_target(e);
1597 module_put(t->u.kernel.target->me);
/*
 * Validate one compat entry's alignment, bounds and minimum size, bind
 * its matches and target (taking module refs), record the compat→native
 * size delta with xt_compat_add_offset(), and note hook entry/underflow
 * positions in newinfo.  On error, previously bound matches/target are
 * released via the labels at the bottom.
 * NOTE(review): several intermediate lines (returns, labels, braces)
 * are elided from this extract.
 */
1602 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1603 struct xt_table_info *newinfo,
1605 unsigned char *base,
1606 unsigned char *limit,
1607 unsigned int *hook_entries,
1608 unsigned int *underflows,
1612 struct ip6t_entry_target *t;
1613 struct xt_target *target;
1614 unsigned int entry_offset;
1618 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries and entries whose header overruns limit. */
1619 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1620 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1621 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* Entry must at least hold its own header plus a target header. */
1625 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1626 sizeof(struct compat_xt_entry_target)) {
1627 duprintf("checking: element %p size %u\n",
1632 /* For purposes of check_entry casting the compat entry is fine */
1633 ret = check_entry((struct ip6t_entry *)e, name);
/* Running native-minus-compat size delta for this entry. */
1637 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1638 entry_offset = (void *)e - (void *)base;
1640 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1641 &e->ipv6, e->comefrom, &off, &j);
1643 goto release_matches;
1645 t = compat_ip6t_get_target(e);
1646 target = try_then_request_module(xt_find_target(AF_INET6,
1648 t->u.user.revision),
1649 "ip6t_%s", t->u.user.name);
1650 if (IS_ERR(target) || !target) {
1651 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1653 ret = target ? PTR_ERR(target) : -ENOENT;
1654 goto release_matches;
1656 t->u.kernel.target = target;
1658 off += xt_compat_target_offset(target);
/* Remember this entry's cumulative delta for offset translation. */
1660 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1664 /* Check hooks & underflows */
1665 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1666 if ((unsigned char *)e - base == hook_entries[h])
1667 newinfo->hook_entry[h] = hook_entries[h];
1668 if ((unsigned char *)e - base == underflows[h])
1669 newinfo->underflow[h] = underflows[h];
1672 /* Clear counters and comefrom */
1673 memset(&e->counters, 0, sizeof(e->counters));
/* Error path: undo target ref, then release the j bound matches. */
1680 module_put(t->u.kernel.target->me);
/*
 * NOTE(review): this uses the native IP6T_MATCH_ITERATE on a compat
 * entry, unlike line 1595 which uses COMPAT_IP6T_MATCH_ITERATE —
 * confirm against upstream whether this macro choice is intentional.
 */
1682 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * Expand one already-validated compat entry into native ip6t_entry
 * layout at *dstptr, converting matches and target in place and
 * adjusting target_offset/next_offset plus any hook entry/underflow
 * positions in newinfo that lie past this entry (they grow by the
 * accumulated size delta, expressed as "-= origsize - *size" where
 * *size has increased).
 */
1687 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1688 unsigned int *size, const char *name,
1689 struct xt_table_info *newinfo, unsigned char *base)
1691 struct ip6t_entry_target *t;
1692 struct xt_target *target;
1693 struct ip6t_entry *de;
1694 unsigned int origsize;
1699 de = (struct ip6t_entry *)*dstptr;
1700 memcpy(de, e, sizeof(struct ip6t_entry));
1701 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1703 *dstptr += sizeof(struct ip6t_entry);
1704 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1706 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1710 de->target_offset = e->target_offset - (origsize - *size);
1711 t = compat_ip6t_get_target(e);
1712 target = t->u.kernel.target;
1713 xt_compat_target_from_user(t, dstptr, size);
1715 de->next_offset = e->next_offset - (origsize - *size);
/* Shift hook offsets that point at or beyond this (now larger) entry. */
1716 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1717 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1718 newinfo->hook_entry[h] -= origsize - *size;
1719 if ((unsigned char *)de - base < newinfo->underflow[h])
1720 newinfo->underflow[h] -= origsize - *size;
/*
 * Final per-entry check after compat translation: run every match's
 * checkentry hook (via check_match, counting successes in the elided
 * j) and then the target's via check_target.  On failure the j matches
 * already checked are cleaned up.
 */
1725 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1730 struct xt_mtchk_param mtpar;
1734 mtpar.entryinfo = &e->ipv6;
1735 mtpar.hook_mask = e->comefrom;
1736 mtpar.family = NFPROTO_IPV6;
1737 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1739 goto cleanup_matches;
1741 ret = check_target(e, name);
1743 goto cleanup_matches;
/* Error path: unwind only the matches that passed check_match. */
1749 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * Translate a full compat-format table blob into a native
 * xt_table_info: pass 1 (under xt_compat_lock) validates every compat
 * entry and records size offsets; pass 2 copies entries into native
 * layout; then chains are marked and each entry gets its final
 * checkentry pass.  On success *pinfo/*pentry0 (elided here) are
 * swapped to the new table.
 * NOTE(review): error labels and several returns are elided from this
 * extract.
 */
1754 translate_compat_table(const char *name,
1755 unsigned int valid_hooks,
1756 struct xt_table_info **pinfo,
1758 unsigned int total_size,
1759 unsigned int number,
1760 unsigned int *hook_entries,
1761 unsigned int *underflows)
1764 struct xt_table_info *newinfo, *info;
1765 void *pos, *entry0, *entry1;
1772 info->number = number;
1774 /* Init all hooks to impossible value. */
1775 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1776 info->hook_entry[i] = 0xFFFFFFFF;
1777 info->underflow[i] = 0xFFFFFFFF;
1780 duprintf("translate_compat_table: size %u\n", info->size);
/* Pass 1 runs under the per-family compat lock: offset table is global. */
1782 xt_compat_lock(AF_INET6);
1783 /* Walk through entries, checking offsets. */
1784 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1785 check_compat_entry_size_and_hooks,
1786 info, &size, entry0,
1787 entry0 + total_size,
1788 hook_entries, underflows, &j, name);
1794 duprintf("translate_compat_table: %u not %u entries\n",
1799 /* Check hooks all assigned */
1800 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1801 /* Only hooks which are valid */
1802 if (!(valid_hooks & (1 << i)))
1804 if (info->hook_entry[i] == 0xFFFFFFFF) {
1805 duprintf("Invalid hook entry %u %u\n",
1806 i, hook_entries[i]);
1809 if (info->underflow[i] == 0xFFFFFFFF) {
1810 duprintf("Invalid underflow %u %u\n",
/* size now includes per-entry compat→native growth from pass 1. */
1817 newinfo = xt_alloc_table_info(size);
1821 newinfo->number = number;
1822 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1823 newinfo->hook_entry[i] = info->hook_entry[i];
1824 newinfo->underflow[i] = info->underflow[i];
1826 entry1 = newinfo->entries[raw_smp_processor_id()];
/* Pass 2: expand each entry into native layout at entry1. */
1829 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1830 compat_copy_entry_from_user,
1831 &pos, &size, name, newinfo, entry1);
1832 xt_compat_flush_offsets(AF_INET6);
1833 xt_compat_unlock(AF_INET6);
1838 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1842 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* Mixed cleanup: release the rest of the compat entries (from i on),
 * and tear down the i native entries already checked. */
1846 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1847 compat_release_entry, &j);
1848 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1849 xt_free_table_info(newinfo);
1853 /* And one copy for every other CPU */
1854 for_each_possible_cpu(i)
1855 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1856 memcpy(newinfo->entries[i], entry1, newinfo->size);
1860 xt_free_table_info(info);
/* Elided error labels below: free newinfo / release entries / unlock. */
1864 xt_free_table_info(newinfo);
1866 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1869 xt_compat_flush_offsets(AF_INET6);
1870 xt_compat_unlock(AF_INET6);
/*
 * Compat IP6T_SO_SET_REPLACE: copy the 32-bit replace header and entry
 * blob from user space, translate to native layout, then install via
 * __do_replace().  On install failure the translated entries must be
 * cleaned up here (free_newinfo_untrans).
 */
1875 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1878 struct compat_ip6t_replace tmp;
1879 struct xt_table_info *newinfo;
1880 void *loc_cpu_entry;
1882 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1885 /* overflow check */
/* Bound size/num_counters so later per-CPU and per-counter products
 * cannot overflow. */
1886 if (tmp.size >= INT_MAX / num_possible_cpus())
1888 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1891 newinfo = xt_alloc_table_info(tmp.size)
1895 /* choose the copy that is on our node/cpu */
1896 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1897 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1903 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1904 &newinfo, &loc_cpu_entry, tmp.size,
1905 tmp.num_entries, tmp.hook_entry,
1910 duprintf("compat_do_replace: Translated table\n");
1912 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1913 tmp.num_counters, compat_ptr(tmp.counters));
1915 goto free_newinfo_untrans;
1918 free_newinfo_untrans:
1919 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1921 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: requires CAP_NET_ADMIN, then routes
 * REPLACE to the compat path and ADD_COUNTERS to do_add_counters with
 * compat=1.
 */
1926 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1931 if (!capable(CAP_NET_ADMIN))
1935 case IP6T_SO_SET_REPLACE:
1936 ret = compat_do_replace(sock_net(sk), user, len);
1939 case IP6T_SO_SET_ADD_COUNTERS:
1940 ret = do_add_counters(sock_net(sk), user, len, 1);
1944 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit user-space layout for IP6T_SO_GET_ENTRIES: table name followed
 * by a variable-length array of compat entries written back to user.
 */
1951 struct compat_ip6t_get_entries {
1952 char name[IP6T_TABLE_MAXNAMELEN];
1954 struct compat_ip6t_entry entrytable[0];
/*
 * Dump the whole table to a compat user buffer: snapshot counters
 * (alloc_counters), then convert each entry with
 * compat_copy_entry_to_user.  Counter buffer freeing is in elided
 * lines.
 */
1958 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1959 void __user *userptr)
1961 struct xt_counters *counters;
1962 const struct xt_table_info *private = table->private;
1966 const void *loc_cpu_entry;
1969 counters = alloc_counters(table);
1970 if (IS_ERR(counters))
1971 return PTR_ERR(counters);
1973 /* choose the copy that is on our node/cpu, ...
1974 * This choice is lazy (because current thread is
1975 * allowed to migrate to another cpu)
1977 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1980 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1981 compat_copy_entry_to_user,
1982 &pos, &size, counters, &i);
/*
 * Compat IP6T_SO_GET_ENTRIES: validate the requested length against the
 * compat-translated table size (compat_table_info) before copying
 * entries out.  Runs under xt_compat_lock because translation uses the
 * global per-family offset table.
 */
1989 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1993 struct compat_ip6t_get_entries get;
1996 if (*len < sizeof(get)) {
1997 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
2001 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
2004 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
2005 duprintf("compat_get_entries: %u != %zu\n",
2006 *len, sizeof(get) + get.size);
2010 xt_compat_lock(AF_INET6);
2011 t = xt_find_table_lock(net, AF_INET6, get.name);
2012 if (t && !IS_ERR(t)) {
2013 const struct xt_table_info *private = t->private;
2014 struct xt_table_info info;
2015 duprintf("t->private->number = %u\n", private->number);
/* User's size must equal the compat-translated size exactly. */
2016 ret = compat_table_info(private, &info);
2017 if (!ret && get.size == info.size) {
2018 ret = compat_copy_entries_to_user(private->size,
2019 t, uptr->entrytable);
2021 duprintf("compat_get_entries: I've got %u not %u!\n",
2022 private->size, get.size);
2025 xt_compat_flush_offsets(AF_INET6);
2029 ret = t ? PTR_ERR(t) : -ENOENT;
2031 xt_compat_unlock(AF_INET6);
/* Forward declaration: compat_do_ip6t_get_ctl falls through to this
 * native handler for non-compat-specific commands. */
2035 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: CAP_NET_ADMIN required; GET_INFO uses
 * the shared handler with compat=1, GET_ENTRIES uses the compat copier,
 * everything else falls through to the native do_ip6t_get_ctl.
 */
2038 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2042 if (!capable(CAP_NET_ADMIN))
2046 case IP6T_SO_GET_INFO:
2047 ret = get_info(sock_net(sk), user, len, 1);
2049 case IP6T_SO_GET_ENTRIES:
2050 ret = compat_get_entries(sock_net(sk), user, len);
2053 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: CAP_NET_ADMIN required; REPLACE and
 * ADD_COUNTERS (compat=0) are the only supported commands.
 */
2060 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2064 if (!capable(CAP_NET_ADMIN))
2068 case IP6T_SO_SET_REPLACE:
2069 ret = do_replace(sock_net(sk), user, len);
2072 case IP6T_SO_SET_ADD_COUNTERS:
2073 ret = do_add_counters(sock_net(sk), user, len, 0);
2077 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: GET_INFO / GET_ENTRIES plus match and
 * target revision queries (which may auto-load the extension module via
 * try_then_request_module).
 */
2085 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2089 if (!capable(CAP_NET_ADMIN))
2093 case IP6T_SO_GET_INFO:
2094 ret = get_info(sock_net(sk), user, len, 0);
2097 case IP6T_SO_GET_ENTRIES:
2098 ret = get_entries(sock_net(sk), user, len);
2101 case IP6T_SO_GET_REVISION_MATCH:
2102 case IP6T_SO_GET_REVISION_TARGET: {
2103 struct ip6t_get_revision rev;
2106 if (*len != sizeof(rev)) {
2110 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* Same rev struct serves both queries; only the lookup table differs. */
2115 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2120 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2123 "ip6t_%s", rev.name);
2128 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register a new ip6tables table for @net: allocate per-CPU entry
 * storage, copy the initial ruleset from @repl, translate/validate it,
 * and hand it to the x_tables core.  Returns the registered table or an
 * ERR_PTR.
 */
2135 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2136 const struct ip6t_replace *repl)
2139 struct xt_table_info *newinfo;
/* Empty bootstrap info; xt_register_table swaps in newinfo. */
2140 struct xt_table_info bootstrap
2141 = { 0, 0, 0, { 0 }, { 0 }, { } };
2142 void *loc_cpu_entry;
2143 struct xt_table *new_table;
2145 newinfo = xt_alloc_table_info(repl->size);
2151 /* choose the copy on our node/cpu, but dont care about preemption */
2152 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2153 memcpy(loc_cpu_entry, repl->entries, repl->size);
2155 ret = translate_table(table->name, table->valid_hooks,
2156 newinfo, loc_cpu_entry, repl->size,
2163 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2164 if (IS_ERR(new_table)) {
2165 ret = PTR_ERR(new_table);
/* Elided error label: free newinfo and return ERR_PTR(ret). */
2171 xt_free_table_info(newinfo);
2173 return ERR_PTR(ret);
/*
 * Tear down a registered table: unregister from the x_tables core, run
 * cleanup_entry over every rule (dropping match/target module refs),
 * release the table-owner module ref taken for user-added rules, and
 * free the table info.
 */
2176 void ip6t_unregister_table(struct xt_table *table)
2178 struct xt_table_info *private;
2179 void *loc_cpu_entry;
/* table->me must be read before xt_unregister_table frees state. */
2180 struct module *table_owner = table->me;
2182 private = xt_unregister_table(table);
2184 /* Decrease module usage counts and free resources */
2185 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2186 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
/* Owner ref was only taken when rules beyond the built-ins exist. */
2187 if (private->number > private->initial_entries)
2188 module_put(table_owner);
2189 xt_free_table_info(private);
2192 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* The elided trailing operand XORs the range test with the invert flag. */
2194 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2195 u_int8_t type, u_int8_t code,
2198 return (type == test_type && code >= min_code && code <= max_code)
/*
 * Match callback for the built-in "icmp6" match: reads the ICMPv6
 * header at the transport offset and compares type/code against the
 * rule's range, honoring IP6T_ICMP_INV.  Truncated headers hot-drop
 * the packet.
 */
2203 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2205 const struct icmp6hdr *ic;
2206 struct icmp6hdr _icmph;
2207 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2209 /* Must not be a fragment. */
2210 if (par->fragoff != 0)
2213 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2215 /* We've been asked to examine this packet, and we
2216 * can't. Hence, no choice but to drop.
2218 duprintf("Dropping evil ICMP tinygram.\n");
2219 *par->hotdrop = true;
2223 return icmp6_type_code_match(icmpinfo->type,
2226 ic->icmp6_type, ic->icmp6_code,
2227 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2230 /* Called when user tries to insert an entry of this type. */
2231 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2233 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2235 /* Must specify no unknown invflags */
2236 return !(icmpinfo->invflags & ~IP6T_ICMP_INV)
2239 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: payload is just a signed verdict int;
 * compat hooks translate the int between 32/64-bit layouts. */
2240 static struct xt_target ip6t_standard_target __read_mostly = {
2241 .name = IP6T_STANDARD_TARGET,
2242 .targetsize = sizeof(int),
2244 #ifdef CONFIG_COMPAT
2245 .compatsize = sizeof(compat_int_t),
2246 .compat_from_user = compat_standard_from_user,
2247 .compat_to_user = compat_standard_to_user,
/* ERROR target: marks chain ends / user-defined chain names; payload is
 * a fixed-size name buffer. */
2251 static struct xt_target ip6t_error_target __read_mostly = {
2252 .name = IP6T_ERROR_TARGET,
2253 .target = ip6t_error,
2254 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
/* sockopt registration: wires the set/get (and compat) dispatchers
 * above into the IP6T_BASE_CTL.. option range. */
2258 static struct nf_sockopt_ops ip6t_sockopts = {
2260 .set_optmin = IP6T_BASE_CTL,
2261 .set_optmax = IP6T_SO_SET_MAX+1,
2262 .set = do_ip6t_set_ctl,
2263 #ifdef CONFIG_COMPAT
2264 .compat_set = compat_do_ip6t_set_ctl,
2266 .get_optmin = IP6T_BASE_CTL,
2267 .get_optmax = IP6T_SO_GET_MAX+1,
2268 .get = do_ip6t_get_ctl,
2269 #ifdef CONFIG_COMPAT
2270 .compat_get = compat_do_ip6t_get_ctl,
2272 .owner = THIS_MODULE,
/* Built-in "icmp6" match registration, restricted to IPPROTO_ICMPV6. */
2275 static struct xt_match icmp6_matchstruct __read_mostly = {
2277 .match = icmp6_match,
2278 .matchsize = sizeof(struct ip6t_icmp),
2279 .checkentry = icmp6_checkentry,
2280 .proto = IPPROTO_ICMPV6,
/* Per-netns init: set up AF_INET6 x_tables state for this namespace. */
2284 static int __net_init ip6_tables_net_init(struct net *net)
2286 return xt_proto_init(net, AF_INET6);
/* Per-netns teardown: counterpart of ip6_tables_net_init. */
2289 static void __net_exit ip6_tables_net_exit(struct net *net)
2291 xt_proto_fini(net, AF_INET6);
/* Hook the per-namespace init/exit pair into the pernet machinery. */
2294 static struct pernet_operations ip6_tables_net_ops = {
2295 .init = ip6_tables_net_init,
2296 .exit = ip6_tables_net_exit,
/*
 * Module init: register pernet ops, the two built-in targets, the
 * icmp6 match, and the sockopt interface — unwinding in reverse order
 * on failure (the elided error labels between lines 2323 and 2333).
 */
2299 static int __init ip6_tables_init(void)
2303 ret = register_pernet_subsys(&ip6_tables_net_ops);
2307 /* Noone else will be downing sem now, so we won't sleep */
2308 ret = xt_register_target(&ip6t_standard_target);
2311 ret = xt_register_target(&ip6t_error_target);
2314 ret = xt_register_match(&icmp6_matchstruct);
2318 /* Register setsockopt */
2319 ret = nf_register_sockopt(&ip6t_sockopts);
2323 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwind path (labels elided): undo registrations in reverse. */
2327 xt_unregister_match(&icmp6_matchstruct);
2329 xt_unregister_target(&ip6t_error_target);
2331 xt_unregister_target(&ip6t_standard_target);
2333 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: mirror of ip6_tables_init, unregistering in reverse
 * order of registration. */
2338 static void __exit ip6_tables_fini(void)
2340 nf_unregister_sockopt(&ip6t_sockopts);
2342 xt_unregister_match(&icmp6_matchstruct);
2343 xt_unregister_target(&ip6t_error_target);
2344 xt_unregister_target(&ip6t_standard_target);
2346 unregister_pernet_subsys(&ip6_tables_net_ops);
2350 * find the offset to specified header or the protocol number of last header
2351 * if target < 0. "last header" is transport protocol header, ESP, or
2354 * If target header is found, its offset is set in *offset and return protocol
2355 * number. Otherwise, return -1.
2357 * If the first fragment doesn't contain the final protocol header or
2358 * NEXTHDR_NONE it is considered invalid.
2360 * Note that non-1st fragment is special case that "the protocol number
2361 * of last header" is "next header" field in Fragment header. In this case,
2362 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/*
 * Walk the IPv6 extension header chain looking for @target (see the
 * contract in the comment block above).  Sets *offset on success; for
 * non-first fragments the next-header value and *fragoff are reported
 * instead.
 * NOTE(review): loop braces, the target<0 handling and several error
 * returns are elided from this extract.
 */
2366 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2367 int target, unsigned short *fragoff)
2369 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2370 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2371 unsigned int len = skb->len - start;
2376 while (nexthdr != target) {
2377 struct ipv6_opt_hdr _hdr, *hp;
2378 unsigned int hdrlen;
/* Chain ends at a non-extension header or NEXTHDR_NONE. */
2380 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2386 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2389 if (nexthdr == NEXTHDR_FRAGMENT) {
2390 unsigned short _frag_off;
2392 fp = skb_header_pointer(skb,
2393 start+offsetof(struct frag_hdr,
/* Mask off the reserved/M bits to get the fragment offset. */
2400 _frag_off = ntohs(*fp) & ~0x7;
2403 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2404 hp->nexthdr == NEXTHDR_NONE)) {
2406 *fragoff = _frag_off;
/* AH length field counts 32-bit words; others use ipv6_optlen. */
2412 } else if (nexthdr == NEXTHDR_AUTH)
2413 hdrlen = (hp->hdrlen + 2) << 2;
2415 hdrlen = ipv6_optlen(hp);
2417 nexthdr = hp->nexthdr;
/* Public API exported to other modules (ip6table_filter etc.). */
2426 EXPORT_SYMBOL(ip6t_register_table);
2427 EXPORT_SYMBOL(ip6t_unregister_table);
2428 EXPORT_SYMBOL(ip6t_do_table);
2429 EXPORT_SYMBOL(ip6t_ext_hdr);
2430 EXPORT_SYMBOL(ipv6_find_hdr);
2432 module_init(ip6_tables_init);
2433 module_exit(ip6_tables_fini);