 * Internet Control Message Protocol (ICMPv6)
 * Linux INET6 implementation
 * Pedro Roque <roque@di.fc.ul.pt>
 * Based on net/ipv4/icmp.c
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 * Andi Kleen : exception handling
 * Andi Kleen : add rate limits; never reply to an ICMP error.
 * add more length checks and other fixes.
 * yoshfuji : ensure to send a parameter problem for
 * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit.
 * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support
 * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/netfilter.h>
#include <linux/sysctl.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/icmpv6.h>
#include <net/ip6_checksum.h>
#include <net/protocol.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/inet_common.h>
#include <asm/uaccess.h>
#include <asm/system.h>

DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
EXPORT_SYMBOL(icmpv6_statistics);
DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics) __read_mostly;
EXPORT_SYMBOL(icmpv6msg_statistics);
 * The ICMP socket(s). This is the most convenient way to flow control
 * our ICMP output as well as maintain a clean interface throughout
 * all layers. All Socketless IP sends will soon be gone.
 * On SMP we have one ICMP socket per-cpu.

static inline struct sock *icmpv6_sk(struct net *net)
	return net->ipv6.icmp_sk[smp_processor_id()];
static int icmpv6_rcv(struct sk_buff *skb);

static struct inet6_protocol icmpv6_protocol = {
	.handler = icmpv6_rcv,
	.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,

static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path (e.g. a SIT or
		 * ip6ip6 tunnel) signals dst_link_failure() for an
		 * outgoing ICMP6 packet.
		 */
static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
	spin_unlock_bh(&sk->sk_lock.slock);

 * Slightly more convenient version of icmpv6_send.
void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
 * Figure out whether we may reply to this packet with an ICMP error.
 * We do not reply if:
 *	- it was an ICMP error message.
 *	- it is truncated, so that it is not known whether the protocol
 *	  is ICMPv6 (i.e. the truncation falls in the middle of some exthdr).
static int is_ineligible(struct sk_buff *skb)
	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
	int len = skb->len - ptr;
	__u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		tp = skb_header_pointer(skb,
					ptr + offsetof(struct icmp6hdr, icmp6_type),
					sizeof(_type), &_type);
		    !(*tp & ICMPV6_INFOMSG_MASK))
 * Check the ICMP output rate limit
static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
	struct dst_entry *dst;
	struct net *net = sock_net(sk);

	/* Informational messages are not limited. */
	if (type & ICMPV6_INFOMSG_MASK)

	/* Do not limit pmtu discovery; limiting it would break it. */
	if (type == ICMPV6_PKT_TOOBIG)

	 * Look up the output route.
	 * XXX: perhaps the expiry of routing entries cloned by
	 * this lookup should be more aggressive (not longer than timeout).
	dst = ip6_route_output(net, sk, fl);
		IP6_INC_STATS(ip6_dst_idev(dst),
			      IPSTATS_MIB_OUTNOROUTES);
	} else if (dst->dev && (dst->dev->flags & IFF_LOOPBACK)) {
		struct rt6_info *rt = (struct rt6_info *)dst;
		int tmo = net->ipv6.sysctl.icmpv6_time;

		/* Give more bandwidth to wider prefixes. */
		if (rt->rt6i_dst.plen < 128)
			tmo >>= ((128 - rt->rt6i_dst.plen) >> 5);
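		/*
		 * Each 32 bits of prefix length short of /128 halves the
		 * token interval: e.g. a /64 destination gets tmo >>= 2
		 * (four times the rate), a /96 destination gets tmo >>= 1.
		 */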
		res = xrlim_allow(dst, tmo);

 * An inline helper for the "simple" if statement below:
 * it checks whether a parameter problem report was caused by an
 * unrecognized IPv6 option whose Option Type has its two
 * highest-order bits set to 10.
static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
	offset += skb_network_offset(skb);
	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
	return (*op & 0xC0) == 0x80;
static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
	struct icmp6hdr *icmp6h;

	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)

	icmp6h = icmp6_hdr(skb);
	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
	icmp6h->icmp6_cksum = 0;
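	/*
	 * Fill in the ICMPv6 checksum: with a single queued skb the header
	 * is folded straight into skb->csum; with several, the per-skb
	 * partial checksums on the write queue are combined first.  The
	 * IPv6 pseudo-header is added last via csum_ipv6_magic().
	 */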
	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		skb->csum = csum_partial((char *)icmp6h,
					 sizeof(struct icmp6hdr), skb->csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);
		tmp_csum = csum_partial((char *)icmp6h,
					sizeof(struct icmp6hdr), tmp_csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
	ip6_push_pending_frames(sk);
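/*
 * Fragment-fill callback for ip6_append_data(): copies a chunk of the
 * offending packet into the message being built while accumulating the
 * checksum, and (for error messages only) ties the original skb's
 * conntrack entry to the reply.
 */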
static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
	struct sk_buff *org_skb = msg->skb;

	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (!(msg->type & ICMPV6_INFOMSG_MASK))
		nf_ct_attach(skb, org_skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
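/*
 * Mobile IPv6: if the offending packet carried a Home Address destination
 * option, swap the care-of address in the IPv6 source field with the home
 * address from the HAO, so that the error built below is addressed to the
 * mobile node's home address.
 */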
static void mip6_addr_swap(struct sk_buff *skb)
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6_destopt_hao *hao;

		off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
		if (likely(off >= 0)) {
			hao = (struct ipv6_destopt_hao *)
					(skb_network_header(skb) + off);
			ipv6_addr_copy(&tmp, &iph->saddr);
			ipv6_addr_copy(&iph->saddr, &hao->addr);
			ipv6_addr_copy(&hao->addr, &tmp);

static inline void mip6_addr_swap(struct sk_buff *skb) {}
 * Send an ICMP message in response to a packet in error
void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
		 struct net_device *dev)
	struct net *net = dev_net(skb->dev);
	struct inet6_dev *idev = NULL;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct ipv6_pinfo *np;
	struct in6_addr *saddr = NULL;
	struct dst_entry *dst;
	struct dst_entry *dst2;
	struct icmp6hdr tmp_hdr;
	struct icmpv6_msg msg;

	if ((u8 *)hdr < skb->head ||
	    (skb->network_header + sizeof(*hdr)) > skb->tail)
	 * Make sure we respect the rules
	 * i.e. RFC 1885 2.4(e)
	 * Rule (e.1) is enforced by not using icmpv6_send
	 * in any code that processes icmp errors.

	addr_type = ipv6_addr_type(&hdr->daddr);

	if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0))
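	/*
	 * Destination address check: for packets that arrived as multicast
	 * or were not addressed to us, only Packet Too Big and Parameter
	 * Problem reports about an unrecognized option with the two
	 * high-order bits 10 may be generated (RFC 2463 2.4(e)).
	 */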
	if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
		if (type != ICMPV6_PKT_TOOBIG &&
		    !(type == ICMPV6_PARAMPROB &&
		      code == ICMPV6_UNK_OPTION &&
		      (opt_unrec(skb, info))))

	addr_type = ipv6_addr_type(&hdr->saddr);

	if (addr_type & IPV6_ADDR_LINKLOCAL)
		iif = skb->dev->ifindex;

	 * Must not send error if the source does not uniquely
	 * identify a single node (RFC2463 Section 2.4).
	 * We check unspecified / multicast addresses here,
	 * and anycast addresses will be checked later.
	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
	 * Never answer an ICMP error with another ICMP error.
	if (is_ineligible(skb)) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_ICMPV6;
	ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
	ipv6_addr_copy(&fl.fl6_src, saddr);
	fl.fl_icmp_type = type;
	fl.fl_icmp_code = code;
	security_skb_classify_flow(skb, &fl);
	sk = icmpv6_xmit_lock(net);

	if (!icmpv6_xrlim_allow(sk, type, &fl))

	tmp_hdr.icmp6_type = type;
	tmp_hdr.icmp6_code = code;
	tmp_hdr.icmp6_cksum = 0;
	tmp_hdr.icmp6_pointer = htonl(info);

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl);
	 * We won't send icmp if the destination is known anycast.
	if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
		goto out_dst_release;
	/* No need to clone since we're just using its address. */

	err = xfrm_lookup(&dst, &fl, sk, 0);

	if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
		goto relookup_failed;

	if (ip6_dst_lookup(sk, &dst2, &fl))
		goto relookup_failed;

	err = xfrm_lookup(&dst2, &fl, sk, XFRM_LOOKUP_ICMP);
		goto out_dst_release;

	if (ipv6_addr_is_multicast(&fl.fl6_dst))
		hlimit = np->mcast_hops;
		hlimit = np->hop_limit;
		hlimit = ip6_dst_hoplimit(dst);
	msg.offset = skb_network_offset(skb);

	len = skb->len - msg.offset;
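	/*
	 * Include as much of the offending packet as fits without the
	 * error message exceeding the minimum IPv6 MTU of 1280 bytes
	 * (RFC 2463 section 2.4(c)).
	 */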
	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
		LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
		goto out_dst_release;

	idev = in6_dev_get(skb->dev);

	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
			      len + sizeof(struct icmp6hdr),
			      sizeof(struct icmp6hdr),
			      hlimit, tclass, NULL, &fl, (struct rt6_info *)dst,
		ip6_flush_pending_frames(sk);
	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));

	if (likely(idev != NULL))

	icmpv6_xmit_unlock(sk);

EXPORT_SYMBOL(icmpv6_send);
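/*
 * Answer an ICMPv6 Echo Request: echo the request's identifier, sequence
 * number and payload back to the sender with type ICMPV6_ECHO_REPLY.
 */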
static void icmpv6_echo_reply(struct sk_buff *skb)
	struct net *net = dev_net(skb->dev);
	struct inet6_dev *idev;
	struct ipv6_pinfo *np;
	struct in6_addr *saddr = NULL;
	struct icmp6hdr *icmph = icmp6_hdr(skb);
	struct icmp6hdr tmp_hdr;
	struct icmpv6_msg msg;
	struct dst_entry *dst;

	saddr = &ipv6_hdr(skb)->daddr;

	if (!ipv6_unicast_destination(skb))

	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_ICMPV6;
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, saddr);
	fl.oif = skb->dev->ifindex;
	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
	security_skb_classify_flow(skb, &fl);

	sk = icmpv6_xmit_lock(net);

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl);
	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)

	if (ipv6_addr_is_multicast(&fl.fl6_dst))
		hlimit = np->mcast_hops;
		hlimit = np->hop_limit;
		hlimit = ip6_dst_hoplimit(dst);

	idev = in6_dev_get(skb->dev);

	msg.type = ICMPV6_ECHO_REPLY;

	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
			      sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
			      (struct rt6_info *)dst, MSG_DONTWAIT);
		ip6_flush_pending_frames(sk);
	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));

	if (likely(idev != NULL))

	icmpv6_xmit_unlock(sk);
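/*
 * Deliver a received ICMPv6 error to the upper layers: locate the
 * transport header embedded in the offending packet, then hand the error
 * to the matching inet6 protocol's err_handler and to raw sockets bound
 * to that protocol.
 */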
static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
	struct inet6_protocol *ipprot;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))

	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
	if (ipv6_ext_hdr(nexthdr)) {
		/* now skip over extension headers */
		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
		inner_offset = sizeof(struct ipv6hdr);
	/* Check the header, including 8 bytes of the inner protocol header. */
	if (!pskb_may_pull(skb, inner_offset + 8))

	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
	 * Without this we will not be able, e.g., to make source-routed
	 * pmtu discovery work.
	 * Corresponding argument (opt) to notifiers is already added.
	 */
	hash = nexthdr & (MAX_INET_PROTOS - 1);

	ipprot = rcu_dereference(inet6_protos[hash]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);

	raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
 * Handle icmp messages
static int icmpv6_rcv(struct sk_buff *skb)
	struct net_device *dev = skb->dev;
	struct inet6_dev *idev = __in6_dev_get(dev);
	struct in6_addr *saddr, *daddr;
	struct ipv6hdr *orig_hdr;
	struct icmp6hdr *hdr;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		if (!(skb->sp && skb->sp->xvec[skb->sp->len - 1]->props.flags &

		if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr)))

		nh = skb_network_offset(skb);
		skb_set_network_header(skb, sizeof(*hdr));

		if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))

		skb_set_network_header(skb, nh);

	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;

	/* Perform checksum. */
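	/*
	 * CHECKSUM_COMPLETE: fold the device-computed sum against the IPv6
	 * pseudo-header.  Otherwise seed skb->csum with the pseudo-header
	 * and let __skb_checksum_complete() verify the packet in software.
	 */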
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
		skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
		if (__skb_checksum_complete(skb)) {
			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
				       NIP6(*saddr), NIP6(*daddr));
	if (!pskb_pull(skb, sizeof(*hdr)))

	hdr = icmp6_hdr(skb);

	type = hdr->icmp6_type;

	ICMP6MSGIN_INC_STATS_BH(idev, type);

	case ICMPV6_ECHO_REQUEST:
		icmpv6_echo_reply(skb);

	case ICMPV6_ECHO_REPLY:
		/* we couldn't care less */

	case ICMPV6_PKT_TOOBIG:
		/* BUGGG_FUTURE: if the packet contains a rthdr, we cannot
		 * update the standard destination cache.  It seems only an
		 * "advanced" destination cache would allow us to solve this
		 * problem.
		 */
		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))

		hdr = icmp6_hdr(skb);
		orig_hdr = (struct ipv6hdr *) (hdr + 1);
		rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
				   ntohl(hdr->icmp6_mtu));

		 * Drop through to notify

	case ICMPV6_DEST_UNREACH:
	case ICMPV6_TIME_EXCEED:
	case ICMPV6_PARAMPROB:
		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);

	case NDISC_ROUTER_SOLICITATION:
	case NDISC_ROUTER_ADVERTISEMENT:
	case NDISC_NEIGHBOUR_SOLICITATION:
	case NDISC_NEIGHBOUR_ADVERTISEMENT:

	case ICMPV6_MGM_QUERY:
		igmp6_event_query(skb);

	case ICMPV6_MGM_REPORT:
		igmp6_event_report(skb);

	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_NI_QUERY:
	case ICMPV6_NI_REPLY:
	case ICMPV6_MLD2_REPORT:
	case ICMPV6_DHAAD_REQUEST:
	case ICMPV6_DHAAD_REPLY:
	case ICMPV6_MOBILE_PREFIX_SOL:
	case ICMPV6_MOBILE_PREFIX_ADV:
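		/*
		 * These informational types are left to interested raw
		 * sockets and userspace daemons (e.g. Node Information and
		 * Mobile IPv6 signalling); the kernel takes no further
		 * action on them here.
		 */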
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");

		if (type & ICMPV6_INFOMSG_MASK)

		 * An error of unknown type must still be passed up to
		 * the upper layer.
		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);

	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
void icmpv6_flow_init(struct sock *sk, struct flowi *fl,
		      const struct in6_addr *saddr,
		      const struct in6_addr *daddr,
	memset(fl, 0, sizeof(*fl));
	ipv6_addr_copy(&fl->fl6_src, saddr);
	ipv6_addr_copy(&fl->fl6_dst, daddr);
	fl->proto = IPPROTO_ICMPV6;
	fl->fl_icmp_type = type;
	fl->fl_icmp_code = 0;
	security_sk_classify_flow(sk, fl);

 * Special lock-class for __icmpv6_sk:
static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
static int __net_init icmpv6_sk_init(struct net *net)
		kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
	if (net->ipv6.icmp_sk == NULL)

	for_each_possible_cpu(i) {
		err = inet_ctl_sock_create(&sk, PF_INET6,
					   SOCK_RAW, IPPROTO_ICMPV6, net);
			       "Failed to initialize the ICMP6 control socket "

		net->ipv6.icmp_sk[i] = sk;

		 * Split off their lock-class, because sk->sk_dst_lock
		 * gets used from softirqs, which is safe for
		 * __icmpv6_sk (because those never get directly used
		 * via userspace syscalls), but unsafe for normal sockets.
		lockdep_set_class(&sk->sk_dst_lock,
				  &icmpv6_socket_sk_dst_lock_key);

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff struct overhead.
		 */
			(2 * ((64 * 1024) + sizeof(struct sk_buff)));

	for (j = 0; j < i; j++)
		inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]);
	kfree(net->ipv6.icmp_sk);
static void __net_exit icmpv6_sk_exit(struct net *net)
	for_each_possible_cpu(i) {
		inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
	kfree(net->ipv6.icmp_sk);

static struct pernet_operations icmpv6_sk_ops = {
	.init = icmpv6_sk_init,
	.exit = icmpv6_sk_exit,

int __init icmpv6_init(void)
	err = register_pernet_subsys(&icmpv6_sk_ops);

	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)

	printk(KERN_ERR "Failed to register ICMP6 protocol\n");
	unregister_pernet_subsys(&icmpv6_sk_ops);

void icmpv6_cleanup(void)
	unregister_pernet_subsys(&icmpv6_sk_ops);
	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);

static const struct icmp6_err {
	{ /* ADM_PROHIBITED */
	{ /* Was NOT_NEIGHBOUR, now reserved */
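/*
 * Map an ICMPv6 error (type, code) onto an errno value for the transport
 * layer; the return value indicates whether the error is fatal for the
 * connection.
 */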
int icmpv6_err_convert(int type, int code, int *err)
	case ICMPV6_DEST_UNREACH:
		if (code <= ICMPV6_PORT_UNREACH) {
			*err = tab_unreach[code].err;
			fatal = tab_unreach[code].fatal;

	case ICMPV6_PKT_TOOBIG:

	case ICMPV6_PARAMPROB:

	case ICMPV6_TIME_EXCEED:

EXPORT_SYMBOL(icmpv6_err_convert);
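/*
 * Sysctl: net.ipv6.icmp.ratelimit sets the token interval, in milliseconds,
 * used by icmpv6_xrlim_allow() above to rate-limit outgoing ICMPv6 messages
 * per destination (stored internally in jiffies, hence
 * proc_dointvec_ms_jiffies).
 */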
ctl_table ipv6_icmp_table_template[] = {
		.ctl_name = NET_IPV6_ICMP_RATELIMIT,
		.procname = "ratelimit",
		.data = &init_net.ipv6.sysctl.icmpv6_time,
		.maxlen = sizeof(int),
		.proc_handler = &proc_dointvec_ms_jiffies,
		.strategy = &sysctl_ms_jiffies

struct ctl_table *ipv6_icmp_sysctl_init(struct net *net)
	struct ctl_table *table;

	table = kmemdup(ipv6_icmp_table_template,
			sizeof(ipv6_icmp_table_template),

		table[0].data = &net->ipv6.sysctl.icmpv6_time;