/*
 *	Internet Control Message Protocol (ICMPv6)
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
 *
 *	Based on net/ipv4/icmp.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *
 *	Andi Kleen		:	exception handling
 *	Andi Kleen			add rate limits. never reply to a icmp.
 *					add more length checks and other fixes.
 *	yoshfuji		:	ensure to sent parameter problem for
 *					fragments.
 *	YOSHIFUJI Hideaki @USAGI:	added sysctl for icmp rate limit.
 *	YOSHIFUJI Hideaki @USAGI:	Per-interface statistics support
 *	Kazunori MIYAZAWA @USAGI:	change output process to use ip6_append_data
 */
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/types.h>
37 #include <linux/socket.h>
39 #include <linux/kernel.h>
40 #include <linux/sockios.h>
41 #include <linux/net.h>
42 #include <linux/skbuff.h>
43 #include <linux/init.h>
44 #include <linux/netfilter.h>
47 #include <linux/sysctl.h>
50 #include <linux/inet.h>
51 #include <linux/netdevice.h>
52 #include <linux/icmpv6.h>
58 #include <net/ip6_checksum.h>
59 #include <net/protocol.h>
61 #include <net/rawv6.h>
62 #include <net/transp_v6.h>
63 #include <net/ip6_route.h>
64 #include <net/addrconf.h>
67 #include <net/inet_common.h>
69 #include <asm/uaccess.h>
70 #include <asm/system.h>
/*
 * Global per-cpu ICMPv6 SNMP counters (message totals and per-type
 * message counters).  Exported so other IPv6 modules can bump them.
 */
DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
EXPORT_SYMBOL(icmpv6_statistics);
DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics) __read_mostly;
EXPORT_SYMBOL(icmpv6msg_statistics);
78 * The ICMP socket(s). This is the most convenient way to flow control
79 * our ICMP output as well as maintain a clean interface throughout
80 * all layers. All Socketless IP sends will soon be gone.
82 * On SMP we have one ICMP socket per-cpu.
/*
 * Return this CPU's ICMPv6 control socket for the given namespace.
 * NOTE(review): uses smp_processor_id(), so the caller must run with
 * preemption disabled.  Braces elided in this extract.
 */
static inline struct sock *icmpv6_sk(struct net *net)
	return net->ipv6.icmp_sk[smp_processor_id()];
static int icmpv6_rcv(struct sk_buff *skb);

/*
 * ICMPv6 registration as an inet6 upper-layer protocol: skip the xfrm
 * policy check on input (NOPOLICY) and mark it as a final header
 * (FINAL) — no further extension headers follow.
 */
static struct inet6_protocol icmpv6_protocol = {
	.handler	=	icmpv6_rcv,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
/*
 * Try to take the per-cpu ICMPv6 socket lock without sleeping.
 * NOTE(review): the success/failure return paths are missing from this
 * extract; the visible branch handles a recursive-lock situation.
 */
static __inline__ int icmpv6_xmit_lock(struct sock *sk)
	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path (f.e. SIT or
		 * ip6ip6 tunnel) signals dst_link_failure() for an
		 * outgoing ICMP6 packet.
		 */
/* Counterpart to icmpv6_xmit_lock(): drop the socket spinlock. */
static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
	spin_unlock_bh(&sk->sk_lock.slock);
/*
 * Slightly more convenient version of icmpv6_send: emit a Parameter
 * Problem message with the given code and problem pointer @pos.
 */
void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
/*
 * Figure out, may we reply to this packet with icmp error.
 *
 * We do not reply, if:
 *	- it was icmp error message.
 *	- it is truncated, so that it is known, that protocol is ICMPV6
 *	  (i.e. in the middle of some exthdr)
 */
static int is_ineligible(struct sk_buff *skb)
	/* Offset of the first byte past the fixed IPv6 header. */
	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
	int len = skb->len - ptr;
	__u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	/* Walk the extension-header chain to the upper-layer protocol. */
	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		/* Peek at the ICMPv6 type field without linearizing. */
		tp = skb_header_pointer(skb,
			ptr+offsetof(struct icmp6hdr, icmp6_type),
			sizeof(_type), &_type);
		/* An error message (INFOMSG bit clear) is ineligible.
		 * NOTE(review): surrounding lines missing from extract. */
		    !(*tp & ICMPV6_INFOMSG_MASK))
/*
 * Check the ICMP output rate limit
 */
static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
	struct dst_entry *dst;
	struct net *net = sock_net(sk);

	/* Informational messages are not limited. */
	if (type & ICMPV6_INFOMSG_MASK)

	/* Do not limit pmtu discovery, it would break it. */
	if (type == ICMPV6_PKT_TOOBIG)

	/*
	 * Look up the output route.
	 * XXX: perhaps the expire for routing entries cloned by
	 * this lookup should be more aggressive (not longer than timeout).
	 */
	dst = ip6_route_output(net, sk, fl);
	/* NOTE(review): the route-error branch is elided in this extract;
	 * the visible arm counts a no-route event. */
		IP6_INC_STATS(ip6_dst_idev(dst),
			      IPSTATS_MIB_OUTNOROUTES);
	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
		struct rt6_info *rt = (struct rt6_info *)dst;
		/* Per-netns sysctl: base ICMPv6 rate-limit interval. */
		int tmo = net->ipv6.sysctl.icmpv6_time;

		/* Give more bandwidth to wider prefixes. */
		if (rt->rt6i_dst.plen < 128)
			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);

		/* Token-bucket limit keyed on the destination entry. */
		res = xrlim_allow(dst, tmo);
/*
 * an inline helper for the "simple" if statement below
 * checks if parameter problem report is caused by an
 * unrecognized IPv6 option that has the Option Type
 * highest-order two bits set to 10
 */
static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
	/* @offset is relative to the network header; rebase to skb data. */
	offset += skb_network_offset(skb);
	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
	/* 0b10xxxxxx option type => "unrecognized option" class. */
	return (*op & 0xC0) == 0x80;
/*
 * Finalize a queued ICMPv6 message: copy the prepared header @thdr in
 * front of the pending data, compute the ICMPv6 checksum over the
 * whole write queue, and hand the frames to ip6_push_pending_frames().
 * NOTE(review): several lines (error paths, multi-skb checksum setup)
 * are missing from this extract.
 */
static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
	struct icmp6hdr *icmp6h;

	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)

	icmp6h = icmp6_hdr(skb);
	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
	icmp6h->icmp6_cksum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/* Single skb: fold the header into its existing csum. */
		skb->csum = csum_partial((char *)icmp6h,
					sizeof(struct icmp6hdr), skb->csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
		/* Multiple skbs: accumulate each fragment's csum first. */
		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);

		tmp_csum = csum_partial((char *)icmp6h,
					sizeof(struct icmp6hdr), tmp_csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
	ip6_push_pending_frames(sk);
/*
 * ip6_append_data() getfrag callback: copy @len bytes of the original
 * (offending) packet into the outgoing skb while accumulating the
 * checksum.  For error messages (non-info types) also attach the
 * conntrack reference of the original skb.
 */
static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
	struct sk_buff *org_skb = msg->skb;

	/* Copy from the original packet and fold into skb->csum. */
	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (!(msg->type & ICMPV6_INFOMSG_MASK))
		nf_ct_attach(skb, org_skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
/*
 * Mobile IPv6: if the packet carries a Home Address destination option
 * (HAO), swap the IPv6 source address with the home address so the
 * ICMP error is built against the mobile node's home address.
 */
static void mip6_addr_swap(struct sk_buff *skb)
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6_destopt_hao *hao;

	off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
	if (likely(off >= 0)) {
		hao = (struct ipv6_destopt_hao *)
					(skb_network_header(skb) + off);
		/* Three-way swap of iph->saddr and hao->addr via tmp. */
		ipv6_addr_copy(&tmp, &iph->saddr);
		ipv6_addr_copy(&iph->saddr, &hao->addr);
		ipv6_addr_copy(&hao->addr, &tmp);
/* No-op stub when MIPv6 support is not configured. */
static inline void mip6_addr_swap(struct sk_buff *skb) {}
/*
 *	Send an ICMP message in response to a packet in error
 *
 * NOTE(review): many interior lines (labels, returns, some branches)
 * are missing from this extract; comments below describe only the
 * visible logic.
 */
void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
		 struct net_device *dev)
	struct net *net = dev_net(skb->dev);
	struct inet6_dev *idev = NULL;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct ipv6_pinfo *np;
	struct in6_addr *saddr = NULL;
	struct dst_entry *dst;
	struct dst_entry *dst2;
	struct icmp6hdr tmp_hdr;
	struct icmpv6_msg msg;

	/* Sanity check: the IPv6 header must lie within the skb data. */
	if ((u8 *)hdr < skb->head ||
	    (skb->network_header + sizeof(*hdr)) > skb->tail)

	/*
	 *	Make sure we respect the rules
	 *	i.e. RFC 1885 2.4(e)
	 *	Rule (e.1) is enforced by not using icmpv6_send
	 *	in any code that processes icmp errors.
	 */
	addr_type = ipv6_addr_type(&hdr->daddr);

	if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0))

	/* For multicast / non-host-addressed packets, only PKT_TOOBIG
	 * and unrecognized-option Parameter Problems may be answered. */
	if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
		if (type != ICMPV6_PKT_TOOBIG &&
		    !(type == ICMPV6_PARAMPROB &&
		      code == ICMPV6_UNK_OPTION &&
		      (opt_unrec(skb, info))))

	addr_type = ipv6_addr_type(&hdr->saddr);

	/* Link-local sources must be answered on the receiving iface. */
	if (addr_type & IPV6_ADDR_LINKLOCAL)
		iif = skb->dev->ifindex;

	/*
	 *	Must not send error if the source does not uniquely
	 *	identify a single node (RFC2463 Section 2.4).
	 *	We check unspecified / multicast addresses here,
	 *	and anycast addresses will be checked later.
	 */
	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");

	/*
	 *	Never answer to a ICMP packet.
	 */
	if (is_ineligible(skb)) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");

	/* Build the flow: reply goes back to the offending source. */
	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_ICMPV6;
	ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
	ipv6_addr_copy(&fl.fl6_src, saddr);
	fl.fl_icmp_type = type;
	fl.fl_icmp_code = code;
	security_skb_classify_flow(skb, &fl);

	if (icmpv6_xmit_lock(sk))

	/* Respect the per-dst ICMPv6 rate limit. */
	if (!icmpv6_xrlim_allow(sk, type, &fl))

	tmp_hdr.icmp6_type = type;
	tmp_hdr.icmp6_code = code;
	tmp_hdr.icmp6_cksum = 0;
	tmp_hdr.icmp6_pointer = htonl(info);

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl);

	/*
	 * We won't send icmp if the destination is known
	 * anycast (RFC 2463 2.4: anycast is not a unique source).
	 */
	if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
		goto out_dst_release;

	/* No need to clone since we're just using its address. */

	err = xfrm_lookup(&dst, &fl, sk, 0);
	/* xfrm fallback: derive the reverse flow from the original
	 * packet and retry the lookup in ICMP mode. */
	if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
		goto out_dst_release;

	if (ip6_dst_lookup(sk, &dst2, &fl))
		goto out_dst_release;

	err = xfrm_lookup(&dst2, &fl, sk, XFRM_LOOKUP_ICMP);
	if (err == -ENOENT) {

	/* Hop limit: socket multicast/unicast setting, else route's. */
	if (ipv6_addr_is_multicast(&fl.fl6_dst))
		hlimit = np->mcast_hops;
		hlimit = np->hop_limit;
		hlimit = ip6_dst_hoplimit(dst);

	msg.offset = skb_network_offset(skb);

	/* Quote as much of the offending packet as fits in the
	 * IPv6 minimum MTU (RFC 2463 requirement). */
	len = skb->len - msg.offset;
	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
		LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
		goto out_dst_release;

	idev = in6_dev_get(skb->dev);

	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
			      len + sizeof(struct icmp6hdr),
			      sizeof(struct icmp6hdr),
			      hlimit, tclass, NULL, &fl, (struct rt6_info*)dst,
		/* Append failed: drop everything queued so far. */
		ip6_flush_pending_frames(sk);
		err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));

	if (likely(idev != NULL))

	icmpv6_xmit_unlock(sk);
EXPORT_SYMBOL(icmpv6_send);
/*
 * Answer an incoming Echo Request with an Echo Reply: reuse the
 * request's ICMPv6 header (type flipped to ECHO_REPLY), swap the
 * addresses, and send via ip6_append_data/push_pending_frames.
 * NOTE(review): several interior lines are missing from this extract.
 */
static void icmpv6_echo_reply(struct sk_buff *skb)
	struct net *net = dev_net(skb->dev);
	struct inet6_dev *idev;
	struct ipv6_pinfo *np;
	struct in6_addr *saddr = NULL;
	struct icmp6hdr *icmph = icmp6_hdr(skb);
	struct icmp6hdr tmp_hdr;
	struct icmpv6_msg msg;
	struct dst_entry *dst;

	/* Reply from the address the request was sent to. */
	saddr = &ipv6_hdr(skb)->daddr;

	if (!ipv6_unicast_destination(skb))

	/* Clone the request header; only the type changes. */
	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_ICMPV6;
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, saddr);
	fl.oif = skb->dev->ifindex;
	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
	security_skb_classify_flow(skb, &fl);

	if (icmpv6_xmit_lock(sk))

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl);
	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)

	/* Hop limit: socket multicast/unicast setting, else route's. */
	if (ipv6_addr_is_multicast(&fl.fl6_dst))
		hlimit = np->mcast_hops;
		hlimit = np->hop_limit;
		hlimit = ip6_dst_hoplimit(dst);

	idev = in6_dev_get(skb->dev);

	msg.type = ICMPV6_ECHO_REPLY;

	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
				sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
				(struct rt6_info*)dst, MSG_DONTWAIT);
		/* Append failed: discard queued frames. */
		ip6_flush_pending_frames(sk);
		err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));

	if (likely(idev != NULL))

	icmpv6_xmit_unlock(sk);
/*
 * Deliver a received ICMPv6 error to the upper-layer protocol that
 * sent the offending packet: skip the inner IPv6 extension headers,
 * call the registered protocol's err_handler, then notify raw sockets.
 */
static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
	struct inet6_protocol *ipprot;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))

	/* Inner (quoted) packet's next-header value. */
	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
	if (ipv6_ext_hdr(nexthdr)) {
		/* now skip over extension headers */
		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
		inner_offset = sizeof(struct ipv6hdr);

	/* Checkin header including 8 bytes of inner protocol header. */
	if (!pskb_may_pull(skb, inner_offset+8))

	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
	   Without this we will not able f.e. to make source routed
	   pmtu discovery.
	   Corresponding argument (opt) to notifiers is already added.
	 */

	hash = nexthdr & (MAX_INET_PROTOS - 1);

	/* RCU-protected lookup in the inet6 protocol table. */
	ipprot = rcu_dereference(inet6_protos[hash]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);

	raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
/*
 *	Handle icmp messages
 *
 * Main ICMPv6 input routine: validates xfrm policy and checksum, then
 * dispatches on the ICMPv6 type.  NOTE(review): break/return/label
 * lines are missing from this extract.
 */
static int icmpv6_rcv(struct sk_buff *skb)
	struct net_device *dev = skb->dev;
	struct inet6_dev *idev = __in6_dev_get(dev);
	struct in6_addr *saddr, *daddr;
	struct ipv6hdr *orig_hdr;
	struct icmp6hdr *hdr;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		/* Only accept if the last transform in the bundle set
		 * the expected property flag. */
		if (!(skb->sp && skb->sp->xvec[skb->sp->len - 1]->props.flags &

		if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr)))

		/* Temporarily point the network header at the inner
		 * packet for the reverse policy check, then restore. */
		nh = skb_network_offset(skb);
		skb_set_network_header(skb, sizeof(*hdr));

		if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))

		skb_set_network_header(skb, nh);

	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;

	/* Perform checksum. */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
		/* Hardware did not verify: compute in software. */
		skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
		if (__skb_checksum_complete(skb)) {
			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
				       NIP6(*saddr), NIP6(*daddr));

	if (!pskb_pull(skb, sizeof(*hdr)))

	hdr = icmp6_hdr(skb);

	type = hdr->icmp6_type;

	ICMP6MSGIN_INC_STATS_BH(idev, type);

	case ICMPV6_ECHO_REQUEST:
		icmpv6_echo_reply(skb);

	case ICMPV6_ECHO_REPLY:
		/* we couldn't care less */

	case ICMPV6_PKT_TOOBIG:
		/* BUGGG_FUTURE: if packet contains rthdr, we cannot update
		   standard destination cache. Seems, only "advanced"
		   destination cache will allow to solve this problem
		 */
		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		hdr = icmp6_hdr(skb);
		orig_hdr = (struct ipv6hdr *) (hdr + 1);
		/* Update path MTU for the quoted destination. */
		rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
				   ntohl(hdr->icmp6_mtu));

		/*
		 *	Drop through to notify
		 */

	case ICMPV6_DEST_UNREACH:
	case ICMPV6_TIME_EXCEED:
	case ICMPV6_PARAMPROB:
		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);

	case NDISC_ROUTER_SOLICITATION:
	case NDISC_ROUTER_ADVERTISEMENT:
	case NDISC_NEIGHBOUR_SOLICITATION:
	case NDISC_NEIGHBOUR_ADVERTISEMENT:

	case ICMPV6_MGM_QUERY:
		igmp6_event_query(skb);

	case ICMPV6_MGM_REPORT:
		igmp6_event_report(skb);

	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_NI_QUERY:
	case ICMPV6_NI_REPLY:
	case ICMPV6_MLD2_REPORT:
	case ICMPV6_DHAAD_REQUEST:
	case ICMPV6_DHAAD_REPLY:
	case ICMPV6_MOBILE_PREFIX_SOL:
	case ICMPV6_MOBILE_PREFIX_ADV:

		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");

		/* Unknown informational messages are silently discarded. */
		if (type & ICMPV6_INFOMSG_MASK)

		/*
		 * error of unknown type.
		 * must pass to upper level
		 */

		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);

	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
/*
 * Initialize a flowi for an ICMPv6 message of @type from @saddr to
 * @daddr and classify it for LSM security.
 */
void icmpv6_flow_init(struct sock *sk, struct flowi *fl,
		      const struct in6_addr *saddr,
		      const struct in6_addr *daddr,
	memset(fl, 0, sizeof(*fl));
	ipv6_addr_copy(&fl->fl6_src, saddr);
	ipv6_addr_copy(&fl->fl6_dst, daddr);
	fl->proto		= IPPROTO_ICMPV6;
	fl->fl_icmp_type	= type;
	fl->fl_icmp_code	= 0;
	security_sk_classify_flow(sk, fl);
/*
 * Special lock-class for __icmpv6_sk:
 * (see the lockdep_set_class() call in icmpv6_sk_init)
 */
static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
/*
 * Per-namespace init: allocate and create one ICMPv6 control socket
 * per possible CPU.  On failure, destroy the sockets created so far
 * and free the array.  NOTE(review): error labels and the success
 * return are missing from this extract.
 */
static int __net_init icmpv6_sk_init(struct net *net)
		kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
	if (net->ipv6.icmp_sk == NULL)

	for_each_possible_cpu(i) {
		err = inet_ctl_sock_create(&sk, PF_INET6,
					   SOCK_RAW, IPPROTO_ICMPV6, net);
			       "Failed to initialize the ICMP6 control socket "

		net->ipv6.icmp_sk[i] = sk;

		/*
		 * Split off their lock-class, because sk->sk_dst_lock
		 * gets used from softirqs, which is safe for
		 * __icmpv6_sk (because those never get directly used
		 * via userspace syscalls), but unsafe for normal sockets.
		 */
		lockdep_set_class(&sk->sk_dst_lock,
				  &icmpv6_socket_sk_dst_lock_key);

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff struct overhead.
		 */
			(2 * ((64 * 1024) + sizeof(struct sk_buff)));

	/* Unwind: tear down the sockets created before the failure. */
	for (j = 0; j < i; j++)
		inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]);
	kfree(net->ipv6.icmp_sk);
/* Per-namespace teardown: destroy every per-cpu control socket. */
static void __net_exit icmpv6_sk_exit(struct net *net)
	for_each_possible_cpu(i) {
		inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
	kfree(net->ipv6.icmp_sk);
/* Pernet hooks: control sockets follow namespace create/destroy. */
static struct pernet_operations icmpv6_sk_ops = {
       .init = icmpv6_sk_init,
       .exit = icmpv6_sk_exit,
/*
 * Module init: register the pernet subsystem, then register ICMPv6
 * with the inet6 protocol table; unwind the pernet registration if the
 * protocol registration fails.
 */
int __init icmpv6_init(void)
	err = register_pernet_subsys(&icmpv6_sk_ops);

	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)
	printk(KERN_ERR "Failed to register ICMP6 protocol\n");
	unregister_pernet_subsys(&icmpv6_sk_ops);
/* Module exit: undo icmpv6_init() in reverse order. */
void icmpv6_cleanup(void)
	unregister_pernet_subsys(&icmpv6_sk_ops);
	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
/*
 * Map ICMPV6_DEST_UNREACH codes to (errno, fatal) pairs; indexed by
 * code in icmpv6_err_convert().  NOTE(review): most entries are
 * missing from this extract.
 */
static const struct icmp6_err {
	{ /* ADM_PROHIBITED */
	{ /* Was NOT_NEIGHBOUR, now reserved */
/*
 * Translate an ICMPv6 (type, code) error into an errno value via
 * *err, returning (in the full source) whether the error is fatal to
 * the connection.
 */
int icmpv6_err_convert(int type, int code, int *err)
	case ICMPV6_DEST_UNREACH:
		/* Codes up to PORT_UNREACH map through tab_unreach. */
		if (code <= ICMPV6_PORT_UNREACH) {
			*err = tab_unreach[code].err;
			fatal = tab_unreach[code].fatal;

	case ICMPV6_PKT_TOOBIG:

	case ICMPV6_PARAMPROB:

	case ICMPV6_TIME_EXCEED:
EXPORT_SYMBOL(icmpv6_err_convert);
/*
 * Template sysctl table (net.ipv6.icmp.ratelimit); duplicated per
 * namespace by ipv6_icmp_sysctl_init() with .data re-pointed at the
 * namespace's own icmpv6_time.
 */
ctl_table ipv6_icmp_table_template[] = {
		.ctl_name	= NET_IPV6_ICMP_RATELIMIT,
		.procname	= "ratelimit",
		.data		= &init_net.ipv6.sysctl.icmpv6_time,
		.maxlen		= sizeof(int),
		.proc_handler	= &proc_dointvec
962 struct ctl_table *ipv6_icmp_sysctl_init(struct net *net)
964 struct ctl_table *table;
966 table = kmemdup(ipv6_icmp_table_template,
967 sizeof(ipv6_icmp_table_template),
971 table[0].data = &net->ipv6.sysctl.icmpv6_time;