 * Internet Control Message Protocol (ICMPv6)
 * Linux INET6 implementation
 *
 * Pedro Roque <roque@di.fc.ul.pt>
 *
 * $Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
 *
 * Based on net/ipv4/icmp.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen              : exception handling
 * Andi Kleen              : add rate limits; never reply to an icmp.
 *                           add more length checks and other fixes.
 * yoshfuji                : ensure to send parameter problem for fragments.
 * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit.
 * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support
 * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/netfilter.h>
#include <linux/sysctl.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/icmpv6.h>

#include <net/ip6_checksum.h>
#include <net/protocol.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>
#include <asm/system.h>

DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
EXPORT_SYMBOL(icmpv6_statistics);
DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics) __read_mostly;
EXPORT_SYMBOL(icmpv6msg_statistics);
 * The ICMP socket(s). This is the most convenient way to flow control
 * our ICMP output as well as maintain a clean interface throughout
 * all layers. All Socketless IP sends will soon be gone.
 *
 * On SMP we have one ICMP socket per-cpu.
static inline struct sock *icmpv6_sk(struct net *net)
        return net->ipv6.icmp_sk[smp_processor_id()];

static int icmpv6_rcv(struct sk_buff *skb);

static struct inet6_protocol icmpv6_protocol = {
        .handler = icmpv6_rcv,
        .flags   = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
static __inline__ int icmpv6_xmit_lock(struct sock *sk)
        if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
                /* This can happen if the output path (e.g. SIT or
                 * ip6ip6 tunnel) signals dst_link_failure() for an
                 * outgoing ICMP6 packet.

static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
        spin_unlock_bh(&sk->sk_lock.slock);
 * Slightly more convenient version of icmpv6_send.
void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
        icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
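
/* Callers elsewhere in the stack (e.g. the extension-header parsing code)
 * typically invoke this helper for an unknown TLV option roughly as
 *
 *        icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
 *
 * where optoff is the offset of the offending Option Type byte; it ends up
 * in the icmp6_pointer field via the "info" argument of icmpv6_send().
 */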
 * Figure out whether we may reply to this packet with an ICMP error.
 *
 * We do not reply if:
 * - it was an ICMP error message.
 * - it is truncated at a point where the protocol is already known to be
 *   ICMPv6 (i.e. cut off in the middle of the extension header chain).
static int is_ineligible(struct sk_buff *skb)
        int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
        int len = skb->len - ptr;
        __u8 nexthdr = ipv6_hdr(skb)->nexthdr;

        ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);

        if (nexthdr == IPPROTO_ICMPV6) {
                tp = skb_header_pointer(skb,
                                        ptr + offsetof(struct icmp6hdr, icmp6_type),
                                        sizeof(_type), &_type);
                    !(*tp & ICMPV6_INFOMSG_MASK))
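
                /* ICMPv6 informational messages have the high bit of the type
                 * set (type >= 128), so a clear ICMPV6_INFOMSG_MASK (0x80)
                 * means the inner packet is an ICMPv6 *error*, to which we
                 * must not reply.
                 */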
 * Check the ICMP output rate limit
static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
        struct dst_entry *dst;
        struct net *net = sock_net(sk);

        /* Informational messages are not limited. */
        if (type & ICMPV6_INFOMSG_MASK)

        /* Do not limit pmtu discovery, it would break it. */
        if (type == ICMPV6_PKT_TOOBIG)

         * Look up the output route.
         * XXX: perhaps the expire for routing entries cloned by
         * this lookup should be more aggressive (not longer than timeout).
        dst = ip6_route_output(net, sk, fl);
                IP6_INC_STATS(ip6_dst_idev(dst),
                              IPSTATS_MIB_OUTNOROUTES);
        } else if (dst->dev && (dst->dev->flags & IFF_LOOPBACK)) {
                struct rt6_info *rt = (struct rt6_info *)dst;
                int tmo = net->ipv6.sysctl.icmpv6_time;

                /* Give more bandwidth to wider prefixes. */
                if (rt->rt6i_dst.plen < 128)
                        tmo >>= ((128 - rt->rt6i_dst.plen) >> 5);

                res = xrlim_allow(dst, tmo);
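
                /* Example of the scaling above: for a /64 route the timeout
                 * is shifted right by (128 - 64) >> 5 == 2, i.e. divided by
                 * 4, and for a /0 (default) route by 4 bits, i.e. divided by
                 * 16 - so destinations covered by wider prefixes may receive
                 * ICMPs more frequently than host routes.
                 */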
 * An inline helper for the "simple" if statement below;
 * it checks whether a parameter problem report is caused by an
 * unrecognized IPv6 option whose Option Type has its two
 * highest-order bits set to 10.
static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
        offset += skb_network_offset(skb);
        op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
        return (*op & 0xC0) == 0x80;
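
        /* The two high-order bits of an Option Type encode what to do with
         * an unrecognized option (RFC 2460 4.2); "10" means discard the
         * packet and send a Parameter Problem even if the destination was
         * multicast, hence the (*op & 0xC0) == 0x80 test above.
         */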
static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
        struct icmp6hdr *icmp6h;

        if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)

        icmp6h = icmp6_hdr(skb);
        memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
        icmp6h->icmp6_cksum = 0;
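
        /* The ICMPv6 checksum covers the pseudo-header plus the whole
         * message.  If only one skb is queued, its partial csum already
         * covers the payload, so it is enough to add the ICMPv6 header and
         * fold in the pseudo-header; otherwise the per-fragment csums queued
         * by ip6_append_data() are summed together first.
         */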
        if (skb_queue_len(&sk->sk_write_queue) == 1) {
                skb->csum = csum_partial((char *)icmp6h,
                                         sizeof(struct icmp6hdr), skb->csum);
                icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
                skb_queue_walk(&sk->sk_write_queue, skb) {
                        tmp_csum = csum_add(tmp_csum, skb->csum);

                tmp_csum = csum_partial((char *)icmp6h,
                                        sizeof(struct icmp6hdr), tmp_csum);
                icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,

        ip6_push_pending_frames(sk);
static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
        struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
        struct sk_buff *org_skb = msg->skb;

        csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
        skb->csum = csum_block_add(skb->csum, csum, odd);
        if (!(msg->type & ICMPV6_INFOMSG_MASK))
                nf_ct_attach(skb, org_skb);
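
/* icmpv6_getfrag() is the ip6_append_data() callback: it copies "len" bytes
 * of the offending packet into the new skb while accumulating the checksum.
 * For error messages the conntrack of the original skb is attached so that
 * connection tracking can relate the ICMPv6 error to the original flow.
 */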
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
static void mip6_addr_swap(struct sk_buff *skb)
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct ipv6_destopt_hao *hao;

        off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
        if (likely(off >= 0)) {
                hao = (struct ipv6_destopt_hao *)
                        (skb_network_header(skb) + off);
                ipv6_addr_copy(&tmp, &iph->saddr);
                ipv6_addr_copy(&iph->saddr, &hao->addr);
                ipv6_addr_copy(&hao->addr, &tmp);

static inline void mip6_addr_swap(struct sk_buff *skb) {}
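
/* With Mobile IPv6, input processing of a Home Address destination option
 * swaps the care-of address in the IPv6 source field with the home address
 * carried in the option.  mip6_addr_swap() undoes that swap before the error
 * is built, so the reply's destination and the quoted packet match what was
 * actually on the wire.  Without MIP6 support it is a no-op.
 */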
 * Send an ICMP message in response to a packet in error
void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
                 struct net_device *dev)
        struct net *net = dev_net(skb->dev);
        struct inet6_dev *idev = NULL;
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct ipv6_pinfo *np;
        struct in6_addr *saddr = NULL;
        struct dst_entry *dst;
        struct dst_entry *dst2;
        struct icmp6hdr tmp_hdr;
        struct icmpv6_msg msg;

        if ((u8 *)hdr < skb->head ||
            (skb->network_header + sizeof(*hdr)) > skb->tail)

         * Make sure we respect the rules
         * i.e. RFC 1885 2.4(e)
         * Rule (e.1) is enforced by not using icmpv6_send
         * in any code that processes icmp errors.
        addr_type = ipv6_addr_type(&hdr->daddr);

        if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0))

        if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
                if (type != ICMPV6_PKT_TOOBIG &&
                    !(type == ICMPV6_PARAMPROB &&
                      code == ICMPV6_UNK_OPTION &&
                      (opt_unrec(skb, info))))
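
        /* Per RFC 2463 2.4(e), only two errors may be sent in response to a
         * packet that was not unicast to this node (multicast destination,
         * or a frame received as link-layer multicast/broadcast/other-host):
         * Packet Too Big, and Parameter Problem for an unrecognized option
         * whose Option Type has the "10" high-order bits.
         */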
        addr_type = ipv6_addr_type(&hdr->saddr);

        if (addr_type & IPV6_ADDR_LINKLOCAL)
                iif = skb->dev->ifindex;

         * Must not send error if the source does not uniquely
         * identify a single node (RFC2463 Section 2.4).
         * We check unspecified / multicast addresses here,
         * and anycast addresses will be checked later.
        if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
                LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");

         * Never answer to an ICMP packet.
        if (is_ineligible(skb)) {
                LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_ICMPV6;
        ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
                ipv6_addr_copy(&fl.fl6_src, saddr);
        fl.fl_icmp_type = type;
        fl.fl_icmp_code = code;
        security_skb_classify_flow(skb, &fl);

        if (icmpv6_xmit_lock(sk))

        if (!icmpv6_xrlim_allow(sk, type, &fl))

        tmp_hdr.icmp6_type = type;
        tmp_hdr.icmp6_code = code;
        tmp_hdr.icmp6_cksum = 0;
        tmp_hdr.icmp6_pointer = htonl(info);

        if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
                fl.oif = np->mcast_oif;

        err = ip6_dst_lookup(sk, &dst, &fl);

         * We won't send icmp if the destination is known anycast.
        if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
                LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
                goto out_dst_release;

        /* No need to clone since we're just using its address. */
        err = xfrm_lookup(&dst, &fl, sk, 0);

        if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
                goto relookup_failed;

        if (ip6_dst_lookup(sk, &dst2, &fl))
                goto relookup_failed;

        err = xfrm_lookup(&dst2, &fl, sk, XFRM_LOOKUP_ICMP);
                goto out_dst_release;

        if (ipv6_addr_is_multicast(&fl.fl6_dst))
                hlimit = np->mcast_hops;
                hlimit = np->hop_limit;
                hlimit = ip6_dst_hoplimit(dst);

        msg.offset = skb_network_offset(skb);

        len = skb->len - msg.offset;
        len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
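
        /* An ICMPv6 error must fit into the IPv6 minimum MTU (1280 bytes),
         * so the quoted portion of the offending packet is truncated to
         * whatever fits after the IPv6 and ICMPv6 headers of the reply.
         */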
                LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
                goto out_dst_release;

        idev = in6_dev_get(skb->dev);

        err = ip6_append_data(sk, icmpv6_getfrag, &msg,
                              len + sizeof(struct icmp6hdr),
                              sizeof(struct icmp6hdr),
                              hlimit, tclass, NULL, &fl, (struct rt6_info *)dst,
                ip6_flush_pending_frames(sk);
                err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));

        if (likely(idev != NULL))

        icmpv6_xmit_unlock(sk);

EXPORT_SYMBOL(icmpv6_send);
static void icmpv6_echo_reply(struct sk_buff *skb)
        struct net *net = dev_net(skb->dev);
        struct inet6_dev *idev;
        struct ipv6_pinfo *np;
        struct in6_addr *saddr = NULL;
        struct icmp6hdr *icmph = icmp6_hdr(skb);
        struct icmp6hdr tmp_hdr;
        struct icmpv6_msg msg;
        struct dst_entry *dst;

        saddr = &ipv6_hdr(skb)->daddr;

        if (!ipv6_unicast_destination(skb))

        memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
        tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_ICMPV6;
        ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
                ipv6_addr_copy(&fl.fl6_src, saddr);
        fl.oif = skb->dev->ifindex;
        fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
        security_skb_classify_flow(skb, &fl);

        if (icmpv6_xmit_lock(sk))

        if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
                fl.oif = np->mcast_oif;

        err = ip6_dst_lookup(sk, &dst, &fl);
        if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)

        if (ipv6_addr_is_multicast(&fl.fl6_dst))
                hlimit = np->mcast_hops;
                hlimit = np->hop_limit;
                hlimit = ip6_dst_hoplimit(dst);

        idev = in6_dev_get(skb->dev);

        msg.type = ICMPV6_ECHO_REPLY;

        err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
                              sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
                              (struct rt6_info *)dst, MSG_DONTWAIT);
                ip6_flush_pending_frames(sk);
                err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));

        if (likely(idev != NULL))

        icmpv6_xmit_unlock(sk);
static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
        struct inet6_protocol *ipprot;

        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))

        nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
        if (ipv6_ext_hdr(nexthdr)) {
                /* now skip over extension headers */
                inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
                inner_offset = sizeof(struct ipv6hdr);

        /* Check the header, including 8 bytes of the inner protocol header. */
        if (!pskb_may_pull(skb, inner_offset + 8))

        /* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
           Without this we will not be able, e.g., to do source-routed
           pmtu discovery.
           The corresponding argument (opt) to the notifiers is already added.

        hash = nexthdr & (MAX_INET_PROTOS - 1);

        ipprot = rcu_dereference(inet6_protos[hash]);
        if (ipprot && ipprot->err_handler)
                ipprot->err_handler(skb, NULL, type, code, inner_offset, info);

        raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
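
        /* icmpv6_notify() thus fans the error out twice: once to the
         * inet6_protos[] err_handler registered for the inner next-header
         * (e.g. TCP or UDP), and once to any matching raw sockets via
         * raw6_icmp_error(), both with the offset of the inner transport
         * header and the ICMP info value.
         */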
 * Handle icmp messages
static int icmpv6_rcv(struct sk_buff *skb)
        struct net_device *dev = skb->dev;
        struct inet6_dev *idev = __in6_dev_get(dev);
        struct in6_addr *saddr, *daddr;
        struct ipv6hdr *orig_hdr;
        struct icmp6hdr *hdr;

        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                if (!(skb->sp && skb->sp->xvec[skb->sp->len - 1]->props.flags &

                if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr)))

                nh = skb_network_offset(skb);
                skb_set_network_header(skb, sizeof(*hdr));

                if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))

                skb_set_network_header(skb, nh);

        ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);

        saddr = &ipv6_hdr(skb)->saddr;
        daddr = &ipv6_hdr(skb)->daddr;

        /* Perform checksum. */
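
        /* The ICMPv6 checksum is mandatory and includes a pseudo-header.
         * With CHECKSUM_COMPLETE the device's sum only needs the
         * pseudo-header folded in via csum_ipv6_magic(); otherwise the sum
         * is seeded with the pseudo-header and __skb_checksum_complete()
         * verifies the rest in software.
         */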
        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
                if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
                skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
                if (__skb_checksum_complete(skb)) {
                        LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
                                       NIP6(*saddr), NIP6(*daddr));

        if (!pskb_pull(skb, sizeof(*hdr)))

        hdr = icmp6_hdr(skb);

        type = hdr->icmp6_type;

        ICMP6MSGIN_INC_STATS_BH(idev, type);

        case ICMPV6_ECHO_REQUEST:
                icmpv6_echo_reply(skb);

        case ICMPV6_ECHO_REPLY:
                /* we couldn't care less */

        case ICMPV6_PKT_TOOBIG:
                /* BUGGG_FUTURE: if the packet contains an rthdr, we cannot
                   update the standard destination cache.  It seems only an
                   "advanced" destination cache would allow us to solve this.
                if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))

                hdr = icmp6_hdr(skb);
                orig_hdr = (struct ipv6hdr *) (hdr + 1);
                rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
                                   ntohl(hdr->icmp6_mtu));
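
                /* The MTU carried in the Packet Too Big message updates the
                 * destination cache entry for the inner packet's daddr via
                 * rt6_pmtu_discovery(), before falling through so upper
                 * protocols also see the error.
                 */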
                 * Drop through to notify

        case ICMPV6_DEST_UNREACH:
        case ICMPV6_TIME_EXCEED:
        case ICMPV6_PARAMPROB:
                icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);

        case NDISC_ROUTER_SOLICITATION:
        case NDISC_ROUTER_ADVERTISEMENT:
        case NDISC_NEIGHBOUR_SOLICITATION:
        case NDISC_NEIGHBOUR_ADVERTISEMENT:

        case ICMPV6_MGM_QUERY:
                igmp6_event_query(skb);

        case ICMPV6_MGM_REPORT:
                igmp6_event_report(skb);

        case ICMPV6_MGM_REDUCTION:
        case ICMPV6_NI_QUERY:
        case ICMPV6_NI_REPLY:
        case ICMPV6_MLD2_REPORT:
        case ICMPV6_DHAAD_REQUEST:
        case ICMPV6_DHAAD_REPLY:
        case ICMPV6_MOBILE_PREFIX_SOL:
        case ICMPV6_MOBILE_PREFIX_ADV:

                LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");

                if (type & ICMPV6_INFOMSG_MASK)

                 * error of unknown type.
                 * must pass to upper level
                icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);

        ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);

void icmpv6_flow_init(struct sock *sk, struct flowi *fl,
                      const struct in6_addr *saddr,
                      const struct in6_addr *daddr,
        memset(fl, 0, sizeof(*fl));
        ipv6_addr_copy(&fl->fl6_src, saddr);
        ipv6_addr_copy(&fl->fl6_dst, daddr);
        fl->proto = IPPROTO_ICMPV6;
        fl->fl_icmp_type = type;
        fl->fl_icmp_code = 0;

        security_sk_classify_flow(sk, fl);
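
/* icmpv6_flow_init() is a helper for other in-kernel ICMPv6 producers; the
 * neighbour discovery code, for example, fills its flowi for NS/NA/RS/RA
 * messages roughly as
 *
 *        icmpv6_flow_init(sk, &fl, type, saddr, daddr, dev->ifindex);
 *
 * before looking up the dst for the outgoing packet.
 */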
 * Special lock-class for __icmpv6_sk:
static struct lock_class_key icmpv6_socket_sk_dst_lock_key;

static int __net_init icmpv6_sk_init(struct net *net)
                kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
        if (net->ipv6.icmp_sk == NULL)

        for_each_possible_cpu(i) {
                err = inet_ctl_sock_create(&sk, PF_INET6,
                                           SOCK_RAW, IPPROTO_ICMPV6, net);
                               "Failed to initialize the ICMP6 control socket "

                net->ipv6.icmp_sk[i] = sk;

                 * Split off their lock-class, because sk->sk_dst_lock
                 * gets used from softirqs, which is safe for
                 * __icmpv6_sk (because those never get directly used
                 * via userspace syscalls), but unsafe for normal sockets.
                lockdep_set_class(&sk->sk_dst_lock,
                                  &icmpv6_socket_sk_dst_lock_key);

                /* Enough space for 2 64K ICMP packets, including
                 * sk_buff struct overhead.
                        (2 * ((64 * 1024) + sizeof(struct sk_buff)));

        for (j = 0; j < i; j++)
                inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]);
        kfree(net->ipv6.icmp_sk);

static void __net_exit icmpv6_sk_exit(struct net *net)
        for_each_possible_cpu(i) {
                inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
        kfree(net->ipv6.icmp_sk);

static struct pernet_operations icmpv6_sk_ops = {
        .init = icmpv6_sk_init,
        .exit = icmpv6_sk_exit,
int __init icmpv6_init(void)
        err = register_pernet_subsys(&icmpv6_sk_ops);

        if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)

        printk(KERN_ERR "Failed to register ICMP6 protocol\n");
        unregister_pernet_subsys(&icmpv6_sk_ops);

void icmpv6_cleanup(void)
        unregister_pernet_subsys(&icmpv6_sk_ops);
        inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);

static const struct icmp6_err {
        { /* ADM_PROHIBITED */
        { /* Was NOT_NEIGHBOUR, now reserved */

int icmpv6_err_convert(int type, int code, int *err)
        case ICMPV6_DEST_UNREACH:
                if (code <= ICMPV6_PORT_UNREACH) {
                        *err = tab_unreach[code].err;
                        fatal = tab_unreach[code].fatal;

        case ICMPV6_PKT_TOOBIG:

        case ICMPV6_PARAMPROB:

        case ICMPV6_TIME_EXCEED:

EXPORT_SYMBOL(icmpv6_err_convert);
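
/* icmpv6_err_convert() is meant for transport error handlers (e.g. the TCP
 * and UDP v6 err_handler callbacks): it maps an ICMPv6 type/code to an errno
 * value and returns whether the error is fatal for the socket, roughly:
 *
 *        fatal = icmpv6_err_convert(type, code, &err);
 *        sk->sk_err = err;   (and abort the connection if fatal)
 */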
ctl_table ipv6_icmp_table_template[] = {
                .ctl_name     = NET_IPV6_ICMP_RATELIMIT,
                .procname     = "ratelimit",
                .data         = &init_net.ipv6.sysctl.icmpv6_time,
                .maxlen       = sizeof(int),
                .proc_handler = &proc_dointvec

struct ctl_table *ipv6_icmp_sysctl_init(struct net *net)
        struct ctl_table *table;

        table = kmemdup(ipv6_icmp_table_template,
                        sizeof(ipv6_icmp_table_template),

        table[0].data = &net->ipv6.sysctl.icmpv6_time;
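
        /* The template is duplicated with kmemdup() for each network
         * namespace and table[0].data is re-pointed at that namespace's
         * icmpv6_time, so the "ratelimit" sysctl can be tuned independently
         * per netns.
         */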