/*
 *	RAW sockets for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Adapted from linux/net/ipv4/raw.c
 *
 *	$Id: raw.c,v 1.51 2002/02/01 22:01:04 davem Exp $
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI,H.@USAGI	:	raw checksum (RFC2292(bis) compliance)
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/sched.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <net/ip.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/udp.h>
#include <net/inet_common.h>

#include <net/rawv6.h>
#include <net/xfrm.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
struct hlist_head raw_v6_htable[RAWV6_HTABLE_SIZE];
DEFINE_RWLOCK(raw_v6_lock);
static void raw_v6_hash(struct sock *sk)
{
	struct hlist_head *list = &raw_v6_htable[inet_sk(sk)->num &
						 (RAWV6_HTABLE_SIZE - 1)];

	write_lock_bh(&raw_v6_lock);
	sk_add_node(sk, list);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock_bh(&raw_v6_lock);
}
static void raw_v6_unhash(struct sock *sk)
{
	write_lock_bh(&raw_v6_lock);
	if (sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(&raw_v6_lock);
}
/* Grumble... icmp and ip_input want to get at this... */
struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num,
			     struct in6_addr *loc_addr, struct in6_addr *rmt_addr)
{
	struct hlist_node *node;
	int is_multicast = ipv6_addr_is_multicast(loc_addr);

	sk_for_each_from(sk, node)
		if (inet_sk(sk)->num == num) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			/* connected socket: remote address must match */
			if (!ipv6_addr_any(&np->daddr) &&
			    !ipv6_addr_equal(&np->daddr, rmt_addr))
				continue;

			/* bound socket: local address (or group) must match */
			if (!ipv6_addr_any(&np->rcv_saddr)) {
				if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
					goto found;
				if (is_multicast &&
				    inet6_mc_check(sk, loc_addr, rmt_addr))
					goto found;
				continue;
			}
			goto found;
		}
	sk = NULL;
found:
	return sk;
}
/*
 *	0 - deliver
 *	1 - block
 */
static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
{
	struct icmp6hdr *icmph;
	struct raw6_sock *rp = raw6_sk(sk);

	if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
		__u32 *data = &rp->filter.data[0];
		int bit_nr;

		icmph = (struct icmp6hdr *) skb->data;
		bit_nr = icmph->icmp6_type;

		/* a set bit in the per-socket bitmap blocks this type */
		return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
	}
	return 0;
}
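
/*
 * The bitmap test above is the kernel half of the RFC 3542
 * ICMP6_FILTER socket option (a set bit blocks that ICMPv6 type).
 * A minimal userspace sketch, for illustration only and not part of
 * this file, that receives nothing but echo replies:
 */
#if 0
#include <netinet/icmp6.h>	/* struct icmp6_filter, ICMP6_FILTER_* */
#include <sys/socket.h>

static int open_ping6_socket(void)
{
	struct icmp6_filter filter;
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);

	if (fd < 0)
		return -1;
	ICMP6_FILTER_SETBLOCKALL(&filter);		 /* block all types...   */
	ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filter); /* ...except echo reply */
	if (setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER,
		       &filter, sizeof(filter)) < 0)
		return -1;
	return fd;
}
#endif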
/*
 *	demultiplex raw sockets.
 *	(should consider queueing the skb in the sock receive_queue
 *	without calling rawv6.c)
 *
 *	Caller owns SKB so we must make clones.
 */
void ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
	struct in6_addr *saddr;
	struct in6_addr *daddr;
	struct sock *sk;
	__u8 hash;

	saddr = &skb->nh.ipv6h->saddr;
	daddr = saddr + 1;	/* daddr follows saddr in the IPv6 header */

	hash = nexthdr & (MAX_INET_PROTOS - 1);

	read_lock(&raw_v6_lock);
	sk = sk_head(&raw_v6_htable[hash]);

	/*
	 *	The first socket found will be delivered after
	 *	delivery to transport protocols.
	 */

	if (sk == NULL)
		goto out;

	sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr);

	while (sk) {
		if (nexthdr != IPPROTO_ICMPV6 || !icmpv6_filter(sk, skb)) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone)
				rawv6_rcv(sk, clone);
		}
		sk = __raw_v6_lookup(sk_next(sk), nexthdr, daddr, saddr);
	}
out:
	read_unlock(&raw_v6_lock);
}
/* This cleans up af_inet6 a bit. -DaveM */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
	__u32 v4addr = 0;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;
	addr_type = ipv6_addr_type(&addr->sin6_addr);

	/* Raw sockets are IPv6 only */
	if (addr_type == IPV6_ADDR_MAPPED)
		return -EADDRNOTAVAIL;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE)
		goto out;

	/* Check if the address belongs to the host. */
	if (addr_type != IPV6_ADDR_ANY) {
		struct net_device *dev = NULL;

		if (addr_type & IPV6_ADDR_LINKLOCAL) {
			if (addr_len >= sizeof(struct sockaddr_in6) &&
			    addr->sin6_scope_id) {
				/* Override any existing binding, if another
				 * one is supplied by user.
				 */
				sk->sk_bound_dev_if = addr->sin6_scope_id;
			}

			/* Binding to link-local address requires an interface */
			if (!sk->sk_bound_dev_if)
				goto out;

			dev = dev_get_by_index(sk->sk_bound_dev_if);
			if (!dev) {
				err = -ENODEV;
				goto out;
			}
		}

		/* ipv4 addr of the socket is invalid.  Only the
		 * unspecified and mapped address have a v4 equivalent.
		 */
		v4addr = LOOPBACK4_IPV6;
		if (!(addr_type & IPV6_ADDR_MULTICAST)) {
			err = -EADDRNOTAVAIL;
			if (!ipv6_chk_addr(&addr->sin6_addr, dev, 0)) {
				if (dev)
					dev_put(dev);
				goto out;
			}
		}
		if (dev)
			dev_put(dev);
	}

	inet->rcv_saddr = inet->saddr = v4addr;
	ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
	if (!(addr_type & IPV6_ADDR_MULTICAST))
		ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
	err = 0;
out:
	release_sock(sk);
	return err;
}
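
/*
 * Companion userspace sketch (assumed interface name, illustration
 * only): binding a raw socket to a link-local address must supply
 * the interface in sin6_scope_id, or the code above bails out with
 * -EINVAL before the address check.
 */
#if 0
#include <net/if.h>		/* if_nametoindex() */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int bind_link_local(int fd, const struct in6_addr *ll_addr)
{
	struct sockaddr_in6 sin6;

	memset(&sin6, 0, sizeof(sin6));
	sin6.sin6_family = AF_INET6;
	sin6.sin6_addr = *ll_addr;			/* fe80::/10 address */
	sin6.sin6_scope_id = if_nametoindex("eth0");	/* required for link-local */
	return bind(fd, (struct sockaddr *)&sin6, sizeof(sin6));
}
#endif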
void rawv6_err(struct sock *sk, struct sk_buff *skb,
	       struct inet6_skb_parm *opt,
	       int type, int code, int offset, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int err;
	int harderr;

	/* Report error on raw socket, if:
	   1. User requested recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without recverr and the error is hard).
	 */
	if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
		return;

	harderr = icmpv6_err_convert(type, code, &err);
	if (type == ICMPV6_PKT_TOOBIG)
		harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);

	if (np->recverr) {
		u8 *payload = skb->data;
		if (!inet->hdrincl)
			payload += offset;
		ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
	}

	if (np->recverr || harderr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	}
}
static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
	    skb->ip_summed != CHECKSUM_UNNECESSARY) {
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
			/* FIXME: increment a raw6 drops counter here */
			kfree_skb(skb);
			return 0;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* Charge it to the socket. */
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		/* FIXME: increment a raw6 drops counter here */
		kfree_skb(skb);
		return 0;
	}

	return 0;
}
/*
 *	This is next to useless...
 *	if we demultiplex in network layer we don't need the extra call
 *	just to queue the skb...
 *	maybe we could have the network decide upon a hint if it
 *	should call raw_rcv for demultiplexing
 */
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	if (!rp->checksum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
		if (skb->ip_summed == CHECKSUM_HW) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
					    &skb->nh.ipv6h->daddr,
					    skb->len, inet->num, skb->csum)) {
				LIMIT_NETDEBUG(
					printk(KERN_DEBUG "raw v6 hw csum failure.\n"));
				skb->ip_summed = CHECKSUM_NONE;
			}
		}
		if (skb->ip_summed == CHECKSUM_NONE)
			skb->csum = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
						     &skb->nh.ipv6h->daddr,
						     skb->len, inet->num, 0);
	}

	if (inet->hdrincl) {
		if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
		    (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
			/* FIXME: increment a raw6 drops counter here */
			kfree_skb(skb);
			return 0;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	rawv6_rcv_skb(sk, skb);
	return 0;
}
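
/*
 * Invariant used by the checksum paths above and in rawv6_rcv_skb():
 * for an intact packet, folding the pseudo-header sum (via
 * csum_ipv6_magic()) into the payload sum yields zero.  Minimal
 * sketch over a linear buffer (assumed helper, illustration only):
 */
#if 0
static int rawv6_csum_ok(struct in6_addr *saddr, struct in6_addr *daddr,
			 void *payload, unsigned int len, unsigned short proto)
{
	unsigned int sum = csum_partial(payload, len, 0);

	/* csum_ipv6_magic() folds to 16 bits; non-zero means corruption */
	return csum_ipv6_magic(saddr, daddr, len, proto, sum) == 0;
}
#endif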
/*
 *	This should be easy, if there is something there
 *	we return it, otherwise we block.
 */

static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
			 struct msghdr *msg, size_t len,
			 int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name;
	struct sk_buff *skb;
	size_t copied;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (addr_len)
		*addr_len = sizeof(*sin6);

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else if (msg->msg_flags & MSG_TRUNC) {
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else {
		err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	/* Copy the address. */
	if (sin6) {
		sin6->sin6_family = AF_INET6;
		ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = 0;
		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
			sin6->sin6_scope_id = IP6CB(skb)->iif;
	}

	sock_recv_timestamp(msg, sk, skb);

	if (np->rxopt.all)
		datagram_recv_ctl(sk, msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	/* Clear queue. */
	if (flags & MSG_PEEK) {
		int clear = 0;
		spin_lock_irq(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			clear = 1;
		}
		spin_unlock_irq(&sk->sk_receive_queue.lock);
		if (clear)
			kfree_skb(skb);
	}

	/* Error for blocking case is chosen to masquerade
	   as some normal condition.
	 */
	err = (flags & MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
	/* FIXME: increment a raw6 drops counter here */
	goto out_free;
}
static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
				     struct raw6_sock *rp)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	int err = 0;
	int offset;
	int len;
	u32 tmp_csum;
	u16 csum;

	if (!rp->checksum)
		goto send;

	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	offset = rp->offset;
	if (offset >= inet->cork.length - 1) {
		err = -EINVAL;
		ip6_flush_pending_frames(sk);
		goto out;
	}

	/* should be check HW csum miyazawa */
	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		tmp_csum = skb->csum;
	} else {
		struct sk_buff *csum_skb = NULL;
		tmp_csum = 0;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);

			if (csum_skb)
				continue;

			/* find the fragment holding the checksum field */
			len = skb->len - (skb->h.raw - skb->data);
			if (offset >= len) {
				offset -= len;
				continue;
			}

			csum_skb = skb;
		}

		skb = csum_skb;
	}

	offset += skb->h.raw - skb->data;
	if (skb_copy_bits(skb, offset, &csum, 2))
		BUG();

	/* in case cksum was not initialized */
	if (unlikely(csum))
		tmp_csum = csum_sub(tmp_csum, csum);

	tmp_csum = csum_ipv6_magic(&fl->fl6_src,
				   &fl->fl6_dst,
				   inet->cork.length, fl->proto, tmp_csum);

	if (tmp_csum == 0)
		tmp_csum = -1;

	csum = tmp_csum;
	if (skb_store_bits(skb, offset, &csum, 2))
		BUG();

send:
	err = ip6_push_pending_frames(sk);
out:
	return err;
}
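
/*
 * The offset consumed above is set by userspace through the RFC 3542
 * IPV6_CHECKSUM socket option.  Hedged sketch (illustration only):
 * an OSPFv3 daemon asking the kernel to fill and verify the checksum
 * at byte 12 of its header:
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int enable_kernel_checksum(int fd)
{
	int offset = 12;	/* OSPFv3 checksum field offset; -1 disables */

	return setsockopt(fd, IPPROTO_IPV6, IPV6_CHECKSUM,
			  &offset, sizeof(offset));
}
#endif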
static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
			     struct flowi *fl, struct rt6_info *rt,
			     unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6hdr *iph;
	struct sk_buff *skb;
	unsigned int hh_len;
	int err;

	if (length > rt->u.dst.dev->mtu) {
		ipv6_local_error(sk, EMSGSIZE, fl, rt->u.dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags & MSG_PROBE)
		goto out;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	skb = sock_alloc_send_skb(sk, length + hh_len + 15,
				  flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error;
	skb_reserve(skb, hh_len);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	skb->nh.ipv6h = iph = (struct ipv6hdr *)skb_put(skb, length);

	skb->ip_summed = CHECKSUM_NONE;

	skb->h.raw = skb->nh.raw;
	err = memcpy_fromiovecend((void *)iph, from, 0, length);
	if (err)
		goto error_fault;

	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		      dst_output);
	if (err > 0)
		err = inet->recverr ? net_xmit_errno(err) : 0;
	if (err)
		goto error;
out:
	return 0;

error_fault:
	err = -EFAULT;
	kfree_skb(skb);
error:
	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
static void rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
{
	struct iovec *iov;
	u8 __user *type = NULL;
	u8 __user *code = NULL;
	int probed = 0;
	int i;

	if (!msg->msg_iov)
		return;

	for (i = 0; i < msg->msg_iovlen; i++) {
		iov = &msg->msg_iov[i];
		if (!iov)
			continue;

		switch (fl->proto) {
		case IPPROTO_ICMPV6:
			/* check if one-byte field is readable or not. */
			if (iov->iov_base && iov->iov_len < 1)
				break;

			if (!type) {
				type = iov->iov_base;
				/* check if code field is readable or not. */
				if (iov->iov_len > 1)
					code = type + 1;
			} else if (!code)
				code = iov->iov_base;

			if (type && code) {
				get_user(fl->fl_icmp_type, type);
				__get_user(fl->fl_icmp_code, code);
				probed = 1;
			}
			break;
		default:
			probed = 1;
			break;
		}
		if (probed)
			break;
	}
}
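
/*
 * The probe works because an ICMPv6 raw payload starts with the
 * icmp6 header: byte 0 is the type, byte 1 the code.  Hedged
 * userspace sketch of such a sender (assumed helper, illustration
 * only):
 */
#if 0
#include <netinet/icmp6.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int send_echo_request(int fd, const struct sockaddr_in6 *dst)
{
	struct icmp6_hdr hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.icmp6_type = ICMP6_ECHO_REQUEST;	/* probed as fl_icmp_type */
	hdr.icmp6_code = 0;			/* probed as fl_icmp_code */
	hdr.icmp6_id = getpid() & 0xffff;
	/* the kernel fills icmp6_cksum: rawv6_init_sk() enables offset 2 */
	return sendto(fd, &hdr, sizeof(hdr), 0,
		      (const struct sockaddr *)dst, sizeof(*dst));
}
#endif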
static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
			 struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
	struct in6_addr *daddr, *final_p = NULL, final;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);
	struct ipv6_txoptions *opt = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct dst_entry *dst = NULL;
	struct flowi fl;
	int addr_len = msg->msg_namelen;
	int hlimit = -1;
	u16 proto;
	int err;

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_build_xmit
	 */
	if (len > INT_MAX)
		return -EMSGSIZE;

	/* Mirror BSD error message compatibility */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 *	Get and verify the address.
	 */
	memset(&fl, 0, sizeof(fl));

	if (sin6) {
		if (addr_len < SIN6_LEN_RFC2133)
			return -EINVAL;

		if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
			return -EAFNOSUPPORT;

		/* port is the proto value [0..255] carried in nexthdr */
		proto = ntohs(sin6->sin6_port);

		if (!proto)
			proto = inet->num;
		else if (proto != inet->num)
			return -EINVAL;

		if (proto > 255)
			return -EINVAL;

		daddr = &sin6->sin6_addr;
		if (np->sndflow) {
			fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
				if (flowlabel == NULL)
					return -EINVAL;
				daddr = &flowlabel->dst;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &np->daddr))
			daddr = &np->daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
			fl.oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		proto = inet->num;
		daddr = &np->daddr;
		fl.fl6_flowlabel = np->flow_label;
	}

	if (ipv6_addr_any(daddr)) {
		/*
		 * unspecified destination address
		 * treated as error... is this correct ?
		 */
		fl6_sock_release(flowlabel);
		return -EINVAL;
	}

	if (fl.oif == 0)
		fl.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(struct ipv6_txoptions);

		err = datagram_send_ctl(msg, &fl, opt, &hlimit);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen) && !flowlabel)
			opt = NULL;
	}
	if (opt == NULL)
		opt = np->opt;
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);

	fl.proto = proto;
	rawv6_probe_proto_opt(&fl, msg);

	ipv6_addr_copy(&fl.fl6_dst, daddr);
	if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
		ipv6_addr_copy(&fl.fl6_src, &np->saddr);

	/* merge ip6_build_xmit from ip6_output */
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto out;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
		dst_release(dst);
		goto out;
	}

	if (hlimit < 0) {
		if (ipv6_addr_is_multicast(&fl.fl6_dst))
			hlimit = np->mcast_hops;
		else
			hlimit = np->hop_limit;
		if (hlimit < 0)
			hlimit = dst_metric(dst, RTAX_HOPLIMIT);
		if (hlimit < 0)
			hlimit = ipv6_get_hoplimit(dst->dev);
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;

back_from_confirm:
	if (inet->hdrincl) {
		err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, (struct rt6_info*)dst, msg->msg_flags);
	} else {
		lock_sock(sk);
		err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
				      hlimit, opt, &fl, (struct rt6_info*)dst, msg->msg_flags);

		if (err)
			ip6_flush_pending_frames(sk);
		else if (!(msg->msg_flags & MSG_MORE))
			err = rawv6_push_pending_frames(sk, &fl, rp);
		release_sock(sk);
	}
done:
	ip6_dst_store(sk, dst,
		      ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ?
		      &np->daddr : NULL);
	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;

out:
	fl6_sock_release(flowlabel);
	return err < 0 ? err : len;

do_confirm:
	dst_confirm(dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto done;
}
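
/*
 * Note on the sin6_port check above: the protocol number is fixed at
 * socket() time (it becomes inet->num and the IPv6 nexthdr value),
 * and a non-zero sin6_port must repeat that same value.  Userspace
 * sketch (illustration only):
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int open_raw_ipv6(int proto)
{
	/* proto must be in [0..255]; e.g. IPPROTO_ICMPV6 */
	return socket(AF_INET6, SOCK_RAW, proto);
}
#endif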
static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
			       char __user *optval, int optlen)
{
	switch (optname) {
	case ICMPV6_FILTER:
		if (optlen > sizeof(struct icmp6_filter))
			optlen = sizeof(struct icmp6_filter);
		if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	return 0;
}
static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	int len;

	switch (optname) {
	case ICMPV6_FILTER:
		if (get_user(len, optlen))
			return -EFAULT;
		if (len < 0)
			return -EINVAL;
		if (len > sizeof(struct icmp6_filter))
			len = sizeof(struct icmp6_filter);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	return 0;
}
static int rawv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val;

	switch (level) {
	case SOL_RAW:
		break;

	case SOL_ICMPV6:
		if (inet_sk(sk)->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_seticmpfilter(sk, level, optname, optval,
					   optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return ipv6_setsockopt(sk, level, optname, optval,
				       optlen);
	}

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case IPV6_CHECKSUM:
		/* You may get strange result with a positive odd offset;
		   RFC2292bis agrees with me. */
		if (val > 0 && (val & 1))
			return -EINVAL;
		if (val < 0) {
			rp->checksum = 0;
		} else {
			rp->checksum = 1;
			rp->offset = val;
		}

		return 0;

	default:
		return -ENOPROTOOPT;
	}
}
static int rawv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val, len;

	switch (level) {
	case SOL_RAW:
		break;

	case SOL_ICMPV6:
		if (inet_sk(sk)->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_geticmpfilter(sk, level, optname, optval,
					   optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return ipv6_getsockopt(sk, level, optname, optval,
				       optlen);
	}

	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case IPV6_CHECKSUM:
		if (rp->checksum == 0)
			val = -1;
		else
			val = rp->offset;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, sizeof(int), len);

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ: {
		int amount = atomic_read(&sk->sk_wmem_alloc);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ: {
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_irq(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL)
			amount = skb->tail - skb->h.raw;
		spin_unlock_irq(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}
}
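
/*
 * SIOCINQ above reports the payload length of the next queued
 * datagram, not the total queue size.  Userspace sketch
 * (illustration only):
 */
#if 0
#include <linux/sockios.h>	/* SIOCINQ */
#include <sys/ioctl.h>

static int next_datagram_len(int fd)
{
	int n = 0;

	if (ioctl(fd, SIOCINQ, &n) < 0)	/* bytes in next datagram */
		return -1;
	return n;
}
#endif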
static void rawv6_close(struct sock *sk, long timeout)
{
	if (inet_sk(sk)->num == IPPROTO_RAW)
		ip6_ra_control(sk, -1, NULL);

	sk_common_release(sk);
}
static int rawv6_init_sk(struct sock *sk)
{
	if (inet_sk(sk)->num == IPPROTO_ICMPV6) {
		struct raw6_sock *rp = raw6_sk(sk);
		/* the ICMPv6 checksum lives at offset 2 of the icmp6 header */
		rp->checksum = 1;
		rp->offset   = 2;
	}
	return 0;
}
struct proto rawv6_prot = {
	.name =		"RAWv6",
	.owner =	THIS_MODULE,
	.close =	rawv6_close,
	.connect =	ip6_datagram_connect,
	.disconnect =	udp_disconnect,
	.ioctl =	rawv6_ioctl,
	.init =		rawv6_init_sk,
	.destroy =	inet6_destroy_sock,
	.setsockopt =	rawv6_setsockopt,
	.getsockopt =	rawv6_getsockopt,
	.sendmsg =	rawv6_sendmsg,
	.recvmsg =	rawv6_recvmsg,
	.bind =		rawv6_bind,
	.backlog_rcv =	rawv6_rcv_skb,
	.hash =		raw_v6_hash,
	.unhash =	raw_v6_unhash,
	.obj_size =	sizeof(struct raw6_sock),
};
#ifdef CONFIG_PROC_FS
struct raw6_iter_state {
	int	bucket;
};

#define raw6_seq_private(seq)	((struct raw6_iter_state *)(seq)->private)

static struct sock *raw6_get_first(struct seq_file *seq)
{
	struct sock *sk;
	struct hlist_node *node;
	struct raw6_iter_state *state = raw6_seq_private(seq);

	for (state->bucket = 0; state->bucket < RAWV6_HTABLE_SIZE; ++state->bucket)
		sk_for_each(sk, node, &raw_v6_htable[state->bucket])
			if (sk->sk_family == PF_INET6)
				goto out;
	sk = NULL;
out:
	return sk;
}
static struct sock *raw6_get_next(struct seq_file *seq, struct sock *sk)
{
	struct raw6_iter_state *state = raw6_seq_private(seq);

	do {
		sk = sk_next(sk);
try_again:
		;
	} while (sk && sk->sk_family != PF_INET6);

	if (!sk && ++state->bucket < RAWV6_HTABLE_SIZE) {
		sk = sk_head(&raw_v6_htable[state->bucket]);
		goto try_again;
	}
	return sk;
}
static struct sock *raw6_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = raw6_get_first(seq);

	if (sk)
		while (pos && (sk = raw6_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}
static void *raw6_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&raw_v6_lock);
	return *pos ? raw6_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *raw6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = raw6_get_first(seq);
	else
		sk = raw6_get_next(seq, v);
	++*pos;
	return sk;
}

static void raw6_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&raw_v6_lock);
}
static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
{
	struct ipv6_pinfo *np = inet6_sk(sp);
	struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = 0;
	srcp  = inet_sk(sp)->num;
	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   atomic_read(&sp->sk_wmem_alloc),
		   atomic_read(&sp->sk_rmem_alloc),
		   0, 0L, 0,
		   sock_i_uid(sp), 0,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp);
}
static int raw6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq,
			   "  sl  "
			   "local_address                         "
			   "remote_address                        "
			   "st tx_queue rx_queue tr tm->when retrnsmt"
			   "   uid  timeout inode\n");
	else
		raw6_sock_seq_show(seq, v, raw6_seq_private(seq)->bucket);
	return 0;
}
static struct seq_operations raw6_seq_ops = {
	.start =	raw6_seq_start,
	.next =		raw6_seq_next,
	.stop =		raw6_seq_stop,
	.show =		raw6_seq_show,
};
static int raw6_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct raw6_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	rc = seq_open(file, &raw6_seq_ops);
	if (rc)
		goto out_kfree;
	seq = file->private_data;
	seq->private = s;
	memset(s, 0, sizeof(*s));
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}
static struct file_operations raw6_seq_fops = {
	.owner =	THIS_MODULE,
	.open =		raw6_seq_open,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	seq_release_private,
};
int __init raw6_proc_init(void)
{
	if (!proc_net_fops_create("raw6", S_IRUGO, &raw6_seq_fops))
		return -ENOMEM;

	return 0;
}

void raw6_proc_exit(void)
{
	proc_net_remove("raw6");
}
#endif	/* CONFIG_PROC_FS */