/*
 *	RAW sockets for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Adapted from linux/net/ipv4/raw.c
 *
 *	$Id: raw.c,v 1.51 2002/02/01 22:01:04 davem Exp $
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI,H.@USAGI	:	raw checksum (RFC2292(bis) compliance)
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/sched.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <net/ip.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>

#include <net/rawv6.h>
#include <net/xfrm.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
struct hlist_head raw_v6_htable[RAWV6_HTABLE_SIZE];
DEFINE_RWLOCK(raw_v6_lock);
static void raw_v6_hash(struct sock *sk)
{
	struct hlist_head *list = &raw_v6_htable[inet_sk(sk)->num &
						 (RAWV6_HTABLE_SIZE - 1)];

	write_lock_bh(&raw_v6_lock);
	sk_add_node(sk, list);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock_bh(&raw_v6_lock);
}
static void raw_v6_unhash(struct sock *sk)
{
	write_lock_bh(&raw_v6_lock);
	if (sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(&raw_v6_lock);
}
/* Grumble... icmp and ip_input want to get at this... */
struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num,
			     struct in6_addr *loc_addr, struct in6_addr *rmt_addr,
			     int dif)
{
	struct hlist_node *node;
	int is_multicast = ipv6_addr_is_multicast(loc_addr);

	sk_for_each_from(sk, node)
		if (inet_sk(sk)->num == num) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			if (!ipv6_addr_any(&np->daddr) &&
			    !ipv6_addr_equal(&np->daddr, rmt_addr))
				continue;

			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
				continue;

			if (!ipv6_addr_any(&np->rcv_saddr)) {
				if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
					goto found;
				if (is_multicast &&
				    inet6_mc_check(sk, loc_addr, rmt_addr))
					goto found;
				continue;
			}
			goto found;
		}
	sk = NULL;
found:
	return sk;
}
/*
 *	Returns 0 to deliver the packet, nonzero to block it:
 *	a set bit in the per-socket filter bitmap blocks that
 *	ICMPv6 type.
 */
static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
{
	struct icmp6hdr *icmph;
	struct raw6_sock *rp = raw6_sk(sk);

	if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
		__u32 *data = &rp->filter.data[0];
		int bit_nr;

		icmph = (struct icmp6hdr *) skb->data;
		bit_nr = icmph->icmp6_type;

		return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
	}
	return 0;
}
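
/*
 * For reference (not part of the original file): userspace fills the
 * filter bitmap tested above via setsockopt().  An illustrative sketch
 * using the RFC 3542 macros from <netinet/icmp6.h> (the kernel-side
 * option name is ICMPV6_FILTER), here passing only echo replies on a
 * raw ICMPv6 socket:
 *
 *	struct icmp6_filter filt;
 *
 *	ICMP6_FILTER_SETBLOCKALL(&filt);
 *	ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filt);
 *	setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER, &filt, sizeof(filt));
 */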
/*
 *	demultiplex raw sockets.
 *	(should consider queueing the skb in the sock receive_queue
 *	without calling rawv6.c)
 *
 *	Caller owns SKB so we must make clones.
 */
int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
	struct in6_addr *saddr;
	struct in6_addr *daddr;
	struct sock *sk;
	int delivered = 0;
	__u8 hash;

	saddr = &skb->nh.ipv6h->saddr;
	daddr = &skb->nh.ipv6h->daddr;

	hash = nexthdr & (MAX_INET_PROTOS - 1);

	read_lock(&raw_v6_lock);
	sk = sk_head(&raw_v6_htable[hash]);

	/*
	 *	The first socket found will be delivered after
	 *	delivery to transport protocols.
	 */

	if (sk == NULL)
		goto out;

	sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);

	while (sk) {
		delivered = 1;
		if (nexthdr != IPPROTO_ICMPV6 || !icmpv6_filter(sk, skb)) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone)
				rawv6_rcv(sk, clone);
		}
		sk = __raw_v6_lookup(sk_next(sk), nexthdr, daddr, saddr,
				     IP6CB(skb)->iif);
	}
out:
	read_unlock(&raw_v6_lock);
	return delivered;
}
/* This cleans up af_inet6 a bit. -DaveM */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
	__u32 v4addr = 0;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;
	addr_type = ipv6_addr_type(&addr->sin6_addr);

	/* Raw sockets are IPv6 only */
	if (addr_type == IPV6_ADDR_MAPPED)
		return -EADDRNOTAVAIL;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE)
		goto out;

	/* Check if the address belongs to the host. */
	if (addr_type != IPV6_ADDR_ANY) {
		struct net_device *dev = NULL;

		if (addr_type & IPV6_ADDR_LINKLOCAL) {
			if (addr_len >= sizeof(struct sockaddr_in6) &&
			    addr->sin6_scope_id) {
				/* Override any existing binding, if another
				 * one is supplied by user.
				 */
				sk->sk_bound_dev_if = addr->sin6_scope_id;
			}

			/* Binding to link-local address requires an interface */
			if (!sk->sk_bound_dev_if)
				goto out;

			dev = dev_get_by_index(sk->sk_bound_dev_if);
			if (!dev) {
				err = -ENODEV;
				goto out;
			}
		}

		/* ipv4 addr of the socket is invalid.  Only the
		 * unspecified and mapped address have a v4 equivalent.
		 */
		v4addr = LOOPBACK4_IPV6;
		if (!(addr_type & IPV6_ADDR_MULTICAST)) {
			err = -EADDRNOTAVAIL;
			if (!ipv6_chk_addr(&addr->sin6_addr, dev, 0)) {
				if (dev)
					dev_put(dev);
				goto out;
			}
		}
		if (dev)
			dev_put(dev);
	}

	inet->rcv_saddr = inet->saddr = v4addr;
	ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
	if (!(addr_type & IPV6_ADDR_MULTICAST))
		ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
	err = 0;
out:
	release_sock(sk);
	return err;
}
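
/*
 * For reference (not part of the original file): the scope-id rule
 * enforced above means a userspace bind() to a link-local address must
 * carry an interface index.  Illustrative sketch ("eth0" is assumed):
 *
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6 };
 *
 *	inet_pton(AF_INET6, "fe80::1", &a.sin6_addr);
 *	a.sin6_scope_id = if_nametoindex("eth0");
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 */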
void rawv6_err(struct sock *sk, struct sk_buff *skb,
	       struct inet6_skb_parm *opt,
	       int type, int code, int offset, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int err;
	int harderr;

	/* Report error on raw socket, if:
	   1. User requested recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without recverr and the error is hard).
	 */
	if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
		return;

	harderr = icmpv6_err_convert(type, code, &err);
	if (type == ICMPV6_PKT_TOOBIG)
		harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);

	if (np->recverr) {
		u8 *payload = skb->data;
		if (!inet->hdrincl)
			payload += offset;
		ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
	}

	if (np->recverr || harderr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	}
}
static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
	    skb->ip_summed != CHECKSUM_UNNECESSARY) {
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
			/* FIXME: increment a raw6 drops counter here */
			kfree_skb(skb);
			return 0;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* Charge it to the socket. */
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		/* FIXME: increment a raw6 drops counter here */
		kfree_skb(skb);
		return 0;
	}

	return 0;
}
/*
 *	This is next to useless...
 *	if we demultiplex in network layer we don't need the extra call
 *	just to queue the skb...
 *	maybe we could have the network decide upon a hint if it
 *	should call raw_rcv for demultiplexing
 */
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	if (!rp->checksum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
		if (skb->ip_summed == CHECKSUM_HW) {
			skb_postpull_rcsum(skb, skb->nh.raw,
					   skb->h.raw - skb->nh.raw);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
					    &skb->nh.ipv6h->daddr,
					    skb->len, inet->num, skb->csum)) {
				LIMIT_NETDEBUG(KERN_DEBUG "raw v6 hw csum failure.\n");
				skb->ip_summed = CHECKSUM_NONE;
			}
		}
		if (skb->ip_summed == CHECKSUM_NONE)
			skb->csum = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
						     &skb->nh.ipv6h->daddr,
						     skb->len, inet->num, 0);
	}

	if (inet->hdrincl) {
		if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
		    (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
			/* FIXME: increment a raw6 drops counter here */
			kfree_skb(skb);
			return 0;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	rawv6_rcv_skb(sk, skb);
	return 0;
}
/*
 *	This should be easy, if there is something there
 *	we return it, otherwise we block.
 */

static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
			 struct msghdr *msg, size_t len,
			 int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name;
	struct sk_buff *skb;
	size_t copied;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (addr_len)
		*addr_len = sizeof(*sin6);

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else if (msg->msg_flags & MSG_TRUNC) {
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else {
		err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	/* Copy the address. */
	if (sin6) {
		sin6->sin6_family = AF_INET6;
		ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = 0;
		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
			sin6->sin6_scope_id = IP6CB(skb)->iif;
	}

	sock_recv_timestamp(msg, sk, skb);

	if (np->rxopt.all)
		datagram_recv_ctl(sk, msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	/* Clear the queue entry if we were peeking, so the bad
	   checksum is not reported again. */
	if (flags & MSG_PEEK) {
		int clear = 0;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			clear = 1;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		if (clear)
			kfree_skb(skb);
	}

	/* Error for blocking case is chosen to masquerade
	   as some normal condition.
	 */
	err = (flags & MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
	/* FIXME: increment a raw6 drops counter here */
	goto out_free;
}
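
/*
 * For reference (not part of the original file): on IPv6 raw sockets,
 * unlike IPv4, recvfrom() never returns the IPv6 header itself -- only
 * the payload starting at the transport header, with the source
 * address (and scope id for link-local sources) reported via msg_name.
 * Illustrative userspace sketch:
 *
 *	char buf[2048];
 *	struct sockaddr_in6 from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&from, &fromlen);
 *	// buf[0] is e.g. the ICMPv6 type byte, not the IPv6 version field
 */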
static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
				     struct raw6_sock *rp)
{
	struct sk_buff *skb;
	int err = 0;
	int offset;
	int len;
	int total_len;
	u32 tmp_csum;
	u16 csum;

	if (!rp->checksum)
		goto send;

	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	offset = rp->offset;
	total_len = inet_sk(sk)->cork.length - (skb->nh.raw - skb->data);
	if (offset >= total_len - 1) {
		err = -EINVAL;
		ip6_flush_pending_frames(sk);
		goto out;
	}

	/* should check HW csum, miyazawa */
	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		tmp_csum = skb->csum;
	} else {
		struct sk_buff *csum_skb = NULL;
		tmp_csum = 0;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);

			if (csum_skb)
				continue;

			len = skb->len - (skb->h.raw - skb->data);
			if (offset >= len) {
				offset -= len;
				continue;
			}

			csum_skb = skb;
		}

		skb = csum_skb;
	}

	offset += skb->h.raw - skb->data;
	if (skb_copy_bits(skb, offset, &csum, 2))
		BUG();

	/* in case cksum was not initialized */
	if (unlikely(csum))
		tmp_csum = csum_sub(tmp_csum, csum);

	tmp_csum = csum_ipv6_magic(&fl->fl6_src,
				   &fl->fl6_dst,
				   total_len, fl->proto, tmp_csum);

	/* zero is reserved; transmit all-ones instead */
	if (tmp_csum == 0)
		tmp_csum = -1;

	csum = tmp_csum;
	if (skb_store_bits(skb, offset, &csum, 2))
		BUG();

send:
	err = ip6_push_pending_frames(sk);
out:
	return err;
}
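
/*
 * For reference: the 16-bit value stored above is the standard
 * Internet checksum over the pseudo-header and payload.  A minimal
 * userspace sketch of the fold (simplified; it omits the pseudo-header
 * that csum_ipv6_magic() mixes in above):
 *
 *	static unsigned short csum16(const unsigned char *p, size_t n)
 *	{
 *		unsigned long sum = 0;
 *
 *		while (n > 1) {			// sum 16-bit words
 *			sum += (p[0] << 8) | p[1];
 *			p += 2;
 *			n -= 2;
 *		}
 *		if (n)				// pad a trailing odd byte
 *			sum += p[0] << 8;
 *		while (sum >> 16)		// fold carries back in
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		sum = ~sum & 0xffff;
 *		// 0 means "no checksum", so transmit 0xffff instead
 *		return sum ? sum : 0xffff;
 *	}
 */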
static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
			     struct flowi *fl, struct rt6_info *rt,
			     unsigned int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *iph;
	struct sk_buff *skb;
	unsigned int hh_len;
	int err;

	if (length > rt->u.dst.dev->mtu) {
		ipv6_local_error(sk, EMSGSIZE, fl, rt->u.dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags & MSG_PROBE)
		goto out;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	skb = sock_alloc_send_skb(sk, length + hh_len + 15,
				  flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error;
	skb_reserve(skb, hh_len);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	skb->nh.ipv6h = iph = (struct ipv6hdr *)skb_put(skb, length);

	skb->ip_summed = CHECKSUM_NONE;

	skb->h.raw = skb->nh.raw;
	err = memcpy_fromiovecend((void *)iph, from, 0, length);
	if (err)
		goto error_fault;

	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		      dst_output);
	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	if (err)
		goto error;
out:
	return 0;

error_fault:
	err = -EFAULT;
	kfree_skb(skb);
error:
	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
static void rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
{
	struct iovec *iov;
	u8 __user *type = NULL;
	u8 __user *code = NULL;
	int probed = 0;
	int i;

	if (!msg->msg_iov)
		return;

	for (i = 0; i < msg->msg_iovlen; i++) {
		iov = &msg->msg_iov[i];
		if (!iov)
			continue;

		switch (fl->proto) {
		case IPPROTO_ICMPV6:
			/* check if one-byte field is readable or not. */
			if (iov->iov_base && iov->iov_len < 1)
				break;

			if (!type) {
				type = iov->iov_base;
				/* check if code field is readable or not. */
				if (iov->iov_len > 1)
					code = type + 1;
			} else if (!code)
				code = iov->iov_base;

			if (type && code) {
				get_user(fl->fl_icmp_type, type);
				__get_user(fl->fl_icmp_code, code);
				probed = 1;
			}
			break;
		default:
			probed = 1;
			break;
		}
		if (probed)
			break;
	}
}
static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
			 struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
	struct in6_addr *daddr, *final_p = NULL, final;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);
	struct ipv6_txoptions *opt = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct dst_entry *dst = NULL;
	struct flowi fl;
	int addr_len = msg->msg_namelen;
	int hlimit = -1;
	int tclass = -1;
	u16 proto;
	int err;

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_build_xmit
	 */
	if (len < 0)
		return -EMSGSIZE;

	/* Mirror BSD error message compatibility */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 *	Get and verify the address.
	 */
	memset(&fl, 0, sizeof(fl));

	if (sin6) {
		if (addr_len < SIN6_LEN_RFC2133)
			return -EINVAL;

		if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
			return -EAFNOSUPPORT;

		/* port is the proto value [0..255] carried in nexthdr */
		proto = ntohs(sin6->sin6_port);

		if (!proto)
			proto = inet->num;
		else if (proto != inet->num)
			return -EINVAL;

		if (proto > 255)
			return -EINVAL;

		daddr = &sin6->sin6_addr;
		if (np->sndflow) {
			fl.fl6_flowlabel = sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK;
			if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
				if (flowlabel == NULL)
					return -EINVAL;
				daddr = &flowlabel->dst;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &np->daddr))
			daddr = &np->daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
			fl.oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		proto = inet->num;
		daddr = &np->daddr;
		fl.fl6_flowlabel = np->flow_label;
	}

	if (ipv6_addr_any(daddr)) {
		/*
		 * unspecified destination address
		 * treated as error... is this correct ?
		 */
		fl6_sock_release(flowlabel);
		return -EINVAL;
	}

	if (fl.oif == 0)
		fl.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(struct ipv6_txoptions);

		err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
		}
		if (!(opt->opt_nflen | opt->opt_flen))
			opt = NULL;
	}
	if (opt == NULL)
		opt = np->opt;
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);

	fl.proto = proto;
	rawv6_probe_proto_opt(&fl, msg);

	ipv6_addr_copy(&fl.fl6_dst, daddr);
	if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
		ipv6_addr_copy(&fl.fl6_src, &np->saddr);

	/* merge ip6_build_xmit from ip6_output */
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto out;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto out;

	if (hlimit < 0) {
		if (ipv6_addr_is_multicast(&fl.fl6_dst))
			hlimit = np->mcast_hops;
		else
			hlimit = np->hop_limit;
		if (hlimit < 0)
			hlimit = dst_metric(dst, RTAX_HOPLIMIT);
		if (hlimit < 0)
			hlimit = ipv6_get_hoplimit(dst->dev);
	}

	if (tclass < 0) {
		tclass = np->cork.tclass;
		if (tclass < 0)
			tclass = 0;
	}

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;

back_from_confirm:
	if (inet->hdrincl) {
		err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, (struct rt6_info*)dst, msg->msg_flags);
	} else {
		lock_sock(sk);
		err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
			len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst,
			msg->msg_flags);

		if (err)
			ip6_flush_pending_frames(sk);
		else if (!(msg->msg_flags & MSG_MORE))
			err = rawv6_push_pending_frames(sk, &fl, rp);
		release_sock(sk);
	}
done:
	ip6_dst_store(sk, dst,
		      ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ?
		      &np->daddr : NULL);

out:
	fl6_sock_release(flowlabel);
	return err < 0 ? err : len;
do_confirm:
	dst_confirm(dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto done;
}
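
/*
 * For reference (not part of the original file): the nexthdr protocol
 * is fixed at socket() creation time, and sin6_port must be zero or
 * repeat the same value, as checked above.  Illustrative userspace
 * sketch sending an ICMPv6 echo request (the kernel computes the
 * checksum for ICMPv6 sockets):
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	struct sockaddr_in6 dst = { .sin6_family = AF_INET6 };
 *	struct icmp6_hdr hdr = { .icmp6_type = ICMP6_ECHO_REQUEST };
 *
 *	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
 *	sendto(fd, &hdr, sizeof(hdr), 0,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 */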
static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
			       char __user *optval, int optlen)
{
	switch (optname) {
	case ICMPV6_FILTER:
		if (optlen > sizeof(struct icmp6_filter))
			optlen = sizeof(struct icmp6_filter);
		if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}
}
static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	int len;

	switch (optname) {
	case ICMPV6_FILTER:
		if (get_user(len, optlen))
			return -EFAULT;
		if (len < 0)
			return -EINVAL;
		if (len > sizeof(struct icmp6_filter))
			len = sizeof(struct icmp6_filter);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}
}
static int rawv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val;

	switch (level) {
	case SOL_RAW:
		break;

	case SOL_ICMPV6:
		if (inet_sk(sk)->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_seticmpfilter(sk, level, optname, optval,
					   optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return ipv6_setsockopt(sk, level, optname, optval,
				       optlen);
	}

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case IPV6_CHECKSUM:
		/* You may get strange results with a positive odd offset;
		   RFC2292bis agrees with me. */
		if (val > 0 && (val & 1))
			return -EINVAL;
		if (val < 0) {
			rp->checksum = 0;
		} else {
			rp->checksum = 1;
			rp->offset = val;
		}
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}
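
/*
 * For reference (not part of the original file): userspace view of the
 * IPV6_CHECKSUM handling above.  The value names the offset of the
 * 16-bit checksum field inside the payload; it must be even, and -1
 * disables checksumming.  Illustrative sketch:
 *
 *	int offset = 2;		// checksum field at payload offset 2
 *
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_CHECKSUM,
 *		   &offset, sizeof(offset));
 */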
static int rawv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val, len;

	switch (level) {
	case SOL_RAW:
		break;

	case SOL_ICMPV6:
		if (inet_sk(sk)->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_geticmpfilter(sk, level, optname, optval,
					   optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return ipv6_getsockopt(sk, level, optname, optval,
				       optlen);
	}

	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case IPV6_CHECKSUM:
		/* -1 means checksumming is disabled */
		if (rp->checksum == 0)
			val = -1;
		else
			val = rp->offset;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, sizeof(int), len);

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ: {
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ: {
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL)
			amount = skb->tail - skb->h.raw;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}
}
static void rawv6_close(struct sock *sk, long timeout)
{
	if (inet_sk(sk)->num == IPPROTO_RAW)
		ip6_ra_control(sk, -1, NULL);

	sk_common_release(sk);
}
static int rawv6_init_sk(struct sock *sk)
{
	if (inet_sk(sk)->num == IPPROTO_ICMPV6) {
		struct raw6_sock *rp = raw6_sk(sk);
		/* ICMPv6 checksums are always kernel-computed; the
		 * checksum field sits at offset 2 of the ICMPv6 header.
		 */
		rp->checksum = 1;
		rp->offset   = 2;
	}
	return 0;
}
struct proto rawv6_prot = {
	.name		= "RAWv6",
	.owner		= THIS_MODULE,
	.close		= rawv6_close,
	.connect	= ip6_datagram_connect,
	.disconnect	= udp_disconnect,
	.ioctl		= rawv6_ioctl,
	.init		= rawv6_init_sk,
	.destroy	= inet6_destroy_sock,
	.setsockopt	= rawv6_setsockopt,
	.getsockopt	= rawv6_getsockopt,
	.sendmsg	= rawv6_sendmsg,
	.recvmsg	= rawv6_recvmsg,
	.bind		= rawv6_bind,
	.backlog_rcv	= rawv6_rcv_skb,
	.hash		= raw_v6_hash,
	.unhash		= raw_v6_unhash,
	.obj_size	= sizeof(struct raw6_sock),
};
#ifdef CONFIG_PROC_FS
struct raw6_iter_state {
	int	bucket;
};

#define raw6_seq_private(seq)	((struct raw6_iter_state *)(seq)->private)
static struct sock *raw6_get_first(struct seq_file *seq)
{
	struct sock *sk;
	struct hlist_node *node;
	struct raw6_iter_state *state = raw6_seq_private(seq);

	for (state->bucket = 0; state->bucket < RAWV6_HTABLE_SIZE; ++state->bucket)
		sk_for_each(sk, node, &raw_v6_htable[state->bucket])
			if (sk->sk_family == PF_INET6)
				goto out;
	sk = NULL;
out:
	return sk;
}
static struct sock *raw6_get_next(struct seq_file *seq, struct sock *sk)
{
	struct raw6_iter_state *state = raw6_seq_private(seq);

	do {
		sk = sk_next(sk);
try_again:
		;
	} while (sk && sk->sk_family != PF_INET6);

	if (!sk && ++state->bucket < RAWV6_HTABLE_SIZE) {
		sk = sk_head(&raw_v6_htable[state->bucket]);
		goto try_again;
	}
	return sk;
}
static struct sock *raw6_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = raw6_get_first(seq);
	if (sk)
		while (pos && (sk = raw6_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}
static void *raw6_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&raw_v6_lock);
	return *pos ? raw6_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *raw6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = raw6_get_first(seq);
	else
		sk = raw6_get_next(seq, v);
	++*pos;
	return sk;
}
static void raw6_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&raw_v6_lock);
}
static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
{
	struct ipv6_pinfo *np = inet6_sk(sp);
	struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = 0;
	srcp  = inet_sk(sp)->num;
	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   atomic_read(&sp->sk_wmem_alloc),
		   atomic_read(&sp->sk_rmem_alloc),
		   0, 0L, 0,
		   sock_i_uid(sp), 0,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp);
}
static int raw6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq,
			   "  sl  "
			   "local_address                         "
			   "remote_address                        "
			   "st tx_queue rx_queue tr tm->when retrnsmt"
			   "   uid  timeout inode\n");
	else
		raw6_sock_seq_show(seq, v, raw6_seq_private(seq)->bucket);
	return 0;
}
static struct seq_operations raw6_seq_ops = {
	.start =	raw6_seq_start,
	.next =		raw6_seq_next,
	.stop =		raw6_seq_stop,
	.show =		raw6_seq_show,
};
static int raw6_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct raw6_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	rc = seq_open(file, &raw6_seq_ops);
	if (rc)
		goto out_kfree;
	seq = file->private_data;
	seq->private = s;
	memset(s, 0, sizeof(*s));
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}
static struct file_operations raw6_seq_fops = {
	.owner =	THIS_MODULE,
	.open =		raw6_seq_open,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	seq_release_private,
};
int __init raw6_proc_init(void)
{
	if (!proc_net_fops_create("raw6", S_IRUGO, &raw6_seq_fops))
		return -ENOMEM;
	return 0;
}

void raw6_proc_exit(void)
{
	proc_net_remove("raw6");
}
#endif	/* CONFIG_PROC_FS */