2 * inet_diag.c Module for monitoring INET transport protocols sockets.
4 * Version: $Id: inet_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $
6 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/fcntl.h>
17 #include <linux/random.h>
18 #include <linux/cache.h>
19 #include <linux/init.h>
20 #include <linux/time.h>
25 #include <net/inet_common.h>
26 #include <net/inet_connection_sock.h>
27 #include <net/inet_hashtables.h>
28 #include <net/inet_timewait_sock.h>
29 #include <net/inet6_hashtables.h>
31 #include <linux/inet.h>
32 #include <linux/stddef.h>
34 #include <linux/inet_diag.h>
/* Table of registered diag handlers (tcp_diag, dccp_diag, ...), indexed
 * by netlink message type; written under inet_diag_register_lock. */
36 static const struct inet_diag_handler **inet_diag_table;
/* Key used when matching a socket against user-supplied filter bytecode.
 * NOTE(review): the member list is missing from this extraction. */
38 struct inet_diag_entry {
/* Kernel-side netlink socket for the NETLINK_INET_DIAG protocol. */
47 static struct sock *idiagnl;
/* Reserve a netlink attribute of @attrtype/@attrlen in @skb and return a
 * pointer to its payload. */
49 #define INET_DIAG_PUT(skb, attrtype, attrlen) \
50 RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
/*
 * inet_csk_diag_fill - dump one full (non-timewait) socket into @skb as
 * an inet_diag_msg netlink message, plus any optional attributes the
 * requester asked for via the @ext bitmask.
 *
 * NOTE(review): several lines are missing from this extraction (opening
 * brace, 'nlh'/'info' declarations, goto labels, closing braces) — this
 * fragment will not compile as shown.
 */
52 static int inet_csk_diag_fill(struct sock *sk,
54 int ext, u32 pid, u32 seq, u16 nlmsg_flags,
55 const struct nlmsghdr *unlh)
57 const struct inet_sock *inet = inet_sk(sk);
58 const struct inet_connection_sock *icsk = inet_csk(sk);
59 struct inet_diag_msg *r;
62 struct inet_diag_meminfo *minfo = NULL;
/* Remember where this message starts so its length can be computed and
 * so the skb can be rolled back on failure. */
63 unsigned char *b = skb->tail;
64 const struct inet_diag_handler *handler;
/* Handler registered for this nlmsg_type; the caller already filtered
 * out unregistered types, hence the BUG_ON. */
66 handler = inet_diag_table[unlh->nlmsg_type];
67 BUG_ON(handler == NULL);
69 nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
70 nlh->nlmsg_flags = nlmsg_flags;
/* Timewait sockets must go through inet_twsk_diag_fill() instead. */
73 BUG_ON(sk->sk_state == TCP_TIME_WAIT);
/* Optional attributes, selected by bits (INET_DIAG_* - 1) in @ext. */
75 if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
76 minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
78 if (ext & (1 << (INET_DIAG_INFO - 1)))
79 info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
80 handler->idiag_info_size);
/* Congestion-control algorithm name, NUL included. */
82 if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
83 const size_t len = strlen(icsk->icsk_ca_ops->name);
85 strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
86 icsk->icsk_ca_ops->name);
89 r->idiag_family = sk->sk_family;
90 r->idiag_state = sk->sk_state;
94 r->id.idiag_if = sk->sk_bound_dev_if;
/* Cookie = kernel socket pointer split into two 32-bit halves; the
 * ">> 31 >> 1" dance avoids UB shifting by 32 on 32-bit arches. */
95 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
96 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
98 r->id.idiag_sport = inet->sport;
99 r->id.idiag_dport = inet->dport;
100 r->id.idiag_src[0] = inet->rcv_saddr;
101 r->id.idiag_dst[0] = inet->daddr;
103 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
104 if (r->idiag_family == AF_INET6) {
105 struct ipv6_pinfo *np = inet6_sk(sk);
107 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
109 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
/* Convert a jiffies deadline into milliseconds-until-expiry, rounded up. */
114 #define EXPIRES_IN_MS(tmo) ((tmo - jiffies) * 1000 + HZ - 1) / HZ
/* Report whichever retransmit/probe/keepalive timer is pending. */
116 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
118 r->idiag_retrans = icsk->icsk_retransmits;
119 r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
120 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
122 r->idiag_retrans = icsk->icsk_probes_out;
123 r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
124 } else if (timer_pending(&sk->sk_timer)) {
126 r->idiag_retrans = icsk->icsk_probes_out;
127 r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
130 r->idiag_expires = 0;
134 r->idiag_uid = sock_i_uid(sk);
135 r->idiag_inode = sock_i_ino(sk);
/* Fill the MEMINFO attribute reserved above, if it was requested. */
138 minfo->idiag_rmem = atomic_read(&sk->sk_rmem_alloc);
139 minfo->idiag_wmem = sk->sk_wmem_queued;
140 minfo->idiag_fmem = sk->sk_forward_alloc;
141 minfo->idiag_tmem = atomic_read(&sk->sk_wmem_alloc);
/* Protocol-specific info (e.g. struct tcp_info for TCP). */
144 handler->idiag_get_info(sk, r, info);
/* Let the congestion-control module append its own attributes. */
146 if (sk->sk_state < TCP_TIME_WAIT &&
147 icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
148 icsk->icsk_ca_ops->get_info(sk, ext, skb);
150 nlh->nlmsg_len = skb->tail - b;
/* Error path: discard the partially-built message. */
155 skb_trim(skb, b - skb->data);
/*
 * inet_twsk_diag_fill - dump one TIME_WAIT mini-socket into @skb.
 * Timewait sockets carry much less state than full sockets, so most
 * idiag fields are zeroed or taken from struct inet_timewait_sock.
 *
 * NOTE(review): lines are missing from this extraction ('tmo'
 * declaration, braces, several zeroed fields) — fragment as shown.
 */
159 static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
160 struct sk_buff *skb, int ext, u32 pid,
161 u32 seq, u16 nlmsg_flags,
162 const struct nlmsghdr *unlh)
165 struct inet_diag_msg *r;
/* Start of this message, for length computation / failure rollback. */
166 const unsigned char *previous_tail = skb->tail;
167 struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
168 unlh->nlmsg_type, sizeof(*r));
171 BUG_ON(tw->tw_state != TCP_TIME_WAIT);
173 nlh->nlmsg_flags = nlmsg_flags;
/* Remaining lifetime of the timewait slot, in jiffies. */
175 tmo = tw->tw_ttd - jiffies;
179 r->idiag_family = tw->tw_family;
180 r->idiag_state = tw->tw_state;
182 r->idiag_retrans = 0;
183 r->id.idiag_if = tw->tw_bound_dev_if;
/* Cookie = kernel pointer in two 32-bit halves (matches get_exact). */
184 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
185 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
186 r->id.idiag_sport = tw->tw_sport;
187 r->id.idiag_dport = tw->tw_dport;
188 r->id.idiag_src[0] = tw->tw_rcv_saddr;
189 r->id.idiag_dst[0] = tw->tw_daddr;
/* Report the substate (e.g. FIN_WAIT2 vs TIME_WAIT proper). */
190 r->idiag_state = tw->tw_substate;
/* Jiffies -> milliseconds, rounded up. */
192 r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ;
197 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
198 if (tw->tw_family == AF_INET6) {
199 const struct inet6_timewait_sock *tw6 =
200 inet6_twsk((struct sock *)tw);
202 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
203 &tw6->tw_v6_rcv_saddr);
204 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
208 nlh->nlmsg_len = skb->tail - previous_tail;
/* Error path: discard the partially-built message. */
211 skb_trim(skb, previous_tail - skb->data);
/*
 * sk_diag_fill - dispatch a socket dump to the timewait or full-socket
 * fill routine depending on sk_state; same arguments either way.
 */
215 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
216 int ext, u32 pid, u32 seq, u16 nlmsg_flags,
217 const struct nlmsghdr *unlh)
/* A TIME_WAIT "socket" is really a struct inet_timewait_sock. */
219 if (sk->sk_state == TCP_TIME_WAIT)
220 return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
221 skb, ext, pid, seq, nlmsg_flags,
223 return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh);
/*
 * inet_diag_get_exact - handle a non-dump request: look up the single
 * socket named by the 4-tuple in the request, verify the caller's
 * cookie against the socket pointer, and unicast one reply message.
 *
 * NOTE(review): error-handling lines and labels are missing from this
 * extraction — fragment as shown.
 */
226 static int inet_diag_get_exact(struct sk_buff *in_skb,
227 const struct nlmsghdr *nlh)
231 struct inet_diag_req *req = NLMSG_DATA(nlh);
233 struct inet_hashinfo *hashinfo;
234 const struct inet_diag_handler *handler;
236 handler = inet_diag_table[nlh->nlmsg_type];
237 BUG_ON(handler == NULL);
238 hashinfo = handler->idiag_hashinfo;
/* Established-table lookup keyed by (daddr, dport, saddr, sport, if).
 * Note the request's dst is the lookup's source side and vice versa. */
240 if (req->idiag_family == AF_INET) {
241 sk = inet_lookup(hashinfo, req->id.idiag_dst[0],
242 req->id.idiag_dport, req->id.idiag_src[0],
243 req->id.idiag_sport, req->id.idiag_if);
245 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
246 else if (req->idiag_family == AF_INET6) {
247 sk = inet6_lookup(hashinfo,
248 (struct in6_addr *)req->id.idiag_dst,
250 (struct in6_addr *)req->id.idiag_src,
/* Unless the caller passed NOCOOKIE, both cookie halves must match the
 * kernel pointer of the socket we found (guards against reuse). */
263 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
264 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
265 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
266 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
/* Worst-case reply size: base msg + meminfo + proto info + slack. */
270 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
271 sizeof(struct inet_diag_meminfo) +
272 handler->idiag_info_size + 64)),
277 err = sk_diag_fill(sk, rep, req->idiag_ext,
278 NETLINK_CB(in_skb).pid,
279 nlh->nlmsg_seq, 0, nlh);
/* The skb was sized above, so the fill must not overflow it. */
281 WARN_ON(err == -EMSGSIZE);
285 err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
/* Drop the reference taken by the lookup; timewait socks have their
 * own put routine. */
292 if (sk->sk_state == TCP_TIME_WAIT)
293 inet_twsk_put((struct inet_timewait_sock *)sk);
/*
 * bitstring_match - compare the first @bits bits of two big-endian
 * address bitstrings: whole 32-bit words via memcmp, then the trailing
 * partial word under a network-order mask. Non-zero on match.
 *
 * NOTE(review): several lines (w1/w2 setup, returns) are missing from
 * this extraction — fragment as shown.
 */
300 static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
302 int words = bits >> 5;
307 if (memcmp(a1, a2, words << 2))
/* Mask covering the remaining (bits & 31) high-order bits. */
317 mask = htonl((0xffffffff) << (32 - bits));
319 if ((w1 ^ w2) & mask)
327 static int inet_diag_bc_run(const void *bc, int len,
328 const struct inet_diag_entry *entry)
332 const struct inet_diag_bc_op *op = bc;
335 case INET_DIAG_BC_NOP:
337 case INET_DIAG_BC_JMP:
340 case INET_DIAG_BC_S_GE:
341 yes = entry->sport >= op[1].no;
343 case INET_DIAG_BC_S_LE:
344 yes = entry->dport <= op[1].no;
346 case INET_DIAG_BC_D_GE:
347 yes = entry->dport >= op[1].no;
349 case INET_DIAG_BC_D_LE:
350 yes = entry->dport <= op[1].no;
352 case INET_DIAG_BC_AUTO:
353 yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
355 case INET_DIAG_BC_S_COND:
356 case INET_DIAG_BC_D_COND: {
357 struct inet_diag_hostcond *cond;
360 cond = (struct inet_diag_hostcond *)(op + 1);
361 if (cond->port != -1 &&
362 cond->port != (op->code == INET_DIAG_BC_S_COND ?
363 entry->sport : entry->dport)) {
368 if (cond->prefix_len == 0)
371 if (op->code == INET_DIAG_BC_S_COND)
376 if (bitstring_match(addr, cond->addr,
379 if (entry->family == AF_INET6 &&
380 cond->family == AF_INET) {
381 if (addr[0] == 0 && addr[1] == 0 &&
382 addr[2] == htonl(0xffff) &&
383 bitstring_match(addr + 3, cond->addr,
/*
 * valid_cc - check that a conditional-jump target @cc (byte offset from
 * the end of the program) lands exactly on an instruction boundary of
 * the remaining bytecode. Used by inet_diag_bc_audit() for JMP targets.
 *
 * NOTE(review): the loop body and returns are missing from this
 * extraction — fragment as shown.
 */
403 static int valid_cc(const void *bc, int len, int cc)
406 const struct inet_diag_bc_op *op = bc;
/*
 * inet_diag_bc_audit - validate user-supplied filter bytecode before it
 * is ever run: every opcode must be known and every yes/no jump must
 * stay within the program, so inet_diag_bc_run() cannot walk out of
 * bounds. Returns 0 if the program is acceptable, -EINVAL otherwise.
 *
 * NOTE(review): the enclosing while-loop, returns and pointer advance
 * are missing from this extraction — fragment as shown.
 */
420 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
422 const unsigned char *bc = bytecode;
423 int len = bytecode_len;
426 struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
428 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
430 case INET_DIAG_BC_AUTO:
431 case INET_DIAG_BC_S_COND:
432 case INET_DIAG_BC_D_COND:
433 case INET_DIAG_BC_S_GE:
434 case INET_DIAG_BC_S_LE:
435 case INET_DIAG_BC_D_GE:
436 case INET_DIAG_BC_D_LE:
/* Minimum instruction size is 4 bytes; the jump may not run past the
 * end of the program. */
437 if (op->yes < 4 || op->yes > len + 4)
439 case INET_DIAG_BC_JMP:
440 if (op->no < 4 || op->no > len + 4)
/* The "no" target must also land on an instruction boundary. */
443 !valid_cc(bytecode, bytecode_len, len - op->no))
446 case INET_DIAG_BC_NOP:
447 if (op->yes < 4 || op->yes > len + 4)
/* A well-formed program consumes itself exactly. */
456 return len == 0 ? 0 : -EINVAL;
/*
 * inet_csk_diag_dump - dump one full socket during a NLM_F_DUMP walk.
 * If the request carries trailing filter bytecode, build an
 * inet_diag_entry for this socket and run the filter first; sockets
 * that do not match are skipped (return 0).
 */
459 static int inet_csk_diag_dump(struct sock *sk,
461 struct netlink_callback *cb)
463 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
/* Anything beyond the fixed request struct is the bytecode attribute. */
465 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
466 struct inet_diag_entry entry;
467 struct rtattr *bc = (struct rtattr *)(r + 1);
468 struct inet_sock *inet = inet_sk(sk);
470 entry.family = sk->sk_family;
471 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
472 if (entry.family == AF_INET6) {
473 struct ipv6_pinfo *np = inet6_sk(sk);
475 entry.saddr = np->rcv_saddr.s6_addr32;
476 entry.daddr = np->daddr.s6_addr32;
480 entry.saddr = &inet->rcv_saddr;
481 entry.daddr = &inet->daddr;
/* sport is the host-order local port; dport is converted from wire. */
483 entry.sport = inet->num;
484 entry.dport = ntohs(inet->dport);
485 entry.userlocks = sk->sk_userlocks;
487 if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
491 return inet_csk_diag_fill(sk, skb, r->idiag_ext,
492 NETLINK_CB(cb->skb).pid,
493 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
/*
 * inet_twsk_diag_dump - dump one TIME_WAIT socket during a dump walk,
 * applying the optional filter bytecode first (mirrors
 * inet_csk_diag_dump but reads fields from inet_timewait_sock).
 */
496 static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
498 struct netlink_callback *cb)
500 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
502 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
503 struct inet_diag_entry entry;
504 struct rtattr *bc = (struct rtattr *)(r + 1);
506 entry.family = tw->tw_family;
507 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
508 if (tw->tw_family == AF_INET6) {
509 struct inet6_timewait_sock *tw6 =
510 inet6_twsk((struct sock *)tw);
511 entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
512 entry.daddr = tw6->tw_v6_daddr.s6_addr32;
516 entry.saddr = &tw->tw_rcv_saddr;
517 entry.daddr = &tw->tw_daddr;
/* Local port host-order, remote port converted from wire order. */
519 entry.sport = tw->tw_num;
520 entry.dport = ntohs(tw->tw_dport);
523 if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
527 return inet_twsk_diag_fill(tw, skb, r->idiag_ext,
528 NETLINK_CB(cb->skb).pid,
529 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
/*
 * inet_diag_fill_req - dump one embryonic (SYN_RECV) connection request
 * hanging off listening socket @sk as an inet_diag_msg.
 *
 * NOTE(review): some lines (braces, 'tmo' declaration, zeroed fields,
 * labels) are missing from this extraction — fragment as shown.
 */
532 static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
533 struct request_sock *req, u32 pid, u32 seq,
534 const struct nlmsghdr *unlh)
536 const struct inet_request_sock *ireq = inet_rsk(req);
537 struct inet_sock *inet = inet_sk(sk);
/* Start of message, for length computation / rollback on failure. */
538 unsigned char *b = skb->tail;
539 struct inet_diag_msg *r;
540 struct nlmsghdr *nlh;
543 nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
544 nlh->nlmsg_flags = NLM_F_MULTI;
547 r->idiag_family = sk->sk_family;
/* Request sockets are always reported as SYN_RECV. */
548 r->idiag_state = TCP_SYN_RECV;
550 r->idiag_retrans = req->retrans;
552 r->id.idiag_if = sk->sk_bound_dev_if;
/* Cookie = request_sock pointer in two 32-bit halves. */
553 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
554 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
/* Time until the SYN-ACK retransmit/expiry timer fires. */
556 tmo = req->expires - jiffies;
560 r->id.idiag_sport = inet->sport;
561 r->id.idiag_dport = ireq->rmt_port;
562 r->id.idiag_src[0] = ireq->loc_addr;
563 r->id.idiag_dst[0] = ireq->rmt_addr;
564 r->idiag_expires = jiffies_to_msecs(tmo);
567 r->idiag_uid = sock_i_uid(sk);
569 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
570 if (r->idiag_family == AF_INET6) {
571 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
572 &inet6_rsk(req)->loc_addr);
573 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
574 &inet6_rsk(req)->rmt_addr);
577 nlh->nlmsg_len = skb->tail - b;
/* Error path: discard the partially-built message. */
582 skb_trim(skb, b - skb->data);
/*
 * inet_diag_dump_reqs - walk the SYN table of listening socket @sk and
 * dump every pending connection request, resuming from the position
 * saved in cb->args[] and applying the optional filter bytecode.
 *
 * NOTE(review): various lines (declarations of j/s_j/err, continue
 * statements, unlock/return tail) are missing from this extraction.
 */
586 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
587 struct netlink_callback *cb)
589 struct inet_diag_entry entry;
590 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
591 struct inet_connection_sock *icsk = inet_csk(sk);
592 struct listen_sock *lopt;
593 struct rtattr *bc = NULL;
594 struct inet_sock *inet = inet_sk(sk);
596 int reqnum, s_reqnum;
/* cb->args[4] holds the request index to resume from within a bucket. */
600 s_reqnum = cb->args[4];
605 entry.family = sk->sk_family;
/* Protect the listen socket's SYN table while we walk it. */
607 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
609 lopt = icsk->icsk_accept_queue.listen_opt;
610 if (!lopt || !lopt->qlen)
/* Trailing bytes after the request struct are the filter bytecode. */
613 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
614 bc = (struct rtattr *)(r + 1);
615 entry.sport = inet->num;
616 entry.userlocks = sk->sk_userlocks;
619 for (j = s_j; j < lopt->nr_table_entries; j++) {
620 struct request_sock *req, *head = lopt->syn_table[j];
623 for (req = head; req; reqnum++, req = req->dl_next) {
624 struct inet_request_sock *ireq = inet_rsk(req);
/* Skip entries already emitted in a previous dump round. */
626 if (reqnum < s_reqnum)
628 if (r->id.idiag_dport != ireq->rmt_port &&
/* Pick v6 or v4 address fields for the bytecode entry. */
634 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
635 (entry.family == AF_INET6) ?
636 inet6_rsk(req)->loc_addr.s6_addr32 :
640 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
641 (entry.family == AF_INET6) ?
642 inet6_rsk(req)->rmt_addr.s6_addr32 :
645 entry.dport = ntohs(ireq->rmt_port);
647 if (!inet_diag_bc_run(RTA_DATA(bc),
648 RTA_PAYLOAD(bc), &entry))
652 err = inet_diag_fill_req(skb, sk, req,
653 NETLINK_CB(cb->skb).pid,
654 cb->nlh->nlmsg_seq, cb->nlh);
/* skb full: remember where to resume and bail out. */
657 cb->args[4] = reqnum;
666 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
/*
 * inet_diag_dump - netlink dump callback: walk the listening hash (and
 * each listener's SYN table), then the established hash including
 * TIME_WAIT entries, emitting every socket that matches the requested
 * states and filters. cb->args[] stores the resume position across
 * invocations (args[0] = phase, args[1] = bucket, args[2] = index).
 *
 * NOTE(review): many lines (declarations of i/num/s_num/s_i/sk, num++
 * bookkeeping, goto labels, done/out tail) are missing from this
 * extraction — fragment as shown.
 */
671 static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
675 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
676 const struct inet_diag_handler *handler;
677 struct inet_hashinfo *hashinfo;
679 handler = inet_diag_table[cb->nlh->nlmsg_type];
680 BUG_ON(handler == NULL);
681 hashinfo = handler->idiag_hashinfo;
684 s_num = num = cb->args[2];
/* Phase 0: listening sockets (and their SYN_RECV requests). */
686 if (cb->args[0] == 0) {
687 if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
690 inet_listen_lock(hashinfo);
691 for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
693 struct hlist_node *node;
696 sk_for_each(sk, node, &hashinfo->listening_hash[i]) {
697 struct inet_sock *inet = inet_sk(sk);
/* Cheap port filter before running full bytecode. */
704 if (r->id.idiag_sport != inet->sport &&
708 if (!(r->idiag_states & TCPF_LISTEN) ||
/* skb full: unlock and let the next round resume here. */
713 if (inet_csk_diag_dump(sk, skb, cb) < 0) {
714 inet_listen_unlock(hashinfo);
719 if (!(r->idiag_states & TCPF_SYN_RECV))
722 if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
723 inet_listen_unlock(hashinfo);
737 inet_listen_unlock(hashinfo);
/* Reset counters before moving to the established-table phase. */
740 s_i = num = s_num = 0;
743 if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
/* Phase 1: established hash buckets, incl. the timewait chains. */
746 for (i = s_i; i < hashinfo->ehash_size; i++) {
747 struct inet_ehash_bucket *head = &hashinfo->ehash[i];
749 struct hlist_node *node;
754 read_lock_bh(&head->lock);
756 sk_for_each(sk, node, &head->chain) {
757 struct inet_sock *inet = inet_sk(sk);
761 if (!(r->idiag_states & (1 << sk->sk_state)))
763 if (r->id.idiag_sport != inet->sport &&
766 if (r->id.idiag_dport != inet->dport &&
769 if (inet_csk_diag_dump(sk, skb, cb) < 0) {
770 read_unlock_bh(&head->lock);
777 if (r->idiag_states & TCPF_TIME_WAIT) {
778 struct inet_timewait_sock *tw;
780 inet_twsk_for_each(tw, node,
785 if (r->id.idiag_sport != tw->tw_sport &&
788 if (r->id.idiag_dport != tw->tw_dport &&
791 if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
792 read_unlock_bh(&head->lock);
799 read_unlock_bh(&head->lock);
/*
 * inet_diag_rcv_msg - validate one incoming netlink request and route
 * it: NLM_F_DUMP requests (after auditing any attached filter bytecode)
 * start a dump, everything else is an exact single-socket query.
 *
 * NOTE(review): the error-return lines are missing from this
 * extraction — fragment as shown.
 */
808 static inline int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
810 if (!(nlh->nlmsg_flags&NLM_F_REQUEST))
813 if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX)
/* No handler module registered for this socket type. */
816 if (inet_diag_table[nlh->nlmsg_type] == NULL)
819 if (NLMSG_LENGTH(sizeof(struct inet_diag_req)) > skb->len)
822 if (nlh->nlmsg_flags&NLM_F_DUMP) {
/* Optional trailing attribute must be well-formed filter bytecode;
 * audit it before it can ever be executed. */
824 (4 + NLMSG_SPACE(sizeof(struct inet_diag_req)))) {
825 struct rtattr *rta = (void *)(NLMSG_DATA(nlh) +
826 sizeof(struct inet_diag_req));
827 if (rta->rta_type != INET_DIAG_REQ_BYTECODE ||
831 NLMSG_SPACE(sizeof(struct inet_diag_req))))
833 if (inet_diag_bc_audit(RTA_DATA(rta), RTA_PAYLOAD(rta)))
836 return netlink_dump_start(idiagnl, skb, nlh,
837 inet_diag_dump, NULL);
839 return inet_diag_get_exact(skb, nlh);
/*
 * inet_diag_rcv_skb - parse the netlink header out of one received skb,
 * dispatch the message, and ack it if an error occurred or the sender
 * requested an acknowledgement.
 */
846 static inline void inet_diag_rcv_skb(struct sk_buff *skb)
848 if (skb->len >= NLMSG_SPACE(0)) {
850 struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
/* Ignore malformed headers (truncated or inconsistent length). */
852 if (nlh->nlmsg_len < sizeof(*nlh) ||
853 skb->len < nlh->nlmsg_len)
855 err = inet_diag_rcv_msg(skb, nlh);
856 if (err || nlh->nlmsg_flags & NLM_F_ACK)
857 netlink_ack(skb, nlh, err);
/*
 * inet_diag_rcv - netlink data-ready callback: drain the receive queue,
 * handling each queued skb in turn. The queue length is sampled once so
 * messages arriving during the loop are left for the next callback.
 */
861 static void inet_diag_rcv(struct sock *sk, int len)
864 unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
866 while (qlen-- && (skb = skb_dequeue(&sk->sk_receive_queue))) {
867 inet_diag_rcv_skb(skb);
/* Serializes modifications of inet_diag_table by register/unregister. */
872 static DEFINE_SPINLOCK(inet_diag_register_lock);
/*
 * inet_diag_register - install a protocol handler (e.g. tcp_diag) into
 * inet_diag_table under its idiag_type slot. Fails for out-of-range
 * types or if the slot is already taken.
 *
 * NOTE(review): the err variable and return lines are missing from this
 * extraction — fragment as shown.
 */
874 int inet_diag_register(const struct inet_diag_handler *h)
876 const __u16 type = h->idiag_type;
879 if (type >= INET_DIAG_GETSOCK_MAX)
882 spin_lock(&inet_diag_register_lock);
/* Only claim the slot if no other handler owns it. */
884 if (inet_diag_table[type] == NULL) {
885 inet_diag_table[type] = h;
888 spin_unlock(&inet_diag_register_lock);
/*
 * inet_diag_unregister - remove a previously registered protocol
 * handler from inet_diag_table. Out-of-range types are ignored.
 */
894 void inet_diag_unregister(const struct inet_diag_handler *h)
896 const __u16 type = h->idiag_type;
898 if (type >= INET_DIAG_GETSOCK_MAX)
901 spin_lock(&inet_diag_register_lock);
902 inet_diag_table[type] = NULL;
903 spin_unlock(&inet_diag_register_lock);
/*
 * inet_diag_init - module init: allocate the (zeroed) handler table and
 * create the NETLINK_INET_DIAG kernel socket with inet_diag_rcv as its
 * input callback. On netlink failure the table is freed again.
 *
 * NOTE(review): the err variable and return lines are missing from this
 * extraction — fragment as shown.
 */
909 static int __init inet_diag_init(void)
911 const int inet_diag_table_size = (INET_DIAG_GETSOCK_MAX *
912 sizeof(struct inet_diag_handler *));
915 inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
916 if (!inet_diag_table)
919 idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv,
/* Error path: undo the table allocation. */
927 kfree(inet_diag_table);
/*
 * inet_diag_exit - module teardown: release the netlink socket and free
 * the handler table allocated in inet_diag_init().
 */
931 static void __exit inet_diag_exit(void)
933 sock_release(idiagnl->sk_socket);
934 kfree(inet_diag_table);
937 module_init(inet_diag_init);
938 module_exit(inet_diag_exit);
939 MODULE_LICENSE("GPL");