/*
 *	An implementation of the DCCP protocol
 *	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/semaphore.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

atomic_t dccp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(dccp_orphan_count);
struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
	.lhash_lock	= RW_LOCK_UNLOCKED,
	.lhash_users	= ATOMIC_INIT(0),
	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
};

EXPORT_SYMBOL_GPL(dccp_hashinfo);
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p) %-10.10s -> %s\n",
		      dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_CLOSING || oldstate == DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(&dccp_hashinfo, sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);
void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);
const char *dccp_packet_name(const int type)
{
	static const char *dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);
const char *dccp_state_name(const int state)
{
	static const char *dccp_state_names[] = {
		[DCCP_OPEN]	  = "OPEN",
		[DCCP_REQUESTING] = "REQUESTING",
		[DCCP_PARTOPEN]	  = "PARTOPEN",
		[DCCP_LISTEN]	  = "LISTEN",
		[DCCP_RESPOND]	  = "RESPOND",
		[DCCP_CLOSING]	  = "CLOSING",
		[DCCP_TIME_WAIT]  = "TIME_WAIT",
		[DCCP_CLOSED]	  = "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);
void dccp_hash(struct sock *sk)
{
	inet_hash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_hash);

void dccp_unhash(struct sock *sk)
{
	inet_unhash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_unhash);
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_minisock *dmsk = dccp_msk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_minisock_init(&dp->dccps_minisock);
	do_gettimeofday(&dp->dccps_epoch);

	/*
	 * FIXME: We're hardcoding the CCID, and doing this at this point makes
	 * the listening (master) sock get CCID control blocks, which is not
	 * necessary, but for now, to not mess with the test userspace apps,
	 * lets leave it here, later the real solution is to do this in a
	 * setsockopt(CCIDs-I-want/accept). -acme
	 */
	if (likely(ctl_sock_initialized)) {
		int rc = dccp_feat_init(dmsk);

		if (rc)
			return rc;

		if (dmsk->dccpms_send_ack_vector) {
			dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(GFP_KERNEL);
			if (dp->dccps_hc_rx_ackvec == NULL)
				return -ENOMEM;
		}
		dp->dccps_hc_rx_ccid = ccid_hc_rx_new(dmsk->dccpms_rx_ccid,
						      sk, GFP_KERNEL);
		dp->dccps_hc_tx_ccid = ccid_hc_tx_new(dmsk->dccpms_tx_ccid,
						      sk, GFP_KERNEL);
		if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
			     dp->dccps_hc_tx_ccid == NULL)) {
			ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
			ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
			if (dmsk->dccpms_send_ack_vector) {
				dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
				dp->dccps_hc_rx_ackvec = NULL;
			}
			dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
			return -ENOMEM;
		}
	} else {
		/* control socket doesn't need feat nego */
		INIT_LIST_HEAD(&dmsk->dccpms_pending);
		INIT_LIST_HEAD(&dmsk->dccpms_conf);
	}

	dccp_init_xmit_timers(sk);
	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_INVALID_VALUE;
	dp->dccps_l_ack_ratio	= dp->dccps_r_ack_ratio = 1;

	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);
int dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_minisock *dmsk = dccp_msk(sk);

	/*
	 * DCCP doesn't use sk_write_queue, just sk_send_head
	 * for retransmissions
	 */
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(&dccp_hashinfo, sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dmsk->dccpms_send_ack_vector) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_clean(dmsk);

	return 0;
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);
static inline int dccp_listen_start(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/*
	 * Apps need to use setsockopt(DCCP_SOCKOPT_SERVICE)
	 * before calling listen()
	 */
	if (dccp_service_not_initialized(sk))
		return -EPROTO;
	return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
}
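
/*
 * A minimal userspace sketch of the rule above (illustrative only; it
 * assumes <linux/dccp.h> exposes SOCK_DCCP, SOL_DCCP and
 * DCCP_SOCKOPT_SERVICE, and the service code 42 is a made-up value):
 *
 *	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *	__be32 service = htonl(42);
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *		   &service, sizeof(service));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 5);
 *
 * Skipping the setsockopt() call leaves dccps_service at
 * DCCP_SERVICE_INVALID_VALUE, so dccp_listen_start() fails with -EPROTO.
 */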
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/* ABORT function of RFC793 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	/* FIXME: do the active reset thing */
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);
/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else { /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >=
				    sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	dccp_pr_debug("entry\n");
	return -ENOIOCTLCMD;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;
	kfree(dp->dccps_service_list);
	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}
/* Byte 1 is the feature number; the rest is the preference list. */
static int dccp_setsockopt_change(struct sock *sk, int type,
				  struct dccp_so_feat __user *optval)
{
	struct dccp_so_feat opt;
	u8 *val;
	int rc;

	if (copy_from_user(&opt, optval, sizeof(opt)))
		return -EFAULT;

	val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
	if (val == NULL)
		return -ENOMEM;

	if (copy_from_user(val, opt.dccpsf_val, opt.dccpsf_len)) {
		kfree(val);
		return -EFAULT;
	}

	/* dccp_feat_change() keeps val on success, so free it on failure */
	rc = dccp_feat_change(dccp_msk(sk), type, opt.dccpsf_feat,
			      val, opt.dccpsf_len, GFP_KERNEL);
	if (rc)
		kfree(val);
	return rc;
}
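
/*
 * Sketch of how userspace might fill struct dccp_so_feat for the
 * CHANGE_L/CHANGE_R options handled above (the CCID numbers below are
 * illustrative, not a recommendation):
 *
 *	unsigned char prefs[2] = { 3, 2 };	// prefer CCID3 over CCID2
 *	struct dccp_so_feat opt = {
 *		.dccpsf_feat = DCCPF_CCID,
 *		.dccpsf_val  = prefs,
 *		.dccpsf_len  = sizeof(prefs),
 *	};
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CHANGE_L, &opt, sizeof(opt));
 */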
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int optlen)
{
	struct dccp_sock *dp;
	int val, err = 0;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		dp->dccps_packet_size = val;
		break;
	case DCCP_SOCKOPT_CHANGE_L:
		if (optlen != sizeof(struct dccp_so_feat))
			err = -EINVAL;
		else
			err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
						     (struct dccp_so_feat __user *)
						     optval);
		break;
	case DCCP_SOCKOPT_CHANGE_R:
		if (optlen != sizeof(struct dccp_so_feat))
			err = -EINVAL;
		else
			err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R,
						     (struct dccp_so_feat __user *)
						     optval);
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif
static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if (dccp_service_not_initialized(sk))
		goto out;

	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		val = dp->dccps_packet_size;
		len = sizeof(dp->dccps_packet_size);
		break;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case 128 ... 191:	/* CCID rx half-connection options */
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:	/* CCID tx half-connection options */
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
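
/*
 * Example getsockopt() call for the DCCP_SOCKOPT_SERVICE branch above
 * (a sketch; the buffer must hold the service code plus the service
 * list, hence the u32-sized slots):
 *
 *	u32 buf[DCCP_SERVICE_LIST_MAX_LEN + 1];
 *	int len = sizeof(buf);
 *	getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, buf, &len);
 *	// buf[0] is the service code, buf[1..] the list, len the total
 */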
int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	rc = dccp_write_xmit(sk, skb, &timeo);
	/*
	 * XXX we don't use sk_write_queue, so just discard the packet.
	 *     Current plan however is to _use_ sk_write_queue with
	 *     an algorithm similar to tcp_sendmsg, where the main difference
	 *     is that in DCCP we have to respect packet boundaries, so
	 *     no coalescing of skbs.
	 *
	 *     This bug was _quickly_ found & fixed by just looking at an OSTRA
	 *     generated callgraph 8) -acme
	 */
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);
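
/*
 * Note on semantics: unlike TCP, each dccp_sendmsg() call produces at
 * most one DCCP-Data packet (len is capped by dccps_mss_cache above),
 * so an application streaming a large buffer must segment it itself,
 * e.g. (sketch, with mss a value the app obtains out of band):
 *
 *	while (off < total)
 *		off += send(fd, buf + off,
 *			    min(total - off, mss), 0);
 */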
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		if (dh->dccph_type == DCCP_PKT_DATA ||
		    dh->dccph_type == DCCP_PKT_DATAACK)
			goto found_ok_skb;

		if (dh->dccph_type == DCCP_PKT_RESET ||
		    dh->dccph_type == DCCP_PKT_CLOSE) {
			dccp_pr_debug("found fin ok!\n");
			len = 0;
			goto found_fin_ok;
		}
		dccp_pr_debug("packet_type=%s\n",
			      dccp_packet_name(dh->dccph_type));
		sk_eat_skb(sk, skb, 0);
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb, 0);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);
int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);
static const unsigned char dccp_new_state[] = {
	/* current state:    new state:		 action:	*/
	[0]		  = DCCP_CLOSED,
	[DCCP_OPEN]	  = DCCP_CLOSING | DCCP_ACTION_FIN,
	[DCCP_REQUESTING] = DCCP_CLOSED,
	[DCCP_PARTOPEN]	  = DCCP_CLOSING | DCCP_ACTION_FIN,
	[DCCP_LISTEN]	  = DCCP_CLOSED,
	[DCCP_RESPOND]	  = DCCP_CLOSED,
	[DCCP_CLOSING]	  = DCCP_CLOSED,
	[DCCP_TIME_WAIT]  = DCCP_CLOSED,
	[DCCP_CLOSED]	  = DCCP_CLOSED,
};
static int dccp_close_state(struct sock *sk)
{
	const int next = dccp_new_state[sk->sk_state];
	const int ns = next & DCCP_STATE_MASK;

	if (ns != sk->sk_state)
		dccp_set_state(sk, ns);

	return next & DCCP_ACTION_FIN;
}
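
/*
 * Worked example of the encoding above: closing from DCCP_OPEN reads
 * DCCP_CLOSING | DCCP_ACTION_FIN from dccp_new_state[], so the socket
 * moves to DCCP_CLOSING and the non-zero DCCP_ACTION_FIN bit tells
 * dccp_close() below to send a CLOSE packet; closing from DCCP_LISTEN
 * reads plain DCCP_CLOSED and nothing is sent.
 */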
void dccp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*
	 * We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	/* FIXME: check for unread data */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		__kfree_skb(skb);
	}

	if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (dccp_close_state(sk)) {
		dccp_send_close(sk, 1);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);
	atomic_inc(sk->sk_prot->orphan_count);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	/*
	 * The last release_sock may have processed the CLOSE or RESET
	 * packet moving sock to CLOSED state, if not we have to fire
	 * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
	 * in draft-ietf-dccp-spec-11. -acme
	 */
	if (sk->sk_state == DCCP_CLOSING) {
		/* FIXME: should start at 2 * RTT */
		/* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
#if 0
		/* Yeah, we should use sk->sk_prot->orphan_count, etc */
		dccp_set_state(sk, DCCP_CLOSED);
#endif
	}

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);
void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("entry\n");
}

EXPORT_SYMBOL_GPL(dccp_shutdown);
static int __init dccp_mib_init(void)
{
	int rc = -ENOMEM;

	dccp_statistics[0] = alloc_percpu(struct dccp_mib);
	if (dccp_statistics[0] == NULL)
		goto out;

	dccp_statistics[1] = alloc_percpu(struct dccp_mib);
	if (dccp_statistics[1] == NULL)
		goto out_free_one;

	rc = 0;
out:
	return rc;
out_free_one:
	free_percpu(dccp_statistics[0]);
	dccp_statistics[0] = NULL;
	goto out;
}

static void dccp_mib_exit(void)
{
	free_percpu(dccp_statistics[0]);
	free_percpu(dccp_statistics[1]);
	dccp_statistics[0] = dccp_statistics[1] = NULL;
}
static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, int, 0444);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif
static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc = -ENOBUFS;

	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (num_physpages >= (128 * 1024))
		goal = num_physpages >> (21 - PAGE_SHIFT);
	else
		goal = num_physpages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);
		dccp_hashinfo.ehash_size >>= 1;
		while (dccp_hashinfo.ehash_size &
		       (dccp_hashinfo.ehash_size - 1))
			dccp_hashinfo.ehash_size--;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		printk(KERN_CRIT "Failed to allocate DCCP "
				 "established hash table\n");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
		rwlock_init(&dccp_hashinfo.ehash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
	}

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
		goto out_free_dccp_ehash;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;
out:
	return rc;
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
	dccp_hashinfo.bhash = NULL;
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
	dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_hashinfo.bind_bucket_cachep = NULL;
	goto out;
}
static void __exit dccp_fini(void)
{
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order(dccp_hashinfo.ehash_size *
			     sizeof(struct inet_ehash_bucket)));
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
}
module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Control Protocol");