/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

atomic_t dccp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(dccp_orphan_count);

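/*
 * Lookup tables for established and bound DCCP sockets; exported since the
 * dccp_ipv4 and dccp_ipv6 address families share them.
 */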
struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
        .lhash_lock  = RW_LOCK_UNLOCKED,
        .lhash_users = ATOMIC_INIT(0),
        .lhash_wait  = __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
};

EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;

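/*
 * Move a socket into a new protocol state, keeping the CURRESTAB and
 * ESTABRESETS counters in sync on the relevant transitions and unhashing
 * the socket before it is marked CLOSED.
 */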
void dccp_set_state(struct sock *sk, const int state)
{
        const int oldstate = sk->sk_state;

        dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
                      dccp_state_name(oldstate), dccp_state_name(state));
        WARN_ON(state == oldstate);

        switch (state) {
        case DCCP_OPEN:
                if (oldstate != DCCP_OPEN)
                        DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
                /* Client retransmits all Confirm options until entering OPEN */
                if (oldstate == DCCP_PARTOPEN)
                        dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
                break;

        case DCCP_CLOSED:
                if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
                    oldstate == DCCP_CLOSING)
                        DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

                sk->sk_prot->unhash(sk);
                if (inet_csk(sk)->icsk_bind_hash != NULL &&
                    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
                        inet_put_port(sk);
                /* fall through */
        default:
                if (oldstate == DCCP_OPEN)
                        DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
        }

        /* Change state AFTER socket is unhashed to avoid closed
         * socket sitting in hash tables.
         */
        sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);

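/*
 * Passive side of connection teardown: answer a received Close with a
 * Reset, or a received CloseReq with a Close (RFC 4340, sec. 8.3).
 */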
static void dccp_finish_passive_close(struct sock *sk)
{
        switch (sk->sk_state) {
        case DCCP_PASSIVE_CLOSE:
                /* Node (client or server) has received Close packet. */
                dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
                dccp_set_state(sk, DCCP_CLOSED);
                break;
        case DCCP_PASSIVE_CLOSEREQ:
                /*
                 * Client received CloseReq. We set the `active' flag so that
                 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
                 */
                dccp_send_close(sk, 1);
                dccp_set_state(sk, DCCP_CLOSING);
        }
}

void dccp_done(struct sock *sk)
{
        dccp_set_state(sk, DCCP_CLOSED);
        dccp_clear_xmit_timers(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_state_change(sk);
        else
                inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
        static const char *dccp_packet_names[] = {
                [DCCP_PKT_REQUEST]  = "REQUEST",
                [DCCP_PKT_RESPONSE] = "RESPONSE",
                [DCCP_PKT_DATA]     = "DATA",
                [DCCP_PKT_ACK]      = "ACK",
                [DCCP_PKT_DATAACK]  = "DATAACK",
                [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
                [DCCP_PKT_CLOSE]    = "CLOSE",
                [DCCP_PKT_RESET]    = "RESET",
                [DCCP_PKT_SYNC]     = "SYNC",
                [DCCP_PKT_SYNCACK]  = "SYNCACK",
        };

        if (type >= DCCP_NR_PKT_TYPES)
                return "INVALID";
        else
                return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

const char *dccp_state_name(const int state)
{
        static const char *dccp_state_names[] = {
                [DCCP_OPEN]             = "OPEN",
                [DCCP_REQUESTING]       = "REQUESTING",
                [DCCP_PARTOPEN]         = "PARTOPEN",
                [DCCP_LISTEN]           = "LISTEN",
                [DCCP_RESPOND]          = "RESPOND",
                [DCCP_CLOSING]          = "CLOSING",
                [DCCP_ACTIVE_CLOSEREQ]  = "CLOSEREQ",
                [DCCP_PASSIVE_CLOSE]    = "PASSIVE_CLOSE",
                [DCCP_PASSIVE_CLOSEREQ] = "PASSIVE_CLOSEREQ",
                [DCCP_TIME_WAIT]        = "TIME_WAIT",
                [DCCP_CLOSED]           = "CLOSED",
        };

        if (state >= DCCP_MAX_STATES)
                return "INVALID STATE!";
        else
                return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);

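/*
 * Initialise the DCCP-specific parts of a newly created socket. Feature
 * negotiation state is only set up for regular sockets; the internal
 * control socket does not negotiate features.
 */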
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_rto         = DCCP_TIMEOUT_INIT;
        icsk->icsk_syn_retries = sysctl_dccp_request_retries;
        sk->sk_state           = DCCP_CLOSED;
        sk->sk_write_space     = dccp_write_space;
        icsk->icsk_sync_mss    = dccp_sync_mss;
        dp->dccps_mss_cache    = 536;
        dp->dccps_rate_last    = jiffies;
        dp->dccps_role         = DCCP_ROLE_UNDEFINED;
        dp->dccps_service      = DCCP_SERVICE_CODE_IS_ABSENT;

        dccp_init_xmit_timers(sk);

        INIT_LIST_HEAD(&dp->dccps_featneg);
        /* control socket doesn't need feat nego */
        if (likely(ctl_sock_initialized))
                return dccp_feat_init(sk);
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

void dccp_destroy_sock(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        /*
         * DCCP doesn't use sk_write_queue, just sk_send_head
         * for retransmissions
         */
        if (sk->sk_send_head != NULL) {
                kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        /* Clean up a referenced DCCP bind bucket. */
        if (inet_csk(sk)->icsk_bind_hash != NULL)
                inet_put_port(sk);

        kfree(dp->dccps_service_list);
        dp->dccps_service_list = NULL;

        if (dp->dccps_hc_rx_ackvec != NULL) {
                dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
                dp->dccps_hc_rx_ackvec = NULL;
        }
        ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
        ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
        dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

        /* clean up feature negotiation state */
        dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk, int backlog)
{
        struct dccp_sock *dp = dccp_sk(sk);

        dp->dccps_role = DCCP_ROLE_LISTEN;
        /* do not start to listen if feature negotiation setup fails */
        if (dccp_feat_finalise_settings(dp))
                return -EPROTO;
        return inet_csk_listen_start(sk, backlog);
}

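/*
 * Aborting a connection requires a Reset packet once the socket has left
 * the CLOSED, LISTEN, and REQUESTING states; cf. the RFC 793 ABORT
 * semantics referenced in dccp_disconnect() below.
 */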
static inline int dccp_need_reset(int state)
{
        return state != DCCP_CLOSED && state != DCCP_LISTEN &&
               state != DCCP_REQUESTING;
}

int dccp_disconnect(struct sock *sk, int flags)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        int err = 0;
        const int old_state = sk->sk_state;

        if (old_state != DCCP_CLOSED)
                dccp_set_state(sk, DCCP_CLOSED);

        /*
         * This corresponds to the ABORT function of RFC793, sec. 3.8
         * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
         */
        if (old_state == DCCP_LISTEN) {
                inet_csk_listen_stop(sk);
        } else if (dccp_need_reset(old_state)) {
                dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
                sk->sk_err = ECONNRESET;
        } else if (old_state == DCCP_REQUESTING)
                sk->sk_err = ECONNRESET;

        dccp_clear_xmit_timers(sk);

        __skb_queue_purge(&sk->sk_receive_queue);
        __skb_queue_purge(&sk->sk_write_queue);
        if (sk->sk_send_head != NULL) {
                __kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        inet->dport = 0;

        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);

        icsk->icsk_backoff = 0;
        inet_csk_delack_init(sk);
        __sk_dst_reset(sk);

        WARN_ON(inet->num && !icsk->icsk_bind_hash);

        sk->sk_error_report(sk);
        return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
                       poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;

        poll_wait(file, sk->sk_sleep, wait);
        if (sk->sk_state == DCCP_LISTEN)
                return inet_csk_listen_poll(sk);

        /* Socket is not locked. We are protected from async events
         * by poll logic and correct handling of state changes
         * made by other threads is impossible in any case.
         */

        mask = 0;
        if (sk->sk_err)
                mask = POLLERR;

        if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLIN | POLLRDNORM | POLLRDHUP;

        /* Connected? */
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
                if (atomic_read(&sk->sk_rmem_alloc) > 0)
                        mask |= POLLIN | POLLRDNORM;

                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else { /* send SIGIO later */
                                set_bit(SOCK_ASYNC_NOSPACE,
                                        &sk->sk_socket->flags);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                                /* Race breaker. If space is freed after
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost.
                                 */
                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
                }
        }
        return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

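/*
 * Only SIOCINQ is implemented here. Because DCCP preserves datagram
 * boundaries, the value reported is the length of the packet at the head
 * of the receive queue, not the sum of all queued payloads.
 */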
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        int rc = -ENOTCONN;

        lock_sock(sk);

        if (sk->sk_state == DCCP_LISTEN)
                goto out;

        switch (cmd) {
        case SIOCINQ: {
                struct sk_buff *skb;
                unsigned long amount = 0;

                skb = skb_peek(&sk->sk_receive_queue);
                if (skb != NULL) {
                        /*
                         * We will only return the amount of this packet since
                         * that is all that will be read.
                         */
                        amount = skb->len;
                }
                rc = put_user(amount, (int __user *)arg);
        }
                break;
        default:
                rc = -ENOIOCTLCMD;
                break;
        }
out:
        release_sock(sk);
        return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
                                   char __user *optval, int optlen)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct dccp_service_list *sl = NULL;

        if (service == DCCP_SERVICE_INVALID_VALUE ||
            optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
                return -EINVAL;

        if (optlen > sizeof(service)) {
                sl = kmalloc(optlen, GFP_KERNEL);
                if (sl == NULL)
                        return -ENOMEM;

                sl->dccpsl_nr = optlen / sizeof(u32) - 1;
                if (copy_from_user(sl->dccpsl_list,
                                   optval + sizeof(service),
                                   optlen - sizeof(service)) ||
                    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
                        kfree(sl);
                        return -EFAULT;
                }
        }

        lock_sock(sk);
        dp->dccps_service = service;

        kfree(dp->dccps_service_list);

        dp->dccps_service_list = sl;
        release_sock(sk);
        return 0;
}

static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
        u8 *list, len;
        int i, rc;

        if (cscov < 0 || cscov > 15)
                return -EINVAL;
        /*
         * Populate a list of permissible values, in the range cscov...15. This
         * is necessary since feature negotiation of single values only works if
         * both sides incidentally choose the same value. Since the list starts
         * lowest-value first, negotiation will pick the smallest shared value.
         */
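        /*
         * Example: cscov = 12 yields the list {12, 13, 14, 15}; if the peer
         * announces {10, ..., 15}, the smallest value present in both lists,
         * 12, becomes the negotiated checksum coverage.
         */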
        if (cscov == 0)
                return 0;
        len = 16 - cscov;

        list = kmalloc(len, GFP_KERNEL);
        if (list == NULL)
                return -ENOBUFS;

        for (i = 0; i < len; i++)
                list[i] = cscov + i;

        rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

        if (rc == 0) {
                if (rx)
                        dccp_sk(sk)->dccps_pcrlen = cscov;
                else
                        dccp_sk(sk)->dccps_pcslen = cscov;
        }
        kfree(list);
        return rc;
}

static int dccp_setsockopt_ccid(struct sock *sk, int type,
                                char __user *optval, int optlen)
{
        u8 *val;
        int rc = 0;

        if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
                return -EINVAL;

        val = kmalloc(optlen, GFP_KERNEL);
        if (val == NULL)
                return -ENOMEM;

        if (copy_from_user(val, optval, optlen)) {
                kfree(val);
                return -EFAULT;
        }

        lock_sock(sk);
        if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
                rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

        if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
                rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
        release_sock(sk);

        kfree(val);
        return rc;
}

static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
                              char __user *optval, int optlen)
{
        struct dccp_sock *dp = dccp_sk(sk);
        int val, err = 0;

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
                return 0;
        case DCCP_SOCKOPT_CHANGE_L:
        case DCCP_SOCKOPT_CHANGE_R:
                DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
                return 0;
        case DCCP_SOCKOPT_CCID:
        case DCCP_SOCKOPT_RX_CCID:
        case DCCP_SOCKOPT_TX_CCID:
                return dccp_setsockopt_ccid(sk, optname, optval, optlen);
        }

        if (optlen < (int)sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        if (optname == DCCP_SOCKOPT_SERVICE)
                return dccp_setsockopt_service(sk, val, optval, optlen);

        lock_sock(sk);
        switch (optname) {
        case DCCP_SOCKOPT_SERVER_TIMEWAIT:
                if (dp->dccps_role != DCCP_ROLE_SERVER)
                        err = -EOPNOTSUPP;
                else
                        dp->dccps_server_timewait = (val != 0);
                break;
        case DCCP_SOCKOPT_SEND_CSCOV:
                err = dccp_setsockopt_cscov(sk, val, false);
                break;
        case DCCP_SOCKOPT_RECV_CSCOV:
                err = dccp_setsockopt_cscov(sk, val, true);
                break;
        default:
                err = -ENOPROTOOPT;
                break;
        }
        release_sock(sk);

        return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int optlen)
{
        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);
        return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int optlen)
{
        if (level != SOL_DCCP)
                return inet_csk_compat_setsockopt(sk, level, optname,
                                                  optval, optlen);
        return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif

static int dccp_getsockopt_service(struct sock *sk, int len,
                                   __be32 __user *optval,
                                   int __user *optlen)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const struct dccp_service_list *sl;
        int err = -ENOENT, slen = 0, total_len = sizeof(u32);

        lock_sock(sk);
        if ((sl = dp->dccps_service_list) != NULL) {
                slen = sl->dccpsl_nr * sizeof(u32);
                total_len += slen;
        }

        err = -EINVAL;
        if (total_len > len)
                goto out;

        err = 0;
        if (put_user(total_len, optlen) ||
            put_user(dp->dccps_service, optval) ||
            (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
                err = -EFAULT;
out:
        release_sock(sk);
        return err;
}

static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
                              char __user *optval, int __user *optlen)
{
        struct dccp_sock *dp;
        int val, len;

        if (get_user(len, optlen))
                return -EFAULT;

        if (len < (int)sizeof(int))
                return -EINVAL;

        dp = dccp_sk(sk);

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
                return 0;
        case DCCP_SOCKOPT_SERVICE:
                return dccp_getsockopt_service(sk, len,
                                               (__be32 __user *)optval, optlen);
        case DCCP_SOCKOPT_GET_CUR_MPS:
                val = dp->dccps_mss_cache;
                break;
        case DCCP_SOCKOPT_AVAILABLE_CCIDS:
                return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
        case DCCP_SOCKOPT_TX_CCID:
                val = ccid_get_current_tx_ccid(dp);
                if (val < 0)
                        return -ENOPROTOOPT;
                break;
        case DCCP_SOCKOPT_RX_CCID:
                val = ccid_get_current_rx_ccid(dp);
                if (val < 0)
                        return -ENOPROTOOPT;
                break;
        case DCCP_SOCKOPT_SERVER_TIMEWAIT:
                val = dp->dccps_server_timewait;
                break;
        case DCCP_SOCKOPT_SEND_CSCOV:
                val = dp->dccps_pcslen;
                break;
        case DCCP_SOCKOPT_RECV_CSCOV:
                val = dp->dccps_pcrlen;
                break;
        case 128 ... 191:
                return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        case 192 ... 255:
                return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        default:
                return -ENOPROTOOPT;
        }

        len = sizeof(val);
        if (put_user(len, optlen) || copy_to_user(optval, &val, len))
                return -EFAULT;

        return 0;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen)
{
        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);
        return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen)
{
        if (level != SOL_DCCP)
                return inet_csk_compat_getsockopt(sk, level, optname,
                                                  optval, optlen);
        return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif

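/*
 * Unlike TCP, DCCP preserves message boundaries: each dccp_sendmsg() call
 * produces at most one packet, and a payload larger than the current
 * maximum packet size is rejected with -EMSGSIZE instead of being split.
 */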
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const int flags = msg->msg_flags;
        const int noblock = flags & MSG_DONTWAIT;
        struct sk_buff *skb;
        int rc, size;
        long timeo;

        if (len > dp->dccps_mss_cache)
                return -EMSGSIZE;

        lock_sock(sk);

        if (sysctl_dccp_tx_qlen &&
            (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
                rc = -EAGAIN;
                goto out_release;
        }

        timeo = sock_sndtimeo(sk, noblock);

        /*
         * We have to use sk_stream_wait_connect here to set sk_write_pending,
         * so that the trick in dccp_rcv_request_sent_state_process() works.
         */
        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
                if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_release;

        size = sk->sk_prot->max_header + len;
        release_sock(sk);
        skb = sock_alloc_send_skb(sk, size, noblock, &rc);
        lock_sock(sk);
        if (skb == NULL)
                goto out_release;

        skb_reserve(skb, sk->sk_prot->max_header);
        rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
        if (rc != 0)
                goto out_discard;

        skb_queue_tail(&sk->sk_write_queue, skb);
        dccp_write_xmit(sk, 0);
out_release:
        release_sock(sk);
        return rc ? : len;
out_discard:
        kfree_skb(skb);
        goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len, int nonblock, int flags, int *addr_len)
{
        const struct dccp_hdr *dh;
        long timeo;

        lock_sock(sk);

        if (sk->sk_state == DCCP_LISTEN) {
                len = -ENOTCONN;
                goto out;
        }

        timeo = sock_rcvtimeo(sk, nonblock);

        do {
                struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

                if (skb == NULL)
                        goto verify_sock_status;

                dh = dccp_hdr(skb);

                switch (dh->dccph_type) {
                case DCCP_PKT_DATA:
                case DCCP_PKT_DATAACK:
                        goto found_ok_skb;

                case DCCP_PKT_CLOSE:
                case DCCP_PKT_CLOSEREQ:
                        if (!(flags & MSG_PEEK))
                                dccp_finish_passive_close(sk);
                        /* fall through */
                case DCCP_PKT_RESET:
                        dccp_pr_debug("found fin (%s) ok!\n",
                                      dccp_packet_name(dh->dccph_type));
                        len = 0;
                        goto found_fin_ok;
                default:
                        dccp_pr_debug("packet_type=%s\n",
                                      dccp_packet_name(dh->dccph_type));
                        sk_eat_skb(sk, skb, 0);
                }
verify_sock_status:
                if (sock_flag(sk, SOCK_DONE)) {
                        len = 0;
                        break;
                }

                if (sk->sk_err) {
                        len = sock_error(sk);
                        break;
                }

                if (sk->sk_shutdown & RCV_SHUTDOWN) {
                        len = 0;
                        break;
                }

                if (sk->sk_state == DCCP_CLOSED) {
                        if (!sock_flag(sk, SOCK_DONE)) {
                                /* This occurs when user tries to read
                                 * from never connected socket.
                                 */
                                len = -ENOTCONN;
                                break;
                        }
                        len = 0;
                        break;
                }

                if (!timeo) {
                        len = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        len = sock_intr_errno(timeo);
                        break;
                }

                sk_wait_data(sk, &timeo);
                continue;
        found_ok_skb:
                if (len > skb->len)
                        len = skb->len;
                else if (len < skb->len)
                        msg->msg_flags |= MSG_TRUNC;

                if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
                        /* Exception. Bailout! */
                        len = -EFAULT;
                        break;
                }
        found_fin_ok:
                if (!(flags & MSG_PEEK))
                        sk_eat_skb(sk, skb, 0);
                break;
        } while (1);
out:
        release_sock(sk);
        return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

int inet_dccp_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        unsigned char old_state;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
                goto out;

        old_state = sk->sk_state;
        if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
                goto out;

        /* Really, if the socket is already in listen state
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != DCCP_LISTEN) {
                /*
                 * FIXME: here it probably should be sk->sk_prot->listen_start
                 * see tcp_listen_start
                 */
                err = dccp_listen_start(sk, backlog);
                if (err)
                        goto out;
        }
        sk->sk_max_ack_backlog = backlog;
        err = 0;

out:
        release_sock(sk);
        return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

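/*
 * Active part of connection teardown: finish any passive close already in
 * progress, otherwise send a Close. A server that is not configured to
 * hold TIMEWAIT state itself (dccps_server_timewait unset) instead enters
 * ACTIVE_CLOSEREQ, asking the client to carry the TIMEWAIT (RFC 4340, 8.3).
 */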
static void dccp_terminate_connection(struct sock *sk)
{
        u8 next_state = DCCP_CLOSED;

        switch (sk->sk_state) {
        case DCCP_PASSIVE_CLOSE:
        case DCCP_PASSIVE_CLOSEREQ:
                dccp_finish_passive_close(sk);
                break;
        case DCCP_PARTOPEN:
                dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
                /* fall through */
        case DCCP_OPEN:
                dccp_send_close(sk, 1);

                if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
                    !dccp_sk(sk)->dccps_server_timewait)
                        next_state = DCCP_ACTIVE_CLOSEREQ;
                else
                        next_state = DCCP_CLOSING;
                /* fall through */
        default:
                dccp_set_state(sk, next_state);
        }
}

void dccp_close(struct sock *sk, long timeout)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        u32 data_was_unread = 0;
        int state;

        lock_sock(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (sk->sk_state == DCCP_LISTEN) {
                dccp_set_state(sk, DCCP_CLOSED);

                /* Special case. */
                inet_csk_listen_stop(sk);

                goto adjudge_to_death;
        }

        sk_stop_timer(sk, &dp->dccps_xmit_timer);

        /*
         * We need to flush the recv. buffs. We do this only on the
         * descriptor close, not protocol-sourced closes, because the
         * reader process may not have drained the data yet!
         */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                data_was_unread += skb->len;
                __kfree_skb(skb);
        }

        if (data_was_unread) {
                /* Unread data was tossed, send an appropriate Reset Code */
                DCCP_WARN("DCCP: ABORT -- %u bytes unread\n", data_was_unread);
                dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
                dccp_set_state(sk, DCCP_CLOSED);
        } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
        } else if (sk->sk_state != DCCP_CLOSED) {
                dccp_terminate_connection(sk);
        }

        sk_stream_wait_close(sk, timeout);

adjudge_to_death:
        state = sk->sk_state;
        sock_hold(sk);
        sock_orphan(sk);
        atomic_inc(sk->sk_prot->orphan_count);

        /*
         * It is the last release_sock in its life. It will remove backlog.
         */
        release_sock(sk);
        /*
         * Now socket is owned by kernel and we acquire BH lock
         * to finish close. No need to check for user refs.
         */
        local_bh_disable();
        bh_lock_sock(sk);
        WARN_ON(sock_owned_by_user(sk));

        /* Have we already been destroyed by a softirq or backlog? */
        if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
                goto out;

        if (sk->sk_state == DCCP_CLOSED)
                inet_csk_destroy_sock(sk);

        /* Otherwise, socket is reprieved until protocol close. */

out:
        bh_unlock_sock(sk);
        local_bh_enable();
        sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

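/* shutdown() is not implemented for DCCP: the request is only logged. */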
void dccp_shutdown(struct sock *sk, int how)
{
        dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int dccp_mib_init(void)
{
        return snmp_mib_init((void **)dccp_statistics, sizeof(struct dccp_mib));
}

static inline void dccp_mib_exit(void)
{
        snmp_mib_free((void **)dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

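/*
 * Module initialisation: size the established (ehash) and bind (bhash)
 * lookup tables from available memory (or from the thash_entries module
 * parameter), then bring up the MIB, ack-vector, and sysctl subsystems.
 */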
static int __init dccp_init(void)
{
        unsigned long goal;
        int ehash_order, bhash_order, i;
        int rc = -ENOBUFS;

        BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
                     FIELD_SIZEOF(struct sk_buff, cb));

        dccp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("dccp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
                                  SLAB_HWCACHE_ALIGN, NULL);
        if (!dccp_hashinfo.bind_bucket_cachep)
                goto out;

        /*
         * Size and allocate the main established and bind bucket
         * hash tables.
         *
         * The methodology is similar to that of the buffer cache.
         */
        if (num_physpages >= (128 * 1024))
                goal = num_physpages >> (21 - PAGE_SHIFT);
        else
                goal = num_physpages >> (23 - PAGE_SHIFT);

        if (thash_entries)
                goal = (thash_entries *
                        sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
        for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
                ;
        do {
                dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
                                        sizeof(struct inet_ehash_bucket);
                while (dccp_hashinfo.ehash_size &
                       (dccp_hashinfo.ehash_size - 1))
                        dccp_hashinfo.ehash_size--;
                dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
                        __get_free_pages(GFP_ATOMIC, ehash_order);
        } while (!dccp_hashinfo.ehash && --ehash_order > 0);

        if (!dccp_hashinfo.ehash) {
                DCCP_CRIT("Failed to allocate DCCP established hash table");
                goto out_free_bind_bucket_cachep;
        }

        for (i = 0; i < dccp_hashinfo.ehash_size; i++) {
                INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
                INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].twchain);
        }

        if (inet_ehash_locks_alloc(&dccp_hashinfo))
                goto out_free_dccp_ehash;

        bhash_order = ehash_order;

        do {
                dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
                                        sizeof(struct inet_bind_hashbucket);
                if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
                    bhash_order > 0)
                        continue;
                dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
                        __get_free_pages(GFP_ATOMIC, bhash_order);
        } while (!dccp_hashinfo.bhash && --bhash_order >= 0);

        if (!dccp_hashinfo.bhash) {
                DCCP_CRIT("Failed to allocate DCCP bind hash table");
                goto out_free_dccp_locks;
        }

        for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
                spin_lock_init(&dccp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
        }

        rc = dccp_mib_init();
        if (rc)
                goto out_free_dccp_bhash;

        rc = dccp_ackvec_init();
        if (rc)
                goto out_free_dccp_mib;

        rc = dccp_sysctl_init();
        if (rc)
                goto out_ackvec_exit;

        dccp_timestamping_init();
out:
        return rc;
out_ackvec_exit:
        dccp_ackvec_exit();
out_free_dccp_mib:
        dccp_mib_exit();
out_free_dccp_bhash:
        free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
        dccp_hashinfo.bhash = NULL;
out_free_dccp_locks:
        inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
        free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
        dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_hashinfo.bind_bucket_cachep = NULL;
        goto out;
}

static void __exit dccp_fini(void)
{
        dccp_mib_exit();
        free_pages((unsigned long)dccp_hashinfo.bhash,
                   get_order(dccp_hashinfo.bhash_size *
                             sizeof(struct inet_bind_hashbucket)));
        free_pages((unsigned long)dccp_hashinfo.ehash,
                   get_order(dccp_hashinfo.ehash_size *
                             sizeof(struct inet_ehash_bucket)));
        inet_ehash_locks_free(&dccp_hashinfo);
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_ackvec_exit();
        dccp_sysctl_exit();
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");