/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/timer.h>

#include <net/sock.h>
#include <net/xfrm.h>
#include <net/inet_timewait_sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
#include "feat.h"
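
/*
 * dccp_death_row controls reaping of DCCP TIME_WAIT minisockets: it caps
 * the number of timewait buckets (sysctl_max_tw_buckets) and drives the
 * hangman and short-interval calendar timers that eventually free them.
 */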
struct inet_timewait_death_row dccp_death_row = {
        .sysctl_max_tw_buckets = NR_FILE * 2,
        .period         = DCCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
        .death_lock     = __SPIN_LOCK_UNLOCKED(dccp_death_row.death_lock),
        .hashinfo       = &dccp_hashinfo,
        .tw_timer       = TIMER_INITIALIZER(inet_twdr_hangman, 0,
                                            (unsigned long)&dccp_death_row),
        .twkill_work    = __WORK_INITIALIZER(dccp_death_row.twkill_work,
                                             inet_twdr_twkill_work),
/* Short-time timewait calendar */

        .twcal_hand     = -1,
        .twcal_timer    = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
                                            (unsigned long)&dccp_death_row),
};

EXPORT_SYMBOL_GPL(dccp_death_row);
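
/*
 * dccp_time_wait - move a closing socket into TIME_WAIT
 *
 * Replaces the full socket with a compact inet_timewait_sock, hashes the
 * timewait bucket in place of @sk and schedules it on dccp_death_row; if
 * no bucket can be allocated, the socket is closed non-gracefully.
 */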
void dccp_time_wait(struct sock *sk, int state, int timeo)
{
        struct inet_timewait_sock *tw = NULL;

        if (dccp_death_row.tw_count < dccp_death_row.sysctl_max_tw_buckets)
                tw = inet_twsk_alloc(sk, state);

        if (tw != NULL) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                if (tw->tw_family == PF_INET6) {
                        const struct ipv6_pinfo *np = inet6_sk(sk);
                        struct inet6_timewait_sock *tw6;

                        tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
                        tw6 = inet6_twsk((struct sock *)tw);
                        ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
                        ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
                        tw->tw_ipv6only = np->ipv6only;
                }
#endif
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                tw->tw_timeout = DCCP_TIMEWAIT_LEN;
                if (state == DCCP_TIME_WAIT)
                        timeo = DCCP_TIMEWAIT_LEN;

                inet_twsk_schedule(tw, &dccp_death_row, timeo,
                                   DCCP_TIMEWAIT_LEN);
                inet_twsk_put(tw);
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
                DCCP_WARN("time wait bucket table overflow\n");
        }

        dccp_done(sk);
}
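
/*
 * dccp_create_openreq_child - clone a listening socket for a new connection
 *
 * Called while the server processes the LISTEN/RESPOND handshake: the child
 * inherits the listener's settings via inet_csk_clone(), then gets its own
 * feature state, ack vector and CCID instances, and its sequence-number
 * state is seeded from the request_sock built for the incoming REQUEST.
 */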
struct sock *dccp_create_openreq_child(struct sock *sk,
                                       const struct request_sock *req,
                                       const struct sk_buff *skb)
{
        /*
         * Step 3: Process LISTEN state
         *
         *   (* Generate a new socket and switch to that socket *)
         *   Set S := new socket for this port pair
         */
        struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

        if (newsk != NULL) {
                const struct dccp_request_sock *dreq = dccp_rsk(req);
                struct inet_connection_sock *newicsk = inet_csk(newsk);
                struct dccp_sock *newdp = dccp_sk(newsk);
                struct dccp_minisock *newdmsk = dccp_msk(newsk);

                newdp->dccps_role         = DCCP_ROLE_SERVER;
                newdp->dccps_hc_rx_ackvec = NULL;
                newdp->dccps_service_list = NULL;
                newdp->dccps_service      = dreq->dreq_service;
                newicsk->icsk_rto         = DCCP_TIMEOUT_INIT;

                if (dccp_feat_clone(sk, newsk))
                        goto out_free;

                if (newdmsk->dccpms_send_ack_vector) {
                        newdp->dccps_hc_rx_ackvec =
                                                dccp_ackvec_alloc(GFP_ATOMIC);
                        if (unlikely(newdp->dccps_hc_rx_ackvec == NULL))
                                goto out_free;
                }

                newdp->dccps_hc_rx_ccid =
                            ccid_hc_rx_new(newdmsk->dccpms_rx_ccid,
                                           newsk, GFP_ATOMIC);
                newdp->dccps_hc_tx_ccid =
                            ccid_hc_tx_new(newdmsk->dccpms_tx_ccid,
                                           newsk, GFP_ATOMIC);
                if (unlikely(newdp->dccps_hc_rx_ccid == NULL ||
                             newdp->dccps_hc_tx_ccid == NULL)) {
                        dccp_ackvec_free(newdp->dccps_hc_rx_ackvec);
                        ccid_hc_rx_delete(newdp->dccps_hc_rx_ccid, newsk);
                        ccid_hc_tx_delete(newdp->dccps_hc_tx_ccid, newsk);
out_free:
                        /* It is still raw copy of parent, so invalidate
                         * destructor and make plain sk_free() */
                        newsk->sk_destruct = NULL;
                        sk_free(newsk);
                        return NULL;
                }

                /*
                 * Step 3: Process LISTEN state
                 *
                 *    Choose S.ISS (initial seqno) or set from Init Cookies
                 *    Initialize S.GAR := S.ISS
                 *    Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
                 */

                /* See dccp_v4_conn_request */
                newdmsk->dccpms_sequence_window = req->rcv_wnd;

                newdp->dccps_gar = newdp->dccps_isr = dreq->dreq_isr;
                dccp_update_gsr(newsk, dreq->dreq_isr);

                newdp->dccps_iss = dreq->dreq_iss;
                dccp_update_gss(newsk, dreq->dreq_iss);

                /*
                 * SWL and AWL are initially adjusted so that they are not
                 * less than the initial Sequence Numbers received and sent,
                 * respectively:
                 *      SWL := max(GSR + 1 - floor(W/4), ISR),
                 *      AWL := max(GSS - W' + 1, ISS).
                 * These adjustments MUST be applied only at the beginning of
                 * the connection.
                 */
                dccp_set_seqno(&newdp->dccps_swl,
                               max48(newdp->dccps_swl, newdp->dccps_isr));
                dccp_set_seqno(&newdp->dccps_awl,
                               max48(newdp->dccps_awl, newdp->dccps_iss));
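
                /*
                 * Illustrative example (not part of the original source):
                 * with the default Sequence Window W = 100 and GSR still
                 * equal to ISR, GSR + 1 - floor(W/4) = ISR - 24, so max48()
                 * keeps SWL at ISR; the clamp matters only until GSR has
                 * advanced past the initial sequence number.
                 */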

                dccp_init_xmit_timers(newsk);

                DCCP_INC_STATS_BH(DCCP_MIB_PASSIVEOPENS);
        }
        return newsk;
}

EXPORT_SYMBOL_GPL(dccp_create_openreq_child);

/*
 * Process an incoming packet for RESPOND sockets represented
 * as a request_sock.
 */
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
                            struct request_sock *req,
                            struct request_sock **prev)
{
        struct sock *child = NULL;

        /* Check for retransmitted REQUEST */
        if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
                struct dccp_request_sock *dreq = dccp_rsk(req);

                if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_isr)) {
                        dccp_pr_debug("Retransmitted REQUEST\n");
                        dreq->dreq_isr = DCCP_SKB_CB(skb)->dccpd_seq;
                        /*
                         * Send another RESPONSE packet
                         * To protect against Request floods, increment retrans
                         * counter (backoff, monitored by dccp_response_timer).
                         */
                        req->retrans++;
                        req->rsk_ops->rtx_syn_ack(sk, req, NULL);
                }
                /* Network Duplicate, discard packet */
                return NULL;
        }

        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;

        if (dccp_hdr(skb)->dccph_type != DCCP_PKT_ACK &&
            dccp_hdr(skb)->dccph_type != DCCP_PKT_DATAACK)
                goto drop;

        /* Invalid ACK */
        if (DCCP_SKB_CB(skb)->dccpd_ack_seq != dccp_rsk(req)->dreq_iss) {
                dccp_pr_debug("Invalid ACK number: ack_seq=%llu, "
                              "dreq_iss=%llu\n",
                              (unsigned long long)
                              DCCP_SKB_CB(skb)->dccpd_ack_seq,
                              (unsigned long long)
                              dccp_rsk(req)->dreq_iss);
                goto drop;
        }

        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
        if (child == NULL)
                goto listen_overflow;

        /* FIXME: deal with options */

        inet_csk_reqsk_queue_unlink(sk, req, prev);
        inet_csk_reqsk_queue_removed(sk, req);
        inet_csk_reqsk_queue_add(sk, req, child);
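
        /*
         * At this point the request has been unlinked from the listener's
         * pending queue and the fully initialised child has been added to
         * its accept queue, where it waits for the application's accept().
         */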
out:
        return child;
listen_overflow:
        dccp_pr_debug("listen_overflow!\n");
        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
        if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
                req->rsk_ops->send_reset(sk, skb);

        inet_csk_reqsk_queue_drop(sk, req, prev);
        goto out;
}

EXPORT_SYMBOL_GPL(dccp_check_req);

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */
int dccp_child_process(struct sock *parent, struct sock *child,
                       struct sk_buff *skb)
{
        int ret = 0;
        const int state = child->sk_state;

        if (!sock_owned_by_user(child)) {
                ret = dccp_rcv_state_process(child, skb, dccp_hdr(skb),
                                             skb->len);

                /* Wakeup parent, send SIGIO */
                if (state == DCCP_RESPOND && child->sk_state != state)
                        parent->sk_data_ready(parent, 0);
        } else {
                /* Alas, it is possible again, because we do lookup
                 * in main socket hash table and lock on listening
                 * socket does not protect us more.
                 */
                sk_add_backlog(child, skb);
        }

        bh_unlock_sock(child);
        sock_put(child);
        return ret;
}

EXPORT_SYMBOL_GPL(dccp_child_process);

void dccp_reqsk_send_ack(struct sk_buff *skb, struct request_sock *rsk)
{
        DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
}

EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack);

void dccp_reqsk_init(struct request_sock *req, struct sk_buff *skb)
{
        inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport;
        inet_rsk(req)->acked    = 0;
        req->rcv_wnd            = sysctl_dccp_feat_sequence_window;
}

EXPORT_SYMBOL_GPL(dccp_reqsk_init);