/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ccid.h"
#include "dccp.h"

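/*
 * Queue the packet as the DCCP equivalent of a TCP FIN: mark the socket as
 * shut down for reading, hand the skb (with its headers pulled) to the
 * receive queue and wake up the reader so that recvmsg() can return EOF.
 */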
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_shutdown |= RCV_SHUTDOWN;
	sock_set_flag(sk, SOCK_DONE);
	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);
	sk->sk_data_ready(sk, 0);
}

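/*
 * Receive a Close packet: answer it with a Reset (code "Closed"), queue the
 * FIN equivalent and move the socket to the CLOSED state.
 */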
static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
	dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
	dccp_fin(sk, skb);
	dccp_set_state(sk, DCCP_CLOSED);
	sk_wake_async(sk, 1, POLL_HUP);
}

static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
	/*
	 *   Step 7: Check for unexpected packet types
	 *      If (S.is_server and P.type == CloseReq)
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
		return;
	}

	if (sk->sk_state != DCCP_CLOSING)
		dccp_set_state(sk, DCCP_CLOSING);
	dccp_send_close(sk, 0);
}

static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (dp->dccps_options.dccpo_send_ack_vector)
		dccp_ackpkts_check_rcv_ackno(dp->dccps_hc_rx_ackpkts, sk,
					     DCCP_SKB_CB(skb)->dccpd_ack_seq);
}

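/*
 * Validate the sequence and acknowledgement numbers of an incoming packet
 * (Steps 5 and 6 of the packet processing pseudocode).  Returns 0 if the
 * packet may be processed further, nonzero if it must be dropped.
 */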
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	struct dccp_sock *dp = dccp_sk(sk);
	u64 lswl, lawl;

	/*
	 *   Step 5: Prepare sequence numbers for Sync
	 *     If P.type == Sync or P.type == SyncAck,
	 *	  If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
	 *	     / * P is valid, so update sequence number variables
	 *		 accordingly.  After this update, P will pass the tests
	 *		 in Step 6.  A SyncAck is generated if necessary in
	 *		 Step 15 * /
	 *	     Update S.GSR, S.SWL, S.SWH
	 *	  Otherwise,
	 *	     Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_SYNC ||
	    dh->dccph_type == DCCP_PKT_SYNCACK) {
		if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			      dp->dccps_awl, dp->dccps_awh) &&
		    !before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_swl))
			dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
		else
			return -1;
	}

	/*
	 *   Step 6: Check sequence numbers
	 *      Let LSWL = S.SWL and LAWL = S.AWL
	 *      If P.type == CloseReq or P.type == Close or P.type == Reset,
	 *	  LSWL := S.GSR + 1, LAWL := S.GAR
	 *      If LSWL <= P.seqno <= S.SWH
	 *	     and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
	 *	  Update S.GSR, S.SWL, S.SWH
	 *	  If P.type != Sync,
	 *	     Update S.GAR
	 *      Otherwise,
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	lswl = dp->dccps_swl;
	lawl = dp->dccps_awl;

	if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
	    dh->dccph_type == DCCP_PKT_CLOSE ||
	    dh->dccph_type == DCCP_PKT_RESET) {
		lswl = dp->dccps_gsr;
		dccp_inc_seqno(&lswl);
		lawl = dp->dccps_gar;
	}

	if (between48(DCCP_SKB_CB(skb)->dccpd_seq, lswl, dp->dccps_swh) &&
	    (DCCP_SKB_CB(skb)->dccpd_ack_seq == DCCP_PKT_WITHOUT_ACK_SEQ ||
	     between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
		       lawl, dp->dccps_awh))) {
		dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);

		if (dh->dccph_type != DCCP_PKT_SYNC &&
		    (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
		     DCCP_PKT_WITHOUT_ACK_SEQ))
			dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq;
	} else {
		LIMIT_NETDEBUG(KERN_WARNING "DCCP: Step 6 failed for %s packet, "
			       "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
			       "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
			       "sending SYNC...\n",
			       dccp_packet_name(dh->dccph_type),
			       (unsigned long long) lswl,
			       (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq,
			       (unsigned long long) dp->dccps_swh,
			       (DCCP_SKB_CB(skb)->dccpd_ack_seq ==
				DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists",
			       (unsigned long long) lawl,
			       (unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq,
			       (unsigned long long) dp->dccps_awh);
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
		return -1;
	}

	return 0;
}

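/*
 * Process a packet received while the connection is established: validate
 * its sequence numbers, parse options, feed the CCIDs and then act on the
 * packet type (queue data, handle Close/CloseReq/Reset, answer Syncs).
 */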
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct dccp_hdr *dh, const unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (dccp_check_seqno(sk, skb))
		goto discard;

	if (dccp_parse_options(sk, skb))
		goto discard;

	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_event_ack_recv(sk, skb);

	/*
	 * FIXME: check ECN to see if we should use
	 * DCCP_ACKPKTS_STATE_ECN_MARKED
	 */
	if (dp->dccps_options.dccpo_send_ack_vector) {
		struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;

		if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts, sk,
				     DCCP_SKB_CB(skb)->dccpd_seq,
				     DCCP_ACKPKTS_STATE_RECEIVED)) {
			LIMIT_NETDEBUG(KERN_WARNING "DCCP: acknowledgeable "
						    "packets buffer full!\n");
			ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1;
			inet_csk_schedule_ack(sk);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MIN,
						  DCCP_RTO_MAX);
			goto discard;
		}

		/*
		 * FIXME: this activation is probably wrong, have to study more
		 * TCP delack machinery and how it fits into DCCP draft, but
		 * for now it kinda "works" 8)
		 */
		if (!inet_csk_ack_scheduled(sk)) {
			inet_csk_schedule_ack(sk);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5 * HZ,
						  DCCP_RTO_MAX);
		}
	}

	ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
	ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);

	switch (dccp_hdr(skb)->dccph_type) {
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_DATA:
		/*
		 * FIXME: check if sk_receive_queue is full, schedule DATA_DROPPED
		 * option if it is.
		 */
		__skb_pull(skb, dh->dccph_doff * 4);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		skb_set_owner_r(skb, sk);
		sk->sk_data_ready(sk, 0);
		return 0;
	case DCCP_PKT_ACK:
		goto discard;
	case DCCP_PKT_RESET:
		/*
		 *  Step 9: Process Reset
		 *	If P.type == Reset,
		 *		Tear down connection
		 *		S.state := TIMEWAIT
		 *		Set TIMEWAIT timer
		 *		Drop packet and return
		 */
		dccp_fin(sk, skb);
		dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
		return 0;
	case DCCP_PKT_CLOSEREQ:
		dccp_rcv_closereq(sk, skb);
		goto discard;
	case DCCP_PKT_CLOSE:
		dccp_rcv_close(sk, skb);
		return 0;
	case DCCP_PKT_REQUEST:
		/* Step 7
		 *   or (S.is_server and P.type == Response)
		 *   or (S.is_client and P.type == Request)
		 *   or (S.state >= OPEN and P.type == Request
		 *	and P.seqno >= S.OSR)
		 *   or (S.state >= OPEN and P.type == Response
		 *	and P.seqno >= S.OSR)
		 *   or (S.state == RESPOND and P.type == Data),
		 *  Send Sync packet acknowledging P.seqno
		 *  Drop packet and return
		 */
		if (dp->dccps_role != DCCP_ROLE_LISTEN)
			goto send_sync;
		goto check_seq;
	case DCCP_PKT_RESPONSE:
		if (dp->dccps_role != DCCP_ROLE_CLIENT)
			goto send_sync;
check_seq:
		if (!before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_osr)) {
send_sync:
			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
				       DCCP_PKT_SYNC);
		}
		break;
	case DCCP_PKT_SYNC:
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
			       DCCP_PKT_SYNCACK);
		/*
		 * From the draft:
		 *
		 * As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
		 * MAY have non-zero-length application data areas, whose
		 * contents receivers MUST ignore.
		 */
		break;
	}

	DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
discard:
	__kfree_skb(skb);
	return 0;
}

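/*
 * Handle a packet received while the socket is in the REQUEST state, i.e. a
 * client waiting for the server's Response (Steps 4 and 10 of the
 * pseudocode).  Returning 1 tells the caller to answer with a Reset.
 */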
static int dccp_rcv_request_sent_state_process(struct sock *sk,
					       struct sk_buff *skb,
					       const struct dccp_hdr *dh,
					       const unsigned len)
{
	/*
	 *  Step 4: Prepare sequence numbers in REQUEST
	 *     If S.state == REQUEST,
	 *	  If (P.type == Response or P.type == Reset)
	 *		and S.AWL <= P.ackno <= S.AWH,
	 *	     / * Set sequence number variables corresponding to the
	 *		other endpoint, so P will pass the tests in Step 6 * /
	 *	     Set S.GSR, S.ISR, S.SWL, S.SWH
	 *	     / * Response processing continues in Step 10; Reset
	 *		processing continues in Step 9 * /
	 */
	if (dh->dccph_type == DCCP_PKT_RESPONSE) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);

		/* Stop the REQUEST timer */
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
		BUG_TRAP(sk->sk_send_head != NULL);
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;

		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			       dp->dccps_awl, dp->dccps_awh)) {
			dccp_pr_debug("invalid ackno: S.AWL=%llu, "
				      "P.ackno=%llu, S.AWH=%llu\n",
				      (unsigned long long)dp->dccps_awl,
				      (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				      (unsigned long long)dp->dccps_awh);
			goto out_invalid_packet;
		}

		dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_update_gsr(sk, dp->dccps_isr);
		/*
		 * SWL and AWL are initially adjusted so that they are not less than
		 * the initial Sequence Numbers received and sent, respectively:
		 *	SWL := max(GSR + 1 - floor(W/4), ISR),
		 *	AWL := max(GSS - W' + 1, ISS).
		 * These adjustments MUST be applied only at the beginning of the
		 * connection.
		 *
		 * AWL was adjusted in dccp_v4_connect -acme
		 */
		dccp_set_seqno(&dp->dccps_swl,
			       max48(dp->dccps_swl, dp->dccps_isr));

		if (ccid_hc_rx_init(dp->dccps_hc_rx_ccid, sk) != 0 ||
		    ccid_hc_tx_init(dp->dccps_hc_tx_ccid, sk) != 0) {
			ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
			ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
			/* FIXME: send appropriate RESET code */
			goto out_invalid_packet;
		}

		dccp_sync_mss(sk, dp->dccps_pmtu_cookie);

		/*
		 *    Step 10: Process REQUEST state (second part)
		 *       If S.state == REQUEST,
		 *	  / * If we get here, P is a valid Response from the
		 *	      server (see Step 4), and we should move to
		 *	      PARTOPEN state.  PARTOPEN means send an Ack,
		 *	      don't send Data packets, retransmit Acks
		 *	      periodically, and always include any Init Cookie
		 *	      from the Response * /
		 *	  S.state := PARTOPEN
		 *	  Set PARTOPEN timer
		 *	  Continue with S.state == PARTOPEN
		 *	  / * Step 12 will send the Ack completing the
		 *	      three-way handshake * /
		 */
		dccp_set_state(sk, DCCP_PARTOPEN);

		/* Make sure socket is routed, for correct metrics. */
		inet_sk_rebuild_header(sk);

		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);
			sk_wake_async(sk, 0, POLL_OUT);
		}

		if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
		    icsk->icsk_accept_queue.rskq_defer_accept) {
			/* Save one ACK. Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever, that I was not able
			 * to stand against the temptation 8)     --ANK
			 */
			/*
			 * OK, in DCCP we can as well do a similar trick, it's
			 * even in the draft, but there is no need for us to
			 * schedule an ack here, as dccp_sendmsg does this for
			 * us, also stated in the draft. -acme
			 */
			__kfree_skb(skb);
			return 0;
		}

		dccp_send_ack(sk);
		return -1;
	}

out_invalid_packet:
	return 1; /* dccp_v4_do_rcv will send a reset, but...
		     FIXME: the reset code should be
		     DCCP_RESET_CODE_PACKET_ERROR */
}

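/*
 * Handle packets received in the RESPOND and PARTOPEN states.  An Ack or
 * DataAck completes the handshake and moves the socket to OPEN; the return
 * value tells the caller whether the skb was queued (by dccp_rcv_established)
 * and thus must not be freed.
 */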
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
						   struct sk_buff *skb,
						   const struct dccp_hdr *dh,
						   const unsigned len)
{
	int queued = 0;

	switch (dh->dccph_type) {
	case DCCP_PKT_RESET:
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		break;
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_ACK:
		/*
		 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
		 * here but only if we haven't used the DELACK timer for
		 * something else, like sending a delayed ack for a TIMESTAMP
		 * echo, etc, for now we're not clearing it, sending an extra
		 * ACK when there is nothing else to do in DELACK is not a big
		 * deal after all.
		 */

		/* Stop the PARTOPEN timer */
		if (sk->sk_state == DCCP_PARTOPEN)
			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

		dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_set_state(sk, DCCP_OPEN);

		if (dh->dccph_type == DCCP_PKT_DATAACK) {
			dccp_rcv_established(sk, skb, dh, len);
			queued = 1; /* packet was queued
				       (by dccp_rcv_established) */
		}
		break;
	}

	return queued;
}

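/*
 * Generic state machine for packets that arrive on a socket which is not in
 * the OPEN state (LISTEN, REQUESTING, RESPOND, PARTOPEN, ...): it covers the
 * remaining steps of the per-packet processing pseudocode for those states.
 */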
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			   struct dccp_hdr *dh, unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	const int old_state = sk->sk_state;
	int queued = 0;

	/*
	 *  Step 3: Process LISTEN state
	 *     (Continuing from dccp_v4_do_rcv and dccp_v6_do_rcv)
	 *
	 *     If S.state == LISTEN,
	 *	  If P.type == Request or P contains a valid Init Cookie
	 *		option,
	 *	     * Must scan the packet's options to check for an Init
	 *		Cookie.  Only the Init Cookie is processed here,
	 *		however; other options are processed in Step 8.  This
	 *		scan need only be performed if the endpoint uses Init
	 *		Cookies *
	 *	     * Generate a new socket and switch to that socket *
	 *	     Set S := new socket for this port pair
	 *	     S.state = RESPOND
	 *	     Choose S.ISS (initial seqno) or set from Init Cookie
	 *	     Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *	     Continue with S.state == RESPOND
	 *	     * A Response packet will be generated in Step 11 *
	 *	  Otherwise,
	 *	     Generate Reset(No Connection) unless P.type == Reset
	 *	     Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *	 dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		if (dh->dccph_type == DCCP_PKT_REQUEST) {
			if (dccp_v4_conn_request(sk, skb) < 0)
				return 1;

			/* FIXME: do congestion control initialization */
			goto discard;
		}
		if (dh->dccph_type == DCCP_PKT_RESET)
			goto discard;

		/* Caller (dccp_v4_do_rcv) will send Reset(No Connection) */
		return 1;
	}

	if (sk->sk_state != DCCP_REQUESTING) {
		if (dccp_check_seqno(sk, skb))
			goto discard;

		/*
		 * Step 8: Process options and mark acknowledgeable
		 */
		if (dccp_parse_options(sk, skb))
			goto discard;

		if (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
		    DCCP_PKT_WITHOUT_ACK_SEQ)
			dccp_event_ack_recv(sk, skb);

		ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
		ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);

		/*
		 * FIXME: check ECN to see if we should use
		 * DCCP_ACKPKTS_STATE_ECN_MARKED
		 */
		if (dp->dccps_options.dccpo_send_ack_vector) {
			if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts, sk,
					     DCCP_SKB_CB(skb)->dccpd_seq,
					     DCCP_ACKPKTS_STATE_RECEIVED))
				goto discard;
			/*
			 * FIXME: this activation is probably wrong, have to
			 * study more TCP delack machinery and how it fits into
			 * DCCP draft, but for now it kinda "works" 8)
			 */
			if ((dp->dccps_hc_rx_ackpkts->dccpap_ack_seqno ==
			     DCCP_MAX_SEQNO + 1) &&
			    !inet_csk_ack_scheduled(sk)) {
				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
							  TCP_DELACK_MIN,
							  DCCP_RTO_MAX);
			}
		}
	}

	/*
	 *  Step 9: Process Reset
	 *	If P.type == Reset,
	 *		Tear down connection
	 *		S.state := TIMEWAIT
	 *		Set TIMEWAIT timer
	 *		Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_RESET) {
		/*
		 * Queue the equivalent of TCP fin so that dccp_recvmsg
		 * exits the loop
		 */
		dccp_fin(sk, skb);
		dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
		return 0;
		/*
		 *   Step 7: Check for unexpected packet types
		 *      If (S.is_server and P.type == CloseReq)
		 *	    or (S.is_server and P.type == Response)
		 *	    or (S.is_client and P.type == Request)
		 *	    or (S.state == RESPOND and P.type == Data),
		 *	  Send Sync packet acknowledging P.seqno
		 *	  Drop packet and return
		 */
	} else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
		    (dh->dccph_type == DCCP_PKT_RESPONSE ||
		     dh->dccph_type == DCCP_PKT_CLOSEREQ)) ||
		   (dp->dccps_role == DCCP_ROLE_CLIENT &&
		    dh->dccph_type == DCCP_PKT_REQUEST) ||
		   (sk->sk_state == DCCP_RESPOND &&
		    dh->dccph_type == DCCP_PKT_DATA)) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
			       DCCP_PKT_SYNC);
		goto discard;
	} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
		dccp_rcv_closereq(sk, skb);
		goto discard;
	} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
		dccp_rcv_close(sk, skb);
		return 0;
	}

	if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
			       DCCP_PKT_SYNCACK);
		goto discard;
	}

	switch (sk->sk_state) {
	case DCCP_CLOSED:
		return 1;

	case DCCP_REQUESTING:
		/* FIXME: do congestion control initialization */

		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
		if (queued >= 0)
			return queued;

		__kfree_skb(skb);
		return 0;

	case DCCP_RESPOND:
	case DCCP_PARTOPEN:
		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
								 dh, len);
		break;
	}

	if (dh->dccph_type == DCCP_PKT_ACK ||
	    dh->dccph_type == DCCP_PKT_DATAACK) {
		switch (old_state) {
		case DCCP_PARTOPEN:
			sk->sk_state_change(sk);
			sk_wake_async(sk, 0, POLL_OUT);