input.c revision ac75773c2742d82cbcb078708df406e9017224b7
/*
 *  net/dccp/input.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

/* rate-limit for syncs in reply to sequence-invalid packets; RFC 4340, 7.5.4 */
int sysctl_dccp_sync_ratelimit	__read_mostly = HZ / 8;

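/*
 * dccp_enqueue_skb  -  add a packet to the socket receive queue
 *
 * Strips the DCCP header (dccph_doff is in 32-bit words), appends the skb to
 * sk_receive_queue, charges it to the socket's receive buffer and notifies a
 * blocked reader via sk_data_ready().
 */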
static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);
	sk->sk_data_ready(sk, 0);
}

static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * On receiving Close/CloseReq, both RD/WR shutdown are performed.
	 * RFC 4340, 8.3 says that we MAY send further Data/DataAcks after
	 * receiving the closing segment, but there is no guarantee that such
	 * data will be processed at all.
	 */
	sk->sk_shutdown = SHUTDOWN_MASK;
	sock_set_flag(sk, SOCK_DONE);
	dccp_enqueue_skb(sk, skb);
}

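/*
 * dccp_rcv_close  -  handle a received Close packet
 *
 * Returns 1 if the packet was queued for the application (so the caller must
 * not free it) and 0 if the caller should discard it.
 */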
static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;

	switch (sk->sk_state) {
	/*
	 * We ignore Close when received in one of the following states:
	 *  - CLOSED		(may be a late or duplicate packet)
	 *  - PASSIVE_CLOSEREQ	(the peer has sent a CloseReq earlier)
	 *  - RESPOND		(already handled by dccp_check_req)
	 */
	case DCCP_CLOSING:
		/*
		 * Simultaneous-close: receiving a Close after sending one. This
		 * can happen if both client and server perform active-close and
		 * will result in an endless ping-pong of crossing and retrans-
		 * mitted Close packets, which only terminates when one of the
		 * nodes times out (min. 64 seconds). Quicker convergence can be
		 * achieved when one of the nodes acts as tie-breaker.
		 * This is ok as both ends are done with data transfer and each
		 * end is just waiting for the other to acknowledge termination.
		 */
		if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
			break;
		/* fall through */
	case DCCP_REQUESTING:
	case DCCP_ACTIVE_CLOSEREQ:
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_done(sk);
		break;
	case DCCP_OPEN:
	case DCCP_PARTOPEN:
		/* Give waiting application a chance to read pending data */
		queued = 1;
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
		/* fall through */
	case DCCP_PASSIVE_CLOSE:
		/*
		 * Retransmitted Close: we have already enqueued the first one.
		 */
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	}
	return queued;
}

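/*
 * dccp_rcv_closereq  -  handle a received CloseReq packet
 *
 * CloseReq is only sent by servers, so a server receiving one answers with a
 * Sync (Step 7); a client reacts by sending Close and moving towards the
 * closing states.  Returns 1 if the packet was queued for the application,
 * 0 if the caller should discard it.
 */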
static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;

	/*
	 *   Step 7: Check for unexpected packet types
	 *      If (S.is_server and P.type == CloseReq)
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
		return queued;
	}

	/* Step 13: process relevant Client states < CLOSEREQ */
	switch (sk->sk_state) {
	case DCCP_REQUESTING:
		dccp_send_close(sk, 0);
		dccp_set_state(sk, DCCP_CLOSING);
		break;
	case DCCP_OPEN:
	case DCCP_PARTOPEN:
		/* Give waiting application a chance to read pending data */
		queued = 1;
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ);
		/* fall through */
	case DCCP_PASSIVE_CLOSEREQ:
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	}
	return queued;
}

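/*
 * dccp_reset_code_convert  -  map DCCP Reset codes to errno values
 *
 * Codes indicating normal termination (or carrying no information) map to 0;
 * out-of-range codes are treated the same way.
 */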
static u8 dccp_reset_code_convert(const u8 code)
{
	const u8 error_code[] = {
	[DCCP_RESET_CODE_CLOSED]	     = 0,	/* normal termination */
	[DCCP_RESET_CODE_UNSPECIFIED]	     = 0,	/* nothing known */
	[DCCP_RESET_CODE_ABORTED]	     = ECONNRESET,

	[DCCP_RESET_CODE_NO_CONNECTION]	     = ECONNREFUSED,
	[DCCP_RESET_CODE_CONNECTION_REFUSED] = ECONNREFUSED,
	[DCCP_RESET_CODE_TOO_BUSY]	     = EUSERS,
	[DCCP_RESET_CODE_AGGRESSION_PENALTY] = EDQUOT,

	[DCCP_RESET_CODE_PACKET_ERROR]	     = ENOMSG,
	[DCCP_RESET_CODE_BAD_INIT_COOKIE]    = EBADR,
	[DCCP_RESET_CODE_BAD_SERVICE_CODE]   = EBADRQC,
	[DCCP_RESET_CODE_OPTION_ERROR]	     = EILSEQ,
	[DCCP_RESET_CODE_MANDATORY_ERROR]    = EOPNOTSUPP,
	};

	return code >= DCCP_MAX_RESET_CODES ? 0 : error_code[code];
}

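/*
 * dccp_rcv_reset  -  process a received Reset packet (Step 9)
 *
 * Records the converted error code in sk->sk_err, queues the packet like a
 * FIN so that a blocked reader returns, wakes up error waiters if needed and
 * moves the socket into TIMEWAIT.
 */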
static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
{
	u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);

	sk->sk_err = err;

	/* Queue the equivalent of TCP fin so that dccp_recvmsg exits the loop */
	dccp_fin(sk, skb);

	if (err && !sock_flag(sk, SOCK_DEAD))
		sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
}

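/*
 * dccp_event_ack_recv  -  pass the peer's Acknowledgment Number to the
 * Ack Vector code (only when the Send Ack Vector feature is enabled).
 */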
static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (dccp_msk(sk)->dccpms_send_ack_vector)
		dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk,
					    DCCP_SKB_CB(skb)->dccpd_ack_seq);
}

static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_sock *dp = dccp_sk(sk);

	/* Don't deliver to RX CCID when node has shut down read end. */
	if (!(sk->sk_shutdown & RCV_SHUTDOWN))
		ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
	/*
	 * Until the TX queue has been drained, we can not honour SHUT_WR, since
	 * we need received feedback as input to adjust congestion control.
	 */
	if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN))
		ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
}

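/*
 * dccp_check_seqno  -  validate Sequence and Acknowledgment Numbers
 *
 * Implements Steps 5 and 6 of the RFC 4340 packet-processing pseudocode.
 * Returns 0 if the packet is acceptable and -1 if it must be dropped; for
 * most sequence-invalid packets a Sync is generated, subject to the
 * sysctl_dccp_sync_ratelimit rate limit.
 */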
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	struct dccp_sock *dp = dccp_sk(sk);
	u64 lswl, lawl, seqno = DCCP_SKB_CB(skb)->dccpd_seq,
			ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;

	/*
	 *   Step 5: Prepare sequence numbers for Sync
	 *     If P.type == Sync or P.type == SyncAck,
	 *	  If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
	 *	     / * P is valid, so update sequence number variables
	 *		 accordingly.  After this update, P will pass the tests
	 *		 in Step 6.  A SyncAck is generated if necessary in
	 *		 Step 15 * /
	 *	     Update S.GSR, S.SWL, S.SWH
	 *	  Otherwise,
	 *	     Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_SYNC ||
	    dh->dccph_type == DCCP_PKT_SYNCACK) {
		if (between48(ackno, dp->dccps_awl, dp->dccps_awh) &&
		    dccp_delta_seqno(dp->dccps_swl, seqno) >= 0)
			dccp_update_gsr(sk, seqno);
		else
			return -1;
	}

	/*
	 *   Step 6: Check sequence numbers
	 *      Let LSWL = S.SWL and LAWL = S.AWL
	 *      If P.type == CloseReq or P.type == Close or P.type == Reset,
	 *	  LSWL := S.GSR + 1, LAWL := S.GAR
	 *      If LSWL <= P.seqno <= S.SWH
	 *	     and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
	 *	  Update S.GSR, S.SWL, S.SWH
	 *	  If P.type != Sync,
	 *	     Update S.GAR
	 */
	lswl = dp->dccps_swl;
	lawl = dp->dccps_awl;

	if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
	    dh->dccph_type == DCCP_PKT_CLOSE ||
	    dh->dccph_type == DCCP_PKT_RESET) {
		lswl = ADD48(dp->dccps_gsr, 1);
		lawl = dp->dccps_gar;
	}

	if (between48(seqno, lswl, dp->dccps_swh) &&
	    (ackno == DCCP_PKT_WITHOUT_ACK_SEQ ||
	     between48(ackno, lawl, dp->dccps_awh))) {
		dccp_update_gsr(sk, seqno);

		if (dh->dccph_type != DCCP_PKT_SYNC &&
		    (ackno != DCCP_PKT_WITHOUT_ACK_SEQ))
			dp->dccps_gar = ackno;
	} else {
		unsigned long now = jiffies;
		/*
		 *   Step 6: Check sequence numbers
		 *      Otherwise,
		 *         If P.type == Reset,
		 *            Send Sync packet acknowledging S.GSR
		 *         Otherwise,
		 *            Send Sync packet acknowledging P.seqno
		 *      Drop packet and return
		 *
		 *   These Syncs are rate-limited as per RFC 4340, 7.5.4:
		 *   at most 1 / (dccp_sync_rate_limit * HZ) Syncs per second.
		 */
		if (time_before(now, (dp->dccps_rate_last +
				      sysctl_dccp_sync_ratelimit)))
			return 0;

		DCCP_WARN("DCCP: Step 6 failed for %s packet, "
			  "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
			  "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu)), "
			  "sending SYNC...\n",  dccp_packet_name(dh->dccph_type),
			  (unsigned long long) lswl, (unsigned long long) seqno,
			  (unsigned long long) dp->dccps_swh,
			  (ackno == DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist"
							      : "exists",
			  (unsigned long long) lawl, (unsigned long long) ackno,
			  (unsigned long long) dp->dccps_awh);

		dp->dccps_rate_last = now;

		if (dh->dccph_type == DCCP_PKT_RESET)
			seqno = dp->dccps_gsr;
		dccp_send_sync(sk, seqno, DCCP_PKT_SYNC);
		return -1;
	}

	return 0;
}

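/*
 * __dccp_rcv_established  -  type-specific processing of a validated packet
 *
 * Called once sequence numbers and options have been checked.  Data and
 * DataAck packets are queued for the application; control packets trigger
 * the corresponding response (Reset handling, Close/CloseReq processing,
 * Syncs).  The skb is consumed unless it was placed on the receive queue.
 */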
static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
				  const struct dccp_hdr *dh, const unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	switch (dccp_hdr(skb)->dccph_type) {
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_DATA:
		/*
		 * FIXME: schedule DATA_DROPPED (RFC 4340, 11.7.2) if and when
		 * - sk_shutdown == RCV_SHUTDOWN, use Code 1, "Not Listening"
		 * - sk_receive_queue is full, use Code 2, "Receive Buffer"
		 */
		dccp_enqueue_skb(sk, skb);
		return 0;
	case DCCP_PKT_ACK:
		goto discard;
	case DCCP_PKT_RESET:
		/*
		 *  Step 9: Process Reset
		 *	If P.type == Reset,
		 *		Tear down connection
		 *		S.state := TIMEWAIT
		 *		Set TIMEWAIT timer
		 *		Drop packet and return
		 */
		dccp_rcv_reset(sk, skb);
		return 0;
	case DCCP_PKT_CLOSEREQ:
		if (dccp_rcv_closereq(sk, skb))
			return 0;
		goto discard;
	case DCCP_PKT_CLOSE:
		if (dccp_rcv_close(sk, skb))
			return 0;
		goto discard;
	case DCCP_PKT_REQUEST:
		/* Step 7
		 *   or (S.is_server and P.type == Response)
		 *   or (S.is_client and P.type == Request)
		 *   or (S.state >= OPEN and P.type == Request
		 *	and P.seqno >= S.OSR)
		 *    or (S.state >= OPEN and P.type == Response
		 *	and P.seqno >= S.OSR)
		 *    or (S.state == RESPOND and P.type == Data),
		 *  Send Sync packet acknowledging P.seqno
		 *  Drop packet and return
		 */
		if (dp->dccps_role != DCCP_ROLE_LISTEN)
			goto send_sync;
		goto check_seq;
	case DCCP_PKT_RESPONSE:
		if (dp->dccps_role != DCCP_ROLE_CLIENT)
			goto send_sync;
check_seq:
		if (dccp_delta_seqno(dp->dccps_osr,
				     DCCP_SKB_CB(skb)->dccpd_seq) >= 0) {
send_sync:
			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
				       DCCP_PKT_SYNC);
		}
		break;
	case DCCP_PKT_SYNC:
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
			       DCCP_PKT_SYNCACK);
		/*
		 * From RFC 4340, sec. 5.7
		 *
		 * As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
		 * MAY have non-zero-length application data areas, whose
		 * contents receivers MUST ignore.
		 */
		goto discard;
	}

	DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
discard:
	__kfree_skb(skb);
	return 0;
}

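/*
 * dccp_rcv_established  -  receive path for sockets in the OPEN state
 *
 * Performs the Step 6 sequence-number checks, parses options (Step 8),
 * processes an Acknowledgment Number if one is present, records the packet
 * in the Ack Vector when that feature is enabled and feeds it to the CCIDs,
 * before dispatching on the packet type in __dccp_rcv_established.
 * Returns 0 if the packet was handled here and 1 if option parsing failed.
 */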
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct dccp_hdr *dh, const unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (dccp_check_seqno(sk, skb))
		goto discard;

	if (dccp_parse_options(sk, NULL, skb))
		return 1;

	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_event_ack_recv(sk, skb);

	if (dccp_msk(sk)->dccpms_send_ack_vector &&
	    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
			    DCCP_SKB_CB(skb)->dccpd_seq,
			    DCCP_ACKVEC_STATE_RECEIVED))
		goto discard;
	dccp_deliver_input_to_ccids(sk, skb);

	return __dccp_rcv_established(sk, skb, dh, len);
discard:
	__kfree_skb(skb);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_rcv_established);

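/*
 * dccp_rcv_request_sent_state_process  -  handle packets in state REQUESTING
 *
 * The client has sent a Request and is waiting for the server's Response.
 * On a valid Response the connection moves to PARTOPEN and an Ack is sent,
 * unless dccp_sendmsg will take care of it.  Return values: 0 = packet was
 * consumed here, -1 = caller should free the packet, 1 = invalid packet and
 * the caller sends a Reset using DCCP_SKB_CB(skb)->dccpd_reset_code.
 */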
static int dccp_rcv_request_sent_state_process(struct sock *sk,
					       struct sk_buff *skb,
					       const struct dccp_hdr *dh,
					       const unsigned len)
{
	/*
	 *  Step 4: Prepare sequence numbers in REQUEST
	 *     If S.state == REQUEST,
	 *	  If (P.type == Response or P.type == Reset)
	 *		and S.AWL <= P.ackno <= S.AWH,
	 *	     / * Set sequence number variables corresponding to the
	 *		other endpoint, so P will pass the tests in Step 6 * /
	 *	     Set S.GSR, S.ISR, S.SWL, S.SWH
	 *	     / * Response processing continues in Step 10; Reset
	 *		processing continues in Step 9 * /
	 */
	if (dh->dccph_type == DCCP_PKT_RESPONSE) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		long tstamp = dccp_timestamp();

		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			       dp->dccps_awl, dp->dccps_awh)) {
			dccp_pr_debug("invalid ackno: S.AWL=%llu, "
				      "P.ackno=%llu, S.AWH=%llu \n",
				      (unsigned long long)dp->dccps_awl,
			   (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				      (unsigned long long)dp->dccps_awh);
			goto out_invalid_packet;
		}

		if (dccp_parse_options(sk, NULL, skb))
			goto out_invalid_packet;

		/* Obtain usec RTT sample from SYN exchange (used by CCID 3) */
		if (likely(dp->dccps_options_received.dccpor_timestamp_echo))
			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
			    dp->dccps_options_received.dccpor_timestamp_echo));

		if (dccp_msk(sk)->dccpms_send_ack_vector &&
		    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
				    DCCP_SKB_CB(skb)->dccpd_seq,
				    DCCP_ACKVEC_STATE_RECEIVED))
			goto out_invalid_packet; /* FIXME: change error code */

		/* Stop the REQUEST timer */
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
		WARN_ON(sk->sk_send_head == NULL);
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;

		dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_update_gsr(sk, dp->dccps_isr);
		/*
		 * SWL and AWL are initially adjusted so that they are not less than
		 * the initial Sequence Numbers received and sent, respectively:
		 *	SWL := max(GSR + 1 - floor(W/4), ISR),
		 *	AWL := max(GSS - W' + 1, ISS).
		 * These adjustments MUST be applied only at the beginning of the
		 * connection.
		 *
		 * AWL was adjusted in dccp_v4_connect -acme
		 */
		dccp_set_seqno(&dp->dccps_swl,
			       max48(dp->dccps_swl, dp->dccps_isr));

		dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);

		/*
		 *    Step 10: Process REQUEST state (second part)
		 *       If S.state == REQUEST,
		 *	  / * If we get here, P is a valid Response from the
		 *	      server (see Step 4), and we should move to
		 *	      PARTOPEN state. PARTOPEN means send an Ack,
		 *	      don't send Data packets, retransmit Acks
		 *	      periodically, and always include any Init Cookie
		 *	      from the Response * /
		 *	  S.state := PARTOPEN
		 *	  Set PARTOPEN timer
		 *	  Continue with S.state == PARTOPEN
		 *	  / * Step 12 will send the Ack completing the
		 *	      three-way handshake * /
		 */
		dccp_set_state(sk, DCCP_PARTOPEN);

		/* Make sure socket is routed, for correct metrics. */
		icsk->icsk_af_ops->rebuild_header(sk);

		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
		}

		if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
		    icsk->icsk_accept_queue.rskq_defer_accept) {
			/* Save one ACK. Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever, that I was not able
			 * to stand against the temptation 8)     --ANK
			 */
			/*
			 * OK, in DCCP we can as well do a similar trick; it's
			 * even in the draft, but there is no need for us to
			 * schedule an ack here, as dccp_sendmsg does this for
			 * us, also stated in the draft. -acme
			 */
			__kfree_skb(skb);
			return 0;
		}
		dccp_send_ack(sk);
		return -1;
	}

out_invalid_packet:
	/* dccp_v4_do_rcv will send a reset */
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
	return 1;
}

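/*
 * dccp_rcv_respond_partopen_state_process  -  handle RESPOND/PARTOPEN states
 *
 * An arriving Ack, DataAck or Data packet (the latter only in PARTOPEN)
 * completes the handshake and moves the socket to OPEN.  Returns 1 if the
 * packet was queued via __dccp_rcv_established, 0 otherwise.
 */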
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
						   struct sk_buff *skb,
						   const struct dccp_hdr *dh,
						   const unsigned len)
{
	int queued = 0;

	switch (dh->dccph_type) {
	case DCCP_PKT_RESET:
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		break;
	case DCCP_PKT_DATA:
		if (sk->sk_state == DCCP_RESPOND)
			break;
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_ACK:
		/*
		 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
		 * here, but only if we haven't used the DELACK timer for
		 * something else, like sending a delayed ack for a TIMESTAMP
		 * echo, etc. For now we're not clearing it; sending an extra
		 * ACK when there is nothing else to do in DELACK is not a big
		 * deal after all.
		 */

		/* Stop the PARTOPEN timer */
		if (sk->sk_state == DCCP_PARTOPEN)
			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

		dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_set_state(sk, DCCP_OPEN);

		if (dh->dccph_type == DCCP_PKT_DATAACK ||
		    dh->dccph_type == DCCP_PKT_DATA) {
			__dccp_rcv_established(sk, skb, dh, len);
			queued = 1; /* packet was queued
				       (by __dccp_rcv_established) */
		}
		break;
	}

	return queued;
}

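/*
 * dccp_rcv_state_process  -  receive state machine for all non-OPEN states
 *
 * Implements Step 3 (LISTEN), the Step 5/6/8 checks for states past the
 * handshake, Step 7 (unexpected packet types), Step 9 (Reset) and the
 * per-state handling of REQUESTING, RESPOND and PARTOPEN.  Returns 0 if the
 * packet was handled (queued or freed) and 1 if the caller should send a
 * Reset using the code left in DCCP_SKB_CB(skb)->dccpd_reset_code.
 */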
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			   struct dccp_hdr *dh, unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
	const int old_state = sk->sk_state;
	int queued = 0;

	/*
	 *  Step 3: Process LISTEN state
	 *
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
	 *	      Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		if (dh->dccph_type == DCCP_PKT_REQUEST) {
			if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
								    skb) < 0)
				return 1;
			goto discard;
		}
		if (dh->dccph_type == DCCP_PKT_RESET)
			goto discard;

		/* Caller (dccp_v4_do_rcv) will send Reset */
		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;
	}

	if (sk->sk_state != DCCP_REQUESTING) {
		if (dccp_check_seqno(sk, skb))
			goto discard;

		/*
		 * Step 8: Process options and mark acknowledgeable
		 */
		if (dccp_parse_options(sk, NULL, skb))
			return 1;

		if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
			dccp_event_ack_recv(sk, skb);

		if (dccp_msk(sk)->dccpms_send_ack_vector &&
		    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
				    DCCP_SKB_CB(skb)->dccpd_seq,
				    DCCP_ACKVEC_STATE_RECEIVED))
			goto discard;

		dccp_deliver_input_to_ccids(sk, skb);
	}

	/*
	 *  Step 9: Process Reset
	 *	If P.type == Reset,
	 *		Tear down connection
	 *		S.state := TIMEWAIT
	 *		Set TIMEWAIT timer
	 *		Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_RESET) {
		dccp_rcv_reset(sk, skb);
		return 0;
		/*
		 *   Step 7: Check for unexpected packet types
		 *      If (S.is_server and P.type == Response)
		 *	    or (S.is_client and P.type == Request)
		 *	    or (S.state == RESPOND and P.type == Data),
		 *	  Send Sync packet acknowledging P.seqno
		 *	  Drop packet and return
		 */
	} else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
		    dh->dccph_type == DCCP_PKT_RESPONSE) ||
		    (dp->dccps_role == DCCP_ROLE_CLIENT &&
		     dh->dccph_type == DCCP_PKT_REQUEST) ||
		    (sk->sk_state == DCCP_RESPOND &&
		     dh->dccph_type == DCCP_PKT_DATA)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
		goto discard;
	} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
		if (dccp_rcv_closereq(sk, skb))
			return 0;
		goto discard;
	} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
		if (dccp_rcv_close(sk, skb))
			return 0;
		goto discard;
	}

	switch (sk->sk_state) {
	case DCCP_CLOSED:
		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;

	case DCCP_REQUESTING:
		/* FIXME: do congestion control initialization */

		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
		if (queued >= 0)
			return queued;

		__kfree_skb(skb);
		return 0;

	case DCCP_RESPOND:
	case DCCP_PARTOPEN:
		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
								 dh, len);
		break;
	}

	if (dh->dccph_type == DCCP_PKT_ACK ||
	    dh->dccph_type == DCCP_PKT_DATAACK) {
		switch (old_state) {
		case DCCP_PARTOPEN:
			sk->sk_state_change(sk);
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
			break;
		}
	} else if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
		goto discard;
	}

	if (!queued) {
discard:
		__kfree_skb(skb);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_rcv_state_process);

/**
 *  dccp_sample_rtt  -  Validate and finalise computation of RTT sample
 *  @sk:	socket structure
 *  @delta:	number of microseconds between packet and acknowledgment
 *
 *  The routine is kept generic to work in different contexts. It should be
 *  called immediately when the ACK used for the RTT sample arrives.
 */
u32 dccp_sample_rtt(struct sock *sk, long delta)
{
	/* dccpor_elapsed_time is either zeroed out or set and > 0 */
	delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10;

	if (unlikely(delta <= 0)) {
		DCCP_WARN("unusable RTT sample %ld, using min\n", delta);
		return DCCP_SANE_RTT_MIN;
	}
	if (unlikely(delta > DCCP_SANE_RTT_MAX)) {
		DCCP_WARN("RTT sample %ld too large, using max\n", delta);
		return DCCP_SANE_RTT_MAX;
	}

	return delta;
}

EXPORT_SYMBOL_GPL(dccp_sample_rtt);