/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					      struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode  = 0;

	return ulpq;
}


/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, event);
	}

	return event_eor;
}

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			struct list_head *list;
			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
			INIT_LIST_HEAD(list);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

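	/* If the event's skb is on a list (e.g. the temporary list built
	 * by sctp_ulpq_tail_data()), it is the first member of that list
	 * and its prev pointer refers back to the sk_buff_head; otherwise
	 * prev is NULL and so is skb_list.
	 */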
	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event))
		sk_mark_napi_id(sk, skb);

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */

	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sctp_sk(sk)->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sctp_sk(sk)->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sctp_sk(sk)->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));

}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue. The skbs may be non-linear if the SCTP
 * payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
	struct sk_buff_head *queue, struct sk_buff *f_frag,
	struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {

		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}


/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
			    pd_first = pos;
			    pd_last = pos;
			    pd_len = pos->len;
			} else {
			    pd_first = NULL;
			    pd_last = NULL;
			    pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
				    pd_last = pos;
				    pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
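		/* Deliver the accumulated fragments once they amount to at
		 * least the user's partial delivery point.
		 */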
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
							     &ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
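			/* We are continuing a message that is already being
			 * partially delivered, so a new first fragment either
			 * means there is nothing to continue (queue head) or
			 * ends the run of fragments we have collected.
			 */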
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					&ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}


/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
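	/* When not in partial delivery, try to pull out a fully
	 * reassembled message; otherwise only retrieve further
	 * fragments of the message currently being partially
	 * delivered.
	 */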
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;

		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					&ulpq->reasm, first_frag, last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to the ctsn_point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed.  */
		if ((event) && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}


/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid, cssn;

	sid = event->stream;
	in  = &ulpq->asoc->ssnmap->in;

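	/* The event's skb heads the temporary list built by the caller,
	 * so its prev pointer refers back to that list's sk_buff_head.
	 */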
	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}


	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* See if we have more ordered data that we can deliver. */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Skip over an SSN. This is used during the processing of
 * a Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}

static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
		struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message when there is pressure on the rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.   */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
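		/* The chunk length covers the DATA chunk header as well,
		 * so subtract it to renege only what the payload needs.
		 */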
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		int retval;
		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
		/*
		 * Enter partial delivery if chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}



/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk);
}