transport.c revision 771085d6bf3c52de29fc213e5bad07a82e57c23e
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * union sctp_addr.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala	    <sri@us.ibm.com>
 *    Ardelle Fan	    <ardelle.fan@intel.com>
 *
 * Any bugs reported to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.  */
static struct sctp_transport *sctp_transport_init(struct net *net,
						  struct sctp_transport *peer,
						  const union sctp_addr *addr,
						  gfp_t gfp)
{
	/* Copy in the address.  */
	peer->ipaddr = *addr;
	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
	memset(&peer->saddr, 0, sizeof(union sctp_addr));

	peer->sack_generation = 0;

	/* From 6.3.1 RTO Calculation:
	 *
	 * C1) Until an RTT measurement has been made for a packet sent to the
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);

	peer->last_time_heard = jiffies;
	peer->last_time_ecne_reduced = jiffies;

	peer->param_flags = SPP_HB_DISABLE |
			    SPP_PMTUD_ENABLE |
			    SPP_SACKDELAY_ENABLE;

	/* Initialize the default path max_retrans.  */
	peer->pathmaxrxt  = net->sctp.max_retrans_path;
	peer->pf_retrans  = net->sctp.pf_retrans;

	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);

	setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
			(unsigned long)peer);
	setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
			(unsigned long)peer);
	setup_timer(&peer->proto_unreach_timer,
		    sctp_generate_proto_unreach_event, (unsigned long)peer);

	/* Initialize the 64-bit random nonce sent with heartbeat. */
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

	atomic_set(&peer->refcnt, 1);

	return peer;
}

/* Allocate and initialize a new transport.  */
struct sctp_transport *sctp_transport_new(struct net *net,
					  const union sctp_addr *addr,
					  gfp_t gfp)
{
	struct sctp_transport *transport;

	transport = kzalloc(sizeof(*transport), gfp);
	if (!transport)
		goto fail;

	if (!sctp_transport_init(net, transport, addr, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(transport);

	return transport;

fail_init:
	kfree(transport);

fail:
	return NULL;
}

/* This transport is no longer needed.  Free it up if possible, or
 * delay the destruction until the last reference is dropped.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
	transport->dead = 1;
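
	/* Every armed timer holds a reference on this transport (taken via
	 * sctp_transport_hold() when the timer was started), and the
	 * proto_unreach timer holds a reference on the association.  A
	 * successful del_timer() means the expiry handler will never run,
	 * so drop the reference that the handler would have released.
	 */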

	/* Try to delete the heartbeat timer.  */
	if (del_timer(&transport->hb_timer))
		sctp_transport_put(transport);

	/* Delete the T3_rtx timer if it's active.
	 * There is no point in delaying this and letting the
	 * structure hang around in memory, since we know
	 * the transport is going away.
	 */
	if (del_timer(&transport->T3_rtx_timer))
		sctp_transport_put(transport);

	/* Delete the ICMP proto unreachable timer if it's active. */
	if (del_timer(&transport->proto_unreach_timer))
		sctp_association_put(transport->asoc);

	sctp_transport_put(transport);
}

static void sctp_transport_destroy_rcu(struct rcu_head *head)
{
	struct sctp_transport *transport;

	transport = container_of(head, struct sctp_transport, rcu);

	dst_release(transport->dst);
	kfree(transport);
	SCTP_DBG_OBJCNT_DEC(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
	if (unlikely(!transport->dead)) {
		WARN(1, "Attempt to destroy undead transport %p!\n", transport);
		return;
	}

	sctp_packet_free(&transport->packet);

	if (transport->asoc)
		sctp_association_put(transport->asoc);

	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}

/* Start T3_rtx timer if it is not already running and update the heartbeat
 * timer.  This routine is called every time a DATA chunk is sent.
 */
void sctp_transport_reset_timers(struct sctp_transport *transport)
{
	/* RFC 2960 6.3.2 Retransmission Timer Rules
	 *
	 * R1) Every time a DATA chunk is sent to any address (including a
	 * retransmission), if the T3-rtx timer of that address is not
	 * running, start it running so that it will expire after the RTO
	 * of that address.
	 */

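	/* mod_timer() returns 0 when the timer was not already pending,
	 * i.e. this call armed a fresh timer; in that case take a reference
	 * that is dropped again when the timer is deleted or its handler
	 * finishes running.
	 */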
	if (!timer_pending(&transport->T3_rtx_timer))
		if (!mod_timer(&transport->T3_rtx_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);

	/* When a data chunk is sent, reset the heartbeat interval.  */
	if (!mod_timer(&transport->hb_timer,
		       sctp_transport_timeout(transport)))
		sctp_transport_hold(transport);
}

/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Register the reference count in the association.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
			      struct sctp_association *asoc)
{
	transport->asoc = asoc;
	sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
	/* If we don't have a fresh route, look one up */
	if (!transport->dst || transport->dst->obsolete) {
		dst_release(transport->dst);
		transport->af_specific->get_dst(transport, &transport->saddr,
						&transport->fl, sk);
	}

	if (transport->dst)
		transport->pathmtu = dst_mtu(transport->dst);
	else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu)
{
	struct dst_entry *dst;

	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
		pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
			__func__, pmtu,
			SCTP_DEFAULT_MINSEGMENT);
		/* Use default minimum segment size and disable
		 * pmtu discovery on this transport.
		 */
		t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
	} else {
		t->pathmtu = pmtu;
	}

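	/* If the cached route is still valid, push the new PMTU into it and
	 * then re-validate: update_pmtu() may have obsoleted the entry, in
	 * which case a fresh route is looked up.
	 */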
	dst = sctp_transport_dst_check(t);
	if (!dst)
		t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);

	if (dst) {
		dst->ops->update_pmtu(dst, sk, NULL, pmtu);

		dst = sctp_transport_dst_check(t);
		if (!dst)
			t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
	}
}

/* Caches the dst entry and source address for a transport's destination
 * address.
 */
void sctp_transport_route(struct sctp_transport *transport,
			  union sctp_addr *saddr, struct sctp_sock *opt)
{
	struct sctp_association *asoc = transport->asoc;
	struct sctp_af *af = transport->af_specific;

	af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));

	if (saddr)
		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
	else
		af->get_saddr(opt, transport, &transport->fl);

	if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu)
		return;

	if (transport->dst) {
		transport->pathmtu = dst_mtu(transport->dst);

		/* Initialize sk->sk_rcv_saddr, if the transport is the
		 * association's active path for getsockname().
		 */
		if (asoc && (!asoc->peer.primary_path ||
				(transport == asoc->peer.active_path)))
			opt->pf->af->to_sk_saddr(&transport->saddr,
						 asoc->base.sk);
	} else {
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
	}
}

/* Hold a reference to a transport.  */
void sctp_transport_hold(struct sctp_transport *transport)
{
	atomic_inc(&transport->refcnt);
}

/* Release a reference to a transport and clean up
 * if there are no more references.
 */
void sctp_transport_put(struct sctp_transport *transport)
{
	if (atomic_dec_and_test(&transport->refcnt))
		sctp_transport_destroy(transport);
}

/* Update transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
	if (unlikely(!tp->rto_pending))
		/* We should not be doing any RTO updates unless rto_pending is set.  */
		pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);

	if (tp->rttvar || tp->srtt) {
		struct net *net = sock_net(tp->asoc->base.sk);

		/* 6.3.1 C3) When a new RTT measurement R' is made, set
		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
		 */

		/* Note:  The above algorithm has been rewritten to
		 * express rto_beta and rto_alpha as inverse powers
		 * of two.
		 * For example, assuming the default value of RTO.Alpha of
		 * 1/8, rto_alpha would be expressed as 3.
		 */
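
		/* Worked example, assuming the kernel defaults rto_alpha = 3
		 * (i.e. 1/8) and rto_beta = 2 (i.e. 1/4): with srtt = 800,
		 * rttvar = 200 and a new measurement rtt = 1000,
		 *   rttvar <- 200 - (200 >> 2) + (|800 - 1000| >> 2) = 200
		 *   srtt   <- 800 - (800 >> 3) + (1000 >> 3)         = 825
		 * so the RTO computed below is 825 + 4 * 200 = 1625 jiffies.
		 */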
		tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
			+ (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
		tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
			+ (rtt >> net->sctp.rto_alpha);
	} else {
		/* 6.3.1 C2) When the first RTT measurement R is made, set
		 * SRTT <- R, RTTVAR <- R/2.
		 */
		tp->srtt = rtt;
		tp->rttvar = rtt >> 1;
	}

	/* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
	 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
	 */
	if (tp->rttvar == 0)
		tp->rttvar = SCTP_CLOCK_GRANULARITY;

	/* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
	tp->rto = tp->srtt + (tp->rttvar << 2);

	/* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
	 * seconds then it is rounded up to RTO.Min seconds.
	 */
	if (tp->rto < tp->asoc->rto_min)
		tp->rto = tp->asoc->rto_min;

	/* 6.3.1 C7) A maximum value may be placed on RTO provided it is
	 * at least RTO.max seconds.
	 */
	if (tp->rto > tp->asoc->rto_max)
		tp->rto = tp->asoc->rto_max;

	sctp_max_rto(tp->asoc, tp);
	tp->rtt = rtt;

	/* Reset rto_pending so that a new RTT measurement is started when a
	 * new data chunk is sent.
	 */
	tp->rto_pending = 0;

	pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n",
		 __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}

/* This routine updates the transport's cwnd and partial_bytes_acked
 * parameters based on the bytes acked in the received SACK.
 */
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
			       __u32 sack_ctsn, __u32 bytes_acked)
{
	struct sctp_association *asoc = transport->asoc;
	__u32 cwnd, ssthresh, flight_size, pba, pmtu;

	cwnd = transport->cwnd;
	flight_size = transport->flight_size;

	/* See if we need to exit Fast Recovery first */
	if (asoc->fast_recovery &&
	    TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
		asoc->fast_recovery = 0;

	/* The appropriate cwnd increase algorithm is performed if, and only
	 * if, the cumulative TSN would advance and the congestion window is
	 * being fully utilized.
	 */
	if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
	    (flight_size < cwnd))
		return;

	ssthresh = transport->ssthresh;
	pba = transport->partial_bytes_acked;
	pmtu = transport->asoc->pathmtu;

	if (cwnd <= ssthresh) {
		/* RFC 4960 7.2.1
		 * o  When cwnd is less than or equal to ssthresh, an SCTP
		 *    endpoint MUST use the slow-start algorithm to increase
		 *    cwnd only if the current congestion window is being fully
		 *    utilized, an incoming SACK advances the Cumulative TSN
		 *    Ack Point, and the data sender is not in Fast Recovery.
		 *    Only when these three conditions are met can the cwnd be
		 *    increased; otherwise, the cwnd MUST not be increased.
		 *    If these conditions are met, then cwnd MUST be increased
		 *    by, at most, the lesser of 1) the total size of the
		 *    previously outstanding DATA chunk(s) acknowledged, and
		 *    2) the destination's path MTU.  This upper bound protects
		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
		 */
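		/* For example, with pmtu = 1500, a SACK that newly acks 4380
		 * bytes may grow cwnd by only one pmtu (1500 bytes); this cap
		 * is what defeats ACK splitting.
		 */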
		if (asoc->fast_recovery)
			return;

		if (bytes_acked > pmtu)
			cwnd += pmtu;
		else
			cwnd += bytes_acked;

		pr_debug("%s: slow start: transport:%p, bytes_acked:%d, "
			 "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n",
			 __func__, transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	} else {
		/* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
		 * upon each SACK arrival that advances the Cumulative TSN Ack
		 * Point, increase partial_bytes_acked by the total number of
		 * bytes of all new chunks acknowledged in that SACK including
		 * chunks acknowledged by the new Cumulative TSN Ack and by
		 * Gap Ack Blocks.
		 *
		 * When partial_bytes_acked is equal to or greater than cwnd
		 * and before the arrival of the SACK the sender had cwnd or
		 * more bytes of data outstanding (i.e., before arrival of the
		 * SACK, flightsize was greater than or equal to cwnd),
		 * increase cwnd by MTU, and reset partial_bytes_acked to
		 * (partial_bytes_acked - cwnd).
		 */
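		/* Worked example: cwnd = 6000, pmtu = 1500, pba = 5000, and a
		 * SACK newly acks 3000 bytes.  pba grows to 8000 >= cwnd, so
		 * cwnd is raised to 7500 and pba resets to 500 (note that the
		 * code below subtracts the already-raised cwnd).
		 */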
		pba += bytes_acked;
		if (pba >= cwnd) {
			cwnd += pmtu;
			pba = ((cwnd < pba) ? (pba - cwnd) : 0);
		}

		pr_debug("%s: congestion avoidance: transport:%p, "
			 "bytes_acked:%d, cwnd:%d, ssthresh:%d, "
			 "flight_size:%d, pba:%d\n", __func__,
			 transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	}

	transport->cwnd = cwnd;
	transport->partial_bytes_acked = pba;
}

/* This routine is used to lower the transport's cwnd when congestion is
 * detected.
 */
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
			       sctp_lower_cwnd_t reason)
{
	struct sctp_association *asoc = transport->asoc;

	switch (reason) {
	case SCTP_LOWER_CWND_T3_RTX:
		/* RFC 2960 Section 7.2.3, sctpimpguide
		 * When the T3-rtx timer expires on an address, SCTP should
		 * perform slow start by:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = 1*MTU
		 *      partial_bytes_acked = 0
		 */
		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = asoc->pathmtu;

		/* T3-rtx also clears fast recovery */
		asoc->fast_recovery = 0;
		break;

	case SCTP_LOWER_CWND_FAST_RTX:
		/* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
		 * destination address(es) to which the missing DATA chunks
		 * were last sent, according to the formula described in
		 * Section 7.2.3.
		 *
		 * RFC 2960 7.2.3, sctpimpguide: Upon detection of packet
		 * losses from SACK (see Section 7.2.4), an endpoint
		 * should do the following:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = ssthresh
		 *      partial_bytes_acked = 0
		 */
		if (asoc->fast_recovery)
			return;

		/* Mark Fast recovery */
		asoc->fast_recovery = 1;
		asoc->fast_recovery_exit = asoc->next_tsn - 1;

		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = transport->ssthresh;
		break;

	case SCTP_LOWER_CWND_ECNE:
		/* RFC 2481 Section 6.1.2.
		 * If the sender receives an ECN-Echo ACK packet
		 * then the sender knows that congestion was encountered in the
		 * network on the path from the sender to the receiver. The
		 * indication of congestion should be treated just as a
		 * congestion loss in non-ECN Capable TCP. That is, the TCP
		 * source halves the congestion window "cwnd" and reduces the
		 * slow start threshold "ssthresh".
		 * A critical condition is that TCP does not react to
		 * congestion indications more than once every window of
		 * data (or more loosely more than once every round-trip time).
		 */
		if (time_after(jiffies, transport->last_time_ecne_reduced +
					transport->rtt)) {
			transport->ssthresh = max(transport->cwnd/2,
						  4*asoc->pathmtu);
			transport->cwnd = transport->ssthresh;
			transport->last_time_ecne_reduced = jiffies;
		}
		break;

	case SCTP_LOWER_CWND_INACTIVE:
		/* RFC 2960 Section 7.2.1, sctpimpguide
		 * When the endpoint does not transmit data on a given
		 * transport address, the cwnd of the transport address
		 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
		 * NOTE: Although the draft recommends that this check needs
		 * to be done every RTO interval, we do it every heartbeat
		 * interval.
		 */
		transport->cwnd = max(transport->cwnd/2,
					 4*asoc->pathmtu);
		break;
	}

	transport->partial_bytes_acked = 0;

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n",
		 __func__, transport, reason, transport->cwnd,
		 transport->ssthresh);
}

/* Apply Max.Burst limit to the congestion window:
 * sctpimpguide-05 2.14.2
 * D) When the time comes for the sender to
 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
 * first be applied to limit how many new DATA chunks may be sent.
 * The limit is applied by adjusting cwnd as follows:
 * 	if ((flightsize + Max.Burst * MTU) < cwnd)
 * 		cwnd = flightsize + Max.Burst * MTU
 */
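/* For example, with flightsize = 2 * MTU, Max.Burst = 4 and cwnd = 10 * MTU,
 * the limit works out to 6 * MTU < cwnd, so sctp_transport_burst_limited()
 * clamps cwnd to 6 * MTU and parks the old value in t->burst_limited until
 * sctp_transport_burst_reset() restores it.
 */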

void sctp_transport_burst_limited(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;
	u32 old_cwnd = t->cwnd;
	u32 max_burst_bytes;

	if (t->burst_limited)
		return;

	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
	if (max_burst_bytes < old_cwnd) {
		t->cwnd = max_burst_bytes;
		t->burst_limited = old_cwnd;
	}
}

/* Restore the old congestion window, after the burst has had its
 * desired effect.
 */
void sctp_transport_burst_reset(struct sctp_transport *t)
{
	if (t->burst_limited) {
		t->cwnd = t->burst_limited;
		t->burst_limited = 0;
	}
}

/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *t)
{
	unsigned long timeout;

	timeout = t->rto + sctp_jitter(t->rto);
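	/* UNCONFIRMED and PF ("potentially failed") destinations are probed
	 * at the bare, jittered RTO; only confirmed, active destinations add
	 * the full heartbeat interval on top.
	 */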
	if ((t->state != SCTP_UNCONFIRMED) &&
	    (t->state != SCTP_PF))
		timeout += t->hbinterval;
	timeout += jiffies;
	return timeout;
}

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;

	/* RFC 2960 (bis), Section 5.2.4
	 * All the congestion control parameters (e.g., cwnd, ssthresh)
	 * related to this peer MUST be reset to their initial values
	 * (see Section 6.2.1)
	 */
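	/* RFC 4960 7.2.1 gives the initial cwnd as
	 * min(4*MTU, max(2*MTU, 4380 bytes)); the line below computes
	 * exactly that.
	 */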
	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
	t->burst_limited = 0;
	t->ssthresh = asoc->peer.i.a_rwnd;
	t->rto = asoc->rto_initial;
	sctp_max_rto(asoc, t);
	t->rtt = 0;
	t->srtt = 0;
	t->rttvar = 0;

	/* Reset these additional variables so that we have a clean
	 * slate.
	 */
	t->partial_bytes_acked = 0;
	t->flight_size = 0;
	t->error_count = 0;
	t->rto_pending = 0;
	t->hb_sent = 0;

	/* Initialize the state information for SFR-CACC */
	t->cacc.changeover_active = 0;
	t->cacc.cycling_changeover = 0;
	t->cacc.next_tsn_at_change = 0;
	t->cacc.cacc_saw_newack = 0;
}

/* Schedule retransmission on the given transport */
void sctp_transport_immediate_rtx(struct sctp_transport *t)
{
	/* Stop pending T3_rtx_timer */
	if (del_timer(&t->T3_rtx_timer))
		sctp_transport_put(t);

	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
	if (!timer_pending(&t->T3_rtx_timer)) {
		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
			sctp_transport_hold(t);
	}
}
664