tcp_input.c revision 4dc2665e3634d720a62bd27128fc8781fcdad2dc
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:
 *		Pedro Roque	:	Fast Retransmit/Recovery.
 *					Two receive queues.
 *					Retransmit queue handled by TCP.
 *					Better retransmit timer handling.
 *					New congestion avoidance.
 *					Header prediction.
 *					Variable renaming.
 *
 *		Eric		:	Fast Retransmit.
 *		Randy Scott	:	MSS option defines.
 *		Eric Schenk	:	Fixes to slow start algorithm.
 *		Eric Schenk	:	Yet another double ACK bug.
 *		Eric Schenk	:	Delayed ACK bug fixes.
 *		Eric Schenk	:	Floyd style fast retrans war avoidance.
 *		David S. Miller	:	Don't allow zero congestion window.
 *		Eric Schenk	:	Fix retransmitter so that it sends
 *					next packet on ack of previous packet.
 *		Andi Kleen	:	Moved open_request checking here
 *					and process RSTs for open_requests.
 *		Andi Kleen	:	Better prune_queue, and other fixes.
 *		Andrey Savochkin:	Fix RTT measurements in the presence of
 *					timestamps.
 *		Andrey Savochkin:	Check sequence numbers correctly when
 *					removing SACKs due to in sequence incoming
 *					data segments.
 *		Andi Kleen:		Make sure we never ack data for which there
 *					is not enough room. Also make this condition
 *					a fatal error if it might still happen.
 *		Andi Kleen:		Add tcp_measure_rcv_mss to make
 *					connections with MSS<min(MTU,ann. MSS)
 *					work without delayed acks.
 *		Andi Kleen:		Process packets with PSH set in the
 *					fast path.
 *		J Hadi Salim:		ECN support
 *		Andrei Gurtov,
 *		Pasi Sarolahti,
 *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
 *					engine. Lots of bugs were found.
 *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <net/netdma.h>

int sysctl_tcp_timestamps __read_mostly = 1;
int sysctl_tcp_window_scaling __read_mostly = 1;
int sysctl_tcp_sack __read_mostly = 1;
int sysctl_tcp_fack __read_mostly = 1;
int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
int sysctl_tcp_ecn __read_mostly;
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 2;

int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly;
int sysctl_tcp_nometrics_save __read_mostly;

int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_abc __read_mostly;

#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
#define FLAG_ECE		0x40 /* ECE in this ACK				*/
#define FLAG_DATA_LOST		0x80 /* SACK detected data lossage.		*/
#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ONLY_ORIG_SACKED	0x200 /* SACKs only non-rexmit sent before RTO */

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)

#define IsReno(tp) ((tp)->rx_opt.sack_ok == 0)
#define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
#define IsDSack(tp) ((tp)->rx_opt.sack_ok & 4)

#define IsSackFrto() (sysctl_tcp_frto == 0x2)

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)

/* Adapt the MSS value used to make delayed ack decisions to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk,
				const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const unsigned int lss = icsk->icsk_ack.last_seg_size;
	unsigned int len;

	icsk->icsk_ack.last_seg_size = 0;

	/* skb->len may jitter because of SACKs, even if the peer
	 * sends good full-sized frames.
	 */
	len = skb_shinfo(skb)->gso_size ?: skb->len;
	if (len >= icsk->icsk_ack.rcv_mss) {
		icsk->icsk_ack.rcv_mss = len;
	} else {
		/* Otherwise, we make a more careful check, taking into
		 * account that SACK blocks are variable.
		 *
		 * "len" is the invariant segment length, including the TCP header.
		 */
		len += skb->data - skb->h.raw;
		if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
		    /* If PSH is not set, the packet should be
		     * full sized, provided the peer TCP is not badly broken.
		     * This observation (if it is correct 8)) allows us
		     * to handle super-low mtu links fairly.
		     */
		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
		     !(tcp_flag_word(skb->h.th)&TCP_REMNANT))) {
			/* Also subtract the invariant part (if the peer is
			 * RFC compliant): the TCP header plus the fixed
			 * timestamp option length.
			 * The resulting "len" is the MSS, free of SACK jitter.
			 */
			len -= tcp_sk(sk)->tcp_header_len;
			icsk->icsk_ack.last_seg_size = len;
			if (len == lss) {
				icsk->icsk_ack.rcv_mss = len;
				return;
			}
		}
		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
	}
}

static void tcp_incr_quickack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > icsk->icsk_ack.quick)
		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}

void tcp_enter_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	tcp_incr_quickack(sk);
	icsk->icsk_ack.pingpong = 0;
	icsk->icsk_ack.ato = TCP_ATO_MIN;
}

/* Send ACKs quickly, if "quick" count is not exhausted
 * and the session is not interactive.
 */

static inline int tcp_in_quickack_mode(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */

static void tcp_fixup_sndbuf(struct sock *sk)
{
	int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
		     sizeof(struct sk_buff);

	if (sk->sk_sndbuf < 3 * sndmem)
		sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
}

/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * tcp_full_space() is split into two parts: the "network" buffer,
 * allocated forward and advertised in the receiver window (tp->rcv_wnd),
 * and the "application buffer", required to isolate scheduling/application
 * latencies from the network.
 * window_clamp is the maximal advertised window. It can be less than
 * tcp_full_space(), in which case tcp_full_space() - window_clamp
 * is reserved for the "application" buffer. The smaller window_clamp is,
 * the smoother our behaviour from the viewpoint of the network, but the
 * lower the throughput and the higher the sensitivity of the connection
 * to losses. 8)
 *
 * rcv_ssthresh is a stricter window_clamp used at the "slow start"
 * phase to predict further behaviour of this connection.
 * It is used for two goals:
 * - to enforce header prediction at the sender, even when the application
 *   requires some significant "application buffer". This is check #1.
 * - to prevent pruning of the receive queue because of misprediction
 *   of the receiver window. This is check #2.
 *
 * The scheme does not work when the sender sends good segments opening
 * the window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */
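
/* A worked example of the split above, with hypothetical numbers:
 * suppose tcp_full_space(sk) == 64K and window_clamp == 48K. Then 16K
 * (tcp_full_space() - window_clamp) is reserved as "application buffer"
 * and at most 48K is ever advertised to the peer. Shrinking window_clamp
 * further smooths our behaviour on the network at the cost of throughput;
 * growing it toward 64K leaves nothing to absorb scheduling latency and
 * risks pruning of the receive queue.
 */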

/* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
			     const struct sk_buff *skb)
{
	/* Optimize this! */
	int truesize = tcp_win_from_space(skb->truesize)/2;
	int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2;

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}

static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
			    struct sk_buff *skb)
{
	/* Check #1 */
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	    !tcp_memory_pressure) {
		int incr;

		/* Check #2. Increase window, if an skb with such overhead
		 * will fit into the rcvbuf in the future.
		 */
		if (tcp_win_from_space(skb->truesize) <= skb->len)
			incr = 2*tp->advmss;
		else
			incr = __tcp_grow_window(sk, tp, skb);

		if (incr) {
			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	}
}

/* 3. Tuning rcvbuf, when connection enters established state. */

static void tcp_fixup_rcvbuf(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);

	/* Try to select rcvbuf so that 4 mss-sized segments
	 * fit into the window and the corresponding skbs fit into our rcvbuf.
	 * (was 3; 4 is the minimum needed for fast retransmit to work.)
	 */
	while (tcp_win_from_space(rcvmem) < tp->advmss)
		rcvmem += 128;
	if (sk->sk_rcvbuf < 4 * rcvmem)
		sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
}
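
/* A minimal sketch of the sizing loop above, assuming advmss == 1460:
 * rcvmem starts at 1460 + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff)
 * and is bumped in 128-byte steps until tcp_win_from_space(rcvmem)
 * reaches 1460, i.e. until one advertised MSS of window really fits
 * into the per-skb rcvbuf charge. The final rcvbuf covers four such
 * segments, the minimum for fast retransmit to work.
 */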

/* 4. Try to fix up everything. It is done immediately after the connection
 *    enters the established state.
 */
static void tcp_init_buffer_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int maxwin;

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		tcp_fixup_rcvbuf(sk);
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
		tcp_fixup_sndbuf(sk);

	tp->rcvq_space.space = tp->rcv_wnd;

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> sysctl_tcp_app_win),
					       4 * tp->advmss);
	}

	/* Force reservation of one segment. */
	if (sysctl_tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ack.quick = 0;

	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
	    !tcp_memory_pressure &&
	    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
				    sysctl_tcp_rmem[2]);
	}
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
}


/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We have no direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate it.
 * Overestimations make us ACK less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
void tcp_initialize_rcv_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd/2);
	hint = min(hint, TCP_MIN_RCVMSS);
	hint = max(hint, TCP_MIN_MSS);

	inet_csk(sk)->icsk_ack.rcv_mss = hint;
}
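
/* A worked example, assuming the usual TCP_MIN_RCVMSS == 536 and
 * TCP_MIN_MSS == 88: with advmss == 1460, mss_cache == 536 and
 * rcv_wnd == 4096, the guess starts at min(1460, 536) == 536, stays
 * below rcv_wnd/2 == 2048, is capped at TCP_MIN_RCVMSS and floored at
 * TCP_MIN_MSS, so rcv_mss ends up at 536. Underestimating here only
 * costs extra ACKs until tcp_measure_rcv_mss() corrects the guess.
 */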

/* Receiver "autotuning" code.
 *
 * The algorithm for RTT estimation w/o timestamps is based on
 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 * <http://www.lanl.gov/radiant/website/pubs/drs/lacsi2001.ps>
 *
 * More detail on this code can be found at
 * <http://www.psc.edu/~jheffner/senior_thesis.ps>,
 * though this reference is out of date.  A new paper
 * is pending.
 */
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
{
	u32 new_sample = tp->rcv_rtt_est.rtt;
	long m = sample;

	if (m == 0)
		m = 1;

	if (new_sample != 0) {
		/* If we sample in larger samples in the non-timestamp
		 * case, we could grossly overestimate the RTT especially
		 * with chatty applications or bulk transfer apps which
		 * are stalled on filesystem I/O.
		 *
		 * Also, since we are only going for a minimum in the
		 * non-timestamp case, we do not smooth things out,
		 * else with timestamps disabled convergence takes too
		 * long.
		 */
		if (!win_dep) {
			m -= (new_sample >> 3);
			new_sample += m;
		} else if (m < new_sample)
			new_sample = m << 3;
	} else {
		/* No previous measure. */
		new_sample = m << 3;
	}

	if (tp->rcv_rtt_est.rtt != new_sample)
		tp->rcv_rtt_est.rtt = new_sample;
}
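
/* rcv_rtt_est.rtt is kept scaled by 8. A worked example of the !win_dep
 * branch, with hypothetical numbers: stored rtt == 800 (i.e. 100 ticks)
 * and a new sample m == 120 ticks. Then m -= 800 >> 3 leaves m == 20,
 * and rtt becomes 820, i.e. 102.5 ticks: the classic EWMA
 * rtt = 7/8 * rtt_old + 1/8 * sample, in fixed point.
 */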

static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
	if (tp->rcv_rtt_est.time == 0)
		goto new_measure;
	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
		return;
	tcp_rcv_rtt_update(tp,
			   jiffies - tp->rcv_rtt_est.time,
			   1);

new_measure:
	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
	tp->rcv_rtt_est.time = tcp_time_stamp;
}

static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (tp->rx_opt.rcv_tsecr &&
	    (TCP_SKB_CB(skb)->end_seq -
	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
		tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
}

/*
 * This function should be called every time data is copied to user space.
 * It calculates the appropriate TCP receive buffer space.
 */
void tcp_rcv_space_adjust(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time;
	int space;

	if (tp->rcvq_space.time == 0)
		goto new_measure;

	time = tcp_time_stamp - tp->rcvq_space.time;
	if (time < (tp->rcv_rtt_est.rtt >> 3) ||
	    tp->rcv_rtt_est.rtt == 0)
		return;

	space = 2 * (tp->copied_seq - tp->rcvq_space.seq);

	space = max(tp->rcvq_space.space, space);

	if (tp->rcvq_space.space != space) {
		int rcvmem;

		tp->rcvq_space.space = space;

		if (sysctl_tcp_moderate_rcvbuf &&
		    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
			int new_clamp = space;

			/* Receive space grows, normalize in order to
			 * take into account packet headers and sk_buff
			 * structure overhead.
			 */
			space /= tp->advmss;
			if (!space)
				space = 1;
			rcvmem = (tp->advmss + MAX_TCP_HEADER +
				  16 + sizeof(struct sk_buff));
			while (tcp_win_from_space(rcvmem) < tp->advmss)
				rcvmem += 128;
			space *= rcvmem;
			space = min(space, sysctl_tcp_rmem[2]);
			if (space > sk->sk_rcvbuf) {
				sk->sk_rcvbuf = space;

				/* Make the window clamp follow along.  */
				tp->window_clamp = new_clamp;
			}
		}
	}

new_measure:
	tp->rcvq_space.seq = tp->copied_seq;
	tp->rcvq_space.time = tcp_time_stamp;
}
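
/* A worked example of the DRS logic above, with hypothetical numbers:
 * if the application copied 32K during the last RTT, space becomes
 * 2 * 32K == 64K (room for the sender to double its window). That is
 * then normalized to whole advmss segments, each charged at rcvmem
 * (segment plus header and sk_buff overhead), clamped to
 * sysctl_tcp_rmem[2], and only ever grows sk_rcvbuf and window_clamp,
 * never shrinks them.
 */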

/* There is something which you must keep in mind when you analyze the
 * behavior of the tp->ato delayed ack timeout interval.  When a
 * connection starts up, we want to ack as quickly as possible.  The
 * problem is that "good" TCP's do slow start at the beginning of data
 * transmission.  This means that until we send the first few ACKs, the
 * sender will sit on his end and only queue most of his data, because
 * he can only send snd_cwnd unacked packets at any given time.  For
 * each ACK we send, he increments snd_cwnd and transmits more of his
 * queue.  -DaveM
 */
static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now;

	inet_csk_schedule_ack(sk);

	tcp_measure_rcv_mss(sk, skb);

	tcp_rcv_rtt_measure(tp);

	now = tcp_time_stamp;

	if (!icsk->icsk_ack.ato) {
		/* The _first_ data packet received, initialize
		 * the delayed ACK engine.
		 */
		tcp_incr_quickack(sk);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
	} else {
		int m = now - icsk->icsk_ack.lrcvtime;

		if (m <= TCP_ATO_MIN/2) {
			/* The fastest case is the first. */
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
		} else if (m < icsk->icsk_ack.ato) {
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
			if (icsk->icsk_ack.ato > icsk->icsk_rto)
				icsk->icsk_ack.ato = icsk->icsk_rto;
		} else if (m > icsk->icsk_rto) {
			/* Too long a gap. Apparently the sender failed to
			 * restart the window, so send ACKs quickly.
			 */
			tcp_incr_quickack(sk);
			sk_stream_mem_reclaim(sk);
		}
	}
	icsk->icsk_ack.lrcvtime = now;

	TCP_ECN_check_ce(tp, skb);

	if (skb->len >= 128)
		tcp_grow_window(sk, tp, skb);
}
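
/* A worked example of the ato update above (hypothetical numbers):
 * with ato == 30 and a packet arriving m == 10 ticks after the
 * previous one, m < ato, so ato becomes (30 >> 1) + 10 == 25, drifting
 * toward the observed inter-packet gap. A very fast sender with
 * m <= TCP_ATO_MIN/2 drives ato toward (ato >> 1) + TCP_ATO_MIN/2,
 * i.e. down to about TCP_ATO_MIN; and m > rto means the sender
 * stalled, so we go back to quickack mode.
 */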

/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
{
	struct tcp_sock *tp = tcp_sk(sk);
	long m = mrtt; /* RTT */

	/*	The following amusing code comes from Jacobson's
	 *	article in SIGCOMM '88.  Note that rtt and mdev
	 *	are scaled versions of rtt and mean deviation.
	 *	This is designed to be as fast as possible.
	 *	m stands for "measurement".
	 *
	 *	In a 1990 paper the rto value is changed to:
	 *	RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO when it should be decreased, increase
	 * it too slowly when it should be increased quickly, decrease it
	 * too quickly etc. I guess in BSD RTO takes ONE value, so it
	 * absolutely does not matter how to _calculate_ it. Seems, this
	 * was a trap that VJ failed to avoid. 8)
	 */
	if (m == 0)
		m = 1;
	if (tp->srtt != 0) {
		m -= (tp->srtt >> 3);	/* m is now error in rtt est */
		tp->srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (tp->mdev >> 2);   /* similar update on mdev */
			/* This is similar to one of the Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use a finer
			 * gain for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto,
			 * but it also limits too-fast rto decreases,
			 * which happen in pure Eifel.
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (tp->mdev >> 2);   /* similar update on mdev */
		}
		tp->mdev += m;	    	/* mdev = 3/4 mdev + 1/4 new */
		if (tp->mdev > tp->mdev_max) {
			tp->mdev_max = tp->mdev;
			if (tp->mdev_max > tp->rttvar)
				tp->rttvar = tp->mdev_max;
		}
		if (after(tp->snd_una, tp->rtt_seq)) {
			if (tp->mdev_max < tp->rttvar)
				tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
			tp->rtt_seq = tp->snd_nxt;
			tp->mdev_max = TCP_RTO_MIN;
		}
	} else {
		/* no previous measure. */
		tp->srtt = m<<3;	/* take the measured time to be rtt */
		tp->mdev = m<<1;	/* make sure rto = 3*rtt */
		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
		tp->rtt_seq = tp->snd_nxt;
	}
}
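
/* A minimal userspace-style sketch of the fixed-point update above,
 * using the same scaling (srtt stored as 8*RTT, mdev as 4*mdev) but
 * omitting the Eifel-style damping and the mdev_max/rttvar tracking.
 * The names are illustrative only, not kernel API:
 */
#if 0
static void example_rtt_estimator(long *srtt, long *mdev, long m)
{
	if (m == 0)
		m = 1;
	if (*srtt != 0) {
		m -= (*srtt >> 3);	/* m is now the error in the estimate */
		*srtt += m;		/* srtt = 7/8 srtt + 1/8 sample */
		if (m < 0)
			m = -m;		/* m is now abs(error) */
		m -= (*mdev >> 2);
		*mdev += m;		/* mdev = 3/4 mdev + 1/4 |error| */
	} else {
		*srtt = m << 3;		/* first sample: take it as the rtt */
		*mdev = m << 1;		/* makes the initial rto = 3*rtt */
	}
}
#endif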

/* Calculate rto without backoff.  This is the second half of Van Jacobson's
 * routine referred to above.
 */
static inline void tcp_set_rto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Old crap is replaced with new one. 8)
	 *
	 * More seriously:
	 * 1. If rtt variance happened to be less than 50 msec, it is a
	 *    hallucination. It cannot be less, due to the utterly erratic
	 *    ACK generation made at least by Solaris and FreeBSD.
	 *    "Erratic ACKs" have _nothing_ to do with delayed acks, because
	 *    at cwnd>2 the true delack timeout is invisible. Actually,
	 *    Linux-2.4 also generates erratic ACKs in some circumstances.
	 */
	inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;

	/* 2. Fixups made earlier cannot be right.
	 *    If we do not estimate RTO correctly without them,
	 *    the whole algorithm is pure shit and should be replaced
	 *    with a correct one. Which is exactly what we pretend to do.
	 */
}

/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
 * guarantees that rto is higher.
 */
static inline void tcp_bound_rto(struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}
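
/* E.g. (hypothetical numbers): srtt == 100 << 3 and rttvar == 40 give
 * icsk_rto = 100 + 40 == 140 ticks. tcp_bound_rto() then only caps the
 * result at TCP_RTO_MAX; the lower bound is implicit, because rttvar
 * never decays below TCP_RTO_MIN.
 */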

/* Save metrics learned by this TCP session.
   This function is called only when TCP finishes successfully,
   i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);

	if (sysctl_tcp_nometrics_save)
		return;

	dst_confirm(dst);

	if (dst && (dst->flags&DST_HOST)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		int m;

		if (icsk->icsk_backoff || !tp->srtt) {
			/* This session failed to estimate rtt. Why?
			 * Probably, no packets returned in time.
			 * Reset our results.
			 */
			if (!(dst_metric_locked(dst, RTAX_RTT)))
				dst->metrics[RTAX_RTT-1] = 0;
			return;
		}

		m = dst_metric(dst, RTAX_RTT) - tp->srtt;

		/* If the newly calculated rtt is larger than the stored one,
		 * store the new one. Otherwise, use EWMA. Remember,
		 * rtt overestimation is always better than underestimation.
		 */
		if (!(dst_metric_locked(dst, RTAX_RTT))) {
			if (m <= 0)
				dst->metrics[RTAX_RTT-1] = tp->srtt;
			else
				dst->metrics[RTAX_RTT-1] -= (m>>3);
		}

		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
			if (m < 0)
				m = -m;

			/* Scale deviation to the rttvar fixed point */
			m >>= 1;
			if (m < tp->mdev)
				m = tp->mdev;

			if (m >= dst_metric(dst, RTAX_RTTVAR))
				dst->metrics[RTAX_RTTVAR-1] = m;
			else
				dst->metrics[RTAX_RTTVAR-1] -=
					(dst->metrics[RTAX_RTTVAR-1] - m)>>2;
		}

		if (tp->snd_ssthresh >= 0xFFFF) {
			/* Slow start has not finished yet. */
			if (dst_metric(dst, RTAX_SSTHRESH) &&
			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
				dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
			if (!dst_metric_locked(dst, RTAX_CWND) &&
			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
				dst->metrics[RTAX_CWND-1] = tp->snd_cwnd;
		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
			   icsk->icsk_ca_state == TCP_CA_Open) {
			/* Cong. avoidance phase, cwnd is reliable. */
			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
				dst->metrics[RTAX_SSTHRESH-1] =
					max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_cwnd) >> 1;
		} else {
			/* Otherwise slow start did not finish, cwnd is nonsense,
			   and ssthresh may be invalid as well.
			 */
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_ssthresh) >> 1;
			if (dst->metrics[RTAX_SSTHRESH-1] &&
			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
			    tp->snd_ssthresh > dst->metrics[RTAX_SSTHRESH-1])
				dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh;
		}

		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
			if (dst->metrics[RTAX_REORDERING-1] < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				dst->metrics[RTAX_REORDERING-1] = tp->reordering;
		}
	}
}

/* Numbers are taken from RFC2414.  */
__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
{
	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

	if (!cwnd) {
		if (tp->mss_cache > 1460)
			cwnd = 2;
		else
			cwnd = (tp->mss_cache > 1095) ? 3 : 4;
	}
	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}
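
/* E.g. with no route metric override: mss_cache == 1400 gives cwnd 3
 * (1095 < 1400 <= 1460), a 9000-byte jumbo MSS gives 2, and a 536-byte
 * MSS gives 4, per the RFC2414 table; the result is always capped by
 * snd_cwnd_clamp.
 */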

/* Set slow start threshold and cwnd, without falling back into slow start */
void tcp_enter_cwr(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->prior_ssthresh = 0;
	tp->bytes_acked = 0;
	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
		tp->undo_marker = 0;
		tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
		tp->snd_cwnd = min(tp->snd_cwnd,
				   tcp_packets_in_flight(tp) + 1U);
		tp->snd_cwnd_cnt = 0;
		tp->high_seq = tp->snd_nxt;
		tp->snd_cwnd_stamp = tcp_time_stamp;
		TCP_ECN_queue_cwr(tp);

		tcp_set_ca_state(sk, TCP_CA_CWR);
	}
}

/* Initialize metrics on socket. */

static void tcp_init_metrics(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	if (dst_metric_locked(dst, RTAX_CWND))
		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
	if (dst_metric(dst, RTAX_SSTHRESH)) {
		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	}
	if (dst_metric(dst, RTAX_REORDERING) &&
	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
		tp->rx_opt.sack_ok &= ~2;
		tp->reordering = dst_metric(dst, RTAX_REORDERING);
	}

	if (dst_metric(dst, RTAX_RTT) == 0)
		goto reset;

	if (!tp->srtt && dst_metric(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
		goto reset;

	/* Initial rtt is determined from SYN,SYN-ACK.
	 * The segment is small and rtt may appear much
	 * less than the real one. Use per-dst memory
	 * to make it more realistic.
	 *
	 * A bit of theory. RTT is the time passed after a "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets forces the peer to delay ACKs and the calculation is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimates RTT. BUT! If the peer tries some clever
	 * trick sort of "quick acks" for long enough to decrease RTT
	 * to a low value, and then abruptly stops doing so and starts to
	 * delay ACKs, expect trouble.
	 */
	if (dst_metric(dst, RTAX_RTT) > tp->srtt) {
		tp->srtt = dst_metric(dst, RTAX_RTT);
		tp->rtt_seq = tp->snd_nxt;
	}
	if (dst_metric(dst, RTAX_RTTVAR) > tp->mdev) {
		tp->mdev = dst_metric(dst, RTAX_RTTVAR);
		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
	}
	tcp_set_rto(sk);
	tcp_bound_rto(sk);
	if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
		goto reset;
	tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	return;

reset:
	/* Play it conservative. If timestamps are not
	 * supported, TCP will fail to recalculate the correct
	 * rtt if the initial rto is too small. FORGET ALL AND RESET!
	 */
	if (!tp->rx_opt.saw_tstamp && tp->srtt) {
		tp->srtt = 0;
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
	}
}

static void tcp_update_reordering(struct sock *sk, const int metric,
				  const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (metric > tp->reordering) {
		tp->reordering = min(TCP_MAX_REORDERING, metric);

		/* This exciting event is worth remembering. 8) */
		if (ts)
			NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
		else if (IsReno(tp))
			NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
		else if (IsFack(tp))
			NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
		else
			NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
#if FASTRETRANS_DEBUG > 1
		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
		       tp->reordering,
		       tp->fackets_out,
		       tp->sacked_out,
		       tp->undo_marker ? tp->undo_retrans : 0);
#endif
		/* Disable FACK for now. */
		tp->rx_opt.sack_ok &= ~2;
	}
}

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 * Tag  InFlight	Description
 * 0	1		- orig segment is in flight.
 * S	0		- nothing flies, orig reached receiver.
 * L	0		- nothing flies, orig lost by net.
 * R	2		- both orig and retransmit are in flight.
 * L|R	1		- orig is lost, retransmit is in flight.
 * S|R  1		- orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form a finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of one of three flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
 *	B. SACK arrives sacking data transmitted after a never-retransmitted
 *	   hole was sent out.
 *	C. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added a new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note that the state diagram turns out to be commutative,
 * so that we are allowed not to be bothered by the order of our actions,
 * when multiple events arrive simultaneously. (see the function below).
 *
 * Reordering detection.
 * --------------------
 * The reordering metric is the maximal distance by which a packet can be
 * displaced in the packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills an old hole and the corresponding segment was never
 *    retransmitted -> reordering. Alas, we cannot use it
 *    when the segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for a retransmitted and already SACKed segment -> reordering.
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
 */
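
/* An example walk through the tag machine above, with made-up events:
 * a segment is sent (tag 0, 1 in flight); the scoreboard marks it lost
 * and it is retransmitted (L|R, 1 in flight); a SACK covering it then
 * arrives, and since L|S|R is equivalent to plain S, the code
 * short-circuits it to S (0 in flight). If a later D-SACK shows the
 * retransmit was redundant, the tag stays S and undo_retrans is
 * decremented instead.
 */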
static int
tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
	struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
	struct sk_buff *cached_skb;
	int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
	int reord = tp->packets_out;
	int prior_fackets;
	u32 lost_retrans = 0;
	int flag = 0;
	int dup_sack = 0;
	int cached_fack_count;
	int i;
	int first_sack_index;

	if (!tp->sacked_out)
		tp->fackets_out = 0;
	prior_fackets = tp->fackets_out;

	/* Check for D-SACK. */
	if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
		dup_sack = 1;
		tp->rx_opt.sack_ok |= 4;
		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
	} else if (num_sacks > 1 &&
			!after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
			!before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
		dup_sack = 1;
		tp->rx_opt.sack_ok |= 4;
		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
	}

	/* D-SACK for already forgotten data...
	 * Do dumb counting. */
	if (dup_sack &&
			!after(ntohl(sp[0].end_seq), prior_snd_una) &&
			after(ntohl(sp[0].end_seq), tp->undo_marker))
		tp->undo_retrans--;

	/* Eliminate too old ACKs, but take into
	 * account more or less fresh ones; they can
	 * contain valid SACK info.
	 */
	if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
		return 0;

	/* SACK fastpath:
	 * if the only SACK change is the increase of the end_seq of
	 * the first block, then only apply that SACK block
	 * and use retransmit queue hinting; otherwise take the slow path. */
	flag = 1;
	for (i = 0; i < num_sacks; i++) {
		__be32 start_seq = sp[i].start_seq;
		__be32 end_seq = sp[i].end_seq;

		if (i == 0) {
			if (tp->recv_sack_cache[i].start_seq != start_seq)
				flag = 0;
		} else {
			if ((tp->recv_sack_cache[i].start_seq != start_seq) ||
			    (tp->recv_sack_cache[i].end_seq != end_seq))
				flag = 0;
		}
		tp->recv_sack_cache[i].start_seq = start_seq;
		tp->recv_sack_cache[i].end_seq = end_seq;
	}
	/* Clear the rest of the cache sack blocks so they won't match mistakenly. */
	for (; i < ARRAY_SIZE(tp->recv_sack_cache); i++) {
		tp->recv_sack_cache[i].start_seq = 0;
		tp->recv_sack_cache[i].end_seq = 0;
	}

	first_sack_index = 0;
	if (flag)
		num_sacks = 1;
	else {
		int j;
		tp->fastpath_skb_hint = NULL;

		/* order SACK blocks to allow an in-order walk of the retransmit queue */
		for (i = num_sacks-1; i > 0; i--) {
			for (j = 0; j < i; j++) {
				if (after(ntohl(sp[j].start_seq),
					  ntohl(sp[j+1].start_seq))) {
					struct tcp_sack_block_wire tmp;

					tmp = sp[j];
					sp[j] = sp[j+1];
					sp[j+1] = tmp;

					/* Track where the first SACK block goes to */
					if (j == first_sack_index)
						first_sack_index = j+1;
				}

			}
		}
	}

	/* clear flag, as it is used for a different purpose in the following code */
	flag = 0;

	/* Use SACK fastpath hint if valid */
	cached_skb = tp->fastpath_skb_hint;
	cached_fack_count = tp->fastpath_cnt_hint;
	if (!cached_skb) {
		cached_skb = sk->sk_write_queue.next;
		cached_fack_count = 0;
	}

	for (i = 0; i < num_sacks; i++, sp++) {
		struct sk_buff *skb;
		__u32 start_seq = ntohl(sp->start_seq);
		__u32 end_seq = ntohl(sp->end_seq);
		int fack_count;

		skb = cached_skb;
		fack_count = cached_fack_count;

		/* Event "B" in the comment above. */
		if (after(end_seq, tp->high_seq))
			flag |= FLAG_DATA_LOST;

		sk_stream_for_retrans_queue_from(skb, sk) {
			int in_sack, pcount;
			u8 sacked;

			cached_skb = skb;
			cached_fack_count = fack_count;
			if (i == first_sack_index) {
				tp->fastpath_skb_hint = skb;
				tp->fastpath_cnt_hint = fack_count;
			}

			/* The retransmission queue is always in order, so
			 * we can short-circuit the walk early.
			 */
			if (!before(TCP_SKB_CB(skb)->seq, end_seq))
				break;

			in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
				!before(end_seq, TCP_SKB_CB(skb)->end_seq);

			pcount = tcp_skb_pcount(skb);

			if (pcount > 1 && !in_sack &&
			    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
				unsigned int pkt_len;

				in_sack = !after(start_seq,
						 TCP_SKB_CB(skb)->seq);

				if (!in_sack)
					pkt_len = (start_seq -
						   TCP_SKB_CB(skb)->seq);
				else
					pkt_len = (end_seq -
						   TCP_SKB_CB(skb)->seq);
				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size))
					break;
				pcount = tcp_skb_pcount(skb);
			}

			fack_count += pcount;

			sacked = TCP_SKB_CB(skb)->sacked;

			/* Account D-SACK for retransmitted packet. */
			if ((dup_sack && in_sack) &&
			    (sacked & TCPCB_RETRANS) &&
			    after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
				tp->undo_retrans--;

			/* The frame is ACKed. */
			if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) {
				if (sacked&TCPCB_RETRANS) {
					if ((dup_sack && in_sack) &&
					    (sacked&TCPCB_SACKED_ACKED))
						reord = min(fack_count, reord);
				} else {
					/* If it was in a hole, we detected reordering. */
					if (fack_count < prior_fackets &&
					    !(sacked&TCPCB_SACKED_ACKED))
						reord = min(fack_count, reord);
				}

				/* Nothing to do; acked frame is about to be dropped. */
				continue;
			}

			if ((sacked&TCPCB_SACKED_RETRANS) &&
			    after(end_seq, TCP_SKB_CB(skb)->ack_seq) &&
			    (!lost_retrans || after(end_seq, lost_retrans)))
				lost_retrans = end_seq;

			if (!in_sack)
				continue;

			if (!(sacked&TCPCB_SACKED_ACKED)) {
				if (sacked & TCPCB_SACKED_RETRANS) {
					/* If the segment is not tagged as lost,
					 * we do not clear RETRANS, believing
					 * that retransmission is still in flight.
					 */
					if (sacked & TCPCB_LOST) {
						TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
						tp->lost_out -= tcp_skb_pcount(skb);
						tp->retrans_out -= tcp_skb_pcount(skb);

						/* clear lost hint */
						tp->retransmit_skb_hint = NULL;
					}
				} else {
					/* New SACK for a non-retransmitted frame,
					 * which was in a hole. It is reordering.
					 */
					if (!(sacked & TCPCB_RETRANS) &&
					    fack_count < prior_fackets)
						reord = min(fack_count, reord);

					if (sacked & TCPCB_LOST) {
						TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
						tp->lost_out -= tcp_skb_pcount(skb);

						/* clear lost hint */
						tp->retransmit_skb_hint = NULL;
					}
					/* SACK enhanced F-RTO detection.
					 * Set flag if and only if non-rexmitted
					 * segments below frto_highmark are
					 * SACKed (RFC4138; Appendix B).
					 * Clearing is correct due to the in-order walk.
					 */
					if (after(end_seq, tp->frto_highmark)) {
						flag &= ~FLAG_ONLY_ORIG_SACKED;
					} else {
						if (!(sacked & TCPCB_RETRANS))
							flag |= FLAG_ONLY_ORIG_SACKED;
					}
				}

				TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
				flag |= FLAG_DATA_SACKED;
				tp->sacked_out += tcp_skb_pcount(skb);

				if (fack_count > tp->fackets_out)
					tp->fackets_out = fack_count;
			} else {
				if (dup_sack && (sacked&TCPCB_RETRANS))
					reord = min(fack_count, reord);
			}

			/* D-SACK. We can detect redundant retransmission
			 * in S|R and plain R frames and clear it.
			 * undo_retrans is decreased above; L|R frames
			 * are accounted above as well.
			 */
			if (dup_sack &&
			    (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS)) {
				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
				tp->retrans_out -= tcp_skb_pcount(skb);
				tp->retransmit_skb_hint = NULL;
			}
		}
	}

	/* Check for lost retransmit. This superb idea is
	 * borrowed from "ratehalving". Event "C".
	 * Later note: FACK people cheated me again 8), so
	 * we have to account for reordering! Ugly,
	 * but it should help.
	 */
	if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) {
		struct sk_buff *skb;

		sk_stream_for_retrans_queue(skb, sk) {
			if (after(TCP_SKB_CB(skb)->seq, lost_retrans))
				break;
			if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
				continue;
			if ((TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) &&
			    after(lost_retrans, TCP_SKB_CB(skb)->ack_seq) &&
			    (IsFack(tp) ||
			     !before(lost_retrans,
				     TCP_SKB_CB(skb)->ack_seq + tp->reordering *
				     tp->mss_cache))) {
				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
				tp->retrans_out -= tcp_skb_pcount(skb);

				/* clear lost hint */
				tp->retransmit_skb_hint = NULL;

				if (!(TCP_SKB_CB(skb)->sacked&(TCPCB_LOST|TCPCB_SACKED_ACKED))) {
					tp->lost_out += tcp_skb_pcount(skb);
					TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
					flag |= FLAG_DATA_SACKED;
					NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
				}
			}
		}
	}

	tp->left_out = tp->sacked_out + tp->lost_out;

	if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss &&
	    (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
		tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);

#if FASTRETRANS_DEBUG > 0
	BUG_TRAP((int)tp->sacked_out >= 0);
	BUG_TRAP((int)tp->lost_out >= 0);
	BUG_TRAP((int)tp->retrans_out >= 0);
	BUG_TRAP((int)tcp_packets_in_flight(tp) >= 0);
#endif
	return flag;
}

/* F-RTO can only be used if these conditions are satisfied:
 *  - there must be some unsent new data
 *  - the advertised window should allow sending it
 *  - TCP has never retransmitted anything other than head (SACK enhanced
 *    variant from Appendix B of RFC4138 is more robust here)
 */
int tcp_use_frto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (!sysctl_tcp_frto || !sk->sk_send_head ||
		after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
		      tp->snd_una + tp->snd_wnd))
		return 0;

	if (IsSackFrto())
		return 1;

	/* Avoid expensive walking of rexmit queue if possible */
	if (tp->retrans_out > 1)
		return 0;

	skb = skb_peek(&sk->sk_write_queue)->next;	/* Skips head */
	sk_stream_for_retrans_queue_from(skb, sk) {
		if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
			return 0;
		/* Short-circuit when first non-SACKed skb has been checked */
		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED))
			break;
	}
	return 1;
}

/* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
 * recovery a bit and use heuristics in tcp_process_frto() to detect if
 * the RTO was spurious. Only clear SACKED_RETRANS of the head here to
 * keep retrans_out counting accurate (with SACK F-RTO, other than head
 * may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS
 * bits are handled if the Loss state is really to be entered (in
 * tcp_enter_frto_loss).
 *
 * Do like tcp_enter_loss() would; when RTO expires the second time it
 * does:
 *  "Reduce ssthresh if it has not yet been made inside this window."
 */
void tcp_enter_frto(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) ||
	    tp->snd_una == tp->high_seq ||
	    ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) &&
	     !icsk->icsk_retransmits)) {
		tp->prior_ssthresh = tcp_current_ssthresh(sk);
		/* Our state is too optimistic in the ssthresh() call because
		 * cwnd is not reduced until tcp_enter_frto_loss() when previous
		 * F-RTO recovery has not yet completed. The pattern would be
		 * this: RTO, Cumulative ACK, RTO (2xRTO for the same segment
		 * does not end up here twice).
		 * RFC4138 should be more specific on what to do, even though
		 * RTO is quite unlikely to occur after the first Cumulative ACK
		 * due to back-off and the complexity of triggering events ...
		 */
		if (tp->frto_counter) {
			u32 stored_cwnd;
			stored_cwnd = tp->snd_cwnd;
			tp->snd_cwnd = 2;
			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
			tp->snd_cwnd = stored_cwnd;
		} else {
			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		}
		/* ... in theory, the cong. control module could do "any tricks"
		 * in ssthresh(), which means that ca_state, lost bits and the
		 * lost_out counter would have to be faked before the call
		 * occurs. We consider that too expensive, unlikely and hacky,
		 * so modules using these in ssthresh() must deal with these
		 * incompatibility issues if they receive CA_EVENT_FRTO and
		 * frto_counter != 0
		 */
		tcp_ca_event(sk, CA_EVENT_FRTO);
	}

	tp->undo_marker = tp->snd_una;
	tp->undo_retrans = 0;

	skb = skb_peek(&sk->sk_write_queue);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
	}
	tcp_sync_left_out(tp);

	/* Earlier loss recovery underway (see RFC4138; Appendix B).
	 * The last condition is necessary at least in tp->frto_counter case.
	 */
	if (IsSackFrto() && (tp->frto_counter ||
	    ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) &&
	    after(tp->high_seq, tp->snd_una)) {
		tp->frto_highmark = tp->high_seq;
	} else {
		tp->frto_highmark = tp->snd_nxt;
	}
	tcp_set_ca_state(sk, TCP_CA_Disorder);
	tp->high_seq = tp->snd_nxt;
	tp->frto_counter = 1;
}

/* Enter Loss state after F-RTO was applied. A dupack arrived after the RTO,
 * which indicates that we should follow the traditional RTO recovery,
 * i.e. mark everything lost and do go-back-N retransmission.
 */
static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt = 0;

	tp->sacked_out = 0;
	tp->lost_out = 0;
	tp->fackets_out = 0;
	tp->retrans_out = 0;

	sk_stream_for_retrans_queue(skb, sk) {
		cnt += tcp_skb_pcount(skb);
		/*
		 * Count the retransmission made on RTO correctly (only while
		 * waiting for the first ACK, which did not arrive)...
		 */
		if ((tp->frto_counter == 1) && !(flag&FLAG_DATA_ACKED)) {
			tp->retrans_out += tcp_skb_pcount(skb);
			/* ...enter this if branch just for the first segment */
			flag |= FLAG_DATA_ACKED;
		} else {
			TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
		}
		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {

			/* Do not mark those segments lost that were
			 * forward transmitted after RTO
			 */
			if (!after(TCP_SKB_CB(skb)->end_seq,
				   tp->frto_highmark)) {
				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
				tp->lost_out += tcp_skb_pcount(skb);
			}
		} else {
			tp->sacked_out += tcp_skb_pcount(skb);
			tp->fackets_out = cnt;
		}
	}
	tcp_sync_left_out(tp);

	tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments;
	tp->snd_cwnd_cnt = 0;
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->undo_marker = 0;
	tp->frto_counter = 0;

	tp->reordering = min_t(unsigned int, tp->reordering,
					     sysctl_tcp_reordering);
	tcp_set_ca_state(sk, TCP_CA_Loss);
	tp->high_seq = tp->frto_highmark;
	TCP_ECN_queue_cwr(tp);

	clear_all_retrans_hints(tp);
}

void tcp_clear_retrans(struct tcp_sock *tp)
{
	tp->left_out = 0;
	tp->retrans_out = 0;

	tp->fackets_out = 0;
	tp->sacked_out = 0;
	tp->lost_out = 0;

	tp->undo_marker = 0;
	tp->undo_retrans = 0;
}

/* Enter Loss state. If "how" is not zero, forget all SACK information
 * and reset tags completely, otherwise preserve SACKs. If receiver
 * dropped its ofo queue, we will know this due to reneging detection.
 */
void tcp_enter_loss(struct sock *sk, int how)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt = 0;

	/* Reduce ssthresh if it has not yet been made inside this window. */
	if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
		tp->prior_ssthresh = tcp_current_ssthresh(sk);
		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		tcp_ca_event(sk, CA_EVENT_LOSS);
	}
	tp->snd_cwnd	   = 1;
	tp->snd_cwnd_cnt   = 0;
	tp->snd_cwnd_stamp = tcp_time_stamp;

	tp->bytes_acked = 0;
	tcp_clear_retrans(tp);

	/* Push undo marker, if it was plain RTO and nothing
	 * was retransmitted. */
	if (!how)
		tp->undo_marker = tp->snd_una;

	sk_stream_for_retrans_queue(skb, sk) {
		cnt += tcp_skb_pcount(skb);
		if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
			tp->undo_marker = 0;
		TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
			tp->lost_out += tcp_skb_pcount(skb);
		} else {
			tp->sacked_out += tcp_skb_pcount(skb);
			tp->fackets_out = cnt;
		}
	}
	tcp_sync_left_out(tp);

	tp->reordering = min_t(unsigned int, tp->reordering,
					     sysctl_tcp_reordering);
	tcp_set_ca_state(sk, TCP_CA_Loss);
	tp->high_seq = tp->snd_nxt;
	TCP_ECN_queue_cwr(tp);

	clear_all_retrans_hints(tp);
}

static int tcp_check_sack_reneging(struct sock *sk)
{
	struct sk_buff *skb;

	/* If the ACK arrived pointing to a remembered SACK,
	 * it means that our remembered SACKs do not reflect
	 * the real state of the receiver, i.e. the
	 * receiver _host_ is heavily congested (or buggy).
	 * Do processing similar to an RTO timeout.
	 */
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
		struct inet_connection_sock *icsk = inet_csk(sk);
		NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);

		tcp_enter_loss(sk, 1);
		icsk->icsk_retransmits++;
		tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  icsk->icsk_rto, TCP_RTO_MAX);
		return 1;
	}
	return 0;
}

static inline int tcp_fackets_out(struct tcp_sock *tp)
{
	return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out;
}

static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
{
	return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
}

static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
{
	return tp->packets_out &&
	       tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue));
}
1533
1534/* Linux NewReno/SACK/FACK/ECN state machine.
1535 * --------------------------------------
1536 *
1537 * "Open"	Normal state, no dubious events, fast path.
1538 * "Disorder"   In all the respects it is "Open",
1539 *		but requires a bit more attention. It is entered when
1540 *		we see some SACKs or dupacks. It is split of "Open"
1541 *		mainly to move some processing from fast path to slow one.
1542 * "CWR"	CWND was reduced due to some Congestion Notification event.
1543 *		It can be ECN, ICMP source quench, local device congestion.
1544 * "Recovery"	CWND was reduced, we are fast-retransmitting.
1545 * "Loss"	CWND was reduced due to RTO timeout or SACK reneging.
1546 *
1547 * tcp_fastretrans_alert() is entered:
1548 * - each incoming ACK, if state is not "Open"
1549 * - when arrived ACK is unusual, namely:
1550 *	* SACK
1551 *	* Duplicate ACK.
1552 *	* ECN ECE.
1553 *
1554 * Counting packets in flight is pretty simple.
1555 *
1556 *	in_flight = packets_out - left_out + retrans_out
1557 *
1558 *	packets_out is SND.NXT-SND.UNA counted in packets.
1559 *
1560 *	retrans_out is number of retransmitted segments.
1561 *
1562 *	left_out is number of segments left network, but not ACKed yet.
1563 *
1564 *		left_out = sacked_out + lost_out
1565 *
1566 *     sacked_out: Packets, which arrived to receiver out of order
1567 *		   and hence not ACKed. With SACKs this number is simply
1568 *		   amount of SACKed data. Even without SACKs
1569 *		   it is easy to give pretty reliable estimate of this number,
1570 *		   counting duplicate ACKs.
1571 *
1572 *       lost_out: Packets lost by network. TCP has no explicit
1573 *		   "loss notification" feedback from network (for now).
1574 *		   It means that this number can be only _guessed_.
1575 *		   Actually, it is the heuristics to predict lossage that
1576 *		   distinguishes different algorithms.
1577 *
1578 *	F.e. after RTO, when all the queue is considered as lost,
1579 *	lost_out = packets_out and in_flight = retrans_out.
1580 *
1581 *		Essentially, we have now two algorithms counting
1582 *		lost packets.
1583 *
1584 *		FACK: It is the simplest heuristics. As soon as we decided
1585 *		that something is lost, we decide that _all_ not SACKed
1586 *		packets until the most forward SACK are lost. I.e.
1587 *		lost_out = fackets_out - sacked_out and left_out = fackets_out.
1588 *		It is absolutely correct estimate, if network does not reorder
1589 *		packets. And it loses any connection to reality when reordering
1590 *		takes place. We use FACK by default until reordering
1591 *		is suspected on the path to this destination.
1592 *
1593 *		NewReno: when Recovery is entered, we assume that one segment
1594 *		is lost (classic Reno). While we are in Recovery and
1595 *		a partial ACK arrives, we assume that one more packet
1596 *		is lost (NewReno). This heuristics are the same in NewReno
1597 *		and SACK.
1598 *
1599 *  Imagine, that's all! Forget about all this shamanism about CWND inflation
1600 *  deflation etc. CWND is real congestion window, never inflated, changes
1601 *  only according to classic VJ rules.
1602 *
1603 * Really tricky (and requiring careful tuning) part of algorithm
1604 * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
1605 * The first determines the moment _when_ we should reduce CWND and,
1606 * hence, slow down forward transmission. In fact, it determines the moment
1607 * when we decide that hole is caused by loss, rather than by a reorder.
1608 *
1609 * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill
1610 * holes, caused by lost packets.
1611 *
1612 * And the most logically complicated part of the algorithm is the undo
1613 * heuristics. We detect false retransmits due to both too-early
1614 * fast retransmit (reordering) and an underestimated RTO, by analyzing
1615 * timestamps and D-SACKs. When we detect that some segments were
1616 * retransmitted by mistake and the CWND reduction was wrong, we undo the
1617 * window reduction and abort the recovery phase. This logic is hidden
1618 * inside several functions named tcp_try_undo_<something>.
1619 */
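
/* Illustrative sketch (not part of the kernel source): the packets-in-flight
 * accounting described above, spelled out with plain integers. The parameter
 * names mirror struct tcp_sock fields, but this helper itself is hypothetical.
 */
static inline unsigned int example_tcp_in_flight(unsigned int packets_out,
						 unsigned int sacked_out,
						 unsigned int lost_out,
						 unsigned int retrans_out)
{
	/* left_out = sacked_out + lost_out */
	unsigned int left_out = sacked_out + lost_out;

	/* in_flight = packets_out - left_out + retrans_out */
	return packets_out - left_out + retrans_out;
}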
1620
1621/* This function decides when we should leave the Disorder state
1622 * and enter the Recovery phase, reducing the congestion window.
1623 *
1624 * Main question: may we further continue forward transmission
1625 * with the same cwnd?
1626 */
1627static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
1628{
1629	__u32 packets_out;
1630
1631	/* Do not perform any recovery during FRTO algorithm */
1632	if (tp->frto_counter)
1633		return 0;
1634
1635	/* Trick#1: The loss is proven. */
1636	if (tp->lost_out)
1637		return 1;
1638
1639	/* Not-A-Trick#2 : Classic rule... */
1640	if (tcp_fackets_out(tp) > tp->reordering)
1641		return 1;
1642
1643	/* Trick#3 : when we use RFC2988 timer restart, fast
1644	 * retransmit can be triggered by timeout of queue head.
1645	 */
1646	if (tcp_head_timedout(sk, tp))
1647		return 1;
1648
1649	/* Trick#4: It is still not OK... But will it be useful to delay
1650	 * recovery more?
1651	 */
1652	packets_out = tp->packets_out;
1653	if (packets_out <= tp->reordering &&
1654	    tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
1655	    !tcp_may_send_now(sk, tp)) {
1656		/* We have nothing to send. This connection is limited
1657		 * either by receiver window or by application.
1658		 */
1659		return 1;
1660	}
1661
1662	return 0;
1663}
1664
1665/* If we receive more dupacks than expected when counting segments
1666 * under the assumption of no reordering, interpret this as reordering.
1667 * The only other explanation would be a bug in the receiver's TCP.
1668 */
1669static void tcp_check_reno_reordering(struct sock *sk, const int addend)
1670{
1671	struct tcp_sock *tp = tcp_sk(sk);
1672	u32 holes;
1673
1674	holes = max(tp->lost_out, 1U);
1675	holes = min(holes, tp->packets_out);
1676
1677	if ((tp->sacked_out + holes) > tp->packets_out) {
1678		tp->sacked_out = tp->packets_out - holes;
1679		tcp_update_reordering(sk, tp->packets_out + addend, 0);
1680	}
1681}
1682
1683/* Emulate SACKs for SACKless connection: account for a new dupack. */
1684
1685static void tcp_add_reno_sack(struct sock *sk)
1686{
1687	struct tcp_sock *tp = tcp_sk(sk);
1688	tp->sacked_out++;
1689	tcp_check_reno_reordering(sk, 0);
1690	tcp_sync_left_out(tp);
1691}
1692
1693/* Account for ACK, ACKing some data in Reno Recovery phase. */
1694
1695static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acked)
1696{
1697	if (acked > 0) {
1698		/* One ACK acked hole. The rest eat duplicate ACKs. */
1699		if (acked-1 >= tp->sacked_out)
1700			tp->sacked_out = 0;
1701		else
1702			tp->sacked_out -= acked-1;
1703	}
1704	tcp_check_reno_reordering(sk, acked);
1705	tcp_sync_left_out(tp);
1706}
1707
1708static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
1709{
1710	tp->sacked_out = 0;
1711	tp->left_out = tp->lost_out;
1712}
1713
1714/* Mark head of queue up as lost. */
1715static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
1716			       int packets, u32 high_seq)
1717{
1718	struct sk_buff *skb;
1719	int cnt;
1720
1721	BUG_TRAP(packets <= tp->packets_out);
1722	if (tp->lost_skb_hint) {
1723		skb = tp->lost_skb_hint;
1724		cnt = tp->lost_cnt_hint;
1725	} else {
1726		skb = sk->sk_write_queue.next;
1727		cnt = 0;
1728	}
1729
1730	sk_stream_for_retrans_queue_from(skb, sk) {
1731		/* TODO: do this better; updating these hints on every
1732		 * iteration is not the most efficient way... */
1733		tp->lost_skb_hint = skb;
1734		tp->lost_cnt_hint = cnt;
1735		cnt += tcp_skb_pcount(skb);
1736		if (cnt > packets || after(TCP_SKB_CB(skb)->end_seq, high_seq))
1737			break;
1738		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
1739			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1740			tp->lost_out += tcp_skb_pcount(skb);
1741
1742			/* Invalidate the retransmit queue hint if this
1743			 * newly marked skb comes before it */
1744			if(tp->retransmit_skb_hint != NULL &&
1745			   before(TCP_SKB_CB(skb)->seq,
1746				  TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) {
1747
1748				tp->retransmit_skb_hint = NULL;
1749			}
1750		}
1751	}
1752	tcp_sync_left_out(tp);
1753}
1754
1755/* Account for newly detected lost packet(s) */
1756
1757static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
1758{
1759	if (IsFack(tp)) {
1760		int lost = tp->fackets_out - tp->reordering;
1761		if (lost <= 0)
1762			lost = 1;
1763		tcp_mark_head_lost(sk, tp, lost, tp->high_seq);
1764	} else {
1765		tcp_mark_head_lost(sk, tp, 1, tp->high_seq);
1766	}
1767
1768	/* New heuristic: this is possible only after we switched
1769	 * to restarting the timer each time something is ACKed.
1770	 * Hence, we can detect timed-out packets during fast
1771	 * retransmit without falling back to slow start.
1772	 */
1773	if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
1774		struct sk_buff *skb;
1775
1776		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
1777			: sk->sk_write_queue.next;
1778
1779		sk_stream_for_retrans_queue_from(skb, sk) {
1780			if (!tcp_skb_timedout(sk, skb))
1781				break;
1782
1783			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
1784				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1785				tp->lost_out += tcp_skb_pcount(skb);
1786
1787				/* clear xmit_retrans hint */
1788				if (tp->retransmit_skb_hint &&
1789				    before(TCP_SKB_CB(skb)->seq,
1790					   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
1791
1792					tp->retransmit_skb_hint = NULL;
1793			}
1794		}
1795
1796		tp->scoreboard_skb_hint = skb;
1797
1798		tcp_sync_left_out(tp);
1799	}
1800}
1801
1802/* CWND moderation: prevent bursts due to overly large ACKs
1803 * in dubious situations.
1804 */
1805static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
1806{
1807	tp->snd_cwnd = min(tp->snd_cwnd,
1808			   tcp_packets_in_flight(tp)+tcp_max_burst(tp));
1809	tp->snd_cwnd_stamp = tcp_time_stamp;
1810}
1811
1812/* The lower bound on the congestion window is the slow start threshold,
1813 * unless the congestion avoidance module decides to override it.
1814 */
1815static inline u32 tcp_cwnd_min(const struct sock *sk)
1816{
1817	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1818
1819	return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
1820}
1821
1822/* Decrease cwnd by one packet for every second ACK. */
1823static void tcp_cwnd_down(struct sock *sk)
1824{
1825	struct tcp_sock *tp = tcp_sk(sk);
1826	int decr = tp->snd_cwnd_cnt + 1;
1827
1828	tp->snd_cwnd_cnt = decr&1;
1829	decr >>= 1;
1830
1831	if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
1832		tp->snd_cwnd -= decr;
1833
1834	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
1835	tp->snd_cwnd_stamp = tcp_time_stamp;
1836}
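
/* Worked example (illustrative): with snd_cwnd == 10 and roughly one ACK
 * per delivered segment, the function above decrements cwnd once per two
 * ACKs, so over one RTT cwnd drifts from 10 toward ~5 -- a gradual,
 * rate-halving style reduction instead of an abrupt cut.
 */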
1837
1838/* Nothing was retransmitted, or the returned timestamp is less
1839 * than the timestamp of the first retransmission.
1840 */
1841static inline int tcp_packet_delayed(struct tcp_sock *tp)
1842{
1843	return !tp->retrans_stamp ||
1844		(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
1845		 (__s32)(tp->rx_opt.rcv_tsecr - tp->retrans_stamp) < 0);
1846}
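
/* (Note: the (__s32) cast above makes the comparison robust to timestamp
 * wraparound, the same idiom used by before()/after() for sequence numbers.)
 */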
1847
1848/* Undo procedures. */
1849
1850#if FASTRETRANS_DEBUG > 1
1851static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg)
1852{
1853	struct inet_sock *inet = inet_sk(sk);
1854	printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
1855	       msg,
1856	       NIPQUAD(inet->daddr), ntohs(inet->dport),
1857	       tp->snd_cwnd, tp->left_out,
1858	       tp->snd_ssthresh, tp->prior_ssthresh,
1859	       tp->packets_out);
1860}
1861#else
1862#define DBGUNDO(x...) do { } while (0)
1863#endif
1864
1865static void tcp_undo_cwr(struct sock *sk, const int undo)
1866{
1867	struct tcp_sock *tp = tcp_sk(sk);
1868
1869	if (tp->prior_ssthresh) {
1870		const struct inet_connection_sock *icsk = inet_csk(sk);
1871
1872		if (icsk->icsk_ca_ops->undo_cwnd)
1873			tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
1874		else
1875			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
1876
1877		if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
1878			tp->snd_ssthresh = tp->prior_ssthresh;
1879			TCP_ECN_withdraw_cwr(tp);
1880		}
1881	} else {
1882		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
1883	}
1884	tcp_moderate_cwnd(tp);
1885	tp->snd_cwnd_stamp = tcp_time_stamp;
1886
1887	/* There is something screwy going on with the retrans hints after
1888	   an undo */
1889	clear_all_retrans_hints(tp);
1890}
1891
1892static inline int tcp_may_undo(struct tcp_sock *tp)
1893{
1894	return tp->undo_marker &&
1895		(!tp->undo_retrans || tcp_packet_delayed(tp));
1896}
1897
1898/* People celebrate: "We love our President!" */
1899static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
1900{
1901	if (tcp_may_undo(tp)) {
1902		/* Happy end! We did not retransmit anything
1903		 * or our original transmission succeeded.
1904		 */
1905		DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
1906		tcp_undo_cwr(sk, 1);
1907		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
1908			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
1909		else
1910			NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
1911		tp->undo_marker = 0;
1912	}
1913	if (tp->snd_una == tp->high_seq && IsReno(tp)) {
1914		/* Hold old state until something *above* high_seq
1915 *		 is ACKed. For Reno this is a MUST to prevent false
1916		 * fast retransmits (RFC2582). SACK TCP is safe. */
1917		tcp_moderate_cwnd(tp);
1918		return 1;
1919	}
1920	tcp_set_ca_state(sk, TCP_CA_Open);
1921	return 0;
1922}
1923
1924/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
1925static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
1926{
1927	if (tp->undo_marker && !tp->undo_retrans) {
1928		DBGUNDO(sk, tp, "D-SACK");
1929		tcp_undo_cwr(sk, 1);
1930		tp->undo_marker = 0;
1931		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
1932	}
1933}
1934
1935/* Undo during fast recovery after partial ACK. */
1936
1937static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
1938				int acked)
1939{
1940	/* Partial ACK arrived. Force Hoe's retransmit. */
1941	int failed = IsReno(tp) || tp->fackets_out>tp->reordering;
1942
1943	if (tcp_may_undo(tp)) {
1944		/* Plain luck! The hole was filled with a delayed
1945		 * packet, rather than with a retransmit.
1946		 */
1947		if (tp->retrans_out == 0)
1948			tp->retrans_stamp = 0;
1949
1950		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
1951
1952		DBGUNDO(sk, tp, "Hoe");
1953		tcp_undo_cwr(sk, 0);
1954		NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
1955
1956		/* So... Do not make Hoe's retransmit yet.
1957		 * If the first packet was delayed, the rest
1958		 * are most probably delayed as well.
1959		 */
1960		failed = 0;
1961	}
1962	return failed;
1963}
1964
1965/* Undo during loss recovery after partial ACK. */
1966static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
1967{
1968	if (tcp_may_undo(tp)) {
1969		struct sk_buff *skb;
1970		sk_stream_for_retrans_queue(skb, sk) {
1971			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
1972		}
1973
1974		clear_all_retrans_hints(tp);
1975
1976		DBGUNDO(sk, tp, "partial loss");
1977		tp->lost_out = 0;
1978		tp->left_out = tp->sacked_out;
1979		tcp_undo_cwr(sk, 1);
1980		NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
1981		inet_csk(sk)->icsk_retransmits = 0;
1982		tp->undo_marker = 0;
1983		if (!IsReno(tp))
1984			tcp_set_ca_state(sk, TCP_CA_Open);
1985		return 1;
1986	}
1987	return 0;
1988}
1989
1990static inline void tcp_complete_cwr(struct sock *sk)
1991{
1992	struct tcp_sock *tp = tcp_sk(sk);
1993	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
1994	tp->snd_cwnd_stamp = tcp_time_stamp;
1995	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
1996}
1997
1998static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
1999{
2000	tp->left_out = tp->sacked_out;
2001
2002	if (tp->retrans_out == 0)
2003		tp->retrans_stamp = 0;
2004
2005	if (flag&FLAG_ECE)
2006		tcp_enter_cwr(sk);
2007
2008	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
2009		int state = TCP_CA_Open;
2010
2011		if (tp->left_out || tp->retrans_out || tp->undo_marker)
2012			state = TCP_CA_Disorder;
2013
2014		if (inet_csk(sk)->icsk_ca_state != state) {
2015			tcp_set_ca_state(sk, state);
2016			tp->high_seq = tp->snd_nxt;
2017		}
2018		tcp_moderate_cwnd(tp);
2019	} else {
2020		tcp_cwnd_down(sk);
2021	}
2022}
2023
2024static void tcp_mtup_probe_failed(struct sock *sk)
2025{
2026	struct inet_connection_sock *icsk = inet_csk(sk);
2027
2028	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
2029	icsk->icsk_mtup.probe_size = 0;
2030}
2031
2032static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
2033{
2034	struct tcp_sock *tp = tcp_sk(sk);
2035	struct inet_connection_sock *icsk = inet_csk(sk);
2036
2037	/* FIXME: breaks with very large cwnd */
2038	tp->prior_ssthresh = tcp_current_ssthresh(sk);
2039	tp->snd_cwnd = tp->snd_cwnd *
2040		       tcp_mss_to_mtu(sk, tp->mss_cache) /
2041		       icsk->icsk_mtup.probe_size;
2042	tp->snd_cwnd_cnt = 0;
2043	tp->snd_cwnd_stamp = tcp_time_stamp;
2044	tp->rcv_ssthresh = tcp_current_ssthresh(sk);
2045
2046	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
2047	icsk->icsk_mtup.probe_size = 0;
2048	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
2049}
2050
2051
2052/* Process an event which can update packets-in-flight non-trivially.
2053 * The main goal of this function is to calculate a new estimate for left_out,
2054 * taking into account both packets sitting in the receiver's buffer and
2055 * packets lost by the network.
2056 *
2057 * Besides that, it performs CWND reduction when packet loss is detected,
2058 * and it changes the state of the machine.
2059 *
2060 * It does _not_ decide what to send; that is done in
2061 * tcp_xmit_retransmit_queue().
2062 */
2063static void
2064tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
2065		      int prior_packets, int flag)
2066{
2067	struct inet_connection_sock *icsk = inet_csk(sk);
2068	struct tcp_sock *tp = tcp_sk(sk);
2069	int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP));
2070
2071	/* Some technical things:
2072	 * 1. Reno does not count dupacks (sacked_out) automatically. */
2073	if (!tp->packets_out)
2074		tp->sacked_out = 0;
2075	/* 2. SACK counts snd_fack in packets inaccurately. */
2076	if (tp->sacked_out == 0)
2077		tp->fackets_out = 0;
2078
2079	/* Now the state machine starts.
2080	 * A. ECE was set, hence prohibit cwnd undoing; the reduction is required. */
2081	if (flag&FLAG_ECE)
2082		tp->prior_ssthresh = 0;
2083
2084	/* B. In all the states check for reneging SACKs. */
2085	if (tp->sacked_out && tcp_check_sack_reneging(sk))
2086		return;
2087
2088	/* C. Process data loss notification, provided it is valid. */
2089	if ((flag&FLAG_DATA_LOST) &&
2090	    before(tp->snd_una, tp->high_seq) &&
2091	    icsk->icsk_ca_state != TCP_CA_Open &&
2092	    tp->fackets_out > tp->reordering) {
2093		tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
2094		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
2095	}
2096
2097	/* D. Synchronize left_out to current state. */
2098	tcp_sync_left_out(tp);
2099
2100	/* E. Check state exit conditions. State can be terminated
2101	 *    when high_seq is ACKed. */
2102	if (icsk->icsk_ca_state == TCP_CA_Open) {
2103		BUG_TRAP(tp->retrans_out == 0);
2104		tp->retrans_stamp = 0;
2105	} else if (!before(tp->snd_una, tp->high_seq)) {
2106		switch (icsk->icsk_ca_state) {
2107		case TCP_CA_Loss:
2108			icsk->icsk_retransmits = 0;
2109			if (tcp_try_undo_recovery(sk, tp))
2110				return;
2111			break;
2112
2113		case TCP_CA_CWR:
2114			/* CWR is to be held until something *above* high_seq
2115			 * is ACKed, for the CWR bit to reach the receiver. */
2116			if (tp->snd_una != tp->high_seq) {
2117				tcp_complete_cwr(sk);
2118				tcp_set_ca_state(sk, TCP_CA_Open);
2119			}
2120			break;
2121
2122		case TCP_CA_Disorder:
2123			tcp_try_undo_dsack(sk, tp);
2124			if (!tp->undo_marker ||
2125			    /* In the SACK case do not Open yet, to allow undo
2126			     * to catch all duplicate ACKs. */
2127			    IsReno(tp) || tp->snd_una != tp->high_seq) {
2128				tp->undo_marker = 0;
2129				tcp_set_ca_state(sk, TCP_CA_Open);
2130			}
2131			break;
2132
2133		case TCP_CA_Recovery:
2134			if (IsReno(tp))
2135				tcp_reset_reno_sack(tp);
2136			if (tcp_try_undo_recovery(sk, tp))
2137				return;
2138			tcp_complete_cwr(sk);
2139			break;
2140		}
2141	}
2142
2143	/* F. Process state. */
2144	switch (icsk->icsk_ca_state) {
2145	case TCP_CA_Recovery:
2146		if (prior_snd_una == tp->snd_una) {
2147			if (IsReno(tp) && is_dupack)
2148				tcp_add_reno_sack(sk);
2149		} else {
2150			int acked = prior_packets - tp->packets_out;
2151			if (IsReno(tp))
2152				tcp_remove_reno_sacks(sk, tp, acked);
2153			is_dupack = tcp_try_undo_partial(sk, tp, acked);
2154		}
2155		break;
2156	case TCP_CA_Loss:
2157		if (flag&FLAG_DATA_ACKED)
2158			icsk->icsk_retransmits = 0;
2159		if (!tcp_try_undo_loss(sk, tp)) {
2160			tcp_moderate_cwnd(tp);
2161			tcp_xmit_retransmit_queue(sk);
2162			return;
2163		}
2164		if (icsk->icsk_ca_state != TCP_CA_Open)
2165			return;
2166		/* Loss is undone; fall through to processing in Open state. */
2167	default:
2168		if (IsReno(tp)) {
2169			if (tp->snd_una != prior_snd_una)
2170				tcp_reset_reno_sack(tp);
2171			if (is_dupack)
2172				tcp_add_reno_sack(sk);
2173		}
2174
2175		if (icsk->icsk_ca_state == TCP_CA_Disorder)
2176			tcp_try_undo_dsack(sk, tp);
2177
2178		if (!tcp_time_to_recover(sk, tp)) {
2179			tcp_try_to_open(sk, tp, flag);
2180			return;
2181		}
2182
2183		/* MTU probe failure: don't reduce cwnd */
2184		if (icsk->icsk_ca_state < TCP_CA_CWR &&
2185		    icsk->icsk_mtup.probe_size &&
2186		    tp->snd_una == tp->mtu_probe.probe_seq_start) {
2187			tcp_mtup_probe_failed(sk);
2188			/* Restores the reduction we did in tcp_mtup_probe() */
2189			tp->snd_cwnd++;
2190			tcp_simple_retransmit(sk);
2191			return;
2192		}
2193
2194		/* Otherwise enter Recovery state */
2195
2196		if (IsReno(tp))
2197			NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
2198		else
2199			NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
2200
2201		tp->high_seq = tp->snd_nxt;
2202		tp->prior_ssthresh = 0;
2203		tp->undo_marker = tp->snd_una;
2204		tp->undo_retrans = tp->retrans_out;
2205
2206		if (icsk->icsk_ca_state < TCP_CA_CWR) {
2207			if (!(flag&FLAG_ECE))
2208				tp->prior_ssthresh = tcp_current_ssthresh(sk);
2209			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
2210			TCP_ECN_queue_cwr(tp);
2211		}
2212
2213		tp->bytes_acked = 0;
2214		tp->snd_cwnd_cnt = 0;
2215		tcp_set_ca_state(sk, TCP_CA_Recovery);
2216	}
2217
2218	if (is_dupack || tcp_head_timedout(sk, tp))
2219		tcp_update_scoreboard(sk, tp);
2220	tcp_cwnd_down(sk);
2221	tcp_xmit_retransmit_queue(sk);
2222}
2223
2224/* Read draft-ietf-tcplw-high-performance before mucking
2225 * with this code. (Supersedes RFC1323)
2226 */
2227static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
2228{
2229	/* RTTM Rule: A TSecr value received in a segment is used to
2230	 * update the averaged RTT measurement only if the segment
2231	 * acknowledges some new data, i.e., only if it advances the
2232	 * left edge of the send window.
2233	 *
2234	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
2235	 * 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
2236	 *
2237	 * Changed: reset backoff as soon as we see the first valid sample.
2238	 * If we do not, we get strongly overestimated rto. With timestamps
2239	 * samples are accepted even from very old segments: f.e., when rtt=1
2240	 * increases to 8, we retransmit 5 times and after 8 seconds delayed
2241	 * answer arrives rto becomes 120 seconds! If at least one of segments
2242	 * in window is lost... Voila.	 			--ANK (010210)
2243	 */
2244	struct tcp_sock *tp = tcp_sk(sk);
2245	const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
2246	tcp_rtt_estimator(sk, seq_rtt);
2247	tcp_set_rto(sk);
2248	inet_csk(sk)->icsk_backoff = 0;
2249	tcp_bound_rto(sk);
2250}
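
/* Worked example (illustrative, HZ == 1000): if tcp_time_stamp is 1000 and
 * the echoed rcv_tsecr is 960, seq_rtt above is 40 jiffies, i.e. a 40 ms
 * sample fed to the RTT estimator.
 */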
2251
2252static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
2253{
2254	/* We don't have a timestamp. Can only use
2255	 * packets that are not retransmitted to determine
2256	 * rtt estimates. Also, we must not reset the
2257	 * backoff for rto until we get a non-retransmitted
2258	 * packet. This allows us to deal with a situation
2259	 * where the network delay has increased suddenly.
2260	 * I.e. Karn's algorithm. (SIGCOMM '87, p5.)
2261	 */
2262
2263	if (flag & FLAG_RETRANS_DATA_ACKED)
2264		return;
2265
2266	tcp_rtt_estimator(sk, seq_rtt);
2267	tcp_set_rto(sk);
2268	inet_csk(sk)->icsk_backoff = 0;
2269	tcp_bound_rto(sk);
2270}
2271
2272static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
2273				      const s32 seq_rtt)
2274{
2275	const struct tcp_sock *tp = tcp_sk(sk);
2276	/* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
2277	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
2278		tcp_ack_saw_tstamp(sk, flag);
2279	else if (seq_rtt >= 0)
2280		tcp_ack_no_tstamp(sk, seq_rtt, flag);
2281}
2282
2283static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
2284			   u32 in_flight, int good)
2285{
2286	const struct inet_connection_sock *icsk = inet_csk(sk);
2287	icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
2288	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
2289}
2290
2291/* Restart the timer after forward progress on the connection.
2292 * RFC2988 recommends restarting the timer to now+rto.
2293 */
2294
2295static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
2296{
2297	if (!tp->packets_out) {
2298		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
2299	} else {
2300		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
2301	}
2302}
2303
2304static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
2305			 __u32 now, __s32 *seq_rtt)
2306{
2307	struct tcp_sock *tp = tcp_sk(sk);
2308	struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
2309	__u32 seq = tp->snd_una;
2310	__u32 packets_acked;
2311	int acked = 0;
2312
2313	/* If we get here, the whole TSO packet has not been
2314	 * acked.
2315	 */
2316	BUG_ON(!after(scb->end_seq, seq));
2317
2318	packets_acked = tcp_skb_pcount(skb);
2319	if (tcp_trim_head(sk, skb, seq - scb->seq))
2320		return 0;
2321	packets_acked -= tcp_skb_pcount(skb);
2322
2323	if (packets_acked) {
2324		__u8 sacked = scb->sacked;
2325
2326		acked |= FLAG_DATA_ACKED;
2327		if (sacked) {
2328			if (sacked & TCPCB_RETRANS) {
2329				if (sacked & TCPCB_SACKED_RETRANS)
2330					tp->retrans_out -= packets_acked;
2331				acked |= FLAG_RETRANS_DATA_ACKED;
2332				*seq_rtt = -1;
2333			} else if (*seq_rtt < 0)
2334				*seq_rtt = now - scb->when;
2335			if (sacked & TCPCB_SACKED_ACKED)
2336				tp->sacked_out -= packets_acked;
2337			if (sacked & TCPCB_LOST)
2338				tp->lost_out -= packets_acked;
2339			if (sacked & TCPCB_URG) {
2340				if (tp->urg_mode &&
2341				    !before(seq, tp->snd_up))
2342					tp->urg_mode = 0;
2343			}
2344		} else if (*seq_rtt < 0)
2345			*seq_rtt = now - scb->when;
2346
2347		if (tp->fackets_out) {
2348			__u32 dval = min(tp->fackets_out, packets_acked);
2349			tp->fackets_out -= dval;
2350		}
2351		tp->packets_out -= packets_acked;
2352
2353		BUG_ON(tcp_skb_pcount(skb) == 0);
2354		BUG_ON(!before(scb->seq, scb->end_seq));
2355	}
2356
2357	return acked;
2358}
2359
2360static u32 tcp_usrtt(struct timeval *tv)
2361{
2362	struct timeval now;
2363
2364	do_gettimeofday(&now);
2365	return (now.tv_sec - tv->tv_sec) * 1000000 + (now.tv_usec - tv->tv_usec);
2366}
2367
2368/* Remove acknowledged frames from the retransmission queue. */
2369static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
2370{
2371	struct tcp_sock *tp = tcp_sk(sk);
2372	const struct inet_connection_sock *icsk = inet_csk(sk);
2373	struct sk_buff *skb;
2374	__u32 now = tcp_time_stamp;
2375	int acked = 0;
2376	__s32 seq_rtt = -1;
2377	u32 pkts_acked = 0;
2378	void (*rtt_sample)(struct sock *sk, u32 usrtt)
2379		= icsk->icsk_ca_ops->rtt_sample;
2380	struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
2381
2382	while ((skb = skb_peek(&sk->sk_write_queue)) &&
2383	       skb != sk->sk_send_head) {
2384		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
2385		__u8 sacked = scb->sacked;
2386
2387		/* If our packet is before the ack sequence we can
2388		 * discard it as it's confirmed to have arrived at
2389		 * the other end.
2390		 */
2391		if (after(scb->end_seq, tp->snd_una)) {
2392			if (tcp_skb_pcount(skb) > 1 &&
2393			    after(tp->snd_una, scb->seq))
2394				acked |= tcp_tso_acked(sk, skb,
2395						       now, &seq_rtt);
2396			break;
2397		}
2398
2399		/* Initial outgoing SYNs get put onto the write_queue
2400		 * just like anything else we transmit.  It is not
2401		 * true data, and if we misinform our callers that
2402		 * this ACK acks real data, we will erroneously exit
2403		 * connection startup slow start one packet too
2404		 * quickly.  This is severely frowned upon behavior.
2405		 */
2406		if (!(scb->flags & TCPCB_FLAG_SYN)) {
2407			acked |= FLAG_DATA_ACKED;
2408			++pkts_acked;
2409		} else {
2410			acked |= FLAG_SYN_ACKED;
2411			tp->retrans_stamp = 0;
2412		}
2413
2414		/* MTU probing checks */
2415		if (icsk->icsk_mtup.probe_size) {
2416			if (!after(tp->mtu_probe.probe_seq_end, TCP_SKB_CB(skb)->end_seq)) {
2417				tcp_mtup_probe_success(sk, skb);
2418			}
2419		}
2420
2421		if (sacked) {
2422			if (sacked & TCPCB_RETRANS) {
2423				if(sacked & TCPCB_SACKED_RETRANS)
2424					tp->retrans_out -= tcp_skb_pcount(skb);
2425				acked |= FLAG_RETRANS_DATA_ACKED;
2426				seq_rtt = -1;
2427			} else if (seq_rtt < 0) {
2428				seq_rtt = now - scb->when;
2429				skb_get_timestamp(skb, &tv);
2430			}
2431			if (sacked & TCPCB_SACKED_ACKED)
2432				tp->sacked_out -= tcp_skb_pcount(skb);
2433			if (sacked & TCPCB_LOST)
2434				tp->lost_out -= tcp_skb_pcount(skb);
2435			if (sacked & TCPCB_URG) {
2436				if (tp->urg_mode &&
2437				    !before(scb->end_seq, tp->snd_up))
2438					tp->urg_mode = 0;
2439			}
2440		} else if (seq_rtt < 0) {
2441			seq_rtt = now - scb->when;
2442			skb_get_timestamp(skb, &tv);
2443		}
2444		tcp_dec_pcount_approx(&tp->fackets_out, skb);
2445		tcp_packets_out_dec(tp, skb);
2446		__skb_unlink(skb, &sk->sk_write_queue);
2447		sk_stream_free_skb(sk, skb);
2448		clear_all_retrans_hints(tp);
2449	}
2450
2451	if (acked&FLAG_ACKED) {
2452		tcp_ack_update_rtt(sk, acked, seq_rtt);
2453		tcp_ack_packets_out(sk, tp);
2454		if (rtt_sample && !(acked & FLAG_RETRANS_DATA_ACKED))
2455			(*rtt_sample)(sk, tcp_usrtt(&tv));
2456
2457		if (icsk->icsk_ca_ops->pkts_acked)
2458			icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked);
2459	}
2460
2461#if FASTRETRANS_DEBUG > 0
2462	BUG_TRAP((int)tp->sacked_out >= 0);
2463	BUG_TRAP((int)tp->lost_out >= 0);
2464	BUG_TRAP((int)tp->retrans_out >= 0);
2465	if (!tp->packets_out && tp->rx_opt.sack_ok) {
2466		const struct inet_connection_sock *icsk = inet_csk(sk);
2467		if (tp->lost_out) {
2468			printk(KERN_DEBUG "Leak l=%u %d\n",
2469			       tp->lost_out, icsk->icsk_ca_state);
2470			tp->lost_out = 0;
2471		}
2472		if (tp->sacked_out) {
2473			printk(KERN_DEBUG "Leak s=%u %d\n",
2474			       tp->sacked_out, icsk->icsk_ca_state);
2475			tp->sacked_out = 0;
2476		}
2477		if (tp->retrans_out) {
2478			printk(KERN_DEBUG "Leak r=%u %d\n",
2479			       tp->retrans_out, icsk->icsk_ca_state);
2480			tp->retrans_out = 0;
2481		}
2482	}
2483#endif
2484	*seq_rtt_p = seq_rtt;
2485	return acked;
2486}
2487
2488static void tcp_ack_probe(struct sock *sk)
2489{
2490	const struct tcp_sock *tp = tcp_sk(sk);
2491	struct inet_connection_sock *icsk = inet_csk(sk);
2492
2493	/* Did this ACK open a usable window? */
2494
2495	if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
2496		   tp->snd_una + tp->snd_wnd)) {
2497		icsk->icsk_backoff = 0;
2498		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
2499		/* The socket must be woken up by a subsequent tcp_data_snd_check().
2500		 * This function is not for arbitrary use!
2501		 */
2502	} else {
2503		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2504					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2505					  TCP_RTO_MAX);
2506	}
2507}
2508
2509static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
2510{
2511	return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
2512		inet_csk(sk)->icsk_ca_state != TCP_CA_Open);
2513}
2514
2515static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
2516{
2517	const struct tcp_sock *tp = tcp_sk(sk);
2518	return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
2519		!((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR));
2520}
2521
2522/* Check that the window update is acceptable.
2523 * The function assumes that snd_una<=ack<=snd_nxt.
2524 */
2525static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
2526					const u32 ack_seq, const u32 nwin)
2527{
2528	return (after(ack, tp->snd_una) ||
2529		after(ack_seq, tp->snd_wl1) ||
2530		(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd));
2531}
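
/* F.e. (illustrative): an ACK that advances snd_una always passes the check
 * above; a pure duplicate ACK (ack == snd_una, ack_seq == snd_wl1,
 * nwin == snd_wnd) fails all three clauses, so a dupack never changes the
 * send window.
 */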
2532
2533/* Update our send window.
2534 *
2535 * The window update algorithm described in RFC793/RFC1122 (used in linux-2.2
2536 * and in FreeBSD; NetBSD's is even worse) is wrong.
2537 */
2538static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
2539				 struct sk_buff *skb, u32 ack, u32 ack_seq)
2540{
2541	int flag = 0;
2542	u32 nwin = ntohs(skb->h.th->window);
2543
2544	if (likely(!skb->h.th->syn))
2545		nwin <<= tp->rx_opt.snd_wscale;
2546
2547	if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
2548		flag |= FLAG_WIN_UPDATE;
2549		tcp_update_wl(tp, ack, ack_seq);
2550
2551		if (tp->snd_wnd != nwin) {
2552			tp->snd_wnd = nwin;
2553
2554			/* Note: this is the only place where the
2555			 * fast path is re-enabled for the sending side.
2556			 */
2557			tp->pred_flags = 0;
2558			tcp_fast_path_check(sk, tp);
2559
2560			if (nwin > tp->max_window) {
2561				tp->max_window = nwin;
2562				tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
2563			}
2564		}
2565	}
2566
2567	tp->snd_una = ack;
2568
2569	return flag;
2570}
2571
2572/* A very conservative spurious RTO response algorithm: reduce cwnd and
2573 * continue in congestion avoidance.
2574 */
2575static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
2576{
2577	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
2578	tp->snd_cwnd_cnt = 0;
2579	tcp_moderate_cwnd(tp);
2580}
2581
2582/* F-RTO spurious RTO detection algorithm (RFC4138)
2583 *
2584 * F-RTO operates during the two new ACKs following an RTO (well, almost; see
2585 * inline comments). State (ACK number) is kept in frto_counter. When an ACK
2586 * advances the window (but not to or beyond the highest sequence sent before RTO):
2587 *   On First ACK,  send two new segments out.
2588 *   On Second ACK, RTO was likely spurious. Do spurious response (response
2589 *                  algorithm is not part of the F-RTO detection algorithm
2590 *                  given in RFC4138 but can be selected separately).
2591 * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss
2592 * and TCP falls back to conventional RTO recovery.
2593 *
2594 * Rationale: if the RTO was spurious, new ACKs should arrive from the
2595 * original window even after we transmit two new data segments.
2596 *
2597 * SACK version:
2598 *   in the first step, wait until the first cumulative ACK arrives, then move
2599 *   to the second step. In the second step, the next ACK decides.
2600 *
2601 * F-RTO is implemented (mainly) in four functions:
2602 *   - tcp_use_frto() is used to determine if TCP can use F-RTO
2603 *   - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used, it is
2604 *     called when tcp_use_frto() showed green light
2605 *   - tcp_process_frto() handles incoming ACKs during F-RTO algorithm
2606 *   - tcp_enter_frto_loss() is called if there is not enough evidence
2607 *     to prove that the RTO is indeed spurious. It transfers the control
2608 *     from F-RTO to the conventional RTO recovery
2609 */
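
/* Illustrative timeline for the non-SACK variant described above:
 *
 *   RTO fires                 -> tcp_enter_frto(), frto_counter = 1
 *   1st new ACK advances una  -> send two new segments, frto_counter = 2
 *   2nd new ACK advances una  -> RTO judged spurious, do spurious response
 *   duplicate ACK instead     -> tcp_enter_frto_loss(), conventional recovery
 */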
2610static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
2611{
2612	struct tcp_sock *tp = tcp_sk(sk);
2613
2614	tcp_sync_left_out(tp);
2615
2616	/* Duplicate the behavior from Loss state (fastretrans_alert) */
2617	if (flag&FLAG_DATA_ACKED)
2618		inet_csk(sk)->icsk_retransmits = 0;
2619
2620	if (!before(tp->snd_una, tp->frto_highmark)) {
2621		tcp_enter_frto_loss(sk, tp->frto_counter + 1, flag);
2622		return 1;
2623	}
2624
2625	if (!IsSackFrto() || IsReno(tp)) {
2626		/* RFC4138 shortcoming in step 2; it should also have case (c):
2627		 * the ACK is neither a duplicate nor advances the window, e.g.,
2628		 * opposite-direction data or a window update
2629		 */
2630		if ((tp->snd_una == prior_snd_una) && (flag&FLAG_NOT_DUP) &&
2631		    !(flag&FLAG_FORWARD_PROGRESS))
2632			return 1;
2633
2634		if (!(flag&FLAG_DATA_ACKED)) {
2635			tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
2636					    flag);
2637			return 1;
2638		}
2639	} else {
2640		if (!(flag&FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
2641			/* Prevent sending of new data. */
2642			tp->snd_cwnd = min(tp->snd_cwnd,
2643					   tcp_packets_in_flight(tp));
2644			return 1;
2645		}
2646
2647		if ((tp->frto_counter == 2) &&
2648		    (!(flag&FLAG_FORWARD_PROGRESS) ||
2649		     ((flag&FLAG_DATA_SACKED) && !(flag&FLAG_ONLY_ORIG_SACKED)))) {
2650			/* RFC4138 shortcoming (see comment above) */
2651			if (!(flag&FLAG_FORWARD_PROGRESS) && (flag&FLAG_NOT_DUP))
2652				return 1;
2653
2654			tcp_enter_frto_loss(sk, 3, flag);
2655			return 1;
2656		}
2657	}
2658
2659	if (tp->frto_counter == 1) {
2660		tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
2661		tp->frto_counter = 2;
2662		return 1;
2663	} else /* frto_counter == 2 */ {
2664		tcp_conservative_spur_to_response(tp);
2665		tp->frto_counter = 0;
2666	}
2667	return 0;
2668}
2669
2670/* This routine deals with incoming acks, but not outgoing ones. */
2671static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2672{
2673	struct inet_connection_sock *icsk = inet_csk(sk);
2674	struct tcp_sock *tp = tcp_sk(sk);
2675	u32 prior_snd_una = tp->snd_una;
2676	u32 ack_seq = TCP_SKB_CB(skb)->seq;
2677	u32 ack = TCP_SKB_CB(skb)->ack_seq;
2678	u32 prior_in_flight;
2679	s32 seq_rtt;
2680	int prior_packets;
2681	int frto_cwnd = 0;
2682
2683	/* If the ack is newer than sent or older than previous acks
2684	 * then we can probably ignore it.
2685	 */
2686	if (after(ack, tp->snd_nxt))
2687		goto uninteresting_ack;
2688
2689	if (before(ack, prior_snd_una))
2690		goto old_ack;
2691
2692	if (sysctl_tcp_abc) {
2693		if (icsk->icsk_ca_state < TCP_CA_CWR)
2694			tp->bytes_acked += ack - prior_snd_una;
2695		else if (icsk->icsk_ca_state == TCP_CA_Loss)
2696			/* we assume just one segment is left in the network */
2697			tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
2698	}
2699
2700	if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
2701		/* Window is constant, pure forward advance.
2702		 * No more checks are required.
2703		 * Note, we use the fact that SND.UNA>=SND.WL2.
2704		 */
2705		tcp_update_wl(tp, ack, ack_seq);
2706		tp->snd_una = ack;
2707		flag |= FLAG_WIN_UPDATE;
2708
2709		tcp_ca_event(sk, CA_EVENT_FAST_ACK);
2710
2711		NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
2712	} else {
2713		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
2714			flag |= FLAG_DATA;
2715		else
2716			NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
2717
2718		flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
2719
2720		if (TCP_SKB_CB(skb)->sacked)
2721			flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
2722
2723		if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
2724			flag |= FLAG_ECE;
2725
2726		tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
2727	}
2728
2729	/* We passed data and got it acked, remove any soft error
2730	 * log. Something worked...
2731	 */
2732	sk->sk_err_soft = 0;
2733	tp->rcv_tstamp = tcp_time_stamp;
2734	prior_packets = tp->packets_out;
2735	if (!prior_packets)
2736		goto no_queue;
2737
2738	prior_in_flight = tcp_packets_in_flight(tp);
2739
2740	/* See if we can take anything off of the retransmit queue. */
2741	flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
2742
2743	if (tp->frto_counter)
2744		frto_cwnd = tcp_process_frto(sk, prior_snd_una, flag);
2745
2746	if (tcp_ack_is_dubious(sk, flag)) {
2747		/* Advance CWND, if state allows this. */
2748		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
2749		    tcp_may_raise_cwnd(sk, flag))
2750			tcp_cong_avoid(sk, ack,  seq_rtt, prior_in_flight, 0);
2751		tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
2752	} else {
2753		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
2754			tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
2755	}
2756
2757	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
2758		dst_confirm(sk->sk_dst_cache);
2759
2760	return 1;
2761
2762no_queue:
2763	icsk->icsk_probes_out = 0;
2764
2765	/* If this ack opens up a zero window, clear backoff.  It was
2766	 * being used to time the probes, and is probably far higher than
2767	 * it needs to be for normal retransmission.
2768	 */
2769	if (sk->sk_send_head)
2770		tcp_ack_probe(sk);
2771	return 1;
2772
2773old_ack:
2774	if (TCP_SKB_CB(skb)->sacked)
2775		tcp_sacktag_write_queue(sk, skb, prior_snd_una);
2776
2777uninteresting_ack:
2778	SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
2779	return 0;
2780}
2781
2782
2783/* Look for tcp options. Normally only called on SYN and SYNACK packets.
2784 * But, this can also be called on packets in the established flow when
2785 * the fast version below fails.
2786 */
2787void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
2788{
2789	unsigned char *ptr;
2790	struct tcphdr *th = skb->h.th;
2791	int length=(th->doff*4)-sizeof(struct tcphdr);
2792
2793	ptr = (unsigned char *)(th + 1);
2794	opt_rx->saw_tstamp = 0;
2795
2796	while(length>0) {
2797		int opcode=*ptr++;
2798		int opsize;
2799
2800		switch (opcode) {
2801			case TCPOPT_EOL:
2802				return;
2803			case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
2804				length--;
2805				continue;
2806			default:
2807				opsize=*ptr++;
2808				if (opsize < 2) /* "silly options" */
2809					return;
2810				if (opsize > length)
2811					return;	/* don't parse partial options */
2812				switch(opcode) {
2813				case TCPOPT_MSS:
2814					if(opsize==TCPOLEN_MSS && th->syn && !estab) {
2815						u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
2816						if (in_mss) {
2817							if (opt_rx->user_mss && opt_rx->user_mss < in_mss)
2818								in_mss = opt_rx->user_mss;
2819							opt_rx->mss_clamp = in_mss;
2820						}
2821					}
2822					break;
2823				case TCPOPT_WINDOW:
2824					if(opsize==TCPOLEN_WINDOW && th->syn && !estab)
2825						if (sysctl_tcp_window_scaling) {
2826							__u8 snd_wscale = *(__u8 *) ptr;
2827							opt_rx->wscale_ok = 1;
2828							if (snd_wscale > 14) {
2829								if(net_ratelimit())
2830									printk(KERN_INFO "tcp_parse_options: Illegal window "
2831									       "scaling value %d >14 received.\n",
2832									       snd_wscale);
2833								snd_wscale = 14;
2834							}
2835							opt_rx->snd_wscale = snd_wscale;
2836						}
2837					break;
2838				case TCPOPT_TIMESTAMP:
2839					if(opsize==TCPOLEN_TIMESTAMP) {
2840						if ((estab && opt_rx->tstamp_ok) ||
2841						    (!estab && sysctl_tcp_timestamps)) {
2842							opt_rx->saw_tstamp = 1;
2843							opt_rx->rcv_tsval = ntohl(get_unaligned((__be32 *)ptr));
2844							opt_rx->rcv_tsecr = ntohl(get_unaligned((__be32 *)(ptr+4)));
2845						}
2846					}
2847					break;
2848				case TCPOPT_SACK_PERM:
2849					if(opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
2850						if (sysctl_tcp_sack) {
2851							opt_rx->sack_ok = 1;
2852							tcp_sack_reset(opt_rx);
2853						}
2854					}
2855					break;
2856
2857				case TCPOPT_SACK:
2858					if((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
2859					   !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
2860					   opt_rx->sack_ok) {
2861						TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
2862					}
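					/* Note: this case falls through to the
					 * MD5SIG case below, which only breaks. */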
2863#ifdef CONFIG_TCP_MD5SIG
2864				case TCPOPT_MD5SIG:
2865					/*
2866					 * The MD5 Hash has already been
2867					 * checked (see tcp_v{4,6}_do_rcv()).
2868					 */
2869					break;
2870#endif
2871				}
2872				ptr+=opsize-2;
2873				length-=opsize;
2874	}
2875	}
2876}
2877
2878/* Fast parse options. This hopes to only see timestamps.
2879 * If it is wrong it falls back on tcp_parse_options().
2880 */
2881static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
2882				  struct tcp_sock *tp)
2883{
2884	if (th->doff == sizeof(struct tcphdr)>>2) {
2885		tp->rx_opt.saw_tstamp = 0;
2886		return 0;
2887	} else if (tp->rx_opt.tstamp_ok &&
2888		   th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
2889		__be32 *ptr = (__be32 *)(th + 1);
2890		if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
2891				  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
2892			tp->rx_opt.saw_tstamp = 1;
2893			++ptr;
2894			tp->rx_opt.rcv_tsval = ntohl(*ptr);
2895			++ptr;
2896			tp->rx_opt.rcv_tsecr = ntohl(*ptr);
2897			return 1;
2898		}
2899	}
2900	tcp_parse_options(skb, &tp->rx_opt, 1);
2901	return 1;
2902}
2903
2904static inline void tcp_store_ts_recent(struct tcp_sock *tp)
2905{
2906	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
2907	tp->rx_opt.ts_recent_stamp = xtime.tv_sec;
2908}
2909
2910static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
2911{
2912	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
2913		/* PAWS bug workaround wrt. ACK frames: the PAWS discard
2914		 * extra check below makes sure this can only happen
2915		 * for pure ACK frames.  -DaveM
2916		 *
2917		 * Not only that; it also occurs for expired timestamps.
2918		 */
2919
2920		if((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
2921		   xtime.tv_sec >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
2922			tcp_store_ts_recent(tp);
2923	}
2924}
2925
2926/* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
2927 *
2928 * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
2929 * it can pass through stack. So, the following predicate verifies that
2930 * this segment is not used for anything but congestion avoidance or
2931 * fast retransmit. Moreover, we even are able to eliminate most of such
2932 * second order effects, if we apply some small "replay" window (~RTO)
2933 * to timestamp space.
2934 *
2935 * All these measures still do not guarantee that we reject wrapped ACKs
2936 * on networks with high bandwidth, where sequence space is recycled quickly,
2937 * but they guarantee that such events will be very rare and will not affect
2938 * the connection seriously. This doesn't look nice, but alas, PAWS is really
2939 * a buggy extension.
2940 *
2941 * [ Later note. Even worse! It is buggy for segments _with_ data. The RFC
2942 * states that events where a retransmit arrives after the original data are
2943 * rare. That is a blatant lie. VJ forgot about fast retransmit! 8)8) It is
2944 * the biggest problem on large power networks even with minor reordering.
2945 * OK, let's give it a small replay window. If the peer clock runs at even
2946 * 1 Hz, it is safe up to a bandwidth of 18 Gigabit/sec. 8) ]
2947 */
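
/* (Illustrative arithmetic behind the 1 Hz claim: wrapping half of the
 * 2^32-byte sequence space within a single timestamp tick requires more
 * than 2^31 bytes/sec, i.e. roughly 17-18 Gbit/sec.)
 */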
2948
2949static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
2950{
2951	struct tcp_sock *tp = tcp_sk(sk);
2952	struct tcphdr *th = skb->h.th;
2953	u32 seq = TCP_SKB_CB(skb)->seq;
2954	u32 ack = TCP_SKB_CB(skb)->ack_seq;
2955
2956	return (/* 1. Pure ACK with correct sequence number. */
2957		(th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
2958
2959		/* 2. ... and duplicate ACK. */
2960		ack == tp->snd_una &&
2961
2962		/* 3. ... and does not update window. */
2963		!tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
2964
2965		/* 4. ... and sits in replay window. */
2966		(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
2967}
2968
2969static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb)
2970{
2971	const struct tcp_sock *tp = tcp_sk(sk);
2972	return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
2973		xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
2974		!tcp_disordered_ack(sk, skb));
2975}
2976
2977/* Check segment sequence number for validity.
2978 *
2979 * Segment controls are considered valid, if the segment
2980 * fits to the window after truncation to the window. Acceptability
2981 * of data (and SYN, FIN, of course) is checked separately.
2982 * See tcp_data_queue(), for example.
2983 *
2984 * Also, controls (RST is the main one) are accepted using RCV.WUP instead
2985 * of RCV.NXT. The peer may not yet have advanced its SND.UNA when we
2986 * delayed the ACK, so that his SND.UNA <= our RCV.WUP.
2987 * (borrowed from FreeBSD)
2988 */
2989
2990static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq)
2991{
2992	return	!before(end_seq, tp->rcv_wup) &&
2993		!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
2994}
2995
2996/* When we get a reset we do this. */
2997static void tcp_reset(struct sock *sk)
2998{
2999	/* We want the right error as BSD sees it (and indeed as we do). */
3000	switch (sk->sk_state) {
3001		case TCP_SYN_SENT:
3002			sk->sk_err = ECONNREFUSED;
3003			break;
3004		case TCP_CLOSE_WAIT:
3005			sk->sk_err = EPIPE;
3006			break;
3007		case TCP_CLOSE:
3008			return;
3009		default:
3010			sk->sk_err = ECONNRESET;
3011	}
3012
3013	if (!sock_flag(sk, SOCK_DEAD))
3014		sk->sk_error_report(sk);
3015
3016	tcp_done(sk);
3017}
3018
3019/*
3020 * 	Process the FIN bit. This now behaves as it is supposed to work
3021 *	and the FIN takes effect only when it is validly part of the sequence
3022 *	space, not earlier while there are still holes.
3023 *
3024 *	If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
3025 *	(and thence onto LAST-ACK and finally, CLOSE, we never enter
3026 *	TIME-WAIT)
3027 *
3028 *	If we are in FINWAIT-1, a received FIN indicates simultaneous
3029 *	close and we go into CLOSING (and later onto TIME-WAIT)
3030 *
3031 *	If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
3032 */
3033static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
3034{
3035	struct tcp_sock *tp = tcp_sk(sk);
3036
3037	inet_csk_schedule_ack(sk);
3038
3039	sk->sk_shutdown |= RCV_SHUTDOWN;
3040	sock_set_flag(sk, SOCK_DONE);
3041
3042	switch (sk->sk_state) {
3043		case TCP_SYN_RECV:
3044		case TCP_ESTABLISHED:
3045			/* Move to CLOSE_WAIT */
3046			tcp_set_state(sk, TCP_CLOSE_WAIT);
3047			inet_csk(sk)->icsk_ack.pingpong = 1;
3048			break;
3049
3050		case TCP_CLOSE_WAIT:
3051		case TCP_CLOSING:
3052			/* Received a retransmission of the FIN, do
3053			 * nothing.
3054			 */
3055			break;
3056		case TCP_LAST_ACK:
3057			/* RFC793: Remain in the LAST-ACK state. */
3058			break;
3059
3060		case TCP_FIN_WAIT1:
3061			/* This case occurs when a simultaneous close
3062			 * happens, we must ack the received FIN and
3063			 * enter the CLOSING state.
3064			 */
3065			tcp_send_ack(sk);
3066			tcp_set_state(sk, TCP_CLOSING);
3067			break;
3068		case TCP_FIN_WAIT2:
3069			/* Received a FIN -- send ACK and enter TIME_WAIT. */
3070			tcp_send_ack(sk);
3071			tcp_time_wait(sk, TCP_TIME_WAIT, 0);
3072			break;
3073		default:
3074			/* Only TCP_LISTEN and TCP_CLOSE are left, in these
3075			 * cases we should never reach this piece of code.
3076			 */
3077			printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
3078			       __FUNCTION__, sk->sk_state);
3079			break;
3080	}
3081
3082	/* It _is_ possible that we have out-of-order data _after_ the FIN.
3083	 * Probably we should reset in this case; for now, drop it.
3084	 */
3085	__skb_queue_purge(&tp->out_of_order_queue);
3086	if (tp->rx_opt.sack_ok)
3087		tcp_sack_reset(&tp->rx_opt);
3088	sk_stream_mem_reclaim(sk);
3089
3090	if (!sock_flag(sk, SOCK_DEAD)) {
3091		sk->sk_state_change(sk);
3092
3093		/* Do not send POLL_HUP for half duplex close. */
3094		if (sk->sk_shutdown == SHUTDOWN_MASK ||
3095		    sk->sk_state == TCP_CLOSE)
3096			sk_wake_async(sk, 1, POLL_HUP);
3097		else
3098			sk_wake_async(sk, 1, POLL_IN);
3099	}
3100}
3101
3102static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
3103{
3104	if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
3105		if (before(seq, sp->start_seq))
3106			sp->start_seq = seq;
3107		if (after(end_seq, sp->end_seq))
3108			sp->end_seq = end_seq;
3109		return 1;
3110	}
3111	return 0;
3112}
3113
3114static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
3115{
3116	if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
3117		if (before(seq, tp->rcv_nxt))
3118			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
3119		else
3120			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
3121
3122		tp->rx_opt.dsack = 1;
3123		tp->duplicate_sack[0].start_seq = seq;
3124		tp->duplicate_sack[0].end_seq = end_seq;
3125		tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1, 4 - tp->rx_opt.tstamp_ok);
3126	}
3127}
3128
3129static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
3130{
3131	if (!tp->rx_opt.dsack)
3132		tcp_dsack_set(tp, seq, end_seq);
3133	else
3134		tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
3135}
3136
3137static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
3138{
3139	struct tcp_sock *tp = tcp_sk(sk);
3140
3141	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
3142	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
3143		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
3144		tcp_enter_quickack_mode(sk);
3145
3146		if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
3147			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
3148
3149			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
3150				end_seq = tp->rcv_nxt;
3151			tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, end_seq);
3152		}
3153	}
3154
3155	tcp_send_ack(sk);
3156}
3157
3158/* These routines update the SACK block as out-of-order packets arrive or
3159 * in-order packets close up the sequence space.
3160 */
3161static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
3162{
3163	int this_sack;
3164	struct tcp_sack_block *sp = &tp->selective_acks[0];
3165	struct tcp_sack_block *swalk = sp+1;
3166
3167	/* See if the recent change to the first SACK eats into
3168	 * or hits the sequence space of other SACK blocks, if so coalesce.
3169	 */
3170	for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; ) {
3171		if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
3172			int i;
3173
3174			/* Zap SWALK, by moving every further SACK up by one slot.
3175			 * Decrease num_sacks.
3176			 */
3177			tp->rx_opt.num_sacks--;
3178			tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
3179			for(i=this_sack; i < tp->rx_opt.num_sacks; i++)
3180				sp[i] = sp[i+1];
3181			continue;
3182		}
3183		this_sack++, swalk++;
3184	}
3185}
3186
3187static inline void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2)
3188{
3189	__u32 tmp;
3190
3191	tmp = sack1->start_seq;
3192	sack1->start_seq = sack2->start_seq;
3193	sack2->start_seq = tmp;
3194
3195	tmp = sack1->end_seq;
3196	sack1->end_seq = sack2->end_seq;
3197	sack2->end_seq = tmp;
3198}
3199
3200static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
3201{
3202	struct tcp_sock *tp = tcp_sk(sk);
3203	struct tcp_sack_block *sp = &tp->selective_acks[0];
3204	int cur_sacks = tp->rx_opt.num_sacks;
3205	int this_sack;
3206
3207	if (!cur_sacks)
3208		goto new_sack;
3209
3210	for (this_sack=0; this_sack<cur_sacks; this_sack++, sp++) {
3211		if (tcp_sack_extend(sp, seq, end_seq)) {
3212			/* Rotate this_sack to the first one. */
3213			for (; this_sack>0; this_sack--, sp--)
3214				tcp_sack_swap(sp, sp-1);
3215			if (cur_sacks > 1)
3216				tcp_sack_maybe_coalesce(tp);
3217			return;
3218		}
3219	}
3220
3221	/* Could not find an adjacent existing SACK, build a new one,
3222	 * put it at the front, and shift everyone else down.  We
3223	 * always know there is at least one SACK present already here.
3224	 *
3225	 * If the sack array is full, forget about the last one.
3226	 */
3227	if (this_sack >= 4) {
3228		this_sack--;
3229		tp->rx_opt.num_sacks--;
3230		sp--;
3231	}
3232	for(; this_sack > 0; this_sack--, sp--)
3233		*sp = *(sp-1);
3234
3235new_sack:
3236	/* Build the new head SACK, and we're done. */
3237	sp->start_seq = seq;
3238	sp->end_seq = end_seq;
3239	tp->rx_opt.num_sacks++;
3240	tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
3241}
3242
3243/* RCV.NXT advances, some SACKs should be eaten. */
3244
3245static void tcp_sack_remove(struct tcp_sock *tp)
3246{
3247	struct tcp_sack_block *sp = &tp->selective_acks[0];
3248	int num_sacks = tp->rx_opt.num_sacks;
3249	int this_sack;
3250
3251	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
3252	if (skb_queue_empty(&tp->out_of_order_queue)) {
3253		tp->rx_opt.num_sacks = 0;
3254		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
3255		return;
3256	}
3257
3258	for(this_sack = 0; this_sack < num_sacks; ) {
3259		/* Check if the start of the sack is covered by RCV.NXT. */
3260		if (!before(tp->rcv_nxt, sp->start_seq)) {
3261			int i;
3262
3263			/* RCV.NXT must cover all the block! */
3264			BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq));
3265
3266			/* Zap this SACK, by moving forward any other SACKS. */
3267			for (i=this_sack+1; i < num_sacks; i++)
3268				tp->selective_acks[i-1] = tp->selective_acks[i];
3269			num_sacks--;
3270			continue;
3271		}
3272		this_sack++;
3273		sp++;
3274	}
3275	if (num_sacks != tp->rx_opt.num_sacks) {
3276		tp->rx_opt.num_sacks = num_sacks;
3277		tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
3278	}
3279}
3280
3281/* This one checks to see if we can put data from the
3282 * out_of_order queue into the receive_queue.
3283 */
3284static void tcp_ofo_queue(struct sock *sk)
3285{
3286	struct tcp_sock *tp = tcp_sk(sk);
3287	__u32 dsack_high = tp->rcv_nxt;
3288	struct sk_buff *skb;
3289
3290	while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
3291		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
3292			break;
3293
3294		if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
3295			__u32 dsack = dsack_high;
3296			if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
3297				dsack_high = TCP_SKB_CB(skb)->end_seq;
3298			tcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack);
3299		}
3300
3301		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
3302			SOCK_DEBUG(sk, "ofo packet was already received\n");
3303			__skb_unlink(skb, &tp->out_of_order_queue);
3304			__kfree_skb(skb);
3305			continue;
3306		}
3307		SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
3308			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
3309			   TCP_SKB_CB(skb)->end_seq);
3310
3311		__skb_unlink(skb, &tp->out_of_order_queue);
3312		__skb_queue_tail(&sk->sk_receive_queue, skb);
3313		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
3314		if(skb->h.th->fin)
3315			tcp_fin(skb, sk, skb->h.th);
3316	}
3317}

static int tcp_prune_queue(struct sock *sk);

static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct tcp_sock *tp = tcp_sk(sk);
	int eaten = -1;

	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
		goto drop;

	__skb_pull(skb, th->doff * 4);

	TCP_ECN_accept_cwr(tp, skb);

	if (tp->rx_opt.dsack) {
		tp->rx_opt.dsack = 0;
		tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks,
						    4 - tp->rx_opt.tstamp_ok);
	}

	/*  Queue data for delivery to the user.
	 *  Packets in sequence go to the receive queue.
	 *  Out of sequence packets go to the out_of_order_queue.
	 */
	if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
		if (tcp_receive_window(tp) == 0)
			goto out_of_window;

		/* Ok. In sequence. In window. */
		if (tp->ucopy.task == current &&
		    tp->copied_seq == tp->rcv_nxt && tp->ucopy.len &&
		    sock_owned_by_user(sk) && !tp->urg_data) {
			int chunk = min_t(unsigned int, skb->len,
							tp->ucopy.len);

			__set_current_state(TASK_RUNNING);

			local_bh_enable();
			if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
				tp->ucopy.len -= chunk;
				tp->copied_seq += chunk;
				eaten = (chunk == skb->len && !th->fin);
				tcp_rcv_space_adjust(sk);
			}
			local_bh_disable();
		}

		if (eaten <= 0) {
queue_and_out:
			if (eaten < 0 &&
			    (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
			     !sk_stream_rmem_schedule(sk, skb))) {
				if (tcp_prune_queue(sk) < 0 ||
				    !sk_stream_rmem_schedule(sk, skb))
					goto drop;
			}
			sk_stream_set_owner_r(skb, sk);
			__skb_queue_tail(&sk->sk_receive_queue, skb);
		}
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (skb->len)
			tcp_event_data_recv(sk, tp, skb);
		if (th->fin)
			tcp_fin(skb, sk, th);

		if (!skb_queue_empty(&tp->out_of_order_queue)) {
			tcp_ofo_queue(sk);

			/* RFC 2581, section 4.2: SHOULD send an immediate
			 * ACK when a gap in the queue is filled.
			 */
			if (skb_queue_empty(&tp->out_of_order_queue))
				inet_csk(sk)->icsk_ack.pingpong = 0;
		}

		if (tp->rx_opt.num_sacks)
			tcp_sack_remove(tp);

		tcp_fast_path_check(sk, tp);

		if (eaten > 0)
			__kfree_skb(skb);
		else if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, 0);
		return;
	}

	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
		/* A retransmit, 2nd most common case.  Force an immediate ack. */
		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
		tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);

out_of_window:
		tcp_enter_quickack_mode(sk);
		inet_csk_schedule_ack(sk);
drop:
		__kfree_skb(skb);
		return;
	}

	/* Out of window. E.g. a zero window probe. */
	if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
		goto out_of_window;

	tcp_enter_quickack_mode(sk);

	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
		/* Partial packet, seq < rcv_next < end_seq */
		SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
			   TCP_SKB_CB(skb)->end_seq);

		tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);

		/* If the window is closed, drop the tail of the packet,
		 * but only after remembering the D-SACK for its head above.
		 */
		if (!tcp_receive_window(tp))
			goto out_of_window;
		goto queue_and_out;
	}

	TCP_ECN_check_ce(tp, skb);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    !sk_stream_rmem_schedule(sk, skb)) {
		if (tcp_prune_queue(sk) < 0 ||
		    !sk_stream_rmem_schedule(sk, skb))
			goto drop;
	}

	/* Disable header prediction. */
	tp->pred_flags = 0;
	inet_csk_schedule_ack(sk);

	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);

	sk_stream_set_owner_r(skb, sk);

	if (!skb_peek(&tp->out_of_order_queue)) {
		/* Initial out of order segment, build 1 SACK. */
		if (tp->rx_opt.sack_ok) {
			tp->rx_opt.num_sacks = 1;
			tp->rx_opt.dsack     = 0;
			tp->rx_opt.eff_sacks = 1;
			tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
			tp->selective_acks[0].end_seq =
						TCP_SKB_CB(skb)->end_seq;
		}
		__skb_queue_head(&tp->out_of_order_queue, skb);
	} else {
		struct sk_buff *skb1 = tp->out_of_order_queue.prev;
		u32 seq = TCP_SKB_CB(skb)->seq;
		u32 end_seq = TCP_SKB_CB(skb)->end_seq;

		if (seq == TCP_SKB_CB(skb1)->end_seq) {
			__skb_append(skb1, skb, &tp->out_of_order_queue);

			if (!tp->rx_opt.num_sacks ||
			    tp->selective_acks[0].end_seq != seq)
				goto add_sack;

			/* Common case: data arrives in order after the hole. */
			tp->selective_acks[0].end_seq = end_seq;
			return;
		}

		/* Find a place to insert this segment. */
		do {
			if (!after(TCP_SKB_CB(skb1)->seq, seq))
				break;
		} while ((skb1 = skb1->prev) !=
			 (struct sk_buff *)&tp->out_of_order_queue);

		/* Does the skb overlap the previous one? */
		if (skb1 != (struct sk_buff *)&tp->out_of_order_queue &&
		    before(seq, TCP_SKB_CB(skb1)->end_seq)) {
			if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
				/* All the bits are present. Drop. */
				__kfree_skb(skb);
				tcp_dsack_set(tp, seq, end_seq);
				goto add_sack;
			}
			if (after(seq, TCP_SKB_CB(skb1)->seq)) {
				/* Partial overlap. */
				tcp_dsack_set(tp, seq, TCP_SKB_CB(skb1)->end_seq);
			} else {
				skb1 = skb1->prev;
			}
		}
		__skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);

		/* And free segments completely covered by the new one. */
		while ((skb1 = skb->next) !=
		       (struct sk_buff *)&tp->out_of_order_queue &&
		       after(end_seq, TCP_SKB_CB(skb1)->seq)) {
		       if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
			       tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
			       break;
		       }
		       __skb_unlink(skb1, &tp->out_of_order_queue);
		       tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
		       __kfree_skb(skb1);
		}

add_sack:
		if (tp->rx_opt.sack_ok)
			tcp_sack_new_ofo_skb(sk, seq, end_seq);
	}
}
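
/* Worked example of the D-SACK bookkeeping above (RFC 2883), with made-up
 * numbers: if rcv_nxt == 1000 and a duplicate covering 500..800 arrives,
 * the code acks 1000 and reports 500..800 in the first SACK block, so the
 * peer can tell a spurious retransmit from reordering.
 */
#if 0	/* illustrative only */
static void tcp_dsack_example_sketch(struct tcp_sock *tp)
{
	/* receiver side, rcv_nxt == 1000, segment 500..800 seen again: */
	tcp_dsack_set(tp, 500, 800);	/* first SACK block = the dup range */
	/* the outgoing ACK then carries ack=1000 plus SACK 500-800 */
}
#endif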

/* Collapse a contiguous sequence of skbs head..tail with
 * sequence numbers start..end.
 * Segments with FIN/SYN are not collapsed (only because this
 * simplifies the code).
 */
static void
tcp_collapse(struct sock *sk, struct sk_buff_head *list,
	     struct sk_buff *head, struct sk_buff *tail,
	     u32 start, u32 end)
{
	struct sk_buff *skb;

	/* First, check that the queue is collapsible and find
	 * the point where collapsing can be useful.
	 */
	for (skb = head; skb != tail; ) {
		/* No new bits? This can happen in the ofo queue. */
		if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
			struct sk_buff *next = skb->next;
			__skb_unlink(skb, list);
			__kfree_skb(skb);
			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
			skb = next;
			continue;
		}

		/* The first skb to collapse is:
		 * - not SYN/FIN and
		 * - bloated or contains data before "start" or
		 *   overlaps the next one.
		 */
		if (!skb->h.th->syn && !skb->h.th->fin &&
		    (tcp_win_from_space(skb->truesize) > skb->len ||
		     before(TCP_SKB_CB(skb)->seq, start) ||
		     (skb->next != tail &&
		      TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq)))
			break;

		/* Decided to skip this one, advance the start seq. */
		start = TCP_SKB_CB(skb)->end_seq;
		skb = skb->next;
	}
	if (skb == tail || skb->h.th->syn || skb->h.th->fin)
		return;

	while (before(start, end)) {
		struct sk_buff *nskb;
		int header = skb_headroom(skb);
		int copy = SKB_MAX_ORDER(header, 0);

		/* Header too big? This can happen with IPv6. */
		if (copy < 0)
			return;
		if (end - start < copy)
			copy = end - start;
		nskb = alloc_skb(copy + header, GFP_ATOMIC);
		if (!nskb)
			return;
		skb_reserve(nskb, header);
		memcpy(nskb->head, skb->head, header);
		nskb->nh.raw = nskb->head + (skb->nh.raw - skb->head);
		nskb->h.raw = nskb->head + (skb->h.raw - skb->head);
		nskb->mac.raw = nskb->head + (skb->mac.raw - skb->head);
		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
		__skb_insert(nskb, skb->prev, skb, list);
		sk_stream_set_owner_r(nskb, sk);

		/* Copy data, releasing collapsed skbs. */
		while (copy > 0) {
			int offset = start - TCP_SKB_CB(skb)->seq;
			int size = TCP_SKB_CB(skb)->end_seq - start;

			BUG_ON(offset < 0);
			if (size > 0) {
				size = min(copy, size);
				if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
					BUG();
				TCP_SKB_CB(nskb)->end_seq += size;
				copy -= size;
				start += size;
			}
			if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
				struct sk_buff *next = skb->next;
				__skb_unlink(skb, list);
				__kfree_skb(skb);
				NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
				skb = next;
				if (skb == tail || skb->h.th->syn || skb->h.th->fin)
					return;
			}
		}
	}
}
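
/* "Bloated" above means the skb's accounted share of the receive buffer
 * exceeds its payload.  A sketch of the arithmetic, assuming the default
 * tcp_adv_win_scale of 2 (so tcp_win_from_space() is space - space/4):
 */
#if 0	/* illustrative only */
static int tcp_skb_is_bloated_sketch(void)
{
	int truesize = 4096;			/* buffer space consumed */
	int len = 100;				/* payload carried */
	int win = truesize - (truesize >> 2);	/* tcp_win_from_space() = 3072 */

	return win > len;	/* bloated: collapsing frees ~3KB of window */
}
#endif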

/* Collapse the ofo queue. Algorithm: select a contiguous sequence of skbs
 * and tcp_collapse() them until the whole queue is collapsed.
 */
static void tcp_collapse_ofo_queue(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
	struct sk_buff *head;
	u32 start, end;

	if (skb == NULL)
		return;

	start = TCP_SKB_CB(skb)->seq;
	end = TCP_SKB_CB(skb)->end_seq;
	head = skb;

	for (;;) {
		skb = skb->next;

		/* The segment is terminated when we see a gap or when
		 * we reach the end of the queue.
		 */
		if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
		    after(TCP_SKB_CB(skb)->seq, end) ||
		    before(TCP_SKB_CB(skb)->end_seq, start)) {
			tcp_collapse(sk, &tp->out_of_order_queue,
				     head, skb, start, end);
			head = skb;
			if (skb == (struct sk_buff *)&tp->out_of_order_queue)
				break;
			/* Start a new segment */
			start = TCP_SKB_CB(skb)->seq;
			end = TCP_SKB_CB(skb)->end_seq;
		} else {
			if (before(TCP_SKB_CB(skb)->seq, start))
				start = TCP_SKB_CB(skb)->seq;
			if (after(TCP_SKB_CB(skb)->end_seq, end))
				end = TCP_SKB_CB(skb)->end_seq;
		}
	}
}
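
/* Example run, with made-up sequence numbers: if the ofo queue holds
 * 100-200, 150-250 and 300-400, the first two overlap into one stretch
 * (start 100, end 250) and are collapsed together; the gap before 300
 * then starts a new stretch.
 */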

/* Reduce allocated memory if we can, trying to get
 * the socket within its memory limits again.
 *
 * Return less than zero if we should start dropping frames
 * until the socket owning process reads some of the data
 * to stabilize the situation.
 */
static int tcp_prune_queue(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);

	NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		tcp_clamp_window(sk, tp);
	else if (tcp_memory_pressure)
		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);

	tcp_collapse_ofo_queue(sk);
	tcp_collapse(sk, &sk->sk_receive_queue,
		     sk->sk_receive_queue.next,
		     (struct sk_buff *)&sk->sk_receive_queue,
		     tp->copied_seq, tp->rcv_nxt);
	sk_stream_mem_reclaim(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
		return 0;

	/* Collapsing did not help; destructive actions follow.
	 * This should never happen.
	 */

	/* First, purge the out_of_order queue. */
	if (!skb_queue_empty(&tp->out_of_order_queue)) {
		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
		__skb_queue_purge(&tp->out_of_order_queue);

		/* Reset SACK state.  A conforming SACK implementation will
		 * do the same at a timeout based retransmit.  When a connection
		 * is in a sad state like this, we care only about integrity
		 * of the connection, not performance.
		 */
		if (tp->rx_opt.sack_ok)
			tcp_sack_reset(&tp->rx_opt);
		sk_stream_mem_reclaim(sk);
	}

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
		return 0;

	/* If we are really being abused, tell the caller to silently
	 * drop receive data on the floor.  It will get retransmitted
	 * and hopefully then we'll have sufficient space.
	 */
	NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);

	/* Massive buffer overcommit. */
	tp->pred_flags = 0;
	return -1;
}
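
/* The pruning ladder above, in order of increasing brutality:
 *  1. clamp the advertised window (tcp_clamp_window),
 *  2. collapse the ofo queue, then the receive queue,
 *  3. purge the ofo queue outright and reset SACK state,
 *  4. give up and tell the caller to drop the segment (return -1).
 */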


/* RFC 2861, slow part. Adjust cwnd after it has not been full during one
 * RTO. As additional protection, we do not touch cwnd in retransmission
 * phases, or if the application hit its sndbuf limit recently.
 */
void tcp_cwnd_application_limited(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		/* Limited by application or receiver window. */
		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
		u32 win_used = max(tp->snd_cwnd_used, init_win);
		if (win_used < tp->snd_cwnd) {
			tp->snd_ssthresh = tcp_current_ssthresh(sk);
			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
		}
		tp->snd_cwnd_used = 0;
	}
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
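
/* Worked example of the RFC 2861 decay above: with snd_cwnd == 40 but
 * only win_used == 10 segments in flight during the last RTO, ssthresh
 * is saved and cwnd becomes (40 + 10) / 2 = 25, so an idle application
 * bleeds off its unused allowance gradually instead of keeping it.
 */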

static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
{
	/* If the user specified a specific send buffer setting, do
	 * not modify it.
	 */
	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
		return 0;

	/* If we are under global TCP memory pressure, do not expand.  */
	if (tcp_memory_pressure)
		return 0;

	/* If we are under soft global TCP memory pressure, do not expand.  */
	if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
		return 0;

	/* If we filled the congestion window, do not expand.  */
	if (tp->packets_out >= tp->snd_cwnd)
		return 0;

	return 1;
}

/* When an incoming ACK has allowed us to free some skb from the
 * write_queue, we remember this event in the SOCK_QUEUE_SHRUNK flag and
 * wake the socket up on exit from the tcp input handler.
 *
 * PROBLEM: sndbuf expansion does not work well with largesend.
 */
static void tcp_new_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_should_expand_sndbuf(sk, tp)) {
		int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
			MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
		    demanded = max_t(unsigned int, tp->snd_cwnd,
						   tp->reordering + 1);
		sndmem *= 2 * demanded;
		if (sndmem > sk->sk_sndbuf)
			sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
		tp->snd_cwnd_stamp = tcp_time_stamp;
	}

	sk->sk_write_space(sk);
}
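
/* Sizing sketch with made-up numbers: mss_cache == 1460 makes sndmem one
 * full-sized skb's worth of memory (payload + MAX_TCP_HEADER + 16 +
 * sizeof(struct sk_buff)); with snd_cwnd == 20 the target is then
 * sndmem * 2 * 20, i.e. room for two windows of fully accounted skbs,
 * capped by sysctl_tcp_wmem[2].
 */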

static void tcp_check_space(struct sock *sk)
{
	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
		if (sk->sk_socket &&
		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
			tcp_new_space(sk);
	}
}

static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
{
	tcp_push_pending_frames(sk, tp);
	tcp_check_space(sk);
}

/*
 * Check if sending an ack is needed.
 */
static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
{
	struct tcp_sock *tp = tcp_sk(sk);

	    /* More than one full frame received... */
	if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss
	     /* ... and the right edge of the window advances far enough.
	      * (tcp_recvmsg() will send an ACK otherwise.) Or...
	      */
	     && __tcp_select_window(sk) >= tp->rcv_wnd) ||
	    /* We ACK each frame or... */
	    tcp_in_quickack_mode(sk) ||
	    /* We have out of order data. */
	    (ofo_possible &&
	     skb_peek(&tp->out_of_order_queue))) {
		/* Then ack it now */
		tcp_send_ack(sk);
	} else {
		/* Else, send delayed ack. */
		tcp_send_delayed_ack(sk);
	}
}

static inline void tcp_ack_snd_check(struct sock *sk)
{
	if (!inet_csk_ack_scheduled(sk)) {
		/* We sent a data segment already. */
		return;
	}
	__tcp_ack_snd_check(sk, 1);
}

/*
 *	This routine is only called when we have urgent data
 *	signaled. It's the 'slow' part of tcp_urg. It could be
 *	moved inline now as tcp_urg is only called from one
 *	place. We handle URGent data wrong: we have to, as
 *	BSD still doesn't use the correction from RFC961.
 *	For 1003.1g we should support a new option TCP_STDURG to permit
 *	either form (or just set the sysctl tcp_stdurg).
 */

static void tcp_check_urg(struct sock *sk, struct tcphdr *th)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 ptr = ntohs(th->urg_ptr);

	if (ptr && !sysctl_tcp_stdurg)
		ptr--;
	ptr += ntohl(th->seq);

	/* Ignore urgent data that we've already seen and read. */
	if (after(tp->copied_seq, ptr))
		return;

	/* Do not replay the urg ptr.
	 *
	 * NOTE: interesting situation not covered by the specs.
	 * A misbehaving sender may send an urg ptr pointing to a segment
	 * which we already have in the ofo queue. We are not able to fetch
	 * such data and will stay in TCP_URG_NOTYET until it is eaten
	 * by recvmsg(). It seems we are not obliged to handle such wicked
	 * situations, but it is worth thinking about the possibility of
	 * some DoS using a hypothetical application-level deadlock.
	 */
	if (before(ptr, tp->rcv_nxt))
		return;

	/* Do we already have a newer (or duplicate) urgent pointer? */
	if (tp->urg_data && !after(ptr, tp->urg_seq))
		return;

	/* Tell the world about our new urgent pointer. */
	sk_send_sigurg(sk);

	/* We may be adding urgent data when the last byte read was
	 * urgent. To do this requires some care. We cannot just ignore
	 * tp->copied_seq since we would read the last urgent byte again
	 * as data, nor can we alter copied_seq until this data arrives
	 * or we break the semantics of SIOCATMARK (and thus sockatmark()).
	 *
	 * NOTE. Double Dutch. Rendering to plain English: the author of the
	 * comment above did something like send("A", MSG_OOB);
	 * send("B", MSG_OOB); and expected both A and B to disappear from the
	 * stream. This is _wrong_. Though this happens in BSD with high
	 * probability, it is not guaranteed. Any application relying on it
	 * is buggy. Note also that the fix "works" only in this artificial
	 * test: insert some normal data between A and B and we will diverge
	 * from BSD again. Verdict: it is better to remove this, to trap
	 * buggy users.
	 */
	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
	    !sock_flag(sk, SOCK_URGINLINE) &&
	    tp->copied_seq != tp->rcv_nxt) {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
		tp->copied_seq++;
		if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			__kfree_skb(skb);
		}
	}

	tp->urg_data   = TCP_URG_NOTYET;
	tp->urg_seq    = ptr;

	/* Disable header prediction. */
	tp->pred_flags = 0;
}
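
/* Pointer arithmetic sketch for the above: with th->seq == 1000 and
 * th->urg_ptr == 6, the default BSD interpretation (tcp_stdurg == 0)
 * puts the urgent byte at 1000 + 6 - 1 = 1005, while the strict
 * host-requirements (RFC 1122) interpretation would put it at 1006.
 */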

/* This is the 'fast' part of urgent handling. */
static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check if we get a new urgent pointer - normally not. */
	if (th->urg)
		tcp_check_urg(sk, th);

	/* Do we wait for any urgent data? - normally not... */
	if (tp->urg_data == TCP_URG_NOTYET) {
		u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
			  th->syn;

		/* Is the urgent pointer pointing into this packet? */
		if (ptr < skb->len) {
			u8 tmp;
			if (skb_copy_bits(skb, ptr, &tmp, 1))
				BUG();
			tp->urg_data = TCP_URG_VALID | tmp;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_data_ready(sk, 0);
		}
	}
}

static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int chunk = skb->len - hlen;
	int err;

	local_bh_enable();
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
	else
		err = skb_copy_and_csum_datagram_iovec(skb, hlen,
						       tp->ucopy.iov);

	if (!err) {
		tp->ucopy.len -= chunk;
		tp->copied_seq += chunk;
		tcp_rcv_space_adjust(sk);
	}

	local_bh_disable();
	return err;
}

static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
{
	__sum16 result;

	if (sock_owned_by_user(sk)) {
		local_bh_enable();
		result = __tcp_checksum_complete(skb);
		local_bh_disable();
	} else {
		result = __tcp_checksum_complete(skb);
	}
	return result;
}

static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__tcp_checksum_complete_user(sk, skb);
}
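
/* Note on the local_bh_enable() dance above: checksumming a large segment
 * can take a while, so when the socket is owned by process context we
 * briefly re-enable bottom halves around the computation rather than
 * starve softirq processing.
 */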

#ifdef CONFIG_NET_DMA
static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int chunk = skb->len - hlen;
	int dma_cookie;
	int copied_early = 0;

	if (tp->ucopy.wakeup)
		return 0;

	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
		tp->ucopy.dma_chan = get_softnet_dma();

	if (tp->ucopy.dma_chan && skb->ip_summed == CHECKSUM_UNNECESSARY) {

		dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
			skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list);

		if (dma_cookie < 0)
			goto out;

		tp->ucopy.dma_cookie = dma_cookie;
		copied_early = 1;

		tp->ucopy.len -= chunk;
		tp->copied_seq += chunk;
		tcp_rcv_space_adjust(sk);

		if ((tp->ucopy.len == 0) ||
		    (tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) ||
		    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
			tp->ucopy.wakeup = 1;
			sk->sk_data_ready(sk, 0);
		}
	} else if (chunk > 0) {
		tp->ucopy.wakeup = 1;
		sk->sk_data_ready(sk, 0);
	}
out:
	return copied_early;
}
#endif /* CONFIG_NET_DMA */

/*
 *	TCP receive function for the ESTABLISHED state.
 *
 *	It is split into a fast path and a slow path. The fast path is
 *	disabled when:
 *	- A zero window was announced by us - zero window probing
 *	  is only handled properly on the slow path.
 *	- Out of order segments arrived.
 *	- Urgent data is expected.
 *	- There is no buffer space left.
 *	- Unexpected TCP flags/window values/header lengths are received
 *	  (detected by checking the TCP header against pred_flags).
 *	- Data is sent in both directions. The fast path only supports pure
 *	  senders or pure receivers (this means either the sequence number
 *	  or the ack value must stay constant).
 *	- An unexpected TCP option arrived.
 *
 *	When these conditions are not satisfied it drops into a standard
 *	receive procedure patterned after RFC793 to handle all cases.
 *	The first three cases are guaranteed by proper pred_flags setting,
 *	the rest is checked inline. Fast processing is turned on in
 *	tcp_data_queue when everything is OK.
 */
int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			struct tcphdr *th, unsigned len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/*
	 *	Header prediction.
	 *	The code loosely follows the one in the famous
	 *	"30 instruction TCP receive" Van Jacobson mail.
	 *
	 *	Van's trick is to deposit buffers into the socket queue
	 *	on a device interrupt, then call the tcp_recv function
	 *	in receive process context to checksum and copy
	 *	the buffer to user space. Smart...
	 *
	 *	Our current scheme is not silly either but we take the
	 *	extra cost of the net_bh soft interrupt processing...
	 *	We do checksum and copy also but from device to kernel.
	 */

	tp->rx_opt.saw_tstamp = 0;

	/*	pred_flags is 0xS?10 << 16 + snd_wnd
	 *	if header prediction is to be made.
	 *	'S' will always be tp->tcp_header_len >> 2
	 *	'?' will be 0 for the fast path, otherwise pred_flags is 0 to
	 *	turn it off (when there are holes in the receive
	 *	space for instance).
	 *	The PSH flag is ignored.
	 */

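	/* A sketch of the encoding (see __tcp_fast_path_on() in net/tcp.h;
	 * reproduced here only for illustration): pred_flags is roughly
	 *
	 *	htonl((tp->tcp_header_len << 26) | ntohl(TCP_FLAG_ACK) | snd_wnd)
	 *
	 * so the single word-compare below checks doff, the flag byte and
	 * the expected window in one go.
	 */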
	if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
		TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
		int tcp_header_len = tp->tcp_header_len;

		/* Timestamp header prediction: tcp_header_len
		 * is automatically equal to th->doff*4 due to the pred_flags
		 * match.
		 */

		/* Check timestamp */
		if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
			__be32 *ptr = (__be32 *)(th + 1);

			/* No? Slow path! */
			if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
					  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
				goto slow_path;

			tp->rx_opt.saw_tstamp = 1;
			++ptr;
			tp->rx_opt.rcv_tsval = ntohl(*ptr);
			++ptr;
			tp->rx_opt.rcv_tsecr = ntohl(*ptr);

			/* If PAWS failed, check it more carefully in slow path */
			if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
				goto slow_path;

			/* DO NOT update ts_recent here: if the checksum fails
			 * and the timestamp was the corrupted part, it would
			 * result in a hung connection since we would drop all
			 * future packets due to the PAWS test.
			 */
		}

		if (len <= tcp_header_len) {
			/* Bulk data transfer: sender */
			if (len == tcp_header_len) {
				/* The predicted packet is in window by definition.
				 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
				 * Hence, the check seq<=rcv_wup reduces to:
				 */
				if (tcp_header_len ==
				    (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
				    tp->rcv_nxt == tp->rcv_wup)
					tcp_store_ts_recent(tp);

				/* We know that such packets are checksummed
				 * on entry.
				 */
				tcp_ack(sk, skb, 0);
				__kfree_skb(skb);
				tcp_data_snd_check(sk, tp);
				return 0;
			} else { /* Header too small */
				TCP_INC_STATS_BH(TCP_MIB_INERRS);
				goto discard;
			}
		} else {
			int eaten = 0;
			int copied_early = 0;

			if (tp->copied_seq == tp->rcv_nxt &&
			    len - tcp_header_len <= tp->ucopy.len) {
#ifdef CONFIG_NET_DMA
				if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
					copied_early = 1;
					eaten = 1;
				}
#endif
				if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) {
					__set_current_state(TASK_RUNNING);

					if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
						eaten = 1;
				}
				if (eaten) {
					/* The predicted packet is in window by definition.
					 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
					 * Hence, the check seq<=rcv_wup reduces to:
					 */
					if (tcp_header_len ==
					    (sizeof(struct tcphdr) +
					     TCPOLEN_TSTAMP_ALIGNED) &&
					    tp->rcv_nxt == tp->rcv_wup)
						tcp_store_ts_recent(tp);

					tcp_rcv_rtt_measure_ts(sk, skb);

					__skb_pull(skb, tcp_header_len);
					tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
					NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
				}
				if (copied_early)
					tcp_cleanup_rbuf(sk, skb->len);
			}
			if (!eaten) {
				if (tcp_checksum_complete_user(sk, skb))
					goto csum_error;

				/* The predicted packet is in window by definition.
				 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
				 * Hence, the check seq<=rcv_wup reduces to:
				 */
				if (tcp_header_len ==
				    (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
				    tp->rcv_nxt == tp->rcv_wup)
					tcp_store_ts_recent(tp);

				tcp_rcv_rtt_measure_ts(sk, skb);

				if ((int)skb->truesize > sk->sk_forward_alloc)
					goto step5;

				NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);

				/* Bulk data transfer: receiver */
				__skb_pull(skb, tcp_header_len);
				__skb_queue_tail(&sk->sk_receive_queue, skb);
				sk_stream_set_owner_r(skb, sk);
				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
			}

			tcp_event_data_recv(sk, tp, skb);

			if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
				/* Well, only one small jumplet in the fast path... */
				tcp_ack(sk, skb, FLAG_DATA);
				tcp_data_snd_check(sk, tp);
				if (!inet_csk_ack_scheduled(sk))
					goto no_ack;
			}

			__tcp_ack_snd_check(sk, 0);
no_ack:
#ifdef CONFIG_NET_DMA
			if (copied_early)
				__skb_queue_tail(&sk->sk_async_wait_queue, skb);
			else
#endif
			if (eaten)
				__kfree_skb(skb);
			else
				sk->sk_data_ready(sk, 0);
			return 0;
		}
	}

slow_path:
	if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
		goto csum_error;

	/*
	 * RFC1323: H1. Apply PAWS check first.
	 */
	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
	    tcp_paws_discard(sk, skb)) {
		if (!th->rst) {
			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
			tcp_send_dupack(sk, skb);
			goto discard;
		}
		/* Resets are accepted even if PAWS failed.
		 *
		 * The ts_recent update must be made after we are sure
		 * that the packet is in window.
		 */
	}

	/*
	 *	Standard slow path.
	 */

	if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
		/* RFC793, page 37: "In all states except SYN-SENT, all reset
		 * (RST) segments are validated by checking their SEQ-fields."
		 * And page 69: "If an incoming segment is not acceptable,
		 * an acknowledgment should be sent in reply (unless the RST bit
		 * is set, if so drop the segment and return)".
		 */
		if (!th->rst)
			tcp_send_dupack(sk, skb);
		goto discard;
	}

	if (th->rst) {
		tcp_reset(sk);
		goto discard;
	}

	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);

	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
		tcp_reset(sk);
		return 1;
	}

step5:
	if (th->ack)
		tcp_ack(sk, skb, FLAG_SLOWPATH);

	tcp_rcv_rtt_measure_ts(sk, skb);

	/* Process urgent data. */
	tcp_urg(sk, skb, th);

	/* step 7: process the segment text */
	tcp_data_queue(sk, skb);

	tcp_data_snd_check(sk, tp);
	tcp_ack_snd_check(sk);
	return 0;

csum_error:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);

discard:
	__kfree_skb(skb);
	return 0;
}

static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
					 struct tcphdr *th, unsigned len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int saved_clamp = tp->rx_opt.mss_clamp;

	tcp_parse_options(skb, &tp->rx_opt, 0);

	if (th->ack) {
		/* rfc793:
		 * "If the state is SYN-SENT then
		 *    first check the ACK bit
		 *      If the ACK bit is set
		 *	  If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
		 *        a reset (unless the RST bit is set, if so drop
		 *        the segment and return)"
		 *
		 *  We do not send data with SYN, so that the RFC-correct
		 *  test reduces to:
		 */
		if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
			goto reset_and_undo;

		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
			     tcp_time_stamp)) {
			NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
			goto reset_and_undo;
		}

		/* Now the ACK is acceptable.
		 *
		 * "If the RST bit is set
		 *    If the ACK was acceptable then signal the user "error:
		 *    connection reset", drop the segment, enter CLOSED state,
		 *    delete TCB, and return."
		 */

		if (th->rst) {
			tcp_reset(sk);
			goto discard;
		}

		/* rfc793:
		 *   "fifth, if neither of the SYN or RST bits is set then
		 *    drop the segment and return."
		 *
		 *    See note below!
		 *                                        --ANK(990513)
		 */
		if (!th->syn)
			goto discard_and_undo;

		/* rfc793:
		 *   "If the SYN bit is on ...
		 *    are acceptable then ...
		 *    (our SYN has been ACKed), change the connection
		 *    state to ESTABLISHED..."
		 */

		TCP_ECN_rcv_synack(tp, th);

		tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
		tcp_ack(sk, skb, FLAG_SLOWPATH);

		/* Ok.. it's good. Set up sequence numbers and
		 * move to established.
		 */
		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;

		/* RFC1323: The window in SYN & SYN/ACK segments is
		 * never scaled.
		 */
		tp->snd_wnd = ntohs(th->window);
		tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);

		if (!tp->rx_opt.wscale_ok) {
			tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
			tp->window_clamp = min(tp->window_clamp, 65535U);
		}

		if (tp->rx_opt.saw_tstamp) {
			tp->rx_opt.tstamp_ok	   = 1;
			tp->tcp_header_len =
				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
			tp->advmss	    -= TCPOLEN_TSTAMP_ALIGNED;
			tcp_store_ts_recent(tp);
		} else {
			tp->tcp_header_len = sizeof(struct tcphdr);
		}

		if (tp->rx_opt.sack_ok && sysctl_tcp_fack)
			tp->rx_opt.sack_ok |= 2;

		tcp_mtup_init(sk);
		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		tcp_initialize_rcv_mss(sk);

		/* Remember, tcp_poll() does not lock the socket!
		 * Change state from SYN-SENT only after copied_seq
		 * is initialized.
		 */
		tp->copied_seq = tp->rcv_nxt;
		smp_mb();
		tcp_set_state(sk, TCP_ESTABLISHED);

		security_inet_conn_established(sk, skb);

		/* Make sure socket is routed, for correct metrics.  */
		icsk->icsk_af_ops->rebuild_header(sk);

		tcp_init_metrics(sk);

		tcp_init_congestion_control(sk);

		/* Prevent spurious tcp_cwnd_restart() on the first data
		 * packet.
		 */
		tp->lsndtime = tcp_time_stamp;

		tcp_init_buffer_space(sk);

		if (sock_flag(sk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));

		if (!tp->rx_opt.snd_wscale)
			__tcp_fast_path_on(tp, tp->snd_wnd);
		else
			tp->pred_flags = 0;

		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);
			sk_wake_async(sk, 0, POLL_OUT);
		}

		if (sk->sk_write_pending ||
		    icsk->icsk_accept_queue.rskq_defer_accept ||
		    icsk->icsk_ack.pingpong) {
			/* Save one ACK. Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever that I was not able
			 * to stand against the temptation 8)     --ANK
			 */
			inet_csk_schedule_ack(sk);
			icsk->icsk_ack.lrcvtime = tcp_time_stamp;
			icsk->icsk_ack.ato	 = TCP_ATO_MIN;
			tcp_incr_quickack(sk);
			tcp_enter_quickack_mode(sk);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX, TCP_RTO_MAX);

discard:
			__kfree_skb(skb);
			return 0;
		} else {
			tcp_send_ack(sk);
		}
		return -1;
	}

	/* No ACK in the segment */

	if (th->rst) {
		/* rfc793:
		 * "If the RST bit is set
		 *
		 *      Otherwise (no ACK) drop the segment and return."
		 */

		goto discard_and_undo;
	}

	/* PAWS check. */
	if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_check(&tp->rx_opt, 0))
		goto discard_and_undo;

	if (th->syn) {
		/* We see a SYN without an ACK. It is an attempt at a
		 * simultaneous connect with crossed SYNs.
		 * In particular, it can be a connect to self.
		 */
		tcp_set_state(sk, TCP_SYN_RECV);

		if (tp->rx_opt.saw_tstamp) {
			tp->rx_opt.tstamp_ok = 1;
			tcp_store_ts_recent(tp);
			tp->tcp_header_len =
				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			tp->tcp_header_len = sizeof(struct tcphdr);
		}

		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;

		/* RFC1323: The window in SYN & SYN/ACK segments is
		 * never scaled.
		 */
		tp->snd_wnd    = ntohs(th->window);
		tp->snd_wl1    = TCP_SKB_CB(skb)->seq;
		tp->max_window = tp->snd_wnd;

		TCP_ECN_rcv_syn(tp, th);

		tcp_mtup_init(sk);
		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		tcp_initialize_rcv_mss(sk);

		tcp_send_synack(sk);
#if 0
		/* Note, we could accept data and URG from this segment.
		 * Nothing prevents this.
		 *
		 * However, since we ignore data in ACKless segments in some
		 * cases, we have no reason to accept it in others.
		 * Also, it seems the code doing this in step6 of
		 * tcp_rcv_state_process is not flawless. So, discard the
		 * packet for sanity. Uncomment this return to process the
		 * data.
		 */
		return -1;
#else
		goto discard;
#endif
	}
	/* "fifth, if neither of the SYN or RST bits is set then
	 * drop the segment and return."
	 */

discard_and_undo:
	tcp_clear_options(&tp->rx_opt);
	tp->rx_opt.mss_clamp = saved_clamp;
	goto discard;

reset_and_undo:
	tcp_clear_options(&tp->rx_opt);
	tp->rx_opt.mss_clamp = saved_clamp;
	return 1;
}
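
/* Outcome summary for the SYN-SENT handler above:
 *   acceptable SYN+ACK         -> ESTABLISHED, ACK sent (or delayed),
 *   RST with an acceptable ACK -> connection reset,
 *   bare SYN                   -> SYN-RECV (simultaneous open), SYN-ACK sent,
 *   anything else              -> dropped, parsed options rolled back.
 */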

/*
 *	This function implements the receiving procedure of RFC 793 for
 *	all states except ESTABLISHED and TIME_WAIT.
 *	It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
 *	address independent.
 */

int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			  struct tcphdr *th, unsigned len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int queued = 0;

	tp->rx_opt.saw_tstamp = 0;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		goto discard;

	case TCP_LISTEN:
		if (th->ack)
			return 1;

		if (th->rst)
			goto discard;

		if (th->syn) {
			if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
				return 1;

			/* Now we have several options: in theory there is
			 * nothing else in the frame. KA9Q has an option to
			 * send data with the syn, BSD accepts data with the
			 * syn up to the [to be] advertised window and
			 * Solaris 2.1 gives you a protocol error. For now
			 * we just ignore it, which fits the spec precisely
			 * and avoids incompatibilities. It would be nice in
			 * future to drop through and process the data.
			 *
			 * Now that TTCP is starting to be used we ought to
			 * queue this data.
			 * But, this leaves one open to an easy denial of
			 * service attack, and SYN cookies can't defend
			 * against this problem. So, we drop the data
			 * in the interest of security over speed unless
			 * it's still in use.
			 */
			kfree_skb(skb);
			return 0;
		}
		goto discard;

	case TCP_SYN_SENT:
		queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
		if (queued >= 0)
			return queued;

		/* Do step6 onward by hand. */
		tcp_urg(sk, skb, th);
		__kfree_skb(skb);
		tcp_data_snd_check(sk, tp);
		return 0;
	}

	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
	    tcp_paws_discard(sk, skb)) {
		if (!th->rst) {
			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
			tcp_send_dupack(sk, skb);
			goto discard;
		}
		/* A reset is accepted even if it did not pass PAWS. */
	}

	/* step 1: check the sequence number */
	if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
		if (!th->rst)
			tcp_send_dupack(sk, skb);
		goto discard;
	}

	/* step 2: check the RST bit */
	if (th->rst) {
		tcp_reset(sk);
		goto discard;
	}

	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);

	/* step 3: check security and precedence [ignored] */

	/*	step 4:
	 *
	 *	Check for a SYN in window.
	 */
	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
		tcp_reset(sk);
		return 1;
	}

	/* step 5: check the ACK field */
	if (th->ack) {
		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);

		switch (sk->sk_state) {
		case TCP_SYN_RECV:
			if (acceptable) {
				tp->copied_seq = tp->rcv_nxt;
				smp_mb();
				tcp_set_state(sk, TCP_ESTABLISHED);
				sk->sk_state_change(sk);

				/* Note that this wakeup is only for the
				 * marginal crossed SYN case. Passively open
				 * sockets are not woken up, because
				 * sk->sk_sleep == NULL and
				 * sk->sk_socket == NULL.
				 */
				if (sk->sk_socket) {
					sk_wake_async(sk, 0, POLL_OUT);
				}

				tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
				tp->snd_wnd = ntohs(th->window) <<
					      tp->rx_opt.snd_wscale;
				tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq,
					    TCP_SKB_CB(skb)->seq);

				/* tcp_ack considers this ACK as a duplicate
				 * and does not calculate the rtt.
				 * Fix it at least with timestamps.
				 */
				if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
				    !tp->srtt)
					tcp_ack_saw_tstamp(sk, 0);

				if (tp->rx_opt.tstamp_ok)
					tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;

				/* Make sure socket is routed, for
				 * correct metrics.
				 */
				icsk->icsk_af_ops->rebuild_header(sk);

				tcp_init_metrics(sk);

				tcp_init_congestion_control(sk);

				/* Prevent spurious tcp_cwnd_restart() on
				 * the first data packet.
				 */
				tp->lsndtime = tcp_time_stamp;

				tcp_mtup_init(sk);
				tcp_initialize_rcv_mss(sk);
				tcp_init_buffer_space(sk);
				tcp_fast_path_on(tp);
			} else {
				return 1;
			}
			break;

		case TCP_FIN_WAIT1:
			if (tp->snd_una == tp->write_seq) {
				tcp_set_state(sk, TCP_FIN_WAIT2);
				sk->sk_shutdown |= SEND_SHUTDOWN;
				dst_confirm(sk->sk_dst_cache);

				if (!sock_flag(sk, SOCK_DEAD))
					/* Wake up lingering close() */
					sk->sk_state_change(sk);
				else {
					int tmo;

					if (tp->linger2 < 0 ||
					    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
					     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
						tcp_done(sk);
						NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
						return 1;
					}

					tmo = tcp_fin_time(sk);
					if (tmo > TCP_TIMEWAIT_LEN) {
						inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
					} else if (th->fin || sock_owned_by_user(sk)) {
						/* Bad case. We could lose such
						 * a FIN otherwise. It is not a
						 * big problem, but it looks
						 * confusing and is not such a
						 * rare event. We still can
						 * lose it now, if it spins in
						 * bh_lock_sock(), but that is
						 * a really marginal case.
						 */
						inet_csk_reset_keepalive_timer(sk, tmo);
					} else {
						tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
						goto discard;
					}
				}
			}
			break;

		case TCP_CLOSING:
			if (tp->snd_una == tp->write_seq) {
				tcp_time_wait(sk, TCP_TIME_WAIT, 0);
				goto discard;
			}
			break;

		case TCP_LAST_ACK:
			if (tp->snd_una == tp->write_seq) {
				tcp_update_metrics(sk);
				tcp_done(sk);
				goto discard;
			}
			break;
		}
	} else
		goto discard;

	/* step 6: check the URG bit */
	tcp_urg(sk, skb, th);

	/* step 7: process the segment text */
	switch (sk->sk_state) {
	case TCP_CLOSE_WAIT:
	case TCP_CLOSING:
	case TCP_LAST_ACK:
		if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
			break;
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		/* RFC 793 says to queue data in these states,
		 * RFC 1122 says we MUST send a reset.
		 * BSD 4.4 also does the reset.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
				NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
				tcp_reset(sk);
				return 1;
			}
		}
		/* Fall through */
	case TCP_ESTABLISHED:
		tcp_data_queue(sk, skb);
		queued = 1;
		break;
	}

	/* tcp_data_queue() could have moved the socket to TIME-WAIT */
	if (sk->sk_state != TCP_CLOSE) {
		tcp_data_snd_check(sk, tp);
		tcp_ack_snd_check(sk);
	}

	if (!queued) {
discard:
		__kfree_skb(skb);
	}
	return 0;
}

EXPORT_SYMBOL(sysctl_tcp_ecn);
EXPORT_SYMBOL(sysctl_tcp_reordering);
EXPORT_SYMBOL(tcp_parse_options);
EXPORT_SYMBOL(tcp_rcv_established);
EXPORT_SYMBOL(tcp_rcv_state_process);
EXPORT_SYMBOL(tcp_initialize_rcv_mss);