tcp_minisocks.c revision ec3c0982a2dd1e671bad8e9d26c28dcba0039d87
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif

int sysctl_tcp_syncookies __read_mostly = SYNC_INIT;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;

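/* tcp_death_row manages the TIME-WAIT buckets: it bounds their number
 * (sysctl_max_tw_buckets), expires them from the slotted "hangman"
 * timer, and keeps a separate short-timeout calendar (twcal_timer)
 * for buckets scheduled with timeouts below the slot granularity.
 */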
struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work),
	/* Short-time timewait calendar */
	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};

EXPORT_SYMBOL_GPL(tcp_death_row);

static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}

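/* Worked example (hypothetical values): with rcv_nxt == 1000 and a
 * receive window of 500, s_win == 1000 and e_win == 1500.  A segment
 * with seq == 1200, end_seq == 1300 is in window (end_seq is after
 * s_win and seq is before e_win); a bare ACK with seq == end_seq ==
 * 1000 is accepted by the seq == s_win test; a segment with
 * seq == 1600 fails all three tests and is out of window.
 */
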
/*
 * * The main purpose of the TIME-WAIT state is to close a connection
 *   gracefully when one end sits in LAST-ACK or CLOSING, retransmitting
 *   its FIN (and, probably, a tail of data), and one or more of our ACKs
 *   are lost.
 * * What is the TIME-WAIT timeout? It is often associated with the
 *   maximal packet lifetime in the internet, which leads to the wrong
 *   conclusion that it exists to catch "old duplicate segments" wandering
 *   off their path. That is not quite correct. This timeout is calculated
 *   so that it exceeds the maximal retransmission timeout by enough to
 *   tolerate the loss of one (or more) segments sent by the peer and of
 *   our ACKs. This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed and we are allowed to kill the TIME-WAIT state too.
 * * The second purpose of TIME-WAIT is to catch old duplicate segments.
 *   Certainly this is pure paranoia, but if we load TIME-WAIT with these
 *   semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates (e.g. based on
 *   PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When comparing it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket is
 * _not_ stateless, which strictly speaking means we must spinlock it.
 * I do not want to! The probability of misbehaviour is ridiculously low
 * and, it seems, we could use some mb() tricks to avoid misreading
 * sequence numbers, states, etc.  --ANK
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	struct tcp_options_received tmp_opt;
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
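	/* Only parse options if the header is longer than a bare 20-byte
	 * TCP header and we have a stored timestamp to compare against.
	 */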
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}

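	/* The bucket may still represent a connection in FIN-WAIT-2:
	 * tcp_time_wait() recycles such sockets into timewait buckets
	 * and records the true state in tw_substate.
	 */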
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		/* I am ashamed, but I failed to make it more elegant.
		 * Yes, this is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Given that IPv6 does not understand
		 * recycling in any case, it is not a big problem in
		 * practice. --ANK */
		if (tw->tw_family == AF_INET &&
		    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now the real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] in TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment: it may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it if it is not an
	   old duplicate and we are in no danger of being killed by
	   delayed old duplicates. The RFC check (that it carries a newer
	   sequence number) works at rates below 40 Mbit/sec. However, if
	   PAWS works, it is reliable, and moreover we may even relax the
	   silly sequence-space cutoff.

	   RED-PEN: we violate the main RFC requirement here: if this SYN
	   turns out to be an old duplicate (i.e. we receive an RST in
	   reply to our SYN-ACK), we must return the socket to the
	   time-wait state. That is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
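		/* Requirement (1) of the RFC 1122 excerpt above: choose an
		 * ISN beyond any sequence number the old incarnation may
		 * have used (its snd_nxt plus a maximal 64K window). Zero
		 * is avoided because a zero "when" tells the caller that
		 * no ISN was supplied.
		 */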
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be either an old duplicate
		 * or a new, good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note that we do not put the bucket;
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = icsk->icsk_af_ops->remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
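		/* (rto << 2) - (rto >> 1) == 4*RTO - RTO/2 == 3.5*RTO,
		 * used below as a lower bound on the timewait timeout.
		 */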
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_ipv6only = np->ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
			tcptw->tw_md5_keylen = 0;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key != NULL) {
				memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
				tcptw->tw_md5_keylen = key->keylen;
				if (tcp_alloc_md5sig_pool() == NULL)
					BUG();
			}
		} while (0);
#endif

		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);
	if (twsk->tw_md5_keylen)
		tcp_put_md5sig_pool();
#endif
}

EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
					 struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of the memory writes here: the tp of the
 * listening socket already contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp;

		/* Now setup tcp_sock */
		newtp = tcp_sk(newsk);
		newtp->pred_flags = 0;
		newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = 0x7fffffff;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = treq->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.eff_sacks = 0;

		newtp->rx_opt.num_sacks = 0;
		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}

/*
 *	Process an incoming packet for SYN_RECV sockets represented
 *	as a request_sock.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
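	/* Only the RST, SYN and ACK bits are of interest to the checks
	 * below; everything else is masked out of flg.
	 */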
	int paws_reject = 0;
	struct tcp_options_received tmp_opt;
	struct sock *child;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr) >> 2)) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws this case (incorrectly! It was fixed in
		 * RFC1122) in figure 6 and figure 8, but the formal
		 * protocol description says NOTHING. To be more exact,
		 * it says that we should send an ACK, because this
		 * segment (at least, if it has no data) is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All of the description
		 *  is wrong; we cannot trust it and should rely only
		 *  on common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req);
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; the only case in which it does not
	   work is when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK; the ACK test passes.  So
	   does the sequence test; the SYN is truncated, and thus we
	   consider it a bare ACK.

	   Both ends (listening sockets) accept the new incoming
	   connection and try to talk to each other. 8-)

	   Note: this case is both harmless and rare.  The possibility is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK is not set, silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* OK, the ACK is valid: create the big socket and feed this
	 * segment to it. It will repeat all the tests. THIS SEGMENT
	 * MUST MOVE THE SOCKET TO ESTABLISHED STATE. If it is dropped
	 * after the socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb,
							 req, NULL);
	if (child == NULL)
		goto listen_overflow;
#ifdef CONFIG_TCP_MD5SIG
	else {
		/* Copy over the MD5 key from the original socket */
		struct tcp_md5sig_key *key;
		struct tcp_sock *tp = tcp_sk(sk);
		key = tp->af_specific->md5_lookup(sk, child);
		if (key != NULL) {
			/*
			 * We're using one, so create a matching key on the
			 * newsk structure. If we fail to get memory then we
			 * end up not copying the key across. Shucks.
			 */
			char *newkey = kmemdup(key->key, key->keylen,
					       GFP_ATOMIC);
			if (newkey) {
				if (!tcp_alloc_md5sig_pool())
					BUG();
				tp->af_specific->md5_add(child, child,
							 newkey,
							 key->keylen);
			}
		}
	}
#endif

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {

		/* The accept-queue handling is done in the established-state
		 * receive slow path, so make sure we start there.
		 */
		tcp_sk(child)->pred_flags = 0;
		sock_hold(sk);
		sock_hold(child);
		tcp_sk(child)->defer_tcp_accept.listen_sk = sk;
		tcp_sk(child)->defer_tcp_accept.request = req;

		inet_csk_reset_keepalive_timer(child,
					       inet_csk(sk)->icsk_accept_queue.rskq_defer_accept * HZ);
	} else {
		inet_csk_reqsk_queue_add(sk, req, child);
	}

	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(sk, skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}

/*
 * Queue the segment on the new socket if that socket is busy (owned
 * by user context); otherwise process it directly and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;
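	/* sk_state is snapshotted before processing so that the
	 * transition out of TCP_SYN_RECV can be detected below to wake
	 * up the parent.
	 */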

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do a lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}

EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);
