tcp_minisocks.c revision cd75eff64dae8856afbf6ef0f0ca3c145465d8e0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;

struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work),
	/* Short-time timewait calendar */
	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};
EXPORT_SYMBOL_GPL(tcp_death_row);
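
/*
 * Illustrative note (added, not in the original file): with the usual
 * TCP_TIMEWAIT_LEN of 60*HZ and INET_TWDR_TWKILL_SLOTS of 8, the slow-path
 * tw_timer above fires every 7.5 seconds and reaps one slot of timewait
 * buckets per tick, while twcal_timer drives the separate short-timeout
 * (recycling) calendar.
 */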

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
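
/*
 * Illustrative sketch (added, not part of the original file):
 * tcp_in_window() tests the segment [seq, end_seq) against the receive
 * window [s_win, e_win).  It accepts any segment overlapping the window,
 * any segment starting exactly at the left edge, and a zero-length segment
 * sitting exactly at the right edge.  A hypothetical self-test:
 */
#if 0
static void tcp_in_window_selftest(void)
{
	/* Bare ACK at the left edge (seq == rcv_nxt): acceptable. */
	BUG_ON(!tcp_in_window(100, 100, 100, 200));
	/* Data straddling the right edge: still acceptable. */
	BUG_ON(!tcp_in_window(150, 250, 100, 200));
	/* Entirely old data: rejected. */
	BUG_ON(tcp_in_window(50, 90, 100, 200));
}
#endif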

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data) and one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path. That is not quite correct. This timeout is calculated so
 *   that it exceeds the maximal retransmission timeout by enough to allow
 *   for the loss of one (or more) segments sent by the peer and of our
 *   ACKs. This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, that means we must
 * spinlock it. I do not want to! The probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.rcv_tsecr	-= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row.sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now the real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] in TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out-of-window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it if it is not an
	   old duplicate and we are not in danger of being killed by
	   delayed old duplicates. The RFC check, that the SYN carries a
	   newer sequence number, only works at rates < 40 Mbit/sec.
	   However, if PAWS works, it is reliable, and moreover we may
	   even relax the silly seq-space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive an RST in reply to
	   our SYN-ACK), we must return the socket to TIME-WAIT state.
	   That is not good, but not fatal yet.
	 */

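	/* Added note: the new ISN below implements RFC 1122 requirement (1)
	 * quoted above.  tw_snd_nxt + 65535 + 2 is safely above any sequence
	 * number the previous incarnation can have used (its snd_nxt plus a
	 * maximal 64K window, plus a margin of 2); an ISN of 0 is skipped
	 * because a zero "when" means "pick a fresh ISN" further down the
	 * SYN processing path.
	 */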
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be either an old duplicate
		 * or a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note that we do not put the bucket;
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	bool recycle_ok = false;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tcp_remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
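		/* Added note: (icsk_rto << 2) - (icsk_rto >> 1) is
		 * 4*RTO - RTO/2 = 3.5*RTO, the classical timewait interval
		 * used below when tw_recycle takes effect.
		 */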
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			tw6->tw_v6_daddr = np->daddr;
			tw6->tw_v6_rcv_saddr = np->rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_ipv6only = np->ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key != NULL) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL)
					BUG();
			}
		} while (0);
#endif

		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key) {
		tcp_free_md5sig_pool();
		kfree_rcu(twsk->tw_md5_key, rcu);
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
					 struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);
		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		tcp_enable_early_retrans(newtp);
		newtp->tlp_high_seq = 0;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;
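		/* Added note: TCP_INIT_CWND is 10 segments (IW10, RFC 6928)
		 * in kernels of this vintage.
		 */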

		if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
		    !try_module_get(newicsk->icsk_ca_ops->owner))
			newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = 0;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
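
/*
 * Illustrative sketch (added, not part of the original file): the snd_wnd
 * computation in tcp_create_openreq_child() shifts the raw 16-bit window
 * field by the scale negotiated in the handshake.  For example, a raw
 * window of 0x2000 (8192) with snd_wscale 7 yields an effective send
 * window of 8192 << 7 = 1048576 bytes.
 */
#if 0
static u32 scaled_send_window(__be16 raw_window, u8 snd_wscale)
{
	/* Mirrors: newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << snd_wscale */
	return (u32)ntohs(raw_window) << snd_wscale;
}
#endif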

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;

	BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot trust it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar
		 * to the idea of fast retransmit in recovery.
		 */
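		/* Added note: inet_rtx_syn_ack() returns 0 on success, so a
		 * successful retransmission re-arms the request with an
		 * exponentially backed-off expiry: after n prior timeouts the
		 * next window is min(TCP_TIMEOUT_INIT << n, TCP_RTO_MAX)
		 * jiffies from now.
		 */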
		if (!inet_rtx_syn_ack(sk, req))
			req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
					   TCP_RTO_MAX) + jiffies;
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; the only case it fails to handle
	   is crossed SYNs.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes.  So
	   does the sequence test, the SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: this case is both harmless and rare.  The probability is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (RFC lies!) accept an ACK
	   of a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not either.

	   Note that this case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: a reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */
	if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
		tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
	else if (req->num_retrans) /* don't take RTT sample if retrans && ~TS */
		tcp_rsk(req)->snt_synack = 0;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, so create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
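
/*
 * Illustrative caller sketch (added; names and control flow are an
 * assumption modelled on the IPv4 receive path, not part of this file):
 * tcp_check_req() hands back a child socket, which the caller then feeds
 * to tcp_child_process() while holding the child's bh lock.
 */
#if 0
static int example_pass_to_child(struct sock *listener, struct sock *child,
				 struct sk_buff *skb)
{
	int ret;

	bh_lock_sock(child);	/* tcp_child_process() expects this held */
	ret = tcp_child_process(listener, child, skb);
	/* tcp_child_process() drops the lock and the reference itself. */
	return ret;
}
#endif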