tcp_ipv4.c revision b2fb4f54ecd47c42413d54b4666b06cf93c05abf
1/*
2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
3 *		operating system.  INET is implemented using the  BSD Socket
4 *		interface as the means of communication with the user level.
5 *
6 *		Implementation of the Transmission Control Protocol(TCP).
7 *
8 *		IPv4 specific functions
9 *
10 *
11 *		code split from:
12 *		linux/ipv4/tcp.c
13 *		linux/ipv4/tcp_input.c
14 *		linux/ipv4/tcp_output.c
15 *
16 *		See tcp.c for author information
17 *
18 *	This program is free software; you can redistribute it and/or
19 *      modify it under the terms of the GNU General Public License
20 *      as published by the Free Software Foundation; either version
21 *      2 of the License, or (at your option) any later version.
22 */
23
24/*
25 * Changes:
26 *		David S. Miller	:	New socket lookup architecture.
27 *					This code is dedicated to John Dyson.
28 *		David S. Miller :	Change semantics of established hash,
29 *					half is devoted to TIME_WAIT sockets
30 *					and the rest go in the other half.
31 *		Andi Kleen :		Add support for syncookies and fixed
32 *					some bugs: ip options weren't passed to
33 *					the TCP layer, missed a check for an
34 *					ACK bit.
35 *		Andi Kleen :		Implemented fast path mtu discovery.
36 *	     				Fixed many serious bugs in the
37 *					request_sock handling and moved
38 *					most of it into the af independent code.
39 *					Added tail drop and some other bugfixes.
40 *					Added new listen semantics.
41 *		Mike McLagan	:	Routing by source
42 *	Juan Jose Ciarlante:		ip_dynaddr bits
43 *		Andi Kleen:		various fixes.
44 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
45 *					coma.
46 *	Andi Kleen		:	Fix new listen.
47 *	Andi Kleen		:	Fix accept error reporting.
48 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
50 *					a single port at the same time.
51 */
52
53#define pr_fmt(fmt) "TCP: " fmt
54
55#include <linux/bottom_half.h>
56#include <linux/types.h>
57#include <linux/fcntl.h>
58#include <linux/module.h>
59#include <linux/random.h>
60#include <linux/cache.h>
61#include <linux/jhash.h>
62#include <linux/init.h>
63#include <linux/times.h>
64#include <linux/slab.h>
65
66#include <net/net_namespace.h>
67#include <net/icmp.h>
68#include <net/inet_hashtables.h>
69#include <net/tcp.h>
70#include <net/transp_v6.h>
71#include <net/ipv6.h>
72#include <net/inet_common.h>
73#include <net/timewait_sock.h>
74#include <net/xfrm.h>
75#include <net/netdma.h>
76#include <net/secure_seq.h>
77#include <net/tcp_memcontrol.h>
78
79#include <linux/inet.h>
80#include <linux/ipv6.h>
81#include <linux/stddef.h>
82#include <linux/proc_fs.h>
83#include <linux/seq_file.h>
84
85#include <linux/crypto.h>
86#include <linux/scatterlist.h>
87
88int sysctl_tcp_tw_reuse __read_mostly;
89int sysctl_tcp_low_latency __read_mostly;
90EXPORT_SYMBOL(sysctl_tcp_low_latency);
91
92
93#ifdef CONFIG_TCP_MD5SIG
94static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
96#endif
97
98struct inet_hashinfo tcp_hashinfo;
99EXPORT_SYMBOL(tcp_hashinfo);
100
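/* Choose an initial sequence number for a connection from the addresses and
 * ports of the received segment; the hard work is done by
 * secure_tcp_sequence_number().
 */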
101static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
102{
103	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
104					  ip_hdr(skb)->saddr,
105					  tcp_hdr(skb)->dest,
106					  tcp_hdr(skb)->source);
107}
108
109int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110{
111	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112	struct tcp_sock *tp = tcp_sk(sk);
113
114	/* With PAWS, it is safe from the viewpoint
115	   of data integrity. Even without PAWS it is safe provided sequence
116	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.
117
118	   Actually, the idea is close to VJ's, only the timestamp cache is
119	   held not per host but per port pair, and the TW bucket is used as the
120	   state holder.
121
122	   If the TW bucket has already been destroyed we fall back to VJ's scheme
123	   and use the initial timestamp retrieved from the peer table.
124	 */
125	if (tcptw->tw_ts_recent_stamp &&
126	    (twp == NULL || (sysctl_tcp_tw_reuse &&
127			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
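		/* Advance write_seq past the old connection's receive window
		 * (64K plus a bit) so segments from the two incarnations
		 * cannot be confused.
		 */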
128		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129		if (tp->write_seq == 0)
130			tp->write_seq = 1;
131		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
132		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133		sock_hold(sktw);
134		return 1;
135	}
136
137	return 0;
138}
139EXPORT_SYMBOL_GPL(tcp_twsk_unique);
140
141/* This will initiate an outgoing connection. */
142int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
143{
144	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
145	struct inet_sock *inet = inet_sk(sk);
146	struct tcp_sock *tp = tcp_sk(sk);
147	__be16 orig_sport, orig_dport;
148	__be32 daddr, nexthop;
149	struct flowi4 *fl4;
150	struct rtable *rt;
151	int err;
152	struct ip_options_rcu *inet_opt;
153
154	if (addr_len < sizeof(struct sockaddr_in))
155		return -EINVAL;
156
157	if (usin->sin_family != AF_INET)
158		return -EAFNOSUPPORT;
159
160	nexthop = daddr = usin->sin_addr.s_addr;
161	inet_opt = rcu_dereference_protected(inet->inet_opt,
162					     sock_owned_by_user(sk));
163	if (inet_opt && inet_opt->opt.srr) {
164		if (!daddr)
165			return -EINVAL;
166		nexthop = inet_opt->opt.faddr;
167	}
168
169	orig_sport = inet->inet_sport;
170	orig_dport = usin->sin_port;
171	fl4 = &inet->cork.fl.u.ip4;
172	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
173			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
174			      IPPROTO_TCP,
175			      orig_sport, orig_dport, sk, true);
176	if (IS_ERR(rt)) {
177		err = PTR_ERR(rt);
178		if (err == -ENETUNREACH)
179			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
180		return err;
181	}
182
183	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
184		ip_rt_put(rt);
185		return -ENETUNREACH;
186	}
187
188	if (!inet_opt || !inet_opt->opt.srr)
189		daddr = fl4->daddr;
190
191	if (!inet->inet_saddr)
192		inet->inet_saddr = fl4->saddr;
193	inet->inet_rcv_saddr = inet->inet_saddr;
194
195	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
196		/* Reset inherited state */
197		tp->rx_opt.ts_recent	   = 0;
198		tp->rx_opt.ts_recent_stamp = 0;
199		if (likely(!tp->repair))
200			tp->write_seq	   = 0;
201	}
202
203	if (tcp_death_row.sysctl_tw_recycle &&
204	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
205		tcp_fetch_timewait_stamp(sk, &rt->dst);
206
207	inet->inet_dport = usin->sin_port;
208	inet->inet_daddr = daddr;
209
210	inet_csk(sk)->icsk_ext_hdr_len = 0;
211	if (inet_opt)
212		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
213
214	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
215
216	/* Socket identity is still unknown (sport may be zero).
217	 * However we set state to SYN-SENT and, without releasing the socket
218	 * lock, select a source port, enter ourselves into the hash tables and
219	 * complete initialization after this.
220	 */
221	tcp_set_state(sk, TCP_SYN_SENT);
222	err = inet_hash_connect(&tcp_death_row, sk);
223	if (err)
224		goto failure;
225
226	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
227			       inet->inet_sport, inet->inet_dport, sk);
228	if (IS_ERR(rt)) {
229		err = PTR_ERR(rt);
230		rt = NULL;
231		goto failure;
232	}
233	/* OK, now commit destination to socket.  */
234	sk->sk_gso_type = SKB_GSO_TCPV4;
235	sk_setup_caps(sk, &rt->dst);
236
237	if (!tp->write_seq && likely(!tp->repair))
238		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
239							   inet->inet_daddr,
240							   inet->inet_sport,
241							   usin->sin_port);
242
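	/* Pick a starting IP identification value for this connection that
	 * does not repeat predictably across connections.
	 */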
243	inet->inet_id = tp->write_seq ^ jiffies;
244
245	err = tcp_connect(sk);
246
247	rt = NULL;
248	if (err)
249		goto failure;
250
251	return 0;
252
253failure:
254	/*
255	 * This unhashes the socket and releases the local port,
256	 * if necessary.
257	 */
258	tcp_set_state(sk, TCP_CLOSE);
259	ip_rt_put(rt);
260	sk->sk_route_caps = 0;
261	inet->inet_dport = 0;
262	return err;
263}
264EXPORT_SYMBOL(tcp_v4_connect);
265
266/*
267 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
268 * It can be called through tcp_release_cb() if socket was owned by user
269 * at the time tcp_v4_err() was called to handle ICMP message.
270 */
271static void tcp_v4_mtu_reduced(struct sock *sk)
272{
273	struct dst_entry *dst;
274	struct inet_sock *inet = inet_sk(sk);
275	u32 mtu = tcp_sk(sk)->mtu_info;
276
277	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
278	 * sent out by Linux are always < 576 bytes, so they should go through
279	 * unfragmented).
280	 */
281	if (sk->sk_state == TCP_LISTEN)
282		return;
283
284	dst = inet_csk_update_pmtu(sk, mtu);
285	if (!dst)
286		return;
287
288	/* Something is about to go wrong... Remember the soft error
289	 * in case this connection is not able to recover.
290	 */
291	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
292		sk->sk_err_soft = EMSGSIZE;
293
294	mtu = dst_mtu(dst);
295
296	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
297	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
298		tcp_sync_mss(sk, mtu);
299
300		/* Resend the TCP packet because it's
301		 * clear that the old packet has been
302		 * dropped. This is the new "fast" path mtu
303		 * discovery.
304		 */
305		tcp_simple_retransmit(sk);
306	} /* else let the usual retransmit timer handle it */
307}
308
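/* An ICMP redirect was received for this socket: if a route is still cached,
 * let its dst_ops update the next hop.
 */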
309static void do_redirect(struct sk_buff *skb, struct sock *sk)
310{
311	struct dst_entry *dst = __sk_dst_check(sk, 0);
312
313	if (dst)
314		dst->ops->redirect(dst, sk, skb);
315}
316
317/*
318 * This routine is called by the ICMP module when it gets some
319 * sort of error condition.  If err < 0 then the socket should
320 * be closed and the error returned to the user.  If err > 0
321 * it's just the icmp type << 8 | icmp code.  After adjustment
322 * header points to the first 8 bytes of the tcp header.  We need
323 * to find the appropriate port.
324 *
325 * The locking strategy used here is very "optimistic". When
326 * someone else accesses the socket the ICMP is just dropped
327 * and for some paths there is no check at all.
328 * A more general error queue to queue errors for later handling
329 * is probably better.
330 *
331 */
332
333void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
334{
335	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
336	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
337	struct inet_connection_sock *icsk;
338	struct tcp_sock *tp;
339	struct inet_sock *inet;
340	const int type = icmp_hdr(icmp_skb)->type;
341	const int code = icmp_hdr(icmp_skb)->code;
342	struct sock *sk;
343	struct sk_buff *skb;
344	struct request_sock *req;
345	__u32 seq;
346	__u32 remaining;
347	int err;
348	struct net *net = dev_net(icmp_skb->dev);
349
350	if (icmp_skb->len < (iph->ihl << 2) + 8) {
351		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
352		return;
353	}
354
355	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
356			iph->saddr, th->source, inet_iif(icmp_skb));
357	if (!sk) {
358		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
359		return;
360	}
361	if (sk->sk_state == TCP_TIME_WAIT) {
362		inet_twsk_put(inet_twsk(sk));
363		return;
364	}
365
366	bh_lock_sock(sk);
367	/* If too many ICMPs get dropped on busy
368	 * servers this needs to be solved differently.
369	 * We do take care of the PMTU discovery (RFC1191) special case:
370	 * we can receive locally generated ICMP messages while the socket is held.
371	 */
372	if (sock_owned_by_user(sk)) {
373		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
374			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
375	}
376	if (sk->sk_state == TCP_CLOSE)
377		goto out;
378
379	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
380		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
381		goto out;
382	}
383
384	icsk = inet_csk(sk);
385	tp = tcp_sk(sk);
386	req = tp->fastopen_rsk;
387	seq = ntohl(th->seq);
388	if (sk->sk_state != TCP_LISTEN &&
389	    !between(seq, tp->snd_una, tp->snd_nxt) &&
390	    (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
391		/* For a Fast Open socket, allow seq to be snt_isn. */
392		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
393		goto out;
394	}
395
396	switch (type) {
397	case ICMP_REDIRECT:
398		do_redirect(icmp_skb, sk);
399		goto out;
400	case ICMP_SOURCE_QUENCH:
401		/* Just silently ignore these. */
402		goto out;
403	case ICMP_PARAMETERPROB:
404		err = EPROTO;
405		break;
406	case ICMP_DEST_UNREACH:
407		if (code > NR_ICMP_UNREACH)
408			goto out;
409
410		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
411			tp->mtu_info = info;
412			if (!sock_owned_by_user(sk)) {
413				tcp_v4_mtu_reduced(sk);
414			} else {
415				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
416					sock_hold(sk);
417			}
418			goto out;
419		}
420
421		err = icmp_err_convert[code].errno;
422		/* check if icmp_skb allows revert of backoff
423		 * (see draft-zimmermann-tcp-lcd) */
424		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
425			break;
426		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
427		    !icsk->icsk_backoff)
428			break;
429
430		/* XXX (TFO) - revisit the following logic for TFO */
431
432		if (sock_owned_by_user(sk))
433			break;
434
435		icsk->icsk_backoff--;
436		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
437			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
438		tcp_bound_rto(sk);
439
440		skb = tcp_write_queue_head(sk);
441		BUG_ON(!skb);
442
443		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
444				tcp_time_stamp - TCP_SKB_CB(skb)->when);
445
446		if (remaining) {
447			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
448						  remaining, TCP_RTO_MAX);
449		} else {
450			/* RTO revert clocked out retransmission.
451			 * Will retransmit now */
452			tcp_retransmit_timer(sk);
453		}
454
455		break;
456	case ICMP_TIME_EXCEEDED:
457		err = EHOSTUNREACH;
458		break;
459	default:
460		goto out;
461	}
462
463	/* XXX (TFO) - if it's a TFO socket and has been accepted, rather
464	 * than following the TCP_SYN_RECV case and closing the socket,
465	 * we ignore the ICMP error and keep trying like a fully established
466	 * socket. Is this the right thing to do?
467	 */
468	if (req && req->sk == NULL)
469		goto out;
470
471	switch (sk->sk_state) {
472		struct request_sock *req, **prev;
473	case TCP_LISTEN:
474		if (sock_owned_by_user(sk))
475			goto out;
476
477		req = inet_csk_search_req(sk, &prev, th->dest,
478					  iph->daddr, iph->saddr);
479		if (!req)
480			goto out;
481
482		/* ICMPs are not backlogged, hence we cannot get
483		   an established socket here.
484		 */
485		WARN_ON(req->sk);
486
487		if (seq != tcp_rsk(req)->snt_isn) {
488			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
489			goto out;
490		}
491
492		/*
493		 * Still in SYN_RECV, just remove it silently.
494		 * There is no good way to pass the error to the newly
495		 * created socket, and POSIX does not want network
496		 * errors returned from accept().
497		 */
498		inet_csk_reqsk_queue_drop(sk, req, prev);
499		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
500		goto out;
501
502	case TCP_SYN_SENT:
503	case TCP_SYN_RECV:  /* Cannot happen.
504			       It can, e.g., if SYNs crossed,
505			       or with Fast Open.
506			     */
507		if (!sock_owned_by_user(sk)) {
508			sk->sk_err = err;
509
510			sk->sk_error_report(sk);
511
512			tcp_done(sk);
513		} else {
514			sk->sk_err_soft = err;
515		}
516		goto out;
517	}
518
519	/* If we've already connected we will keep trying
520	 * until we time out, or the user gives up.
521	 *
522	 * rfc1122 4.2.3.9 allows us to consider as hard errors
523	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
524	 * but it is obsoleted by pmtu discovery).
525	 *
526	 * Note that in the modern internet, where routing is unreliable
527	 * and broken firewalls sit in every dark corner sending random
528	 * errors ordered by their masters, even these two messages finally lose
529	 * their original sense (even Linux sends invalid PORT_UNREACHs).
530	 *
531	 * Now we are in compliance with RFCs.
532	 *							--ANK (980905)
533	 */
534
535	inet = inet_sk(sk);
536	if (!sock_owned_by_user(sk) && inet->recverr) {
537		sk->sk_err = err;
538		sk->sk_error_report(sk);
539	} else	{ /* Only an error on timeout */
540		sk->sk_err_soft = err;
541	}
542
543out:
544	bh_unlock_sock(sk);
545	sock_put(sk);
546}
547
548static void __tcp_v4_send_check(struct sk_buff *skb,
549				__be32 saddr, __be32 daddr)
550{
551	struct tcphdr *th = tcp_hdr(skb);
552
553	if (skb->ip_summed == CHECKSUM_PARTIAL) {
554		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
555		skb->csum_start = skb_transport_header(skb) - skb->head;
556		skb->csum_offset = offsetof(struct tcphdr, check);
557	} else {
558		th->check = tcp_v4_check(skb->len, saddr, daddr,
559					 csum_partial(th,
560						      th->doff << 2,
561						      skb->csum));
562	}
563}
564
565/* This routine computes an IPv4 TCP checksum. */
566void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
567{
568	const struct inet_sock *inet = inet_sk(sk);
569
570	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
571}
572EXPORT_SYMBOL(tcp_v4_send_check);
573
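/* Prepare a GSO packet's TCP checksum fields: only the pseudo-header sum is
 * filled in here, the per-segment checksums are completed later.
 */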
574int tcp_v4_gso_send_check(struct sk_buff *skb)
575{
576	const struct iphdr *iph;
577	struct tcphdr *th;
578
579	if (!pskb_may_pull(skb, sizeof(*th)))
580		return -EINVAL;
581
582	iph = ip_hdr(skb);
583	th = tcp_hdr(skb);
584
585	th->check = 0;
586	skb->ip_summed = CHECKSUM_PARTIAL;
587	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
588	return 0;
589}
590
591/*
592 *	This routine will send an RST to the other tcp.
593 *
594 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
595 *		      for the reset?
596 *	Answer: if a packet caused an RST, it is not for a socket
597 *		existing in our system; if it is matched to a socket,
598 *		it is just a duplicate segment or a bug in the other side's TCP.
599 *		So we build the reply based only on the parameters that
600 *		arrived with the segment.
601 *	Exception: precedence violation. We do not implement it in any case.
602 */
603
604static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
605{
606	const struct tcphdr *th = tcp_hdr(skb);
607	struct {
608		struct tcphdr th;
609#ifdef CONFIG_TCP_MD5SIG
610		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
611#endif
612	} rep;
613	struct ip_reply_arg arg;
614#ifdef CONFIG_TCP_MD5SIG
615	struct tcp_md5sig_key *key;
616	const __u8 *hash_location = NULL;
617	unsigned char newhash[16];
618	int genhash;
619	struct sock *sk1 = NULL;
620#endif
621	struct net *net;
622
623	/* Never send a reset in response to a reset. */
624	if (th->rst)
625		return;
626
627	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
628		return;
629
630	/* Swap the send and the receive. */
631	memset(&rep, 0, sizeof(rep));
632	rep.th.dest   = th->source;
633	rep.th.source = th->dest;
634	rep.th.doff   = sizeof(struct tcphdr) / 4;
635	rep.th.rst    = 1;
636
637	if (th->ack) {
638		rep.th.seq = th->ack_seq;
639	} else {
640		rep.th.ack = 1;
641		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
642				       skb->len - (th->doff << 2));
643	}
644
645	memset(&arg, 0, sizeof(arg));
646	arg.iov[0].iov_base = (unsigned char *)&rep;
647	arg.iov[0].iov_len  = sizeof(rep.th);
648
649#ifdef CONFIG_TCP_MD5SIG
650	hash_location = tcp_parse_md5sig_option(th);
651	if (!sk && hash_location) {
652		/*
653		 * The active side is lost. Try to find the listening socket via the
654		 * source port, and then find the md5 key through the listening socket.
655		 * We do not lose any security here:
656		 * the incoming packet is checked against the md5 hash of the found key,
657		 * and no RST is generated if the md5 hash doesn't match.
658		 */
659		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
660					     &tcp_hashinfo, ip_hdr(skb)->saddr,
661					     th->source, ip_hdr(skb)->daddr,
662					     ntohs(th->source), inet_iif(skb));
663		/* don't send an rst if we can't find the key */
664		if (!sk1)
665			return;
666		rcu_read_lock();
667		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
668					&ip_hdr(skb)->saddr, AF_INET);
669		if (!key)
670			goto release_sk1;
671
672		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
673		if (genhash || memcmp(hash_location, newhash, 16) != 0)
674			goto release_sk1;
675	} else {
676		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
677					     &ip_hdr(skb)->saddr,
678					     AF_INET) : NULL;
679	}
680
681	if (key) {
682		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
683				   (TCPOPT_NOP << 16) |
684				   (TCPOPT_MD5SIG << 8) |
685				   TCPOLEN_MD5SIG);
686		/* Update length and the length the header thinks exists */
687		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
688		rep.th.doff = arg.iov[0].iov_len / 4;
689
690		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
691				     key, ip_hdr(skb)->saddr,
692				     ip_hdr(skb)->daddr, &rep.th);
693	}
694#endif
695	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
696				      ip_hdr(skb)->saddr, /* XXX */
697				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
698	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
699	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
700	/* When the socket is gone, all binding information is lost.
701	 * Routing might fail in this case. No choice here: if we choose to force
702	 * the input interface, we will misroute in the case of an asymmetric route.
703	 */
704	if (sk)
705		arg.bound_dev_if = sk->sk_bound_dev_if;
706
707	net = dev_net(skb_dst(skb)->dev);
708	arg.tos = ip_hdr(skb)->tos;
709	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
710			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
711
712	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
713	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
714
715#ifdef CONFIG_TCP_MD5SIG
716release_sk1:
717	if (sk1) {
718		rcu_read_unlock();
719		sock_put(sk1);
720	}
721#endif
722}
723
724/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
725   outside socket context, is certainly ugly. What can I do?
726 */
727
728static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
729			    u32 win, u32 tsval, u32 tsecr, int oif,
730			    struct tcp_md5sig_key *key,
731			    int reply_flags, u8 tos)
732{
733	const struct tcphdr *th = tcp_hdr(skb);
734	struct {
735		struct tcphdr th;
736		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
737#ifdef CONFIG_TCP_MD5SIG
738			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
739#endif
740			];
741	} rep;
742	struct ip_reply_arg arg;
743	struct net *net = dev_net(skb_dst(skb)->dev);
744
745	memset(&rep.th, 0, sizeof(struct tcphdr));
746	memset(&arg, 0, sizeof(arg));
747
748	arg.iov[0].iov_base = (unsigned char *)&rep;
749	arg.iov[0].iov_len  = sizeof(rep.th);
750	if (tsecr) {
751		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
752				   (TCPOPT_TIMESTAMP << 8) |
753				   TCPOLEN_TIMESTAMP);
754		rep.opt[1] = htonl(tsval);
755		rep.opt[2] = htonl(tsecr);
756		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
757	}
758
759	/* Swap the send and the receive. */
760	rep.th.dest    = th->source;
761	rep.th.source  = th->dest;
762	rep.th.doff    = arg.iov[0].iov_len / 4;
763	rep.th.seq     = htonl(seq);
764	rep.th.ack_seq = htonl(ack);
765	rep.th.ack     = 1;
766	rep.th.window  = htons(win);
767
768#ifdef CONFIG_TCP_MD5SIG
769	if (key) {
770		int offset = (tsecr) ? 3 : 0;
771
772		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
773					  (TCPOPT_NOP << 16) |
774					  (TCPOPT_MD5SIG << 8) |
775					  TCPOLEN_MD5SIG);
776		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
777		rep.th.doff = arg.iov[0].iov_len/4;
778
779		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
780				    key, ip_hdr(skb)->saddr,
781				    ip_hdr(skb)->daddr, &rep.th);
782	}
783#endif
784	arg.flags = reply_flags;
785	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
786				      ip_hdr(skb)->saddr, /* XXX */
787				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
788	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
789	if (oif)
790		arg.bound_dev_if = oif;
791	arg.tos = tos;
792	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
793			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
794
795	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
796}
797
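/* Send an ACK on behalf of a socket in TIME-WAIT, reusing the sequence,
 * window and timestamp state kept in the timewait bucket.
 */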
798static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
799{
800	struct inet_timewait_sock *tw = inet_twsk(sk);
801	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
802
803	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
804			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
805			tcp_time_stamp + tcptw->tw_ts_offset,
806			tcptw->tw_ts_recent,
807			tw->tw_bound_dev_if,
808			tcp_twsk_md5_key(tcptw),
809			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
810			tw->tw_tos
811			);
812
813	inet_twsk_put(tw);
814}
815
816static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
817				  struct request_sock *req)
818{
819	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
820	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
821	 */
822	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
823			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
824			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
825			tcp_time_stamp,
826			req->ts_recent,
827			0,
828			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
829					  AF_INET),
830			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
831			ip_hdr(skb)->tos);
832}
833
834/*
835 *	Send a SYN-ACK after having received a SYN.
836 *	This still operates on a request_sock only, not on a big
837 *	socket.
838 */
839static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
840			      struct request_sock *req,
841			      struct request_values *rvp,
842			      u16 queue_mapping,
843			      bool nocache)
844{
845	const struct inet_request_sock *ireq = inet_rsk(req);
846	struct flowi4 fl4;
847	int err = -1;
848	struct sk_buff * skb;
849
850	/* First, grab a route. */
851	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
852		return -1;
853
854	skb = tcp_make_synack(sk, dst, req, rvp, NULL);
855
856	if (skb) {
857		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
858
859		skb_set_queue_mapping(skb, queue_mapping);
860		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
861					    ireq->rmt_addr,
862					    ireq->opt);
863		err = net_xmit_eval(err);
864		if (!tcp_rsk(req)->snt_synack && !err)
865			tcp_rsk(req)->snt_synack = tcp_time_stamp;
866	}
867
868	return err;
869}
870
871static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
872			     struct request_values *rvp)
873{
874	int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
875
876	if (!res)
877		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
878	return res;
879}
880
881/*
882 *	IPv4 request_sock destructor.
883 */
884static void tcp_v4_reqsk_destructor(struct request_sock *req)
885{
886	kfree(inet_rsk(req)->opt);
887}
888
889/*
890 * Return true if a syncookie should be sent
891 */
892bool tcp_syn_flood_action(struct sock *sk,
893			 const struct sk_buff *skb,
894			 const char *proto)
895{
896	const char *msg = "Dropping request";
897	bool want_cookie = false;
898	struct listen_sock *lopt;
899
900
901
902#ifdef CONFIG_SYN_COOKIES
903	if (sysctl_tcp_syncookies) {
904		msg = "Sending cookies";
905		want_cookie = true;
906		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
907	} else
908#endif
909		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
910
911	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
912	if (!lopt->synflood_warned) {
913		lopt->synflood_warned = 1;
914		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
915			proto, ntohs(tcp_hdr(skb)->dest), msg);
916	}
917	return want_cookie;
918}
919EXPORT_SYMBOL(tcp_syn_flood_action);
920
921/*
922 * Save and compile IPv4 options into the request_sock if needed.
923 */
924static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
925{
926	const struct ip_options *opt = &(IPCB(skb)->opt);
927	struct ip_options_rcu *dopt = NULL;
928
929	if (opt && opt->optlen) {
930		int opt_size = sizeof(*dopt) + opt->optlen;
931
932		dopt = kmalloc(opt_size, GFP_ATOMIC);
933		if (dopt) {
934			if (ip_options_echo(&dopt->opt, skb)) {
935				kfree(dopt);
936				dopt = NULL;
937			}
938		}
939	}
940	return dopt;
941}
942
943#ifdef CONFIG_TCP_MD5SIG
944/*
945 * RFC2385 MD5 checksumming requires a mapping of
946 * IP address->MD5 Key.
947 * We need to maintain these in the sk structure.
948 */
949
950/* Find the Key structure for an address.  */
951struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
952					 const union tcp_md5_addr *addr,
953					 int family)
954{
955	struct tcp_sock *tp = tcp_sk(sk);
956	struct tcp_md5sig_key *key;
957	unsigned int size = sizeof(struct in_addr);
958	struct tcp_md5sig_info *md5sig;
959
960	/* caller either holds rcu_read_lock() or socket lock */
961	md5sig = rcu_dereference_check(tp->md5sig_info,
962				       sock_owned_by_user(sk) ||
963				       lockdep_is_held(&sk->sk_lock.slock));
964	if (!md5sig)
965		return NULL;
966#if IS_ENABLED(CONFIG_IPV6)
967	if (family == AF_INET6)
968		size = sizeof(struct in6_addr);
969#endif
970	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
971		if (key->family != family)
972			continue;
973		if (!memcmp(&key->addr, addr, size))
974			return key;
975	}
976	return NULL;
977}
978EXPORT_SYMBOL(tcp_md5_do_lookup);
979
980struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
981					 struct sock *addr_sk)
982{
983	union tcp_md5_addr *addr;
984
985	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
986	return tcp_md5_do_lookup(sk, addr, AF_INET);
987}
988EXPORT_SYMBOL(tcp_v4_md5_lookup);
989
990static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
991						      struct request_sock *req)
992{
993	union tcp_md5_addr *addr;
994
995	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
996	return tcp_md5_do_lookup(sk, addr, AF_INET);
997}
998
999/* This can be called on a newly created socket, from other files */
1000int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1001		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
1002{
1003	/* Add Key to the list */
1004	struct tcp_md5sig_key *key;
1005	struct tcp_sock *tp = tcp_sk(sk);
1006	struct tcp_md5sig_info *md5sig;
1007
1008	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
1009	if (key) {
1010		/* Pre-existing entry - just update that one. */
1011		memcpy(key->key, newkey, newkeylen);
1012		key->keylen = newkeylen;
1013		return 0;
1014	}
1015
1016	md5sig = rcu_dereference_protected(tp->md5sig_info,
1017					   sock_owned_by_user(sk));
1018	if (!md5sig) {
1019		md5sig = kmalloc(sizeof(*md5sig), gfp);
1020		if (!md5sig)
1021			return -ENOMEM;
1022
1023		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1024		INIT_HLIST_HEAD(&md5sig->head);
1025		rcu_assign_pointer(tp->md5sig_info, md5sig);
1026	}
1027
1028	key = sock_kmalloc(sk, sizeof(*key), gfp);
1029	if (!key)
1030		return -ENOMEM;
1031	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1032		sock_kfree_s(sk, key, sizeof(*key));
1033		return -ENOMEM;
1034	}
1035
1036	memcpy(key->key, newkey, newkeylen);
1037	key->keylen = newkeylen;
1038	key->family = family;
1039	memcpy(&key->addr, addr,
1040	       (family == AF_INET6) ? sizeof(struct in6_addr) :
1041				      sizeof(struct in_addr));
1042	hlist_add_head_rcu(&key->node, &md5sig->head);
1043	return 0;
1044}
1045EXPORT_SYMBOL(tcp_md5_do_add);
1046
1047int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1048{
1049	struct tcp_sock *tp = tcp_sk(sk);
1050	struct tcp_md5sig_key *key;
1051	struct tcp_md5sig_info *md5sig;
1052
1053	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
1054	if (!key)
1055		return -ENOENT;
1056	hlist_del_rcu(&key->node);
1057	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1058	kfree_rcu(key, rcu);
1059	md5sig = rcu_dereference_protected(tp->md5sig_info,
1060					   sock_owned_by_user(sk));
1061	if (hlist_empty(&md5sig->head))
1062		tcp_free_md5sig_pool();
1063	return 0;
1064}
1065EXPORT_SYMBOL(tcp_md5_do_del);
1066
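/* Remove every MD5 key attached to the socket. The md5sig_info is
 * dereferenced with the protection check forced to true, so the caller must
 * hold the socket exclusively (e.g. during teardown).
 */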
1067static void tcp_clear_md5_list(struct sock *sk)
1068{
1069	struct tcp_sock *tp = tcp_sk(sk);
1070	struct tcp_md5sig_key *key;
1071	struct hlist_node *n;
1072	struct tcp_md5sig_info *md5sig;
1073
1074	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1075
1076	if (!hlist_empty(&md5sig->head))
1077		tcp_free_md5sig_pool();
1078	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1079		hlist_del_rcu(&key->node);
1080		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1081		kfree_rcu(key, rcu);
1082	}
1083}
1084
1085static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1086				 int optlen)
1087{
1088	struct tcp_md5sig cmd;
1089	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1090
1091	if (optlen < sizeof(cmd))
1092		return -EINVAL;
1093
1094	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1095		return -EFAULT;
1096
1097	if (sin->sin_family != AF_INET)
1098		return -EINVAL;
1099
1100	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1101		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1102				      AF_INET);
1103
1104	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1105		return -EINVAL;
1106
1107	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1108			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1109			      GFP_KERNEL);
1110}
1111
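/* Mix the IPv4 TCP pseudo-header (source address, destination address,
 * zero pad, protocol and segment length) into an MD5 hash in progress.
 */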
1112static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1113					__be32 daddr, __be32 saddr, int nbytes)
1114{
1115	struct tcp4_pseudohdr *bp;
1116	struct scatterlist sg;
1117
1118	bp = &hp->md5_blk.ip4;
1119
1120	/*
1121	 * 1. the TCP pseudo-header (in the order: source IP address,
1122	 * destination IP address, zero-padded protocol number, and
1123	 * segment length)
1124	 */
1125	bp->saddr = saddr;
1126	bp->daddr = daddr;
1127	bp->pad = 0;
1128	bp->protocol = IPPROTO_TCP;
1129	bp->len = cpu_to_be16(nbytes);
1130
1131	sg_init_one(&sg, bp, sizeof(*bp));
1132	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1133}
1134
1135static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1136			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1137{
1138	struct tcp_md5sig_pool *hp;
1139	struct hash_desc *desc;
1140
1141	hp = tcp_get_md5sig_pool();
1142	if (!hp)
1143		goto clear_hash_noput;
1144	desc = &hp->md5_desc;
1145
1146	if (crypto_hash_init(desc))
1147		goto clear_hash;
1148	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1149		goto clear_hash;
1150	if (tcp_md5_hash_header(hp, th))
1151		goto clear_hash;
1152	if (tcp_md5_hash_key(hp, key))
1153		goto clear_hash;
1154	if (crypto_hash_final(desc, md5_hash))
1155		goto clear_hash;
1156
1157	tcp_put_md5sig_pool();
1158	return 0;
1159
1160clear_hash:
1161	tcp_put_md5sig_pool();
1162clear_hash_noput:
1163	memset(md5_hash, 0, 16);
1164	return 1;
1165}
1166
1167int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1168			const struct sock *sk, const struct request_sock *req,
1169			const struct sk_buff *skb)
1170{
1171	struct tcp_md5sig_pool *hp;
1172	struct hash_desc *desc;
1173	const struct tcphdr *th = tcp_hdr(skb);
1174	__be32 saddr, daddr;
1175
1176	if (sk) {
1177		saddr = inet_sk(sk)->inet_saddr;
1178		daddr = inet_sk(sk)->inet_daddr;
1179	} else if (req) {
1180		saddr = inet_rsk(req)->loc_addr;
1181		daddr = inet_rsk(req)->rmt_addr;
1182	} else {
1183		const struct iphdr *iph = ip_hdr(skb);
1184		saddr = iph->saddr;
1185		daddr = iph->daddr;
1186	}
1187
1188	hp = tcp_get_md5sig_pool();
1189	if (!hp)
1190		goto clear_hash_noput;
1191	desc = &hp->md5_desc;
1192
1193	if (crypto_hash_init(desc))
1194		goto clear_hash;
1195
1196	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1197		goto clear_hash;
1198	if (tcp_md5_hash_header(hp, th))
1199		goto clear_hash;
1200	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1201		goto clear_hash;
1202	if (tcp_md5_hash_key(hp, key))
1203		goto clear_hash;
1204	if (crypto_hash_final(desc, md5_hash))
1205		goto clear_hash;
1206
1207	tcp_put_md5sig_pool();
1208	return 0;
1209
1210clear_hash:
1211	tcp_put_md5sig_pool();
1212clear_hash_noput:
1213	memset(md5_hash, 0, 16);
1214	return 1;
1215}
1216EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1217
1218static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1219{
1220	/*
1221	 * This gets called for each TCP segment that arrives
1222	 * so we want to be efficient.
1223	 * We have 3 drop cases:
1224	 * o No MD5 hash and one expected.
1225	 * o MD5 hash and we're not expecting one.
1226	 * o MD5 hash and it's wrong.
1227	 */
1228	const __u8 *hash_location = NULL;
1229	struct tcp_md5sig_key *hash_expected;
1230	const struct iphdr *iph = ip_hdr(skb);
1231	const struct tcphdr *th = tcp_hdr(skb);
1232	int genhash;
1233	unsigned char newhash[16];
1234
1235	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1236					  AF_INET);
1237	hash_location = tcp_parse_md5sig_option(th);
1238
1239	/* We've parsed the options - do we have a hash? */
1240	if (!hash_expected && !hash_location)
1241		return false;
1242
1243	if (hash_expected && !hash_location) {
1244		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1245		return true;
1246	}
1247
1248	if (!hash_expected && hash_location) {
1249		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1250		return true;
1251	}
1252
1253	/* Okay, so this is hash_expected and hash_location -
1254	 * so we need to calculate the checksum.
1255	 */
1256	genhash = tcp_v4_md5_hash_skb(newhash,
1257				      hash_expected,
1258				      NULL, NULL, skb);
1259
1260	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1261		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1262				     &iph->saddr, ntohs(th->source),
1263				     &iph->daddr, ntohs(th->dest),
1264				     genhash ? " tcp_v4_calc_md5_hash failed"
1265				     : "");
1266		return true;
1267	}
1268	return false;
1269}
1270
1271#endif
1272
1273struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1274	.family		=	PF_INET,
1275	.obj_size	=	sizeof(struct tcp_request_sock),
1276	.rtx_syn_ack	=	tcp_v4_rtx_synack,
1277	.send_ack	=	tcp_v4_reqsk_send_ack,
1278	.destructor	=	tcp_v4_reqsk_destructor,
1279	.send_reset	=	tcp_v4_send_reset,
1280	.syn_ack_timeout = 	tcp_syn_ack_timeout,
1281};
1282
1283#ifdef CONFIG_TCP_MD5SIG
1284static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1285	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
1286	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1287};
1288#endif
1289
1290static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1291			       struct request_sock *req,
1292			       struct tcp_fastopen_cookie *foc,
1293			       struct tcp_fastopen_cookie *valid_foc)
1294{
1295	bool skip_cookie = false;
1296	struct fastopen_queue *fastopenq;
1297
1298	if (likely(!fastopen_cookie_present(foc))) {
1299		/* See include/net/tcp.h for the meaning of these knobs */
1300		if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1301		    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1302		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1303			skip_cookie = true; /* no cookie to validate */
1304		else
1305			return false;
1306	}
1307	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1308	/* A FO option is present; bump the counter. */
1309	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
1310
1311	/* Make sure the listener has enabled fastopen, and we don't
1312	 * exceed the max # of pending TFO requests allowed before trying
1313	 * to validate the cookie, in order to avoid burning CPU cycles
1314	 * unnecessarily.
1315	 *
1316	 * XXX (TFO) - The implication of checking the max_qlen before
1317	 * processing a cookie request is that clients can't differentiate
1318	 * between qlen overflow causing Fast Open to be disabled
1319	 * temporarily vs a server not supporting Fast Open at all.
1320	 */
1321	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1322	    fastopenq == NULL || fastopenq->max_qlen == 0)
1323		return false;
1324
1325	if (fastopenq->qlen >= fastopenq->max_qlen) {
1326		struct request_sock *req1;
1327		spin_lock(&fastopenq->lock);
1328		req1 = fastopenq->rskq_rst_head;
1329		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1330			spin_unlock(&fastopenq->lock);
1331			NET_INC_STATS_BH(sock_net(sk),
1332			    LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
1333			/* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
1334			foc->len = -1;
1335			return false;
1336		}
1337		fastopenq->rskq_rst_head = req1->dl_next;
1338		fastopenq->qlen--;
1339		spin_unlock(&fastopenq->lock);
1340		reqsk_free(req1);
1341	}
1342	if (skip_cookie) {
1343		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1344		return true;
1345	}
1346	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1347		if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1348			tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1349			if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1350			    memcmp(&foc->val[0], &valid_foc->val[0],
1351			    TCP_FASTOPEN_COOKIE_SIZE) != 0)
1352				return false;
1353			valid_foc->len = -1;
1354		}
1355		/* Acknowledge the data received from the peer. */
1356		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1357		return true;
1358	} else if (foc->len == 0) { /* Client requesting a cookie */
1359		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1360		NET_INC_STATS_BH(sock_net(sk),
1361		    LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1362	} else {
1363		/* Client sent a cookie with the wrong size. Treat it
1364		 * the same as invalid and return a valid one.
1365		 */
1366		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1367	}
1368	return false;
1369}
1370
1371static int tcp_v4_conn_req_fastopen(struct sock *sk,
1372				    struct sk_buff *skb,
1373				    struct sk_buff *skb_synack,
1374				    struct request_sock *req,
1375				    struct request_values *rvp)
1376{
1377	struct tcp_sock *tp = tcp_sk(sk);
1378	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1379	const struct inet_request_sock *ireq = inet_rsk(req);
1380	struct sock *child;
1381	int err;
1382
1383	req->num_retrans = 0;
1384	req->num_timeout = 0;
1385	req->sk = NULL;
1386
1387	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1388	if (child == NULL) {
1389		NET_INC_STATS_BH(sock_net(sk),
1390				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1391		kfree_skb(skb_synack);
1392		return -1;
1393	}
1394	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1395				    ireq->rmt_addr, ireq->opt);
1396	err = net_xmit_eval(err);
1397	if (!err)
1398		tcp_rsk(req)->snt_synack = tcp_time_stamp;
1399	/* XXX (TFO) - is it ok to ignore error and continue? */
1400
1401	spin_lock(&queue->fastopenq->lock);
1402	queue->fastopenq->qlen++;
1403	spin_unlock(&queue->fastopenq->lock);
1404
1405	/* Initialize the child socket. Have to fix some values to take
1406	 * into account that the child is a Fast Open socket and is created
1407	 * only out of the bits carried in the SYN packet.
1408	 */
1409	tp = tcp_sk(child);
1410
1411	tp->fastopen_rsk = req;
1412	/* Do a hold on the listener sk so that if the listener is being
1413	 * closed, the child that has been accepted can live on and still
1414	 * access listen_lock.
1415	 */
1416	sock_hold(sk);
1417	tcp_rsk(req)->listener = sk;
1418
1419	/* RFC1323: The window in SYN & SYN/ACK segments is never
1420	 * scaled. So correct it appropriately.
1421	 */
1422	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1423
1424	/* Activate the retrans timer so that SYNACK can be retransmitted.
1425	 * The request socket is not added to the SYN table of the parent
1426	 * because it's been added to the accept queue directly.
1427	 */
1428	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1429	    TCP_TIMEOUT_INIT, TCP_RTO_MAX);
1430
1431	/* Add the child socket directly into the accept queue */
1432	inet_csk_reqsk_queue_add(sk, req, child);
1433
1434	/* Now finish processing the fastopen child socket. */
1435	inet_csk(child)->icsk_af_ops->rebuild_header(child);
1436	tcp_init_congestion_control(child);
1437	tcp_mtup_init(child);
1438	tcp_init_buffer_space(child);
1439	tcp_init_metrics(child);
1440
1441	/* Queue the data carried in the SYN packet. We need to first
1442	 * bump skb's refcnt because the caller will attempt to free it.
1443	 *
1444	 * XXX (TFO) - we honor a zero-payload TFO request for now.
1445	 * (Any reason not to?)
1446	 */
1447	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1448		/* Don't queue the skb if there is no payload in SYN.
1449		 * XXX (TFO) - How about SYN+FIN?
1450		 */
1451		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1452	} else {
1453		skb = skb_get(skb);
1454		skb_dst_drop(skb);
1455		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
1456		skb_set_owner_r(skb, child);
1457		__skb_queue_tail(&child->sk_receive_queue, skb);
1458		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1459		tp->syn_data_acked = 1;
1460	}
1461	sk->sk_data_ready(sk, 0);
1462	bh_unlock_sock(child);
1463	sock_put(child);
1464	WARN_ON(req->sk == NULL);
1465	return 0;
1466}
1467
1468int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1469{
1470	struct tcp_extend_values tmp_ext;
1471	struct tcp_options_received tmp_opt;
1472	const u8 *hash_location;
1473	struct request_sock *req;
1474	struct inet_request_sock *ireq;
1475	struct tcp_sock *tp = tcp_sk(sk);
1476	struct dst_entry *dst = NULL;
1477	__be32 saddr = ip_hdr(skb)->saddr;
1478	__be32 daddr = ip_hdr(skb)->daddr;
1479	__u32 isn = TCP_SKB_CB(skb)->when;
1480	bool want_cookie = false;
1481	struct flowi4 fl4;
1482	struct tcp_fastopen_cookie foc = { .len = -1 };
1483	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
1484	struct sk_buff *skb_synack;
1485	int do_fastopen;
1486
1487	/* Never answer SYNs sent to broadcast or multicast addresses */
1488	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1489		goto drop;
1490
1491	/* TW buckets are converted to open requests without
1492	 * limitations: they conserve resources and the peer is
1493	 * evidently a real one.
1494	 */
1495	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1496		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1497		if (!want_cookie)
1498			goto drop;
1499	}
1500
1501	/* Accept backlog is full. If we have already queued enough
1502	 * warm entries in the syn queue, drop the request. That is better than
1503	 * clogging the syn queue with openreqs with exponentially increasing
1504	 * timeouts.
1505	 */
1506	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1507		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1508		goto drop;
1509	}
1510
1511	req = inet_reqsk_alloc(&tcp_request_sock_ops);
1512	if (!req)
1513		goto drop;
1514
1515#ifdef CONFIG_TCP_MD5SIG
1516	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1517#endif
1518
1519	tcp_clear_options(&tmp_opt);
1520	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1521	tmp_opt.user_mss  = tp->rx_opt.user_mss;
1522	tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
1523	    want_cookie ? NULL : &foc);
1524
1525	if (tmp_opt.cookie_plus > 0 &&
1526	    tmp_opt.saw_tstamp &&
1527	    !tp->rx_opt.cookie_out_never &&
1528	    (sysctl_tcp_cookie_size > 0 ||
1529	     (tp->cookie_values != NULL &&
1530	      tp->cookie_values->cookie_desired > 0))) {
1531		u8 *c;
1532		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1533		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1534
1535		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1536			goto drop_and_release;
1537
1538		/* Secret recipe starts with IP addresses */
1539		*mess++ ^= (__force u32)daddr;
1540		*mess++ ^= (__force u32)saddr;
1541
1542		/* plus variable length Initiator Cookie */
1543		c = (u8 *)mess;
1544		while (l-- > 0)
1545			*c++ ^= *hash_location++;
1546
1547		want_cookie = false;	/* not our kind of cookie */
1548		tmp_ext.cookie_out_never = 0; /* false */
1549		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1550	} else if (!tp->rx_opt.cookie_in_always) {
1551		/* redundant indications, but ensure initialization. */
1552		tmp_ext.cookie_out_never = 1; /* true */
1553		tmp_ext.cookie_plus = 0;
1554	} else {
1555		goto drop_and_release;
1556	}
1557	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1558
1559	if (want_cookie && !tmp_opt.saw_tstamp)
1560		tcp_clear_options(&tmp_opt);
1561
1562	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1563	tcp_openreq_init(req, &tmp_opt, skb);
1564
1565	ireq = inet_rsk(req);
1566	ireq->loc_addr = daddr;
1567	ireq->rmt_addr = saddr;
1568	ireq->no_srccheck = inet_sk(sk)->transparent;
1569	ireq->opt = tcp_v4_save_options(skb);
1570
1571	if (security_inet_conn_request(sk, skb, req))
1572		goto drop_and_free;
1573
1574	if (!want_cookie || tmp_opt.tstamp_ok)
1575		TCP_ECN_create_request(req, skb, sock_net(sk));
1576
1577	if (want_cookie) {
1578		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1579		req->cookie_ts = tmp_opt.tstamp_ok;
1580	} else if (!isn) {
1581		/* VJ's idea. We save the last timestamp seen
1582		 * from the destination in the peer table, when entering
1583		 * state TIME-WAIT, and check against it before
1584		 * accepting a new connection request.
1585		 *
1586		 * If "isn" is not zero, this request hit a live
1587		 * timewait bucket, so all the necessary checks
1588		 * are made in the function processing the timewait state.
1589		 */
1590		if (tmp_opt.saw_tstamp &&
1591		    tcp_death_row.sysctl_tw_recycle &&
1592		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1593		    fl4.daddr == saddr) {
1594			if (!tcp_peer_is_proven(req, dst, true)) {
1595				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1596				goto drop_and_release;
1597			}
1598		}
1599		/* Kill the following clause, if you dislike this way. */
1600		else if (!sysctl_tcp_syncookies &&
1601			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1602			  (sysctl_max_syn_backlog >> 2)) &&
1603			 !tcp_peer_is_proven(req, dst, false)) {
1604			/* Without syncookies the last quarter of
1605			 * the backlog is filled with destinations
1606			 * proven to be alive.
1607			 * It means that we continue to communicate
1608			 * with destinations already remembered
1609			 * at the moment of the synflood.
1610			 */
1611			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1612				       &saddr, ntohs(tcp_hdr(skb)->source));
1613			goto drop_and_release;
1614		}
1615
1616		isn = tcp_v4_init_sequence(skb);
1617	}
1618	tcp_rsk(req)->snt_isn = isn;
1619
1620	if (dst == NULL) {
1621		dst = inet_csk_route_req(sk, &fl4, req);
1622		if (dst == NULL)
1623			goto drop_and_free;
1624	}
1625	do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1626
1627	/* We don't call tcp_v4_send_synack() directly because we need
1628	 * to make sure a child socket can be created successfully before
1629	 * sending back synack!
1630	 *
1631	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1632	 * (or better yet, call tcp_send_synack() in the child context
1633	 * directly, but will have to fix bunch of other code first)
1634	 * after syn_recv_sock() except one will need to first fix the
1635	 * latter to remove its dependency on the current implementation
1636	 * of tcp_v4_send_synack()->tcp_select_initial_window().
1637	 */
1638	skb_synack = tcp_make_synack(sk, dst, req,
1639	    (struct request_values *)&tmp_ext,
1640	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1641
1642	if (skb_synack) {
1643		__tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
1644		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1645	} else
1646		goto drop_and_free;
1647
1648	if (likely(!do_fastopen)) {
1649		int err;
1650		err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1651		     ireq->rmt_addr, ireq->opt);
1652		err = net_xmit_eval(err);
1653		if (err || want_cookie)
1654			goto drop_and_free;
1655
1656		tcp_rsk(req)->snt_synack = tcp_time_stamp;
1657		tcp_rsk(req)->listener = NULL;
1658		/* Add the request_sock to the SYN table */
1659		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1660		if (fastopen_cookie_present(&foc) && foc.len != 0)
1661			NET_INC_STATS_BH(sock_net(sk),
1662			    LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1663	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
1664	    (struct request_values *)&tmp_ext))
1665		goto drop_and_free;
1666
1667	return 0;
1668
1669drop_and_release:
1670	dst_release(dst);
1671drop_and_free:
1672	reqsk_free(req);
1673drop:
1674	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1675	return 0;
1676}
1677EXPORT_SYMBOL(tcp_v4_conn_request);
1678
1679
1680/*
1681 * The three way handshake has completed - we got a valid synack -
1682 * now create the new socket.
1683 */
1684struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1685				  struct request_sock *req,
1686				  struct dst_entry *dst)
1687{
1688	struct inet_request_sock *ireq;
1689	struct inet_sock *newinet;
1690	struct tcp_sock *newtp;
1691	struct sock *newsk;
1692#ifdef CONFIG_TCP_MD5SIG
1693	struct tcp_md5sig_key *key;
1694#endif
1695	struct ip_options_rcu *inet_opt;
1696
1697	if (sk_acceptq_is_full(sk))
1698		goto exit_overflow;
1699
1700	newsk = tcp_create_openreq_child(sk, req, skb);
1701	if (!newsk)
1702		goto exit_nonewsk;
1703
1704	newsk->sk_gso_type = SKB_GSO_TCPV4;
1705	inet_sk_rx_dst_set(newsk, skb);
1706
1707	newtp		      = tcp_sk(newsk);
1708	newinet		      = inet_sk(newsk);
1709	ireq		      = inet_rsk(req);
1710	newinet->inet_daddr   = ireq->rmt_addr;
1711	newinet->inet_rcv_saddr = ireq->loc_addr;
1712	newinet->inet_saddr	      = ireq->loc_addr;
1713	inet_opt	      = ireq->opt;
1714	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1715	ireq->opt	      = NULL;
1716	newinet->mc_index     = inet_iif(skb);
1717	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1718	newinet->rcv_tos      = ip_hdr(skb)->tos;
1719	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1720	if (inet_opt)
1721		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1722	newinet->inet_id = newtp->write_seq ^ jiffies;
1723
1724	if (!dst) {
1725		dst = inet_csk_route_child_sock(sk, newsk, req);
1726		if (!dst)
1727			goto put_and_exit;
1728	} else {
1729		/* syncookie case: see end of cookie_v4_check() */
1730	}
1731	sk_setup_caps(newsk, dst);
1732
1733	tcp_mtup_init(newsk);
1734	tcp_sync_mss(newsk, dst_mtu(dst));
1735	newtp->advmss = dst_metric_advmss(dst);
1736	if (tcp_sk(sk)->rx_opt.user_mss &&
1737	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1738		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1739
1740	tcp_initialize_rcv_mss(newsk);
1741	tcp_synack_rtt_meas(newsk, req);
1742	newtp->total_retrans = req->num_retrans;
1743
1744#ifdef CONFIG_TCP_MD5SIG
1745	/* Copy over the MD5 key from the original socket */
1746	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1747				AF_INET);
1748	if (key != NULL) {
1749		/*
1750		 * We're using one, so create a matching key
1751		 * on the newsk structure. If we fail to get
1752		 * memory, then we end up not copying the key
1753		 * across. Shucks.
1754		 */
1755		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1756			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1757		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1758	}
1759#endif
1760
1761	if (__inet_inherit_port(sk, newsk) < 0)
1762		goto put_and_exit;
1763	__inet_hash_nolisten(newsk, NULL);
1764
1765	return newsk;
1766
1767exit_overflow:
1768	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1769exit_nonewsk:
1770	dst_release(dst);
1771exit:
1772	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1773	return NULL;
1774put_and_exit:
1775	inet_csk_prepare_forced_close(newsk);
1776	tcp_done(newsk);
1777	goto exit;
1778}
1779EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1780
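/* A segment arrived on a listening socket: try to match it to a pending
 * connection request, then to an already established child; failing that,
 * with syncookies enabled, a valid cookie carried in an ACK can still
 * create a socket.
 */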
1781static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1782{
1783	struct tcphdr *th = tcp_hdr(skb);
1784	const struct iphdr *iph = ip_hdr(skb);
1785	struct sock *nsk;
1786	struct request_sock **prev;
1787	/* Find possible connection requests. */
1788	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1789						       iph->saddr, iph->daddr);
1790	if (req)
1791		return tcp_check_req(sk, skb, req, prev, false);
1792
1793	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1794			th->source, iph->daddr, th->dest, inet_iif(skb));
1795
1796	if (nsk) {
1797		if (nsk->sk_state != TCP_TIME_WAIT) {
1798			bh_lock_sock(nsk);
1799			return nsk;
1800		}
1801		inet_twsk_put(inet_twsk(nsk));
1802		return NULL;
1803	}
1804
1805#ifdef CONFIG_SYN_COOKIES
1806	if (!th->syn)
1807		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1808#endif
1809	return sk;
1810}
1811
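/* Validate or set up the TCP checksum for a freshly received segment.
 * If the device supplied CHECKSUM_COMPLETE and the sum verifies, mark the
 * skb CHECKSUM_UNNECESSARY.  Otherwise seed skb->csum with the
 * pseudo-header sum; short packets (<= 76 bytes) are verified in full
 * right away, longer ones are checked later.
 */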
1812static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1813{
1814	const struct iphdr *iph = ip_hdr(skb);
1815
1816	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1817		if (!tcp_v4_check(skb->len, iph->saddr,
1818				  iph->daddr, skb->csum)) {
1819			skb->ip_summed = CHECKSUM_UNNECESSARY;
1820			return 0;
1821		}
1822	}
1823
1824	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1825				       skb->len, IPPROTO_TCP, 0);
1826
1827	if (skb->len <= 76) {
1828		return __skb_checksum_complete(skb);
1829	}
1830	return 0;
1831}
1832
1833
1834/* The socket must have its spinlock held when we get
1835 * here.
1836 *
1837 * We have a potential double-lock case here, so even when
1838 * doing backlog processing we use the BH locking scheme.
1839 * This is because we cannot sleep with the original spinlock
1840 * held.
1841 */
1842int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1843{
1844	struct sock *rsk;
1845#ifdef CONFIG_TCP_MD5SIG
1846	/*
1847	 * We really want to reject the packet as early as possible
1848	 * if:
1849	 *  o We're expecting an MD5'd packet and there is no MD5 tcp option
1850	 *  o There is an MD5 option and we're not expecting one
1851	 */
1852	if (tcp_v4_inbound_md5_hash(sk, skb))
1853		goto discard;
1854#endif
1855
1856	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1857		struct dst_entry *dst = sk->sk_rx_dst;
1858
1859		sock_rps_save_rxhash(sk, skb);
1860		if (dst) {
1861			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1862			    dst->ops->check(dst, 0) == NULL) {
1863				dst_release(dst);
1864				sk->sk_rx_dst = NULL;
1865			}
1866		}
1867		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1868			rsk = sk;
1869			goto reset;
1870		}
1871		return 0;
1872	}
1873
1874	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1875		goto csum_err;
1876
1877	if (sk->sk_state == TCP_LISTEN) {
1878		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1879		if (!nsk)
1880			goto discard;
1881
1882		if (nsk != sk) {
1883			sock_rps_save_rxhash(nsk, skb);
1884			if (tcp_child_process(sk, nsk, skb)) {
1885				rsk = nsk;
1886				goto reset;
1887			}
1888			return 0;
1889		}
1890	} else
1891		sock_rps_save_rxhash(sk, skb);
1892
1893	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1894		rsk = sk;
1895		goto reset;
1896	}
1897	return 0;
1898
1899reset:
1900	tcp_v4_send_reset(rsk, skb);
1901discard:
1902	kfree_skb(skb);
1903	/* Be careful here. If this function gets more complicated and
1904	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1905	 * might be destroyed here. This current version compiles correctly,
1906	 * but you have been warned.
1907	 */
1908	return 0;
1909
1910csum_err:
1911	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1912	goto discard;
1913}
1914EXPORT_SYMBOL(tcp_v4_do_rcv);
1915
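/* Early demux: before the full receive path runs, try to find an
 * established socket for this segment and attach it to the skb, together
 * with the socket's cached rx dst when it is still valid for the incoming
 * interface.
 */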
1916void tcp_v4_early_demux(struct sk_buff *skb)
1917{
1918	const struct iphdr *iph;
1919	const struct tcphdr *th;
1920	struct sock *sk;
1921
1922	if (skb->pkt_type != PACKET_HOST)
1923		return;
1924
1925	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1926		return;
1927
1928	iph = ip_hdr(skb);
1929	th = tcp_hdr(skb);
1930
1931	if (th->doff < sizeof(struct tcphdr) / 4)
1932		return;
1933
1934	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1935				       iph->saddr, th->source,
1936				       iph->daddr, ntohs(th->dest),
1937				       skb->skb_iif);
1938	if (sk) {
1939		skb->sk = sk;
1940		skb->destructor = sock_edemux;
1941		if (sk->sk_state != TCP_TIME_WAIT) {
1942			struct dst_entry *dst = sk->sk_rx_dst;
1943
1944			if (dst)
1945				dst = dst_check(dst, 0);
1946			if (dst &&
1947			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1948				skb_dst_set_noref(skb, dst);
1949		}
1950	}
1951}
1952
1953/* Packet is added to VJ-style prequeue for processing in process
1954 * context, if a reader task is waiting. Apparently, this exciting
1955 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1956 * failed somewhere. Latency? Burstiness? Well, at least now we will
1957 * see, why it failed. 8)8)				  --ANK
1958 *
1959 */
1960bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1961{
1962	struct tcp_sock *tp = tcp_sk(sk);
1963
1964	if (sysctl_tcp_low_latency || !tp->ucopy.task)
1965		return false;
1966
1967	if (skb->len <= tcp_hdrlen(skb) &&
1968	    skb_queue_len(&tp->ucopy.prequeue) == 0)
1969		return false;
1970
1971	__skb_queue_tail(&tp->ucopy.prequeue, skb);
1972	tp->ucopy.memory += skb->truesize;
1973	if (tp->ucopy.memory > sk->sk_rcvbuf) {
1974		struct sk_buff *skb1;
1975
1976		BUG_ON(sock_owned_by_user(sk));
1977
1978		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1979			sk_backlog_rcv(sk, skb1);
1980			NET_INC_STATS_BH(sock_net(sk),
1981					 LINUX_MIB_TCPPREQUEUEDROPPED);
1982		}
1983
1984		tp->ucopy.memory = 0;
1985	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1986		wake_up_interruptible_sync_poll(sk_sleep(sk),
1987					   POLLIN | POLLRDNORM | POLLRDBAND);
1988		if (!inet_csk_ack_scheduled(sk))
1989			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1990						  (3 * tcp_rto_min(sk)) / 4,
1991						  TCP_RTO_MAX);
1992	}
1993	return true;
1994}
1995EXPORT_SYMBOL(tcp_prequeue);
1996
1997/*
1998 *	From tcp_input.c
1999 */
2000
2001int tcp_v4_rcv(struct sk_buff *skb)
2002{
2003	const struct iphdr *iph;
2004	const struct tcphdr *th;
2005	struct sock *sk;
2006	int ret;
2007	struct net *net = dev_net(skb->dev);
2008
2009	if (skb->pkt_type != PACKET_HOST)
2010		goto discard_it;
2011
2012	/* Count it even if it's bad */
2013	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
2014
2015	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
2016		goto discard_it;
2017
2018	th = tcp_hdr(skb);
2019
2020	if (th->doff < sizeof(struct tcphdr) / 4)
2021		goto bad_packet;
2022	if (!pskb_may_pull(skb, th->doff * 4))
2023		goto discard_it;
2024
2025	/* An explanation is required here, I think.
2026	 * Packet length and doff are validated by header prediction,
2027	 * provided the case of th->doff==0 is eliminated.
2028	 * So, we defer the checks. */
2029	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
2030		goto bad_packet;
2031
2032	th = tcp_hdr(skb);
2033	iph = ip_hdr(skb);
2034	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
2035	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
2036				    skb->len - th->doff * 4);
2037	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
2038	TCP_SKB_CB(skb)->when	 = 0;
2039	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
2040	TCP_SKB_CB(skb)->sacked	 = 0;
2041
2042	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
2043	if (!sk)
2044		goto no_tcp_socket;
2045
2046process:
2047	if (sk->sk_state == TCP_TIME_WAIT)
2048		goto do_time_wait;
2049
2050	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
2051		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
2052		goto discard_and_relse;
2053	}
2054
2055	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2056		goto discard_and_relse;
2057	nf_reset(skb);
2058
2059	if (sk_filter(sk, skb))
2060		goto discard_and_relse;
2061
2062	skb->dev = NULL;
2063
2064	bh_lock_sock_nested(sk);
2065	ret = 0;
2066	if (!sock_owned_by_user(sk)) {
2067#ifdef CONFIG_NET_DMA
2068		struct tcp_sock *tp = tcp_sk(sk);
2069		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
2070			tp->ucopy.dma_chan = net_dma_find_channel();
2071		if (tp->ucopy.dma_chan)
2072			ret = tcp_v4_do_rcv(sk, skb);
2073		else
2074#endif
2075		{
2076			if (!tcp_prequeue(sk, skb))
2077				ret = tcp_v4_do_rcv(sk, skb);
2078		}
2079	} else if (unlikely(sk_add_backlog(sk, skb,
2080					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
2081		bh_unlock_sock(sk);
2082		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
2083		goto discard_and_relse;
2084	}
2085	bh_unlock_sock(sk);
2086
2087	sock_put(sk);
2088
2089	return ret;
2090
2091no_tcp_socket:
2092	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2093		goto discard_it;
2094
2095	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2096bad_packet:
2097		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2098	} else {
2099		tcp_v4_send_reset(NULL, skb);
2100	}
2101
2102discard_it:
2103	/* Discard frame. */
2104	kfree_skb(skb);
2105	return 0;
2106
2107discard_and_relse:
2108	sock_put(sk);
2109	goto discard_it;
2110
2111do_time_wait:
2112	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2113		inet_twsk_put(inet_twsk(sk));
2114		goto discard_it;
2115	}
2116
2117	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2118		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2119		inet_twsk_put(inet_twsk(sk));
2120		goto discard_it;
2121	}
2122	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2123	case TCP_TW_SYN: {
2124		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2125							&tcp_hashinfo,
2126							iph->saddr, th->source,
2127							iph->daddr, th->dest,
2128							inet_iif(skb));
2129		if (sk2) {
2130			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
2131			inet_twsk_put(inet_twsk(sk));
2132			sk = sk2;
2133			goto process;
2134		}
2135		/* Fall through to ACK */
2136	}
2137	case TCP_TW_ACK:
2138		tcp_v4_timewait_ack(sk, skb);
2139		break;
2140	case TCP_TW_RST:
2141		goto no_tcp_socket;
2142	case TCP_TW_SUCCESS:;
2143	}
2144	goto discard_it;
2145}
2146
2147static struct timewait_sock_ops tcp_timewait_sock_ops = {
2148	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
2149	.twsk_unique	= tcp_twsk_unique,
2150	.twsk_destructor= tcp_twsk_destructor,
2151};
2152
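/* Cache the input route of this skb on the socket for the established
 * fast path, and remember the interface it arrived on so that later
 * packets can be checked against it.
 */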
2153void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2154{
2155	struct dst_entry *dst = skb_dst(skb);
2156
2157	dst_hold(dst);
2158	sk->sk_rx_dst = dst;
2159	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2160}
2161EXPORT_SYMBOL(inet_sk_rx_dst_set);
2162
2163const struct inet_connection_sock_af_ops ipv4_specific = {
2164	.queue_xmit	   = ip_queue_xmit,
2165	.send_check	   = tcp_v4_send_check,
2166	.rebuild_header	   = inet_sk_rebuild_header,
2167	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
2168	.conn_request	   = tcp_v4_conn_request,
2169	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
2170	.net_header_len	   = sizeof(struct iphdr),
2171	.setsockopt	   = ip_setsockopt,
2172	.getsockopt	   = ip_getsockopt,
2173	.addr2sockaddr	   = inet_csk_addr2sockaddr,
2174	.sockaddr_len	   = sizeof(struct sockaddr_in),
2175	.bind_conflict	   = inet_csk_bind_conflict,
2176#ifdef CONFIG_COMPAT
2177	.compat_setsockopt = compat_ip_setsockopt,
2178	.compat_getsockopt = compat_ip_getsockopt,
2179#endif
2180};
2181EXPORT_SYMBOL(ipv4_specific);
2182
2183#ifdef CONFIG_TCP_MD5SIG
2184static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2185	.md5_lookup		= tcp_v4_md5_lookup,
2186	.calc_md5_hash		= tcp_v4_md5_hash_skb,
2187	.md5_parse		= tcp_v4_parse_md5_keys,
2188};
2189#endif
2190
2191/* NOTE: A lot of things are set to zero explicitly by the call to
2192 *       sk_alloc(), so they need not be done here.
2193 */
2194static int tcp_v4_init_sock(struct sock *sk)
2195{
2196	struct inet_connection_sock *icsk = inet_csk(sk);
2197
2198	tcp_init_sock(sk);
2199
2200	icsk->icsk_af_ops = &ipv4_specific;
2201
2202#ifdef CONFIG_TCP_MD5SIG
2203	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2204#endif
2205
2206	return 0;
2207}
2208
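/* Release per-socket TCP state when an IPv4 socket is destroyed: timers,
 * congestion control state, queued skbs, MD5 keys, the bound port and the
 * cookie/fastopen bookkeeping.
 */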
2209void tcp_v4_destroy_sock(struct sock *sk)
2210{
2211	struct tcp_sock *tp = tcp_sk(sk);
2212
2213	tcp_clear_xmit_timers(sk);
2214
2215	tcp_cleanup_congestion_control(sk);
2216
2217	/* Clean up the write buffer. */
2218	tcp_write_queue_purge(sk);
2219
2220	/* Cleans up our, hopefully empty, out_of_order_queue. */
2221	__skb_queue_purge(&tp->out_of_order_queue);
2222
2223#ifdef CONFIG_TCP_MD5SIG
2224	/* Clean up the MD5 key list, if any */
2225	if (tp->md5sig_info) {
2226		tcp_clear_md5_list(sk);
2227		kfree_rcu(tp->md5sig_info, rcu);
2228		tp->md5sig_info = NULL;
2229	}
2230#endif
2231
2232#ifdef CONFIG_NET_DMA
2233	/* Cleans up our sk_async_wait_queue */
2234	__skb_queue_purge(&sk->sk_async_wait_queue);
2235#endif
2236
2237	/* Clean up the prequeue; it really should be empty. */
2238	__skb_queue_purge(&tp->ucopy.prequeue);
2239
2240	/* Clean up a referenced TCP bind bucket. */
2241	if (inet_csk(sk)->icsk_bind_hash)
2242		inet_put_port(sk);
2243
2244	/* TCP Cookie Transactions */
2245	if (tp->cookie_values != NULL) {
2246		kref_put(&tp->cookie_values->kref,
2247			 tcp_cookie_values_release);
2248		tp->cookie_values = NULL;
2249	}
2250	BUG_ON(tp->fastopen_rsk != NULL);
2251
2252	/* If socket is aborted during connect operation */
2253	tcp_free_fastopen_req(tp);
2254
2255	sk_sockets_allocated_dec(sk);
2256	sock_release_memcg(sk);
2257}
2258EXPORT_SYMBOL(tcp_v4_destroy_sock);
2259
2260#ifdef CONFIG_PROC_FS
2261/* Proc filesystem TCP sock list dumping. */
2262
2263static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
2264{
2265	return hlist_nulls_empty(head) ? NULL :
2266		list_entry(head->first, struct inet_timewait_sock, tw_node);
2267}
2268
2269static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
2270{
2271	return !is_a_nulls(tw->tw_node.next) ?
2272		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2273}
2274
2275/*
2276 * Get the next listener socket following cur.  If cur is NULL, get the first socket
2277 * starting from the bucket given in st->bucket; when st->bucket is zero the
2278 * very first socket in the hash table is returned.
2279 */
2280static void *listening_get_next(struct seq_file *seq, void *cur)
2281{
2282	struct inet_connection_sock *icsk;
2283	struct hlist_nulls_node *node;
2284	struct sock *sk = cur;
2285	struct inet_listen_hashbucket *ilb;
2286	struct tcp_iter_state *st = seq->private;
2287	struct net *net = seq_file_net(seq);
2288
2289	if (!sk) {
2290		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2291		spin_lock_bh(&ilb->lock);
2292		sk = sk_nulls_head(&ilb->head);
2293		st->offset = 0;
2294		goto get_sk;
2295	}
2296	ilb = &tcp_hashinfo.listening_hash[st->bucket];
2297	++st->num;
2298	++st->offset;
2299
2300	if (st->state == TCP_SEQ_STATE_OPENREQ) {
2301		struct request_sock *req = cur;
2302
2303		icsk = inet_csk(st->syn_wait_sk);
2304		req = req->dl_next;
2305		while (1) {
2306			while (req) {
2307				if (req->rsk_ops->family == st->family) {
2308					cur = req;
2309					goto out;
2310				}
2311				req = req->dl_next;
2312			}
2313			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2314				break;
2315get_req:
2316			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2317		}
2318		sk	  = sk_nulls_next(st->syn_wait_sk);
2319		st->state = TCP_SEQ_STATE_LISTENING;
2320		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2321	} else {
2322		icsk = inet_csk(sk);
2323		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2324		if (reqsk_queue_len(&icsk->icsk_accept_queue))
2325			goto start_req;
2326		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2327		sk = sk_nulls_next(sk);
2328	}
2329get_sk:
2330	sk_nulls_for_each_from(sk, node) {
2331		if (!net_eq(sock_net(sk), net))
2332			continue;
2333		if (sk->sk_family == st->family) {
2334			cur = sk;
2335			goto out;
2336		}
2337		icsk = inet_csk(sk);
2338		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2339		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2340start_req:
2341			st->uid		= sock_i_uid(sk);
2342			st->syn_wait_sk = sk;
2343			st->state	= TCP_SEQ_STATE_OPENREQ;
2344			st->sbucket	= 0;
2345			goto get_req;
2346		}
2347		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2348	}
2349	spin_unlock_bh(&ilb->lock);
2350	st->offset = 0;
2351	if (++st->bucket < INET_LHTABLE_SIZE) {
2352		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2353		spin_lock_bh(&ilb->lock);
2354		sk = sk_nulls_head(&ilb->head);
2355		goto get_sk;
2356	}
2357	cur = NULL;
2358out:
2359	return cur;
2360}
2361
2362static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2363{
2364	struct tcp_iter_state *st = seq->private;
2365	void *rc;
2366
2367	st->bucket = 0;
2368	st->offset = 0;
2369	rc = listening_get_next(seq, NULL);
2370
2371	while (rc && *pos) {
2372		rc = listening_get_next(seq, rc);
2373		--*pos;
2374	}
2375	return rc;
2376}
2377
2378static inline bool empty_bucket(struct tcp_iter_state *st)
2379{
2380	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2381		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2382}
2383
2384/*
2385 * Get first established socket starting from bucket given in st->bucket.
2386 * If st->bucket is zero, the very first socket in the hash is returned.
2387 */
2388static void *established_get_first(struct seq_file *seq)
2389{
2390	struct tcp_iter_state *st = seq->private;
2391	struct net *net = seq_file_net(seq);
2392	void *rc = NULL;
2393
2394	st->offset = 0;
2395	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2396		struct sock *sk;
2397		struct hlist_nulls_node *node;
2398		struct inet_timewait_sock *tw;
2399		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2400
2401		/* Lockless fast path for the common case of empty buckets */
2402		if (empty_bucket(st))
2403			continue;
2404
2405		spin_lock_bh(lock);
2406		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2407			if (sk->sk_family != st->family ||
2408			    !net_eq(sock_net(sk), net)) {
2409				continue;
2410			}
2411			rc = sk;
2412			goto out;
2413		}
2414		st->state = TCP_SEQ_STATE_TIME_WAIT;
2415		inet_twsk_for_each(tw, node,
2416				   &tcp_hashinfo.ehash[st->bucket].twchain) {
2417			if (tw->tw_family != st->family ||
2418			    !net_eq(twsk_net(tw), net)) {
2419				continue;
2420			}
2421			rc = tw;
2422			goto out;
2423		}
2424		spin_unlock_bh(lock);
2425		st->state = TCP_SEQ_STATE_ESTABLISHED;
2426	}
2427out:
2428	return rc;
2429}
2430
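/* Advance the /proc iterator to the next established or TIME_WAIT socket,
 * moving on to the following non-empty ehash bucket when the current
 * chain is exhausted.
 */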
2431static void *established_get_next(struct seq_file *seq, void *cur)
2432{
2433	struct sock *sk = cur;
2434	struct inet_timewait_sock *tw;
2435	struct hlist_nulls_node *node;
2436	struct tcp_iter_state *st = seq->private;
2437	struct net *net = seq_file_net(seq);
2438
2439	++st->num;
2440	++st->offset;
2441
2442	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2443		tw = cur;
2444		tw = tw_next(tw);
2445get_tw:
2446		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2447			tw = tw_next(tw);
2448		}
2449		if (tw) {
2450			cur = tw;
2451			goto out;
2452		}
2453		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2454		st->state = TCP_SEQ_STATE_ESTABLISHED;
2455
2456		/* Look for the next non-empty bucket */
2457		st->offset = 0;
2458		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2459				empty_bucket(st))
2460			;
2461		if (st->bucket > tcp_hashinfo.ehash_mask)
2462			return NULL;
2463
2464		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2465		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2466	} else
2467		sk = sk_nulls_next(sk);
2468
2469	sk_nulls_for_each_from(sk, node) {
2470		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2471			goto found;
2472	}
2473
2474	st->state = TCP_SEQ_STATE_TIME_WAIT;
2475	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2476	goto get_tw;
2477found:
2478	cur = sk;
2479out:
2480	return cur;
2481}
2482
2483static void *established_get_idx(struct seq_file *seq, loff_t pos)
2484{
2485	struct tcp_iter_state *st = seq->private;
2486	void *rc;
2487
2488	st->bucket = 0;
2489	rc = established_get_first(seq);
2490
2491	while (rc && pos) {
2492		rc = established_get_next(seq, rc);
2493		--pos;
2494	}
2495	return rc;
2496}
2497
2498static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2499{
2500	void *rc;
2501	struct tcp_iter_state *st = seq->private;
2502
2503	st->state = TCP_SEQ_STATE_LISTENING;
2504	rc	  = listening_get_idx(seq, &pos);
2505
2506	if (!rc) {
2507		st->state = TCP_SEQ_STATE_ESTABLISHED;
2508		rc	  = established_get_idx(seq, pos);
2509	}
2510
2511	return rc;
2512}
2513
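/* Resume the /proc dump at the bucket/offset remembered in the iterator
 * state instead of rescanning from the beginning of the hash tables.
 */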
2514static void *tcp_seek_last_pos(struct seq_file *seq)
2515{
2516	struct tcp_iter_state *st = seq->private;
2517	int offset = st->offset;
2518	int orig_num = st->num;
2519	void *rc = NULL;
2520
2521	switch (st->state) {
2522	case TCP_SEQ_STATE_OPENREQ:
2523	case TCP_SEQ_STATE_LISTENING:
2524		if (st->bucket >= INET_LHTABLE_SIZE)
2525			break;
2526		st->state = TCP_SEQ_STATE_LISTENING;
2527		rc = listening_get_next(seq, NULL);
2528		while (offset-- && rc)
2529			rc = listening_get_next(seq, rc);
2530		if (rc)
2531			break;
2532		st->bucket = 0;
2533		/* Fallthrough */
2534	case TCP_SEQ_STATE_ESTABLISHED:
2535	case TCP_SEQ_STATE_TIME_WAIT:
2536		st->state = TCP_SEQ_STATE_ESTABLISHED;
2537		if (st->bucket > tcp_hashinfo.ehash_mask)
2538			break;
2539		rc = established_get_first(seq);
2540		while (offset-- && rc)
2541			rc = established_get_next(seq, rc);
2542	}
2543
2544	st->num = orig_num;
2545
2546	return rc;
2547}
2548
2549static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2550{
2551	struct tcp_iter_state *st = seq->private;
2552	void *rc;
2553
2554	if (*pos && *pos == st->last_pos) {
2555		rc = tcp_seek_last_pos(seq);
2556		if (rc)
2557			goto out;
2558	}
2559
2560	st->state = TCP_SEQ_STATE_LISTENING;
2561	st->num = 0;
2562	st->bucket = 0;
2563	st->offset = 0;
2564	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2565
2566out:
2567	st->last_pos = *pos;
2568	return rc;
2569}
2570
2571static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2572{
2573	struct tcp_iter_state *st = seq->private;
2574	void *rc = NULL;
2575
2576	if (v == SEQ_START_TOKEN) {
2577		rc = tcp_get_idx(seq, 0);
2578		goto out;
2579	}
2580
2581	switch (st->state) {
2582	case TCP_SEQ_STATE_OPENREQ:
2583	case TCP_SEQ_STATE_LISTENING:
2584		rc = listening_get_next(seq, v);
2585		if (!rc) {
2586			st->state = TCP_SEQ_STATE_ESTABLISHED;
2587			st->bucket = 0;
2588			st->offset = 0;
2589			rc	  = established_get_first(seq);
2590		}
2591		break;
2592	case TCP_SEQ_STATE_ESTABLISHED:
2593	case TCP_SEQ_STATE_TIME_WAIT:
2594		rc = established_get_next(seq, v);
2595		break;
2596	}
2597out:
2598	++*pos;
2599	st->last_pos = *pos;
2600	return rc;
2601}
2602
2603static void tcp_seq_stop(struct seq_file *seq, void *v)
2604{
2605	struct tcp_iter_state *st = seq->private;
2606
2607	switch (st->state) {
2608	case TCP_SEQ_STATE_OPENREQ:
2609		if (v) {
2610			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2611			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2612		}
2613	case TCP_SEQ_STATE_LISTENING:
2614		if (v != SEQ_START_TOKEN)
2615			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2616		break;
2617	case TCP_SEQ_STATE_TIME_WAIT:
2618	case TCP_SEQ_STATE_ESTABLISHED:
2619		if (v)
2620			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2621		break;
2622	}
2623}
2624
2625int tcp_seq_open(struct inode *inode, struct file *file)
2626{
2627	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2628	struct tcp_iter_state *s;
2629	int err;
2630
2631	err = seq_open_net(inode, file, &afinfo->seq_ops,
2632			  sizeof(struct tcp_iter_state));
2633	if (err < 0)
2634		return err;
2635
2636	s = ((struct seq_file *)file->private_data)->private;
2637	s->family		= afinfo->family;
2638	s->last_pos 		= 0;
2639	return 0;
2640}
2641EXPORT_SYMBOL(tcp_seq_open);
2642
2643int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2644{
2645	int rc = 0;
2646	struct proc_dir_entry *p;
2647
2648	afinfo->seq_ops.start		= tcp_seq_start;
2649	afinfo->seq_ops.next		= tcp_seq_next;
2650	afinfo->seq_ops.stop		= tcp_seq_stop;
2651
2652	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2653			     afinfo->seq_fops, afinfo);
2654	if (!p)
2655		rc = -ENOMEM;
2656	return rc;
2657}
2658EXPORT_SYMBOL(tcp_proc_register);
2659
2660void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2661{
2662	remove_proc_entry(afinfo->name, net->proc_net);
2663}
2664EXPORT_SYMBOL(tcp_proc_unregister);
2665
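/* Format one /proc/net/tcp line for a SYN_RECV request sock. */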
2666static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2667			 struct seq_file *f, int i, kuid_t uid, int *len)
2668{
2669	const struct inet_request_sock *ireq = inet_rsk(req);
2670	long delta = req->expires - jiffies;
2671
2672	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2673		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2674		i,
2675		ireq->loc_addr,
2676		ntohs(inet_sk(sk)->inet_sport),
2677		ireq->rmt_addr,
2678		ntohs(ireq->rmt_port),
2679		TCP_SYN_RECV,
2680		0, 0, /* could print option size, but that is af dependent. */
2681		1,    /* timers active (only the expire timer) */
2682		jiffies_delta_to_clock_t(delta),
2683		req->num_timeout,
2684		from_kuid_munged(seq_user_ns(f), uid),
2685		0,  /* non standard timer */
2686		0, /* open_requests have no inode */
2687		atomic_read(&sk->sk_refcnt),
2688		req,
2689		len);
2690}
2691
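/* Format one /proc/net/tcp line for a full socket (listening or
 * established), including timer and queue information.
 */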
2692static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2693{
2694	int timer_active;
2695	unsigned long timer_expires;
2696	const struct tcp_sock *tp = tcp_sk(sk);
2697	const struct inet_connection_sock *icsk = inet_csk(sk);
2698	const struct inet_sock *inet = inet_sk(sk);
2699	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2700	__be32 dest = inet->inet_daddr;
2701	__be32 src = inet->inet_rcv_saddr;
2702	__u16 destp = ntohs(inet->inet_dport);
2703	__u16 srcp = ntohs(inet->inet_sport);
2704	int rx_queue;
2705
2706	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2707		timer_active	= 1;
2708		timer_expires	= icsk->icsk_timeout;
2709	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2710		timer_active	= 4;
2711		timer_expires	= icsk->icsk_timeout;
2712	} else if (timer_pending(&sk->sk_timer)) {
2713		timer_active	= 2;
2714		timer_expires	= sk->sk_timer.expires;
2715	} else {
2716		timer_active	= 0;
2717		timer_expires = jiffies;
2718	}
2719
2720	if (sk->sk_state == TCP_LISTEN)
2721		rx_queue = sk->sk_ack_backlog;
2722	else
2723		/*
2724		 * Because we don't lock the socket, we might find a transient negative value
2725		 */
2726		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2727
2728	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2729			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2730		i, src, srcp, dest, destp, sk->sk_state,
2731		tp->write_seq - tp->snd_una,
2732		rx_queue,
2733		timer_active,
2734		jiffies_delta_to_clock_t(timer_expires - jiffies),
2735		icsk->icsk_retransmits,
2736		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2737		icsk->icsk_probes_out,
2738		sock_i_ino(sk),
2739		atomic_read(&sk->sk_refcnt), sk,
2740		jiffies_to_clock_t(icsk->icsk_rto),
2741		jiffies_to_clock_t(icsk->icsk_ack.ato),
2742		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2743		tp->snd_cwnd,
2744		sk->sk_state == TCP_LISTEN ?
2745		    (fastopenq ? fastopenq->max_qlen : 0) :
2746		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
2747		len);
2748}
2749
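/* Format one /proc/net/tcp line for a TIME_WAIT socket. */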
2750static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2751			       struct seq_file *f, int i, int *len)
2752{
2753	__be32 dest, src;
2754	__u16 destp, srcp;
2755	long delta = tw->tw_ttd - jiffies;
2756
2757	dest  = tw->tw_daddr;
2758	src   = tw->tw_rcv_saddr;
2759	destp = ntohs(tw->tw_dport);
2760	srcp  = ntohs(tw->tw_sport);
2761
2762	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2763		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2764		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2765		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2766		atomic_read(&tw->tw_refcnt), tw, len);
2767}
2768
2769#define TMPSZ 150
2770
2771static int tcp4_seq_show(struct seq_file *seq, void *v)
2772{
2773	struct tcp_iter_state *st;
2774	int len;
2775
2776	if (v == SEQ_START_TOKEN) {
2777		seq_printf(seq, "%-*s\n", TMPSZ - 1,
2778			   "  sl  local_address rem_address   st tx_queue "
2779			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2780			   "inode");
2781		goto out;
2782	}
2783	st = seq->private;
2784
2785	switch (st->state) {
2786	case TCP_SEQ_STATE_LISTENING:
2787	case TCP_SEQ_STATE_ESTABLISHED:
2788		get_tcp4_sock(v, seq, st->num, &len);
2789		break;
2790	case TCP_SEQ_STATE_OPENREQ:
2791		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2792		break;
2793	case TCP_SEQ_STATE_TIME_WAIT:
2794		get_timewait4_sock(v, seq, st->num, &len);
2795		break;
2796	}
2797	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2798out:
2799	return 0;
2800}
2801
2802static const struct file_operations tcp_afinfo_seq_fops = {
2803	.owner   = THIS_MODULE,
2804	.open    = tcp_seq_open,
2805	.read    = seq_read,
2806	.llseek  = seq_lseek,
2807	.release = seq_release_net
2808};
2809
2810static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2811	.name		= "tcp",
2812	.family		= AF_INET,
2813	.seq_fops	= &tcp_afinfo_seq_fops,
2814	.seq_ops	= {
2815		.show		= tcp4_seq_show,
2816	},
2817};
2818
2819static int __net_init tcp4_proc_init_net(struct net *net)
2820{
2821	return tcp_proc_register(net, &tcp4_seq_afinfo);
2822}
2823
2824static void __net_exit tcp4_proc_exit_net(struct net *net)
2825{
2826	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2827}
2828
2829static struct pernet_operations tcp4_net_ops = {
2830	.init = tcp4_proc_init_net,
2831	.exit = tcp4_proc_exit_net,
2832};
2833
2834int __init tcp4_proc_init(void)
2835{
2836	return register_pernet_subsys(&tcp4_net_ops);
2837}
2838
2839void tcp4_proc_exit(void)
2840{
2841	unregister_pernet_subsys(&tcp4_net_ops);
2842}
2843#endif /* CONFIG_PROC_FS */
2844
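/* GRO receive handler for TCP over IPv4: verify the checksum (or compute
 * it when the device did not) before handing the skb to the generic
 * tcp_gro_receive().
 */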
2845struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2846{
2847	const struct iphdr *iph = skb_gro_network_header(skb);
2848	__wsum wsum;
2849	__sum16 sum;
2850
2851	switch (skb->ip_summed) {
2852	case CHECKSUM_COMPLETE:
2853		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2854				  skb->csum)) {
2855			skb->ip_summed = CHECKSUM_UNNECESSARY;
2856			break;
2857		}
2858flush:
2859		NAPI_GRO_CB(skb)->flush = 1;
2860		return NULL;
2861
2862	case CHECKSUM_NONE:
2863		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
2864					  skb_gro_len(skb), IPPROTO_TCP, 0);
2865		sum = csum_fold(skb_checksum(skb,
2866					     skb_gro_offset(skb),
2867					     skb_gro_len(skb),
2868					     wsum));
2869		if (sum)
2870			goto flush;
2871
2872		skb->ip_summed = CHECKSUM_UNNECESSARY;
2873		break;
2874	}
2875
2876	return tcp_gro_receive(head, skb);
2877}
2878
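/* Finish GRO for an aggregated TCP/IPv4 skb: restore the pseudo-header
 * checksum and mark the segment as TCPv4 GSO before the generic
 * completion runs.
 */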
2879int tcp4_gro_complete(struct sk_buff *skb)
2880{
2881	const struct iphdr *iph = ip_hdr(skb);
2882	struct tcphdr *th = tcp_hdr(skb);
2883
2884	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2885				  iph->saddr, iph->daddr, 0);
2886	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2887
2888	return tcp_gro_complete(skb);
2889}
2890
2891struct proto tcp_prot = {
2892	.name			= "TCP",
2893	.owner			= THIS_MODULE,
2894	.close			= tcp_close,
2895	.connect		= tcp_v4_connect,
2896	.disconnect		= tcp_disconnect,
2897	.accept			= inet_csk_accept,
2898	.ioctl			= tcp_ioctl,
2899	.init			= tcp_v4_init_sock,
2900	.destroy		= tcp_v4_destroy_sock,
2901	.shutdown		= tcp_shutdown,
2902	.setsockopt		= tcp_setsockopt,
2903	.getsockopt		= tcp_getsockopt,
2904	.recvmsg		= tcp_recvmsg,
2905	.sendmsg		= tcp_sendmsg,
2906	.sendpage		= tcp_sendpage,
2907	.backlog_rcv		= tcp_v4_do_rcv,
2908	.release_cb		= tcp_release_cb,
2909	.mtu_reduced		= tcp_v4_mtu_reduced,
2910	.hash			= inet_hash,
2911	.unhash			= inet_unhash,
2912	.get_port		= inet_csk_get_port,
2913	.enter_memory_pressure	= tcp_enter_memory_pressure,
2914	.sockets_allocated	= &tcp_sockets_allocated,
2915	.orphan_count		= &tcp_orphan_count,
2916	.memory_allocated	= &tcp_memory_allocated,
2917	.memory_pressure	= &tcp_memory_pressure,
2918	.sysctl_wmem		= sysctl_tcp_wmem,
2919	.sysctl_rmem		= sysctl_tcp_rmem,
2920	.max_header		= MAX_TCP_HEADER,
2921	.obj_size		= sizeof(struct tcp_sock),
2922	.slab_flags		= SLAB_DESTROY_BY_RCU,
2923	.twsk_prot		= &tcp_timewait_sock_ops,
2924	.rsk_prot		= &tcp_request_sock_ops,
2925	.h.hashinfo		= &tcp_hashinfo,
2926	.no_autobind		= true,
2927#ifdef CONFIG_COMPAT
2928	.compat_setsockopt	= compat_tcp_setsockopt,
2929	.compat_getsockopt	= compat_tcp_getsockopt,
2930#endif
2931#ifdef CONFIG_MEMCG_KMEM
2932	.init_cgroup		= tcp_init_cgroup,
2933	.destroy_cgroup		= tcp_destroy_cgroup,
2934	.proto_cgroup		= tcp_proto_cgroup,
2935#endif
2936};
2937EXPORT_SYMBOL(tcp_prot);
2938
2939static int __net_init tcp_sk_init(struct net *net)
2940{
2941	net->ipv4.sysctl_tcp_ecn = 2;
2942	return 0;
2943}
2944
2945static void __net_exit tcp_sk_exit(struct net *net)
2946{
2947}
2948
2949static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2950{
2951	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2952}
2953
2954static struct pernet_operations __net_initdata tcp_sk_ops = {
2955       .init	   = tcp_sk_init,
2956       .exit	   = tcp_sk_exit,
2957       .exit_batch = tcp_sk_exit_batch,
2958};
2959
2960void __init tcp_v4_init(void)
2961{
2962	inet_hashinfo_init(&tcp_hashinfo);
2963	if (register_pernet_subsys(&tcp_sk_ops))
2964		panic("Failed to create the TCP control socket.\n");
2965}
2966