inet_connection_sock.c revision e6c022a4fa2d2d9ca9d0a7ac3b05ad988f39fc30
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or(at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This struct holds the first and last local port number.
 */
struct local_ports sysctl_local_ports __read_mostly = {
	.lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
	.range = { 32768, 61000 },
};

unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);

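/*
 * Read the sysctl port range under its seqlock, so a concurrent update of
 * the two values can never be observed half-way through.
 */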
void inet_get_local_port_range(int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&sysctl_local_ports.lock);

		*low = sysctl_local_ports.range[0];
		*high = sysctl_local_ports.range[1];
	} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);

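/*
 * Check whether 'sk' can share the port represented by bind bucket 'tb'.
 * Returns non-zero if a conflicting owner is found.  When both sockets set
 * SO_REUSEADDR and the existing owner is not listening there is normally no
 * conflict; if 'relax' is false such pairs are still compared by local
 * address, which the automatic port selection in inet_csk_get_port() relies
 * on.
 */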
int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb, bool relax)
{
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;
	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the sockets listed
	 * in tb->owners belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);

				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
		}
	}
	return node != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
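 * Returns 0 on success and a non-zero value if no usable port was found.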
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = net_random() % remaining + low;

		smallest_size = -1;
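		/*
		 * smallest_rover/smallest_size remember the in-use port
		 * whose bind bucket has the fewest owners; it is used as a
		 * fallback when the whole range is already bound.
		 */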
		do {
			if (inet_is_reserved_local_port(rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (tb->fastreuse > 0 &&
					    sk->sk_reuse &&
					    sk->sk_state != TCP_LISTEN &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
							snum = smallest_rover;
							goto tb_found;
						}
					}
					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
						snum = rover;
						goto tb_found;
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its lock.
		 */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if (tb->fastreuse > 0 &&
		    sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
				if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}

				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" adds the new waiter
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct sock *newsk;
	struct request_sock *req;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find an already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non-blocking socket, don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue);
	newsk = req->sk;

	sk_acceptq_removed(sk);
	if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) {
		spin_lock_bh(&queue->fastopenq->lock);
		if (tcp_rsk(req)->listener) {
			/* We are still waiting for the final ACK of the
			 * 3WHS, so we can't free req now. Instead, we set
			 * req->sk to NULL to signify that the child socket
			 * is taken, so reqsk_fastopen_remove() will free
			 * the req when the 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq->lock);
	}
out:
	release_sock(sk);
	if (req)
		__reqsk_free(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * We use different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expiry
 * jiffies as an optimization.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

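/*
 * Build the IPv4 route used to answer a connection request: the flow is
 * keyed by the request's addresses and ports, honouring any source-route
 * option carried in the request.
 */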
struct dst_entry *inet_csk_route_req(struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options_rcu *opt = inet_rsk(req)->opt;
	struct net *net = sock_net(sk);
	int flags = inet_sk_flowi_flags(sk);

	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol,
			   flags,
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

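/*
 * Same as above, but for the freshly cloned child socket: the flow is
 * stored in the child's inet cork and the parent's IP options are read
 * under rcu_read_lock().
 */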
struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct net *net = sock_net(sk);
	struct flowi4 *fl4;
	struct rtable *rt;

	fl4 = &newinet->cork.fl.u.ip4;

	rcu_read_lock();
	opt = rcu_dereference(newinet->inet_opt);
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

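/*
 * Look up a pending request_sock in the listener's SYN table by remote
 * port/address and local address.  On success, *prevp points at the
 * predecessor's dl_next slot, which the caller can use to unlink the entry.
 */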
struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			WARN_ON(req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;


/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK;
	 * start resending at the end of the deferring period
	 * to give a last chance for data or the ACK to create
	 * an established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

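/* Retransmit the SYN-ACK for a pending request and count the attempt. */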
int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req, NULL);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to an established socket) within the first
	 * timeout. If a SYN-ACK was not acknowledged for 1 second, it
	 * means one of the following: the SYN-ACK was lost, the ACK was
	 * lost, the RTT is high, or nobody planned to ack (i.e. a
	 * synflood). When the server is a bit loaded, the queue is
	 * populated with old open requests, reducing its effective
	 * size. When the server is heavily loaded, the queue size
	 * shrinks to zero after several minutes of work. That is not a
	 * synflood, it is normal operation. The solution is to prune
	 * entries that are too old, overriding the normal timeout, when
	 * the situation becomes dangerous.
	 *
	 * Essentially, we reserve half of the room for young embryos
	 * and abort old ones without pity if they are about to clog
	 * our table.
	 */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = (lopt->qlen_young << 1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

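	/*
	 * The SYN table is scanned incrementally: each run visits
	 * 'budget' hash buckets, and clock_hand remembers where the
	 * next run should pick up.
	 */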
	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				int expire = 0, resend = 0;

				syn_ack_recalc(req, thresh, max_retries,
					       queue->rskq_defer_accept,
					       &expire, &resend);
				req->rsk_ops->syn_ack_timeout(parent, req);
				if (!expire &&
				    (!resend ||
				     !inet_rtx_syn_ack(parent, req) ||
				     inet_rsk(req)->acked)) {
					unsigned long timeo;

					if (req->num_timeout++ == 0)
						lopt->qlen_young--;
					timeo = min(timeout << req->num_timeout,
						    max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);

/**
 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 *	@sk: the socket to clone
 *	@req: request_sock
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
		inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
		inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in the hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If inet_sk(sk)->inet_num is non-zero, the socket must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

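/*
 * Switch a socket to the LISTEN state: allocate the accept/SYN queues,
 * (re)validate the local port and hash the socket.  Returns 0 on success,
 * a queue allocation error, or -EADDRINUSE if the port cannot be used.
 */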
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves as listening,
	 * but this transition has not yet been validated by get_port().
	 * It is OK, because the socket enters the hash table only
	 * after the validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(queue);

	/* Following the specs, it would be better either to send a FIN
	 * (and enter FIN-WAIT-1, that is a normal close)
	 * or to send an active reset (abort).
	 * Certainly, it is pretty dangerous during a synflood, but that is
	 * a bad justification for our negligence 8)
	 * To be honest, we are not able to implement either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) {
			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
			BUG_ON(sk != tcp_rsk(req)->listener);

			/* Paranoid, to prevent race condition if
			 * an inbound pkt destined for child is
			 * blocked by sock lock in tcp_v4_rcv().
			 * Also to satisfy an assertion in
			 * tcp_v4_destroy_sock().
			 */
			tcp_sk(child)->fastopen_rsk = NULL;
			sock_put(sk);
		}
		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	if (queue->fastopenq != NULL) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq->lock);
		acc_req = queue->fastopenq->rskq_rst_head;
		queue->fastopenq->rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq->lock);
		while ((req = acc_req) != NULL) {
			acc_req = req->dl_next;
			__reqsk_free(req);
		}
	}
	WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->inet_daddr;
	sin->sin_port		= inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

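	/*
	 * rt may be NULL here; dst is the first member of struct rtable,
	 * so &rt->dst then evaluates to NULL as well.
	 */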
	return &rt->dst;
}

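/*
 * Update the path MTU on the socket's cached route, rebuilding the route
 * first if the cached one is no longer valid, and re-checking it afterwards
 * since the PMTU update may have invalidated it.
 */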
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);