inet_connection_sock.c revision 86b08d867d7de001ab224180ed7865fab93fd56e
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection-oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This array holds the first and last local port number.
 */
int sysctl_local_port_range[2] = { 32768, 61000 };
DEFINE_SEQLOCK(sysctl_port_range_lock);

void inet_get_local_port_range(int *low, int *high)
{
	unsigned seq;
	do {
		seq = read_seqbegin(&sysctl_port_range_lock);

		*low = sysctl_local_port_range[0];
		*high = sysctl_local_port_range[1];
	} while (read_seqretry(&sysctl_port_range_lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
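
/*
 * Example (illustrative sketch, not part of the original file): a
 * caller uses the seqlock-protected snapshot above to pick a random
 * port in the ephemeral range.  The helper name is hypothetical.
 */
static inline int example_pick_ephemeral_port(void)
{
	int low, high;

	/* The read loop above retries until low/high form a consistent pair. */
	inet_get_local_port_range(&low, &high);
	return low + net_random() % (high - low + 1);
}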

int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb)
{
	const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
				    sk2_rcv_saddr == sk_rcv_saddr)
					break;
			}
		}
	}
	return node != NULL;
}

EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
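
/*
 * Illustrative sketch (hypothetical helper, not used anywhere): the
 * per-pair test above, distilled for two IPv4 sockets that share a
 * port and are not segregated by a bound device.  A zero address is
 * the wildcard and collides with everything.
 */
static inline int example_two_socks_conflict(int reuse1, int reuse2,
					     int sk2_listening,
					     __be32 addr1, __be32 addr2)
{
	if (reuse1 && reuse2 && !sk2_listening)
		return 0;	/* both set SO_REUSEADDR, peer not LISTENing */
	return !addr1 || !addr2 || addr1 == addr2;
}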

/* Obtain a reference to a local port for the given sock;
 * if snum is zero, select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		rover = net_random() % remaining + low;

		do {
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (tb->ib_net == net && tb->port == rover)
					goto next;
			break;
		next:
			spin_unlock(&head->lock);
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0)
			goto fail;

		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its lock.
		 */
		snum = rover;
	} else {
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (tb->ib_net == net && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (tb->fastreuse > 0 &&
		    sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb))
				goto fail_unlock;
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}

EXPORT_SYMBOL_GPL(inet_csk_get_port);
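
/*
 * Hook-up sketch (shown for orientation; struct fields elided):
 * transports reach this function through struct proto, so bind(2)
 * ends up here via sk->sk_prot->get_port().  For example:
 *
 *	struct proto example_proto = {
 *		.name	   = "EXAMPLE",
 *		.get_port  = inet_csk_get_port,
 *		...
 *	};
 */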

/*
 * Wait for an incoming connection while avoiding race conditions.
 * This must be called with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk->sk_sleep, &wait);
	return err;
}
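
/*
 * The generic shape of the exclusive-wait loop above (sketch of the
 * pattern, details elided):
 *
 *	DEFINE_WAIT(wait);
 *	for (;;) {
 *		prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		timeo = schedule_timeout(timeo);
 *		if (signal_pending(current) || !timeo)
 *			break;
 *	}
 *	finish_wait(wq, &wait);
 *
 * Because the waiter is exclusive, a single wake_up() on wq rouses
 * only one sleeper, which is what gives accept() its wake-one
 * behaviour under a thundering herd of waiting processes.
 */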

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non-blocking socket, don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}

	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
	WARN_ON(newsk->sk_state == TCP_SYN_RECV);
out:
	release_sock(sk);
	return newsk;
out_err:
	newsk = NULL;
	*err = error;
	goto out;
}

EXPORT_SYMBOL(inet_csk_accept);
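
/*
 * User-space view (illustrative): a blocking accept(2) on a TCP
 * listener reaches this function via sys_accept -> inet_accept ->
 * sk->sk_prot->accept, which TCP points at inet_csk_accept:
 *
 *	int lfd = socket(AF_INET, SOCK_STREAM, 0);
 *	bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(lfd, 128);
 *	int cfd = accept(lfd, NULL, NULL);	// may sleep in
 *						// inet_csk_wait_for_connect()
 */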

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expiry
 * jiffies as an optimization.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}

EXPORT_SYMBOL(inet_csk_init_xmit_timers);
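
/*
 * Example caller (sketch; TCP wires up its three handlers this way in
 * tcp_init_xmit_timers()):
 *
 *	inet_csk_init_xmit_timers(sk, &tcp_write_timer,
 *				  &tcp_delack_timer,
 *				  &tcp_keepalive_timer);
 *
 * Each handler receives the socket pointer as its unsigned long
 * argument, courtesy of the setup_timer() calls above.
 */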

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}

EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
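
/*
 * Usage sketch: TCP arms the keepalive machinery with
 *
 *	inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
 *
 * and disarms it with inet_csk_delete_keepalive_timer() when
 * SO_KEEPALIVE is cleared or the socket leaves an eligible state.
 * Note that sk->sk_timer is shared: a listening socket reuses it
 * below as the clock that drives inet_csk_reqsk_queue_prune().
 */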

struct dst_entry *inet_csk_route_req(struct sock *sk,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options *opt = inet_rsk(req)->opt;
	struct flowi fl = { .oif = sk->sk_bound_dev_if,
			    .nl_u = { .ip4_u =
				      { .daddr = ((opt && opt->srr) ?
						  opt->faddr :
						  ireq->rmt_addr),
					.saddr = ireq->loc_addr,
					.tos = RT_CONN_FLAGS(sk) } },
			    .proto = sk->sk_protocol,
			    .flags = inet_sk_flowi_flags(sk),
			    .uli_u = { .ports =
				       { .sport = inet_sk(sk)->sport,
					 .dport = ireq->rmt_port } } };
	struct net *net = sock_net(sk);

	security_req_classify_flow(req, &fl);
	if (ip_route_output_flow(net, &rt, &fl, sk, 0)) {
		IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
		ip_rt_put(rt);
		IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}
	return &rt->u.dst;
}

EXPORT_SYMBOL_GPL(inet_csk_route_req);

static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) &
	       (synq_hsize - 1);
}
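
/*
 * Note: the mask above relies on synq_hsize being a power of two, so
 * that (h & (synq_hsize - 1)) == (h % synq_hsize).  E.g. with a
 * 512-entry syn table the jhash result is masked with 0x1ff, landing
 * in [0, 511].  reqsk_queue_alloc() rounds nr_table_entries up to a
 * power of two precisely so this works.
 */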

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			WARN_ON(req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}

EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established sockets) within the first
	 * timeout.  If a SYN-ACK was not acknowledged for 3 seconds,
	 * it means one of the following: the SYN-ACK was lost, the ACK
	 * was lost, the RTT is high, or nobody ever planned to ACK
	 * (i.e. a synflood).  When the server is a bit loaded, the
	 * queue is populated with old open requests, reducing the
	 * effective size of the queue.  When the server is well
	 * loaded, the queue size reduces to zero after several minutes
	 * of work.  That is not a synflood, it is normal operation.
	 * The solution is to prune overly old entries, overriding the
	 * normal timeout, when the situation becomes dangerous.
	 *
	 * Essentially, we reserve half of the room for young
	 * embryos and abort old ones without pity if they are
	 * about to clog our table.
	 */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = lopt->qlen_young << 1;

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
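
	/*
	 * Worked example with assumed numbers: a 256-slot table has
	 * max_qlen_log == 8, so the check above fires once qlen >= 128.
	 * With qlen == 128 and qlen_young == 16, young starts at 32 and
	 * doubles while the loop shrinks thresh (32, 64, 128), taking
	 * thresh from a default of 5 down to 2: old entries then get
	 * only two retransmits before being cut.
	 */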

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				if ((req->retrans < thresh ||
				     (inet_rsk(req)->acked && req->retrans < max_retries))
				    && !req->rsk_ops->rtx_syn_ack(parent, req)) {
					unsigned long timeo;

					if (req->retrans++ == 0)
						lopt->qlen_young--;
					timeo = min((timeout << req->retrans), max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);

struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
			    const gfp_t priority)
{
	struct sock *newsk = sk_clone(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->dport = inet_rsk(req)->rmt_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0,
		       sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}

EXPORT_SYMBOL_GPL(inet_csk_clone);
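
/*
 * Usage sketch: this is the first half of turning a request_sock into
 * a full child socket.  TCP, for instance, calls it from
 * tcp_create_openreq_child() roughly as
 *
 *	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
 *
 * and then fills in the TCP-specific state before the child is handed
 * to the accept queue.
 */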

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has a non-zero inet_sk(sk)->num, it must be bound */
	WARN_ON(inet_sk(sk)->num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	atomic_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}

EXPORT_SYMBOL(inet_csk_destroy_sock);

int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition has not yet been validated by get_port().
	 * That is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->num)) {
		inet->sport = htons(inet->num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}

EXPORT_SYMBOL_GPL(inet_csk_listen_start);
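
/*
 * Call-path sketch (era-typical flow): listen(2) on an AF_INET stream
 * socket runs sys_listen -> inet_listen ->
 * inet_csk_listen_start(sk, backlog).  The backlog becomes
 * nr_table_entries here, which reqsk_queue_alloc() clamps and rounds
 * up to a power of two for the SYN table.
 */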

/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);

	/* Following the specs, it would be better either to send a FIN
	 * (and enter FIN-WAIT-1; that is the normal close)
	 * or to send an active reset (abort).
	 * Certainly, it is pretty dangerous during a synflood, but that is
	 * a bad justification for our negligence 8)
	 * To be honest, we are not able to do either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(&icsk->icsk_accept_queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		atomic_inc(sk->sk_prot->orphan_count);

		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	WARN_ON(sk->sk_ack_backlog);
}

EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->daddr;
	sin->sin_port		= inet->dport;
}

EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif