sock.c revision 1144182a8757f2a1f909f0c592898aaaf80884fc
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (We pre-construct these
 * strings at build time, so that runtime initialization of socket
 * locks is fast.)
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
  "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" ,
  "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" ,
  "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif

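/*
 * Convert a user-supplied struct timeval into a timeout in jiffies for
 * SO_RCVTIMEO/SO_SNDTIMEO.  A value of {0, 0} means "no timeout" and is
 * stored as MAX_SCHEDULE_TIMEOUT; a negative tv_sec is clamped to an
 * immediate timeout of zero.
 */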
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
				current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm,  current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk, int flag)
{
	if (sock_flag(sk, flag)) {
		sock_reset_flag(sk, flag);
		if (!sock_flag(sk, SOCK_TIMESTAMP) &&
		    !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
			net_disable_timestamp();
		}
	}
}

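/*
 * Queue a received skb on sk's receive queue: run the socket filter,
 * charge the skb to the socket's receive buffer accounting and wake up
 * the reader.  Returns 0 on success, -ENOMEM when the receive buffer is
 * full, or -ENOBUFS when memory scheduling fails.
 */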
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* We are leaving the RCU-protected region, so make sure we don't
	 * leak a non-refcounted dst.
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

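/*
 * Deliver an skb to a socket from bottom-half context: run the filter,
 * then either process the packet directly via the backlog receive
 * handler (when no user context owns the socket) or park it on the
 * backlog queue.  Consumes a reference on sk.
 */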
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

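/*
 * Validate the socket's cached route: if the dst has been obsoleted,
 * drop it and return NULL so the caller can re-route.  __sk_dst_check()
 * expects the socket lock to be held; sk_dst_check() below is the
 * locked/refcounted variant.
 */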
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		rcu_assign_pointer(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints. */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints. */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool)  {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       SOCK_TIMESTAMPING_RX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement SO_SNDLOWAT etc. as not settable
		   (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		if (valbool)
			sock_set_flag(sk, SOCK_RXQ_OVFL);
		else
			sock_reset_flag(sk, SOCK_RXQ_OVFL);
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);


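/*
 * Translate a (pid, cred) pair into the struct ucred reported to
 * userspace, mapping the euid/egid into the caller's user namespace.
 */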
void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
		     sizeof(osk->sk_tx_queue_mapping));
	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

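/*
 * Allocate a struct sock from the protocol's slab cache (or by kmalloc
 * when no cache is registered), run the security hook and pin the
 * protocol module so it cannot be unloaded while the sock exists.
 */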
static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			/*
			 * Caches using SLAB_DESTROY_BY_RCU should leave
			 * sk_node.next unmodified.  Special care is taken
			 * when initializing the object to zero.
			 */
			if (offsetof(struct sock, sk_node.next) != 0)
				memset(sk, 0, offsetof(struct sock, sk_node.next));
			memset(&sk->sk_node.pprev, 0,
			       prot->obj_size - offsetof(struct sock,
							 sk_node.pprev));
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	rcu_read_lock();  /* doing current task, which cannot vanish. */
	classid = task_cls_classid(current);
	rcu_read_unlock();
	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		rcu_assign_pointer(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SOCK_TIMESTAMP);
	sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can tell whether
	 * some packets are still in some tx queue.
	 * If the count is not zero, sock_wfree() will call __sk_free(sk) later.
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop a reference to sk->sk_net.  It has already
 * been dropped in sk_change_net.  Taking a reference to the stopping
 * namespace is not an option.
 * Take a reference to the socket to remove it from the hash while it is
 * still _alive_, and after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

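/*
 * Duplicate a socket for a child connection (e.g. on accept()): copy all
 * fields from the parent and re-initialise the per-socket state that must
 * not be shared, such as locks, queues and reference counts.
 */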
1184{
1185	struct sock *newsk;
1186
1187	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1188	if (newsk != NULL) {
1189		struct sk_filter *filter;
1190
1191		sock_copy(newsk, sk);
1192
1193		/* SANITY */
1194		get_net(sock_net(newsk));
1195		sk_node_init(&newsk->sk_node);
1196		sock_lock_init(newsk);
1197		bh_lock_sock(newsk);
1198		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
1199		newsk->sk_backlog.len = 0;
1200
1201		atomic_set(&newsk->sk_rmem_alloc, 0);
1202		/*
1203		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1204		 */
1205		atomic_set(&newsk->sk_wmem_alloc, 1);
1206		atomic_set(&newsk->sk_omem_alloc, 0);
1207		skb_queue_head_init(&newsk->sk_receive_queue);
1208		skb_queue_head_init(&newsk->sk_write_queue);
1209#ifdef CONFIG_NET_DMA
1210		skb_queue_head_init(&newsk->sk_async_wait_queue);
1211#endif
1212
1213		spin_lock_init(&newsk->sk_dst_lock);
1214		rwlock_init(&newsk->sk_callback_lock);
1215		lockdep_set_class_and_name(&newsk->sk_callback_lock,
1216				af_callback_keys + newsk->sk_family,
1217				af_family_clock_key_strings[newsk->sk_family]);
1218
1219		newsk->sk_dst_cache	= NULL;
1220		newsk->sk_wmem_queued	= 0;
1221		newsk->sk_forward_alloc = 0;
1222		newsk->sk_send_head	= NULL;
1223		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1224
1225		sock_reset_flag(newsk, SOCK_DONE);
1226		skb_queue_head_init(&newsk->sk_error_queue);
1227
1228		filter = newsk->sk_filter;
1229		if (filter != NULL)
1230			sk_filter_charge(newsk, filter);
1231

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so
			 * invalidate the destructor and do a plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		if (newsk->sk_prot->sockets_allocated)
			percpu_counter_inc(newsk->sk_prot->sockets_allocated);

		if (sock_flag(newsk, SOCK_TIMESTAMP) ||
		    sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone);

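/*
 * Attach a route to the socket and derive the socket's offload
 * capabilities (GSO, scatter-gather, hardware checksum) from the
 * output device's feature flags.
 */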
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (totalram_pages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (totalram_pages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);


int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate an skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate an skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
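
/*
 * sock_kmalloc() and sock_kfree_s() must be paired with the same size so
 * that sk_omem_alloc stays balanced.  A minimal usage sketch (the option
 * structure here is hypothetical, for illustration only):
 *
 *	struct foo_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */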

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

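/*
 * Allocate a send skb with @header_len bytes of linear space and
 * @data_len bytes in page fragments, blocking (subject to @noblock and
 * the socket's send timeout) until the socket's send buffer has room.
 */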
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);

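/*
 * Wait for the socket lock to be released by its current owner.  Called
 * with sk_lock.slock held and BHs disabled; both are dropped around the
 * schedule() and re-acquired before returning.
 */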
static void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees that we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	int allocated;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
	allocated = atomic_add_return(amt, prot->memory_allocated);

	/* Under limit. */
	if (allocated <= prot->sysctl_mem[0]) {
		if (prot->memory_pressure && *prot->memory_pressure)
			*prot->memory_pressure = 0;
		return 1;
	}

	/* Under pressure. */
	if (allocated > prot->sysctl_mem[1])
		if (prot->enter_memory_pressure)
			prot->enter_memory_pressure(sk);

	/* Over hard limit. */
	if (allocated > prot->sysctl_mem[2])
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;
	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
				return 1;
	}

	if (prot->memory_pressure) {
		int alloc;

		if (!*prot->memory_pressure)
			return 1;
		alloc = percpu_counter_read_positive(prot->sockets_allocated);
		if (prot->sysctl_mem[2] > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
	atomic_sub(amt, prot->memory_allocated);
	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
		   prot->memory_allocated);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (prot->memory_pressure && *prot->memory_pressure &&
	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
		*prot->memory_pressure = 0;
}
EXPORT_SYMBOL(__sk_mem_reclaim);


/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

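/*
 * Socket timers hold a reference on the socket: sk_reset_timer() takes
 * one when it arms a previously idle timer, and sk_stop_timer() drops
 * one when it deletes a pending timer.
 */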
1925void sk_reset_timer(struct sock *sk, struct timer_list* timer,
1926		    unsigned long expires)
1927{
1928	if (!mod_timer(timer, expires))
1929		sock_hold(sk);
1930}
1931EXPORT_SYMBOL(sk_reset_timer);
1932
1933void sk_stop_timer(struct sock *sk, struct timer_list* timer)
1934{
1935	if (timer_pending(timer) && del_timer(timer))
1936		__sock_put(sk);
1937}
1938EXPORT_SYMBOL(sk_stop_timer);
1939
1940void sock_init_data(struct socket *sock, struct sock *sk)
1941{
1942	skb_queue_head_init(&sk->sk_receive_queue);
1943	skb_queue_head_init(&sk->sk_write_queue);
1944	skb_queue_head_init(&sk->sk_error_queue);
1945#ifdef CONFIG_NET_DMA
1946	skb_queue_head_init(&sk->sk_async_wait_queue);
1947#endif
1948
1949	sk->sk_send_head	=	NULL;
1950
1951	init_timer(&sk->sk_timer);
1952
1953	sk->sk_allocation	=	GFP_KERNEL;
1954	sk->sk_rcvbuf		=	sysctl_rmem_default;
1955	sk->sk_sndbuf		=	sysctl_wmem_default;
1956	sk->sk_state		=	TCP_CLOSE;
1957	sk_set_socket(sk, sock);
1958
1959	sock_set_flag(sk, SOCK_ZAPPED);
1960
1961	if (sock) {
1962		sk->sk_type	=	sock->type;
1963		sk->sk_wq	=	sock->wq;
1964		sock->sk	=	sk;
1965	} else
1966		sk->sk_wq	=	NULL;
1967
1968	spin_lock_init(&sk->sk_dst_lock);
1969	rwlock_init(&sk->sk_callback_lock);
1970	lockdep_set_class_and_name(&sk->sk_callback_lock,
1971			af_callback_keys + sk->sk_family,
1972			af_family_clock_key_strings[sk->sk_family]);
1973
1974	sk->sk_state_change	=	sock_def_wakeup;
1975	sk->sk_data_ready	=	sock_def_readable;
1976	sk->sk_write_space	=	sock_def_write_space;
1977	sk->sk_error_report	=	sock_def_error_report;
1978	sk->sk_destruct		=	sock_def_destruct;
1979
1980	sk->sk_sndmsg_page	=	NULL;
1981	sk->sk_sndmsg_off	=	0;
1982
1983	sk->sk_peer_pid 	=	NULL;
1984	sk->sk_peer_cred	=	NULL;
1985	sk->sk_write_pending	=	0;
1986	sk->sk_rcvlowat		=	1;
1987	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
1988	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
1989
1990	sk->sk_stamp = ktime_set(-1L, 0);
1991
1992	/*
1993	 * Before updating sk_refcnt, we must commit prior changes to memory
1994	 * (Documentation/RCU/rculist_nulls.txt for details)
1995	 */
1996	smp_wmb();
1997	atomic_set(&sk->sk_refcnt, 1);
1998	atomic_set(&sk->sk_drops, 0);
1999}
2000EXPORT_SYMBOL(sock_init_data);
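/*
 * Sketch of how a protocol typically consumes sock_init_data() (the
 * names prefixed my_ are hypothetical): take the defaults, then
 * override only the callbacks the protocol cares about.
 *
 *	static int my_create(struct net *net, struct socket *sock)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto);
 *		if (!sk)
 *			return -ENOMEM;
 *		sock_init_data(sock, sk);
 *		sk->sk_data_ready = my_data_ready;	// replace sock_def_readable
 *		return 0;
 *	}
 */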
2001
2002void lock_sock_nested(struct sock *sk, int subclass)
2003{
2004	might_sleep();
2005	spin_lock_bh(&sk->sk_lock.slock);
2006	if (sk->sk_lock.owned)
2007		__lock_sock(sk);
2008	sk->sk_lock.owned = 1;
2009	spin_unlock(&sk->sk_lock.slock);
2010	/*
2011	 * The sk_lock has mutex_lock() semantics here:
2012	 */
2013	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2014	local_bh_enable();
2015}
2016EXPORT_SYMBOL(lock_sock_nested);
2017
2018void release_sock(struct sock *sk)
2019{
2020	/*
2021	 * The sk_lock has mutex_unlock() semantics:
2022	 */
2023	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2024
2025	spin_lock_bh(&sk->sk_lock.slock);
2026	if (sk->sk_backlog.tail)
2027		__release_sock(sk);
2028	sk->sk_lock.owned = 0;
2029	if (waitqueue_active(&sk->sk_lock.wq))
2030		wake_up(&sk->sk_lock.wq);
2031	spin_unlock_bh(&sk->sk_lock.slock);
2032}
2033EXPORT_SYMBOL(release_sock);
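/*
 * Typical process-context use of the pair above (illustrative):
 *
 *	lock_sock(sk);		// may sleep; softirq input goes to the backlog
 *	// ... modify socket state safely ...
 *	release_sock(sk);	// replays the backlog and wakes lock waiters
 */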
2034
2035/**
2036 * lock_sock_fast - fast version of lock_sock
2037 * @sk: socket
2038 *
2039 * This version should be used for very small sections, where the process won't block.
2040 * Returns false if the fast path is taken:
2041 *   sk_lock.slock locked, owned = 0, BH disabled.
2042 * Returns true if the slow path is taken:
2043 *   sk_lock.slock unlocked, owned = 1, BH enabled.
2044 */
2045bool lock_sock_fast(struct sock *sk)
2046{
2047	might_sleep();
2048	spin_lock_bh(&sk->sk_lock.slock);
2049
2050	if (!sk->sk_lock.owned)
2051		/*
2052		 * Note: fast path, we return with slock held and BH still disabled.
2053		 */
2054		return false;
2055
2056	__lock_sock(sk);
2057	sk->sk_lock.owned = 1;
2058	spin_unlock(&sk->sk_lock.slock);
2059	/*
2060	 * The sk_lock has mutex_lock() semantics here:
2061	 */
2062	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2063	local_bh_enable();
2064	return true;
2065}
2066EXPORT_SYMBOL(lock_sock_fast);
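/*
 * The return value must be passed back to unlock_sock_fast() so the
 * matching unlock path is taken (illustrative usage):
 *
 *	bool slow = lock_sock_fast(sk);
 *	// ... short, non-blocking critical section ...
 *	unlock_sock_fast(sk, slow);	// spin_unlock_bh() or release_sock()
 */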
2067
2068int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2069{
2070	struct timeval tv;
2071	if (!sock_flag(sk, SOCK_TIMESTAMP))
2072		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2073	tv = ktime_to_timeval(sk->sk_stamp);
2074	if (tv.tv_sec == -1)
2075		return -ENOENT;
2076	if (tv.tv_sec == 0) {
2077		sk->sk_stamp = ktime_get_real();
2078		tv = ktime_to_timeval(sk->sk_stamp);
2079	}
2080	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2081}
2082EXPORT_SYMBOL(sock_get_timestamp);
2083
2084int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2085{
2086	struct timespec ts;
2087	if (!sock_flag(sk, SOCK_TIMESTAMP))
2088		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2089	ts = ktime_to_timespec(sk->sk_stamp);
2090	if (ts.tv_sec == -1)
2091		return -ENOENT;
2092	if (ts.tv_sec == 0) {
2093		sk->sk_stamp = ktime_get_real();
2094		ts = ktime_to_timespec(sk->sk_stamp);
2095	}
2096	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2097}
2098EXPORT_SYMBOL(sock_get_timestampns);
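/*
 * The two helpers above back the SIOCGSTAMP and SIOCGSTAMPNS ioctls.
 * Illustrative userspace usage (a sketch, not part of this file):
 *
 *	struct timeval tv;
 *
 *	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
 *		printf("last packet at %ld.%06ld\n", tv.tv_sec, tv.tv_usec);
 */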
2099
2100void sock_enable_timestamp(struct sock *sk, int flag)
2101{
2102	if (!sock_flag(sk, flag)) {
2103		sock_set_flag(sk, flag);
2104		/*
2105		 * We just set one of the two flags that require net
2106		 * time stamping, but time stamping might already have
2107		 * been enabled because of the other one.
2108		 */
2109		if (!sock_flag(sk,
2110				flag == SOCK_TIMESTAMP ?
2111				SOCK_TIMESTAMPING_RX_SOFTWARE :
2112				SOCK_TIMESTAMP))
2113			net_enable_timestamp();
2114	}
2115}
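/*
 * Example of the interlock above (illustrative): if SOCK_TIMESTAMP is
 * already set and SOCK_TIMESTAMPING_RX_SOFTWARE is now being enabled,
 * the check on the other flag fails and net_enable_timestamp() is not
 * called again, so the global timestamping count is bumped only once
 * even if both flags end up set on the socket.
 */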
2116
2117/*
2118 *	Get a socket option on a socket.
2119 *
2120 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
2121 *	asynchronous errors should be reported by getsockopt. We assume
2122 *	this means if you specify SO_ERROR (otherwise, what's the point of it?).
2123 */
2124int sock_common_getsockopt(struct socket *sock, int level, int optname,
2125			   char __user *optval, int __user *optlen)
2126{
2127	struct sock *sk = sock->sk;
2128
2129	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2130}
2131EXPORT_SYMBOL(sock_common_getsockopt);
2132
2133#ifdef CONFIG_COMPAT
2134int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2135				  char __user *optval, int __user *optlen)
2136{
2137	struct sock *sk = sock->sk;
2138
2139	if (sk->sk_prot->compat_getsockopt != NULL)
2140		return sk->sk_prot->compat_getsockopt(sk, level, optname,
2141						      optval, optlen);
2142	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2143}
2144EXPORT_SYMBOL(compat_sock_common_getsockopt);
2145#endif
2146
2147int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2148			struct msghdr *msg, size_t size, int flags)
2149{
2150	struct sock *sk = sock->sk;
2151	int addr_len = 0;
2152	int err;
2153
2154	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2155				   flags & ~MSG_DONTWAIT, &addr_len);
2156	if (err >= 0)
2157		msg->msg_namelen = addr_len;
2158	return err;
2159}
2160EXPORT_SYMBOL(sock_common_recvmsg);
2161
2162/*
2163 *	Set socket options on an inet socket.
2164 */
2165int sock_common_setsockopt(struct socket *sock, int level, int optname,
2166			   char __user *optval, unsigned int optlen)
2167{
2168	struct sock *sk = sock->sk;
2169
2170	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2171}
2172EXPORT_SYMBOL(sock_common_setsockopt);
2173
2174#ifdef CONFIG_COMPAT
2175int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2176				  char __user *optval, unsigned int optlen)
2177{
2178	struct sock *sk = sock->sk;
2179
2180	if (sk->sk_prot->compat_setsockopt != NULL)
2181		return sk->sk_prot->compat_setsockopt(sk, level, optname,
2182						      optval, optlen);
2183	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2184}
2185EXPORT_SYMBOL(compat_sock_common_setsockopt);
2186#endif
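/*
 * These common wrappers are meant to be plugged directly into a
 * protocol's struct proto_ops (sketch; "my_ops" is hypothetical).
 * Each of them simply forwards to the matching sk->sk_prot handler:
 *
 *	static const struct proto_ops my_ops = {
 *		// ...
 *		.setsockopt	= sock_common_setsockopt,
 *		.getsockopt	= sock_common_getsockopt,
 *		.recvmsg	= sock_common_recvmsg,
 *	};
 */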
2187
2188void sk_common_release(struct sock *sk)
2189{
2190	if (sk->sk_prot->destroy)
2191		sk->sk_prot->destroy(sk);
2192
2193	/*
2194	 * Observation: when sk_common_release is called, processes have
2195	 * no access to the socket, but the network stack still does.
2196	 * Step one, detach it from networking:
2197	 *
2198	 * A. Remove from hash tables.
2199	 */
2200
2201	sk->sk_prot->unhash(sk);
2202
2203	/*
2204	 * At this point the socket cannot receive new packets, but some may
2205	 * still be in flight, because another CPU ran the receiver and did
2206	 * the hash table lookup before we unhashed the socket. They will
2207	 * reach the receive queue and be purged by the socket destructor.
2208	 *
2209	 * We may also still have packets pending on the receive queue and,
2210	 * probably, our own packets waiting in device queues. sock_destroy
2211	 * will drain the receive queue, but transmitted packets will delay
2212	 * socket destruction until the last reference is released.
2213	 */
2214
2215	sock_orphan(sk);
2216
2217	xfrm_sk_free_policy(sk);
2218
2219	sk_refcnt_debug_release(sk);
2220	sock_put(sk);
2221}
2222EXPORT_SYMBOL(sk_common_release);
2223
2224static DEFINE_RWLOCK(proto_list_lock);
2225static LIST_HEAD(proto_list);
2226
2227#ifdef CONFIG_PROC_FS
2228#define PROTO_INUSE_NR	64	/* should be enough for now */
2229struct prot_inuse {
2230	int val[PROTO_INUSE_NR];
2231};
2232
2233static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2234
2235#ifdef CONFIG_NET_NS
2236void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2237{
2238	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2239}
2240EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2241
2242int sock_prot_inuse_get(struct net *net, struct proto *prot)
2243{
2244	int cpu, idx = prot->inuse_idx;
2245	int res = 0;
2246
2247	for_each_possible_cpu(cpu)
2248		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2249
2250	return res >= 0 ? res : 0;
2251}
2252EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2253
2254static int __net_init sock_inuse_init_net(struct net *net)
2255{
2256	net->core.inuse = alloc_percpu(struct prot_inuse);
2257	return net->core.inuse ? 0 : -ENOMEM;
2258}
2259
2260static void __net_exit sock_inuse_exit_net(struct net *net)
2261{
2262	free_percpu(net->core.inuse);
2263}
2264
2265static struct pernet_operations net_inuse_ops = {
2266	.init = sock_inuse_init_net,
2267	.exit = sock_inuse_exit_net,
2268};
2269
2270static __init int net_inuse_init(void)
2271{
2272	if (register_pernet_subsys(&net_inuse_ops))
2273		panic("Cannot initialize net inuse counters");
2274
2275	return 0;
2276}
2277
2278core_initcall(net_inuse_init);
2279#else
2280static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2281
2282void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2283{
2284	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2285}
2286EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2287
2288int sock_prot_inuse_get(struct net *net, struct proto *prot)
2289{
2290	int cpu, idx = prot->inuse_idx;
2291	int res = 0;
2292
2293	for_each_possible_cpu(cpu)
2294		res += per_cpu(prot_inuse, cpu).val[idx];
2295
2296	return res >= 0 ? res : 0;
2297}
2298EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2299#endif
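/*
 * Protocols feed these counters from their hash callbacks; a minimal
 * sketch of how the API is used:
 *
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);	// on hash
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);	// on unhash
 *
 * sock_prot_inuse_get() then sums the per-cpu deltas and clamps a
 * transiently negative total to zero.
 */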
2300
2301static void assign_proto_idx(struct proto *prot)
2302{
2303	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2304
2305	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2306		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
2307		return;
2308	}
2309
2310	set_bit(prot->inuse_idx, proto_inuse_idx);
2311}
2312
2313static void release_proto_idx(struct proto *prot)
2314{
2315	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2316		clear_bit(prot->inuse_idx, proto_inuse_idx);
2317}
2318#else
2319static inline void assign_proto_idx(struct proto *prot)
2320{
2321}
2322
2323static inline void release_proto_idx(struct proto *prot)
2324{
2325}
2326#endif
2327
2328int proto_register(struct proto *prot, int alloc_slab)
2329{
2330	if (alloc_slab) {
2331		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2332					SLAB_HWCACHE_ALIGN | prot->slab_flags,
2333					NULL);
2334
2335		if (prot->slab == NULL) {
2336			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
2337			       prot->name);
2338			goto out;
2339		}
2340
2341		if (prot->rsk_prot != NULL) {
2342			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2343			if (prot->rsk_prot->slab_name == NULL)
2344				goto out_free_sock_slab;
2345
2346			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2347								 prot->rsk_prot->obj_size, 0,
2348								 SLAB_HWCACHE_ALIGN, NULL);
2349
2350			if (prot->rsk_prot->slab == NULL) {
2351				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
2352				       prot->name);
2353				goto out_free_request_sock_slab_name;
2354			}
2355		}
2356
2357		if (prot->twsk_prot != NULL) {
2358			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2359
2360			if (prot->twsk_prot->twsk_slab_name == NULL)
2361				goto out_free_request_sock_slab;
2362
2363			prot->twsk_prot->twsk_slab =
2364				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2365						  prot->twsk_prot->twsk_obj_size,
2366						  0,
2367						  SLAB_HWCACHE_ALIGN |
2368							prot->slab_flags,
2369						  NULL);
2370			if (prot->twsk_prot->twsk_slab == NULL)
2371				goto out_free_timewait_sock_slab_name;
2372		}
2373	}
2374
2375	write_lock(&proto_list_lock);
2376	list_add(&prot->node, &proto_list);
2377	assign_proto_idx(prot);
2378	write_unlock(&proto_list_lock);
2379	return 0;
2380
2381out_free_timewait_sock_slab_name:
2382	kfree(prot->twsk_prot->twsk_slab_name);
2383out_free_request_sock_slab:
2384	if (prot->rsk_prot && prot->rsk_prot->slab) {
2385		kmem_cache_destroy(prot->rsk_prot->slab);
2386		prot->rsk_prot->slab = NULL;
2387	}
2388out_free_request_sock_slab_name:
2389	if (prot->rsk_prot)
2390		kfree(prot->rsk_prot->slab_name);
2391out_free_sock_slab:
2392	kmem_cache_destroy(prot->slab);
2393	prot->slab = NULL;
2394out:
2395	return -ENOBUFS;
2396}
2397EXPORT_SYMBOL(proto_register);
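/*
 * Minimal registration sketch ("my_proto" and "struct my_sock" are
 * hypothetical). With alloc_slab != 0 a kmem cache of obj_size bytes
 * is created for the protocol's socks:
 *
 *	static struct proto my_proto = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);	// -ENOBUFS on failure
 *	// ...
 *	proto_unregister(&my_proto);		// on module unload
 */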
2398
2399void proto_unregister(struct proto *prot)
2400{
2401	write_lock(&proto_list_lock);
2402	release_proto_idx(prot);
2403	list_del(&prot->node);
2404	write_unlock(&proto_list_lock);
2405
2406	if (prot->slab != NULL) {
2407		kmem_cache_destroy(prot->slab);
2408		prot->slab = NULL;
2409	}
2410
2411	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2412		kmem_cache_destroy(prot->rsk_prot->slab);
2413		kfree(prot->rsk_prot->slab_name);
2414		prot->rsk_prot->slab = NULL;
2415	}
2416
2417	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2418		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2419		kfree(prot->twsk_prot->twsk_slab_name);
2420		prot->twsk_prot->twsk_slab = NULL;
2421	}
2422}
2423EXPORT_SYMBOL(proto_unregister);
2424
2425#ifdef CONFIG_PROC_FS
2426static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2427	__acquires(proto_list_lock)
2428{
2429	read_lock(&proto_list_lock);
2430	return seq_list_start_head(&proto_list, *pos);
2431}
2432
2433static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2434{
2435	return seq_list_next(v, &proto_list, pos);
2436}
2437
2438static void proto_seq_stop(struct seq_file *seq, void *v)
2439	__releases(proto_list_lock)
2440{
2441	read_unlock(&proto_list_lock);
2442}
2443
2444static char proto_method_implemented(const void *method)
2445{
2446	return method == NULL ? 'n' : 'y';
2447}
2448
2449static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2450{
2451	seq_printf(seq, "%-9s %4u %6d  %6d   %-3s %6u   %-3s  %-10s "
2452			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2453		   proto->name,
2454		   proto->obj_size,
2455		   sock_prot_inuse_get(seq_file_net(seq), proto),
2456		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
2457		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
2458		   proto->max_header,
2459		   proto->slab == NULL ? "no" : "yes",
2460		   module_name(proto->owner),
2461		   proto_method_implemented(proto->close),
2462		   proto_method_implemented(proto->connect),
2463		   proto_method_implemented(proto->disconnect),
2464		   proto_method_implemented(proto->accept),
2465		   proto_method_implemented(proto->ioctl),
2466		   proto_method_implemented(proto->init),
2467		   proto_method_implemented(proto->destroy),
2468		   proto_method_implemented(proto->shutdown),
2469		   proto_method_implemented(proto->setsockopt),
2470		   proto_method_implemented(proto->getsockopt),
2471		   proto_method_implemented(proto->sendmsg),
2472		   proto_method_implemented(proto->recvmsg),
2473		   proto_method_implemented(proto->sendpage),
2474		   proto_method_implemented(proto->bind),
2475		   proto_method_implemented(proto->backlog_rcv),
2476		   proto_method_implemented(proto->hash),
2477		   proto_method_implemented(proto->unhash),
2478		   proto_method_implemented(proto->get_port),
2479		   proto_method_implemented(proto->enter_memory_pressure));
2480}
2481
2482static int proto_seq_show(struct seq_file *seq, void *v)
2483{
2484	if (v == &proto_list)
2485		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2486			   "protocol",
2487			   "size",
2488			   "sockets",
2489			   "memory",
2490			   "press",
2491			   "maxhdr",
2492			   "slab",
2493			   "module",
2494			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2495	else
2496		proto_seq_printf(seq, list_entry(v, struct proto, node));
2497	return 0;
2498}
2499
2500static const struct seq_operations proto_seq_ops = {
2501	.start  = proto_seq_start,
2502	.next   = proto_seq_next,
2503	.stop   = proto_seq_stop,
2504	.show   = proto_seq_show,
2505};
2506
2507static int proto_seq_open(struct inode *inode, struct file *file)
2508{
2509	return seq_open_net(inode, file, &proto_seq_ops,
2510			    sizeof(struct seq_net_private));
2511}
2512
2513static const struct file_operations proto_seq_fops = {
2514	.owner		= THIS_MODULE,
2515	.open		= proto_seq_open,
2516	.read		= seq_read,
2517	.llseek		= seq_lseek,
2518	.release	= seq_release_net,
2519};
2520
2521static __net_init int proto_init_net(struct net *net)
2522{
2523	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
2524		return -ENOMEM;
2525
2526	return 0;
2527}
2528
2529static __net_exit void proto_exit_net(struct net *net)
2530{
2531	proc_net_remove(net, "protocols");
2532}
2533
2535static __net_initdata struct pernet_operations proto_net_ops = {
2536	.init = proto_init_net,
2537	.exit = proto_exit_net,
2538};
2539
2540static int __init proto_init(void)
2541{
2542	return register_pernet_subsys(&proto_net_ops);
2543}
2544
2545subsys_initcall(proto_init);
2546
2547#endif /* CONFIG_PROC_FS */
2548