af_unix.c revision faf02010290e202e275c1bf94ca9dd808bf85607
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing.
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector.
 *		Heiko Eißfeldt	:	Missing verify_area check.
 *		Alan Cox	:	Started POSIXisms.
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting.
 *		Kirk Petersen	:	Made this a module.
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *					Lots of bug fixes.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it avoids hashing a huge
 *					number of socks (done for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina   :	Hash function optimizations.
 *	     Alexey Kuznetsov   :	Full scale SMP. Lots of bugs are introduced 8)
 *	      Malcolm Beattie   :	Set peercred for socketpair.
 *	     Michal Ostrowski   :       Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *	     				the core infrastructure is doing that
 *	     				for all net proto families now (2.5.69+)
 *
 *
 * Known differences from the reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as the high water
 *		mark and a fake inode identifier (nor the BSD first-socket-fstat-twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns a 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this against the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed the server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  starting with 0, so that this name space does not intersect
 *		  with BSD names.
 */

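/*
 * Editor's note, not part of the kernel build: a minimal userspace sketch
 * of binding in the abstract namespace described above. sun_path[0] stays
 * zero and the following bytes form the name; the address length counts
 * sun_family plus the leading zero byte plus the name bytes. The name
 * "demo" and the helper bind_abstract() are arbitrary examples.
 *
 *	#include <stddef.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	static int bind_abstract(int fd)
 *	{
 *		struct sockaddr_un addr;
 *
 *		memset(&addr, 0, sizeof(addr));
 *		addr.sun_family = AF_UNIX;
 *		memcpy(addr.sun_path + 1, "demo", 4);
 *		return bind(fd, (struct sockaddr *)&addr,
 *			    offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 *	}
 */
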
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>

struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;

#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */

/*
 *  SMP locking strategy:
 *    the hash table is protected by the spinlock unix_table_lock;
 *    each socket's state is protected by a separate spin lock.
 */

static inline unsigned int unix_hash_fold(__wsum n)
{
	unsigned int hash = (__force unsigned int)n;

	hash ^= hash>>16;
	hash ^= hash>>8;
	return hash&(UNIX_HASH_SIZE-1);
}

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 *	Check a unix socket name:
 *		- it should not be zero length.
 *	        - if it does not start with a zero byte, it should be
 *		  NUL terminated (an FS object)
 *		- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist.  However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}

static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned int hash)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned int hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock(&unix_table_lock);
	sk_for_each(s, node,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && dentry->d_inode == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}

static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				POLLOUT | POLLWRNORM | POLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows us
 * to do flow control based only on wmem_alloc; second, an sk connected to a
 * peer may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Do not do this
		 * when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}

static int unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct path path;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	path	     = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook	      */
		kfree_skb(skb);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What is the above comment talking about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */

	return 0;
}

static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;
	const struct cred *old_cred = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
	if (old_cred)
		put_cred(old_cred);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t, int);

static void unix_set_peek_off(struct sock *sk, int val)
{
	struct unix_sock *u = unix_sk(sk);

	mutex_lock(&u->readlock);
	sk->sk_peek_off = val;
	mutex_unlock(&u->readlock);
}

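/*
 * Editor's note, not part of the kernel build: a sketch of how the
 * SO_PEEK_OFF machinery above is driven from userspace. Once a peek
 * offset is set, each MSG_PEEK read starts at that offset and advances
 * it by the bytes peeked; a normal read rewinds it by the bytes consumed.
 *
 *	int off = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, sizeof(buf), MSG_PEEK);	(peeks from offset 0, advances it)
 *	recv(fd, buf, sizeof(buf), MSG_PEEK);	(peeks the following bytes)
 */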

static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static struct proto unix_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound, sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not, BSD has AF_UNIX, SOCK_RAW
		 *	though nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
		/* fall through */
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock) ? 0 : -ENOMEM;
}

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sock->sk = NULL;

	return unix_release_sock(sk, 0);
}

static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	mutex_lock(&u->readlock);

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}

static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned int hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = path.dentry->d_inode;
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(&path);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->path.dentry;
			if (dentry)
				touch_atime(&unix_sk(u)->path);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}

static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
{
	struct dentry *dentry;
	struct path path;
	int err = 0;
	/*
	 * Get the parent directory and calculate the hash for the last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
	err = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		return err;

	/*
	 * All right, let's create it.
	 */
	err = security_path_mknod(&path, dentry, mode, 0);
	if (!err) {
		err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
		if (!err) {
			res->mnt = mntget(path.mnt);
			res->dentry = dget(dentry);
		}
	}
	done_path_create(&path, dentry);
	return err;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	char *sun_path = sunaddr->sun_path;
	int err;
	unsigned int hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	mutex_lock(&u->readlock);

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sun_path[0]) {
		struct path path;
		umode_t mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = unix_mknod(sun_path, mode, &path);
		if (err) {
			if (err == -EEXIST)
				err = -EADDRINUSE;
			unix_release_addr(addr);
			goto out_up;
		}
		addr->hash = UNIX_HASH_SIZE;
		hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
		spin_lock(&unix_table_lock);
		u->path = path;
		list = &unix_socket_table[hash];
	} else {
		spin_lock(&unix_table_lock);
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;
}

static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned int hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned int hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all, allocate resources.
	   If we do this after the state is locked,
	   we will have to recheck everything again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   This is a tricky place. We need to grab our state lock and cannot
	   drop the lock on the peer. It is dangerous because deadlock is
	   possible. Connect to self and simultaneous connect
	   attempts are eliminated by checking socket state.
	   other is TCP_LISTEN; if sk is TCP_LISTEN we
	   check this before attempting to grab the lock.

	   Well, and we have to recheck the state after the socket is locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock */
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and send info to the listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other, 0);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}

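/*
 * Editor's note, not part of the kernel build: unix_socketpair() above is
 * what backs the classic userspace idiom; a minimal sketch (buf is an
 * arbitrary local buffer, headers <sys/socket.h> and <unistd.h> assumed):
 *
 *	int sv[2];
 *	char buf[4];
 *
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
 *		write(sv[0], "ping", 4);
 *		read(sv[1], buf, sizeof(buf));
 *	}
 */
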
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If the socket state is TCP_LISTEN it cannot change (for now...),
	 * so no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}


static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}

static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	scm.pid  = UNIXCB(skb).pid;
	scm.cred = UNIXCB(skb).cred;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() has been SMP-safe since last summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

#define MAX_RECURSION_LEVEL 4

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;
	unsigned char max_level = 0;
	int unix_sock_count = 0;

	for (i = scm->fp->count - 1; i >= 0; i--) {
		struct sock *sk = unix_get_socket(scm->fp->fp[i]);

		if (sk) {
			unix_sock_count++;
			max_level = max(max_level,
					unix_sk(sk)->recursion_level);
		}
	}
	if (unlikely(max_level > MAX_RECURSION_LEVEL))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection.  Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	if (unix_sock_count) {
		for (i = scm->fp->count - 1; i >= 0; i--)
			unix_inflight(scm->fp->fp[i]);
	}
	return max_level;
}

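/*
 * Editor's note, not part of the kernel build: the fd passing that
 * unix_attach_fds()/unix_detach_fds() account for is driven from
 * userspace with an SCM_RIGHTS control message. A minimal sketch for one
 * descriptor; sock_fd and fd_to_pass are placeholder variable names:
 *
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = "x", .iov_len = 1 };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type  = SCM_RIGHTS;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));
 *	sendmsg(sock_fd, &msg, 0);
 */
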
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid  = get_pid(scm->pid);
	if (scm->cred)
		UNIXCB(skb).cred = get_cred(scm->cred);
	UNIXCB(skb).fp = NULL;
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}

/*
 * Some apps rely on write() giving SCM_CREDENTIALS
 * We include credentials if source or destination socket
 * asserted SOCK_PASSCRED.
 */
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
			    const struct sock *other)
{
	if (UNIXCB(skb).cred)
		return;
	if (test_bit(SOCK_PASSCRED, &sock->flags) ||
	    !other->sk_socket ||
	    test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
		UNIXCB(skb).pid  = get_pid(task_tgid(current));
		UNIXCB(skb).cred = get_current_cred();
	}
}

/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned int hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie tmp_scm;
	int max_level;
	int data_len = 0;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	if (len > SKB_MAX_ALLOC)
		data_len = min_t(size_t,
				 len - SKB_MAX_ALLOC,
				 MAX_SKB_FRAGS * PAGE_SIZE);

	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
				   msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(siocb->scm, skb, true);
	if (err < 0)
		goto out_free;
	max_level = err + 1;
	unix_get_secdata(siocb->scm, skb);

	skb_put(skb, len - data_len);
	skb->data_len = data_len;
	skb->len = len;
	err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */
		err = len;
		goto out_free;
	}

	unix_state_lock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should a
		 *	datagram error return?
		 */
		unix_state_unlock(other);
		sock_put(other);

		err = 0;
		unix_state_lock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	if (unix_peer(other) != sk && unix_recvq_full(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	maybe_add_creds(skb, sock, other);
	skb_queue_tail(&other->sk_receive_queue, skb);
	if (max_level > unix_sk(other)->recursion_level)
		unix_sk(other)->recursion_level = max_level;
	unix_state_unlock(other);
	other->sk_data_ready(other, len);
	sock_put(other);
	scm_destroy(siocb->scm);
	return len;

out_unlock:
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(siocb->scm);
	return err;
}


static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie tmp_scm;
	bool fds_sent = false;
	int max_level;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		/*
		 *	Optimisation for the fact that under 0.01% of X
		 *	messages typically need breaking up.
		 */

		size = len-sent;

		/* Keep two messages in the pipe so it schedules better */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		/*
		 *	Grab a buffer
		 */

		skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
					  &err);

		if (skb == NULL)
			goto out_err;

		/*
		 *	If you pass two values to sock_alloc_send_skb
		 *	it tries to grab the large buffer with GFP_NOFS
		 *	(which can fail easily), and if that fails it grabs
		 *	the fallback-size buffer, which is under a page and
		 *	will succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));


		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		max_level = err + 1;
		fds_sent = true;

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		maybe_add_creds(skb, sock, other);
		skb_queue_tail(&other->sk_receive_queue, skb);
		if (max_level > unix_sk(other)->recursion_level)
			unix_sk(other)->recursion_level = max_level;
		unix_state_unlock(other);
		other->sk_data_ready(other, size);
		sent += size;
	}

	scm_destroy(siocb->scm);
	siocb->scm = NULL;

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(siocb->scm);
	siocb->scm = NULL;
	return sent ? : err;
}

static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
				  struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(kiocb, sock, msg, len);
}

static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
}

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	msg->msg_namelen = 0;
	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}

static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;
	int peeked, skip;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	err = mutex_lock_interruptible(&u->readlock);
	if (err) {
		err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
		goto out;
	}

	skip = sk_peek_offset(sk, flags);

	skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	wake_up_interruptible_sync_poll(&u->peer_wait,
					POLLOUT | POLLWRNORM | POLLWRBAND);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len - skip)
		size = skb->len - skip;
	else if (size < skb->len - skip)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);

		sk_peek_offset_bwd(sk, skb->len);
	} else {
		/* It is questionable: on PEEK we could:
		   - not return fds - good, but too simple 8)
		   - return fds, and not return them on read (the old
		     strategy, apparently wrong)
		   - clone fds (I chose this for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly, however!

		*/

		sk_peek_offset_fwd(sk, size);

		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = (flags & MSG_TRUNC) ? skb->len - skip : size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}

/*
 *	Sleep until data has arrived. But check for races...
 */

static long unix_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_lock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}



static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;
	int skip;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	msg->msg_namelen = 0;

	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_toiovec
	 */
1924
1925	if (!siocb->scm) {
1926		siocb->scm = &tmp_scm;
1927		memset(&tmp_scm, 0, sizeof(tmp_scm));
1928	}
1929
1930	err = mutex_lock_interruptible(&u->readlock);
1931	if (err) {
1932		err = sock_intr_errno(timeo);
1933		goto out;
1934	}
1935
1936	skip = sk_peek_offset(sk, flags);
1937
1938	do {
1939		int chunk;
1940		struct sk_buff *skb;
1941
1942		unix_state_lock(sk);
1943		skb = skb_peek(&sk->sk_receive_queue);
1944again:
1945		if (skb == NULL) {
1946			unix_sk(sk)->recursion_level = 0;
1947			if (copied >= target)
1948				goto unlock;
1949
1950			/*
1951			 *	POSIX 1003.1g mandates this order.
1952			 */
1953
1954			err = sock_error(sk);
1955			if (err)
1956				goto unlock;
1957			if (sk->sk_shutdown & RCV_SHUTDOWN)
1958				goto unlock;
1959
1960			unix_state_unlock(sk);
1961			err = -EAGAIN;
1962			if (!timeo)
1963				break;
1964			mutex_unlock(&u->readlock);
1965
1966			timeo = unix_stream_data_wait(sk, timeo);
1967
1968			if (signal_pending(current)
1969			    ||  mutex_lock_interruptible(&u->readlock)) {
1970				err = sock_intr_errno(timeo);
1971				goto out;
1972			}
1973
1974			continue;
1975 unlock:
1976			unix_state_unlock(sk);
1977			break;
1978		}
1979
1980		if (skip >= skb->len) {
1981			skip -= skb->len;
1982			skb = skb_peek_next(skb, &sk->sk_receive_queue);
1983			goto again;
1984		}
1985
1986		unix_state_unlock(sk);
1987
1988		if (check_creds) {
1989			/* Never glue messages from different writers */
1990			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
1991			    (UNIXCB(skb).cred != siocb->scm->cred))
1992				break;
1993		} else {
1994			/* Copy credentials */
1995			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1996			check_creds = 1;
1997		}
1998
1999		/* Copy address just once */
2000		if (sunaddr) {
2001			unix_copy_addr(msg, skb->sk);
2002			sunaddr = NULL;
2003		}
2004
2005		chunk = min_t(unsigned int, skb->len - skip, size);
2006		if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) {
2007			if (copied == 0)
2008				copied = -EFAULT;
2009			break;
2010		}
2011		copied += chunk;
2012		size -= chunk;
2013
2014		/* Mark read part of skb as used */
2015		if (!(flags & MSG_PEEK)) {
2016			skb_pull(skb, chunk);
2017
2018			sk_peek_offset_bwd(sk, chunk);
2019
2020			if (UNIXCB(skb).fp)
2021				unix_detach_fds(siocb->scm, skb);
2022
2023			if (skb->len)
2024				break;
2025
2026			skb_unlink(skb, &sk->sk_receive_queue);
2027			consume_skb(skb);
2028
2029			if (siocb->scm->fp)
2030				break;
2031		} else {
2032			/* It is questionable, see note in unix_dgram_recvmsg.
2033			 */
2034			if (UNIXCB(skb).fp)
2035				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
2036
2037			sk_peek_offset_fwd(sk, chunk);
2038
2039			break;
2040		}
2041	} while (size);
2042
2043	mutex_unlock(&u->readlock);
2044	scm_recv(sock, msg, siocb->scm, flags);
2045out:
2046	return copied ? : err;
2047}
2048
2049static int unix_shutdown(struct socket *sock, int mode)
2050{
2051	struct sock *sk = sock->sk;
2052	struct sock *other;
2053
2054	mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
2055
2056	if (!mode)
2057		return 0;
2058
2059	unix_state_lock(sk);
2060	sk->sk_shutdown |= mode;
2061	other = unix_peer(sk);
2062	if (other)
2063		sock_hold(other);
2064	unix_state_unlock(sk);
2065	sk->sk_state_change(sk);
2066
2067	if (other &&
2068		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2069
2070		int peer_mode = 0;
2071
2072		if (mode&RCV_SHUTDOWN)
2073			peer_mode |= SEND_SHUTDOWN;
2074		if (mode&SEND_SHUTDOWN)
2075			peer_mode |= RCV_SHUTDOWN;
2076		unix_state_lock(other);
2077		other->sk_shutdown |= peer_mode;
2078		unix_state_unlock(other);
2079		other->sk_state_change(other);
2080		if (peer_mode == SHUTDOWN_MASK)
2081			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2082		else if (peer_mode & RCV_SHUTDOWN)
2083			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2084	}
2085	if (other)
2086		sock_put(other);
2087
2088	return 0;
2089}
2090
long unix_inq_len(struct sock *sk)
{
	struct sk_buff *skb;
	long amount = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	spin_lock(&sk->sk_receive_queue.lock);
	if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
		skb_queue_walk(&sk->sk_receive_queue, skb)
			amount += skb->len;
	} else {
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);

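/*
 * Only SIOCINQ and SIOCOUTQ are handled here; anything else falls
 * through to -ENOIOCTLCMD.  Illustrative userspace usage (a sketch,
 * assuming a connected AF_UNIX descriptor fd):
 *
 *	int pending;
 *
 *	if (ioctl(fd, SIOCINQ, &pending) == 0)
 *		printf("%d bytes queued for reading\n", pending);
 */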
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = unix_outq_len(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		amount = unix_inq_len(sk);
		if (amount < 0)
			err = amount;
		else
			err = put_user(amount, (int __user *)arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}

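/*
 * Readability is derived from the receive queue (or a pending
 * RCV_SHUTDOWN), writability from unix_writable(), and POLLHUP from a
 * fully shut down or closed socket.
 */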
static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * We set writable also when the other side has shut down the
	 * connection.  This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

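/*
 * Datagram sockets additionally throttle writers: when connected to a
 * peer that is not connected back to us, poll on the peer's peer_wait
 * queue as well and report the socket as not writable while the
 * peer's receive queue is full.
 */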
static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (POLLWRBAND | POLLWRNORM | POLLOUT)))
		return mask;

	writable = unix_writable(sk);
	other = unix_peer_get(sk);
	if (other) {
		if (unix_peer(other) != sk) {
			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
			if (unix_recvq_full(other))
				writable = 0;
		}
		sock_put(other);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

#ifdef CONFIG_PROC_FS
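/*
 * Walk every hash chain, including the extra chain at index
 * UNIX_HASH_SIZE which holds sockets that are not bound to an address.
 */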
static struct sock *first_unix_socket(int *i)
{
	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

static struct sock *next_unix_socket(int *i, struct sock *s)
{
	struct sock *next = sk_next(s);

	/* More in this chain? */
	if (next)
		return next;
	/* Look for next non-empty chain. */
	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

struct unix_iter_state {
	struct seq_net_private p;
	int i;
};

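/*
 * seq_file iteration: positions count only sockets belonging to the
 * reader's network namespace, and position 0 is the header line
 * (SEQ_START_TOKEN), hence the *pos - 1 in unix_seq_start().
 */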
static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
{
	struct unix_iter_state *iter = seq->private;
	loff_t off = 0;
	struct sock *s;

	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
		if (sock_net(s) != seq_file_net(seq))
			continue;
		if (off == pos)
			return s;
		++off;
	}
	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct unix_iter_state *iter = seq->private;
	struct sock *sk = v;

	++*pos;
	if (v == SEQ_START_TOKEN)
		sk = first_unix_socket(&iter->i);
	else
		sk = next_unix_socket(&iter->i, sk);
	while (sk && (sock_net(sk) != seq_file_net(seq)))
		sk = next_unix_socket(&iter->i, sk);
	return sk;
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

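/*
 * One line per socket.  An illustrative (made up) entry for a bound,
 * listening stream socket:
 *
 *	ffff88003a123400: 00000002 00000000 00010000 0001 01 17933 /tmp/demo.sock
 *
 * i.e. kernel address, refcount, protocol (always 0), flags, type,
 * socket state and inode, followed by the bound path, if any, with a
 * leading '@' for abstract names.
 */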
static int unix_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);

		unix_state_lock(s);
		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;

			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}

static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct unix_iter_state));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};

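/*
 * Per network namespace setup: each namespace gets its own
 * sysctl_max_dgram_qlen default and its own /proc/net/unix listing,
 * torn down again when the namespace exits.
 */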
static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	proc_net_remove(net, "unix");
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

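/*
 * unix_skb_parms lives in the socket buffer control block, so make
 * sure it can never outgrow skb->cb before registering the protocol.
 */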
static int __init af_unix_init(void)
{
	int rc = -1;
	struct sk_buff *dummy_skb;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/*
 * Earlier than device_initcall() so that other drivers invoking
 * request_module() don't end up in a loop when modprobe tries
 * to use a UNIX socket.  But later than subsys_initcall() because
 * we depend on stuff initialised there.
 */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);