af_unix.c revision a8104a9fcdeb82e22d7acd55fca20746581067d3
1/*
2 * NET4:	Implementation of BSD Unix domain sockets.
3 *
4 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 *
6 *		This program is free software; you can redistribute it and/or
7 *		modify it under the terms of the GNU General Public License
8 *		as published by the Free Software Foundation; either version
9 *		2 of the License, or (at your option) any later version.
10 *
11 * Fixes:
12 *		Linus Torvalds	:	Assorted bug cures.
13 *		Niibe Yutaka	:	async I/O support.
14 *		Carsten Paeth	:	PF_UNIX check, address fixes.
15 *		Alan Cox	:	Limit size of allocated blocks.
16 *		Alan Cox	:	Fixed the stupid socketpair bug.
17 *		Alan Cox	:	BSD compatibility fine tuning.
18 *		Alan Cox	:	Fixed a bug in connect when interrupted.
19 *		Alan Cox	:	Sorted out a proper draft version of
20 *					file descriptor passing hacked up from
21 *					Mike Shaver's work.
22 *		Marty Leisner	:	Fixes to fd passing
23 *		Nick Nevin	:	recvmsg bugfix.
24 *		Alan Cox	:	Started proper garbage collector
25 *		Heiko Eißfeldt	:	Missing verify_area check
26 *		Alan Cox	:	Started POSIXisms
27 *		Andreas Schwab	:	Replace inode by dentry for proper
28 *					reference counting
29 *		Kirk Petersen	:	Made this a module
30 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
31 *					Lots of bug fixes.
32 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
33 *					by the above two patches.
34 *	     Andrea Arcangeli	:	If possible we block in connect(2)
35 *					if the max backlog of the listen socket
36 *					has been reached. This won't break
37 *					old apps and it avoids a huge amount
38 *					of hashed socks (for unix_gc()
39 *					performance reasons).
40 *					Security fix that limits the max
41 *					number of socks to 2*max_files and
42 *					the number of skbs queueable in the
43 *					dgram receiver.
44 *		Artur Skawina   :	Hash function optimizations
45 *	     Alexey Kuznetsov   :	Full scale SMP. Lots of bugs are introduced 8)
46 *	      Malcolm Beattie   :	Set peercred for socketpair
47 *	     Michal Ostrowski   :       Module initialization cleanup.
48 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
49 *	     				the core infrastructure is doing that
50 *	     				for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 *	[TO FIX]
56 *	ECONNREFUSED is not returned from one end of a connected() socket to the
57 *		other the moment one end closes.
58 *	fstat() doesn't return st_dev=0, nor give the blksize as high water mark
59 *		and a fake inode identifier (nor does it have the BSD first-socket-fstat-twice bug).
60 *	[NOT TO FIX]
61 *	accept() returns a path name even if the connecting socket has closed
62 *		in the meantime (BSD loses the path and gives up).
63 *	accept() returns 0 length path for an unbound connector. BSD returns 16
64 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 *	BSD af_unix apparently has connect forgetting to block properly.
67 *		(need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 *	Bug fixes and improvements.
71 *		- client shutdown killed server socket.
72 *		- removed all useless cli/sti pairs.
73 *
74 *	Semantic changes/extensions.
75 *		- generic control message passing.
76 *		- SCM_CREDENTIALS control message.
77 *		- "Abstract" (not FS based) socket bindings.
78 *		  Abstract names are sequences of bytes (not zero terminated)
79 *		  starting with a zero byte, so that this name space does not
80 *		  intersect with BSD names.
81 */
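/*
 * Illustrative userspace sketch of the abstract namespace described
 * above (a hedged example; "demo" is an arbitrary name and error
 * handling is elided): the leading zero byte in sun_path selects an
 * abstract name, and the address length delimits the name since it is
 * not zero terminated.
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *	a.sun_path[0] = '\0';
 *	memcpy(a.sun_path + 1, "demo", 4);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 */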
82
83#include <linux/module.h>
84#include <linux/kernel.h>
85#include <linux/signal.h>
86#include <linux/sched.h>
87#include <linux/errno.h>
88#include <linux/string.h>
89#include <linux/stat.h>
90#include <linux/dcache.h>
91#include <linux/namei.h>
92#include <linux/socket.h>
93#include <linux/un.h>
94#include <linux/fcntl.h>
95#include <linux/termios.h>
96#include <linux/sockios.h>
97#include <linux/net.h>
98#include <linux/in.h>
99#include <linux/fs.h>
100#include <linux/slab.h>
101#include <asm/uaccess.h>
102#include <linux/skbuff.h>
103#include <linux/netdevice.h>
104#include <net/net_namespace.h>
105#include <net/sock.h>
106#include <net/tcp_states.h>
107#include <net/af_unix.h>
108#include <linux/proc_fs.h>
109#include <linux/seq_file.h>
110#include <net/scm.h>
111#include <linux/init.h>
112#include <linux/poll.h>
113#include <linux/rtnetlink.h>
114#include <linux/mount.h>
115#include <net/checksum.h>
116#include <linux/security.h>
117
118struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
119EXPORT_SYMBOL_GPL(unix_socket_table);
120DEFINE_SPINLOCK(unix_table_lock);
121EXPORT_SYMBOL_GPL(unix_table_lock);
122static atomic_long_t unix_nr_socks;
123
124#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])
125
126#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
127
128#ifdef CONFIG_SECURITY_NETWORK
129static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
130{
131	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
132}
133
134static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
135{
136	scm->secid = *UNIXSID(skb);
137}
138#else
139static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
140{ }
141
142static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
143{ }
144#endif /* CONFIG_SECURITY_NETWORK */
145
146/*
147 *  SMP locking strategy:
148 *    the hash table is protected by the spinlock unix_table_lock
149 *    each socket's state is protected by a separate spin lock.
150 */
151
152static inline unsigned int unix_hash_fold(__wsum n)
153{
154	unsigned int hash = (__force unsigned int)n;
155
156	hash ^= hash>>16;
157	hash ^= hash>>8;
158	return hash&(UNIX_HASH_SIZE-1);
159}
160
161#define unix_peer(sk) (unix_sk(sk)->peer)
162
163static inline int unix_our_peer(struct sock *sk, struct sock *osk)
164{
165	return unix_peer(osk) == sk;
166}
167
168static inline int unix_may_send(struct sock *sk, struct sock *osk)
169{
170	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
171}
172
173static inline int unix_recvq_full(struct sock const *sk)
174{
175	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
176}
177
178struct sock *unix_peer_get(struct sock *s)
179{
180	struct sock *peer;
181
182	unix_state_lock(s);
183	peer = unix_peer(s);
184	if (peer)
185		sock_hold(peer);
186	unix_state_unlock(s);
187	return peer;
188}
189EXPORT_SYMBOL_GPL(unix_peer_get);
190
191static inline void unix_release_addr(struct unix_address *addr)
192{
193	if (atomic_dec_and_test(&addr->refcnt))
194		kfree(addr);
195}
196
197/*
198 *	Check unix socket name:
199 *		- should not be zero length.
200 *		- if it does not start with zero, it should be NUL terminated (FS object)
201 *		- if it starts with zero, it is an abstract name.
202 */
203
204static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
205{
206	if (len <= sizeof(short) || len > sizeof(*sunaddr))
207		return -EINVAL;
208	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
209		return -EINVAL;
210	if (sunaddr->sun_path[0]) {
211		/*
212		 * This may look like an off by one error but it is a bit more
213		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
214		 * sun_path[108] doesn't as such exist.  However in kernel space
215		 * we are guaranteed that it is a valid memory location in our
216		 * kernel address buffer.
217		 */
218		((char *)sunaddr)[len] = 0;
219		len = strlen(sunaddr->sun_path)+1+sizeof(short);
220		return len;
221	}
222
223	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
224	return len;
225}
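/*
 * Worked example of the two cases above (an assumed illustration): for
 * a filesystem name, a caller binding "/tmp/x" passes sun_family plus
 * the NUL-terminated path, and unix_mkname() recomputes the length as
 * strlen("/tmp/x") + 1 + sizeof(short). For an abstract name such as
 * "\0demo", the caller-supplied len itself delimits the name and only
 * the checksum fold is stored into *hashp.
 */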
226
227static void __unix_remove_socket(struct sock *sk)
228{
229	sk_del_node_init(sk);
230}
231
232static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
233{
234	WARN_ON(!sk_unhashed(sk));
235	sk_add_node(sk, list);
236}
237
238static inline void unix_remove_socket(struct sock *sk)
239{
240	spin_lock(&unix_table_lock);
241	__unix_remove_socket(sk);
242	spin_unlock(&unix_table_lock);
243}
244
245static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
246{
247	spin_lock(&unix_table_lock);
248	__unix_insert_socket(list, sk);
249	spin_unlock(&unix_table_lock);
250}
251
252static struct sock *__unix_find_socket_byname(struct net *net,
253					      struct sockaddr_un *sunname,
254					      int len, int type, unsigned int hash)
255{
256	struct sock *s;
257	struct hlist_node *node;
258
259	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
260		struct unix_sock *u = unix_sk(s);
261
262		if (!net_eq(sock_net(s), net))
263			continue;
264
265		if (u->addr->len == len &&
266		    !memcmp(u->addr->name, sunname, len))
267			goto found;
268	}
269	s = NULL;
270found:
271	return s;
272}
273
274static inline struct sock *unix_find_socket_byname(struct net *net,
275						   struct sockaddr_un *sunname,
276						   int len, int type,
277						   unsigned int hash)
278{
279	struct sock *s;
280
281	spin_lock(&unix_table_lock);
282	s = __unix_find_socket_byname(net, sunname, len, type, hash);
283	if (s)
284		sock_hold(s);
285	spin_unlock(&unix_table_lock);
286	return s;
287}
288
289static struct sock *unix_find_socket_byinode(struct inode *i)
290{
291	struct sock *s;
292	struct hlist_node *node;
293
294	spin_lock(&unix_table_lock);
295	sk_for_each(s, node,
296		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
297		struct dentry *dentry = unix_sk(s)->path.dentry;
298
299		if (dentry && dentry->d_inode == i) {
300			sock_hold(s);
301			goto found;
302		}
303	}
304	s = NULL;
305found:
306	spin_unlock(&unix_table_lock);
307	return s;
308}
309
310static inline int unix_writable(struct sock *sk)
311{
312	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
313}
314
315static void unix_write_space(struct sock *sk)
316{
317	struct socket_wq *wq;
318
319	rcu_read_lock();
320	if (unix_writable(sk)) {
321		wq = rcu_dereference(sk->sk_wq);
322		if (wq_has_sleeper(wq))
323			wake_up_interruptible_sync_poll(&wq->wait,
324				POLLOUT | POLLWRNORM | POLLWRBAND);
325		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
326	}
327	rcu_read_unlock();
328}
329
330/* When a dgram socket disconnects (or changes its peer), we clear its receive
331 * queue of packets that arrived from the previous peer. First, this allows
332 * flow control based only on wmem_alloc; second, a sk connected to a peer
333 * may receive messages only from that peer. */
334static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
335{
336	if (!skb_queue_empty(&sk->sk_receive_queue)) {
337		skb_queue_purge(&sk->sk_receive_queue);
338		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
339
340		/* If one link of a bidirectional dgram pipe is disconnected,
341		 * we signal the error. Messages are lost. Do not do this
342		 * when the peer was not connected to us.
343		 */
344		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
345			other->sk_err = ECONNRESET;
346			other->sk_error_report(other);
347		}
348	}
349}
350
351static void unix_sock_destructor(struct sock *sk)
352{
353	struct unix_sock *u = unix_sk(sk);
354
355	skb_queue_purge(&sk->sk_receive_queue);
356
357	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
358	WARN_ON(!sk_unhashed(sk));
359	WARN_ON(sk->sk_socket);
360	if (!sock_flag(sk, SOCK_DEAD)) {
361		printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
362		return;
363	}
364
365	if (u->addr)
366		unix_release_addr(u->addr);
367
368	atomic_long_dec(&unix_nr_socks);
369	local_bh_disable();
370	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
371	local_bh_enable();
372#ifdef UNIX_REFCNT_DEBUG
373	printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
374		atomic_long_read(&unix_nr_socks));
375#endif
376}
377
378static int unix_release_sock(struct sock *sk, int embrion)
379{
380	struct unix_sock *u = unix_sk(sk);
381	struct path path;
382	struct sock *skpair;
383	struct sk_buff *skb;
384	int state;
385
386	unix_remove_socket(sk);
387
388	/* Clear state */
389	unix_state_lock(sk);
390	sock_orphan(sk);
391	sk->sk_shutdown = SHUTDOWN_MASK;
392	path	     = u->path;
393	u->path.dentry = NULL;
394	u->path.mnt = NULL;
395	state = sk->sk_state;
396	sk->sk_state = TCP_CLOSE;
397	unix_state_unlock(sk);
398
399	wake_up_interruptible_all(&u->peer_wait);
400
401	skpair = unix_peer(sk);
402
403	if (skpair != NULL) {
404		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
405			unix_state_lock(skpair);
406			/* No more writes */
407			skpair->sk_shutdown = SHUTDOWN_MASK;
408			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
409				skpair->sk_err = ECONNRESET;
410			unix_state_unlock(skpair);
411			skpair->sk_state_change(skpair);
412			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
413		}
414		sock_put(skpair); /* It may now die */
415		unix_peer(sk) = NULL;
416	}
417
418	/* Try to flush out this socket. Throw out buffers at least */
419
420	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
421		if (state == TCP_LISTEN)
422			unix_release_sock(skb->sk, 1);
423		/* passed fds are erased in the kfree_skb hook	      */
424		kfree_skb(skb);
425	}
426
427	if (path.dentry)
428		path_put(&path);
429
430	sock_put(sk);
431
432	/* ---- Socket is dead now and most probably destroyed ---- */
433
434	/*
435	 * Fixme: BSD difference: In BSD all sockets connected to us get
436	 *	  ECONNRESET and we die on the spot. In Linux we behave
437	 *	  like files and pipes do and wait for the last
438	 *	  dereference.
439	 *
440	 * Can't we simply set sock->err?
441	 *
442	 *	  What does the above comment talk about? --ANK(980817)
443	 */
444
445	if (unix_tot_inflight)
446		unix_gc();		/* Garbage collect fds */
447
448	return 0;
449}
450
451static void init_peercred(struct sock *sk)
452{
453	put_pid(sk->sk_peer_pid);
454	if (sk->sk_peer_cred)
455		put_cred(sk->sk_peer_cred);
456	sk->sk_peer_pid  = get_pid(task_tgid(current));
457	sk->sk_peer_cred = get_current_cred();
458}
459
460static void copy_peercred(struct sock *sk, struct sock *peersk)
461{
462	put_pid(sk->sk_peer_pid);
463	if (sk->sk_peer_cred)
464		put_cred(sk->sk_peer_cred);
465	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
466	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
467}
468
469static int unix_listen(struct socket *sock, int backlog)
470{
471	int err;
472	struct sock *sk = sock->sk;
473	struct unix_sock *u = unix_sk(sk);
474	struct pid *old_pid = NULL;
475	const struct cred *old_cred = NULL;
476
477	err = -EOPNOTSUPP;
478	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
479		goto out;	/* Only stream/seqpacket sockets accept */
480	err = -EINVAL;
481	if (!u->addr)
482		goto out;	/* No listens on an unbound socket */
483	unix_state_lock(sk);
484	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
485		goto out_unlock;
486	if (backlog > sk->sk_max_ack_backlog)
487		wake_up_interruptible_all(&u->peer_wait);
488	sk->sk_max_ack_backlog	= backlog;
489	sk->sk_state		= TCP_LISTEN;
490	/* set credentials so connect can copy them */
491	init_peercred(sk);
492	err = 0;
493
494out_unlock:
495	unix_state_unlock(sk);
496	put_pid(old_pid);
497	if (old_cred)
498		put_cred(old_cred);
499out:
500	return err;
501}
502
503static int unix_release(struct socket *);
504static int unix_bind(struct socket *, struct sockaddr *, int);
505static int unix_stream_connect(struct socket *, struct sockaddr *,
506			       int addr_len, int flags);
507static int unix_socketpair(struct socket *, struct socket *);
508static int unix_accept(struct socket *, struct socket *, int);
509static int unix_getname(struct socket *, struct sockaddr *, int *, int);
510static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
511static unsigned int unix_dgram_poll(struct file *, struct socket *,
512				    poll_table *);
513static int unix_ioctl(struct socket *, unsigned int, unsigned long);
514static int unix_shutdown(struct socket *, int);
515static int unix_stream_sendmsg(struct kiocb *, struct socket *,
516			       struct msghdr *, size_t);
517static int unix_stream_recvmsg(struct kiocb *, struct socket *,
518			       struct msghdr *, size_t, int);
519static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
520			      struct msghdr *, size_t);
521static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
522			      struct msghdr *, size_t, int);
523static int unix_dgram_connect(struct socket *, struct sockaddr *,
524			      int, int);
525static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
526				  struct msghdr *, size_t);
527static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
528				  struct msghdr *, size_t, int);
529
530static void unix_set_peek_off(struct sock *sk, int val)
531{
532	struct unix_sock *u = unix_sk(sk);
533
534	mutex_lock(&u->readlock);
535	sk->sk_peek_off = val;
536	mutex_unlock(&u->readlock);
537}
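/*
 * Userspace sketch of the peek offset managed above (illustrative,
 * error handling elided): once SO_PEEK_OFF is set to a non-negative
 * value, successive MSG_PEEK reads walk forward through the queued
 * data instead of re-reading from the head.
 *
 *	int off = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, 16, MSG_PEEK);	peeks bytes 0..15
 *	recv(fd, buf, 16, MSG_PEEK);	peeks bytes 16..31, data stays queued
 */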
538
539
540static const struct proto_ops unix_stream_ops = {
541	.family =	PF_UNIX,
542	.owner =	THIS_MODULE,
543	.release =	unix_release,
544	.bind =		unix_bind,
545	.connect =	unix_stream_connect,
546	.socketpair =	unix_socketpair,
547	.accept =	unix_accept,
548	.getname =	unix_getname,
549	.poll =		unix_poll,
550	.ioctl =	unix_ioctl,
551	.listen =	unix_listen,
552	.shutdown =	unix_shutdown,
553	.setsockopt =	sock_no_setsockopt,
554	.getsockopt =	sock_no_getsockopt,
555	.sendmsg =	unix_stream_sendmsg,
556	.recvmsg =	unix_stream_recvmsg,
557	.mmap =		sock_no_mmap,
558	.sendpage =	sock_no_sendpage,
559	.set_peek_off =	unix_set_peek_off,
560};
561
562static const struct proto_ops unix_dgram_ops = {
563	.family =	PF_UNIX,
564	.owner =	THIS_MODULE,
565	.release =	unix_release,
566	.bind =		unix_bind,
567	.connect =	unix_dgram_connect,
568	.socketpair =	unix_socketpair,
569	.accept =	sock_no_accept,
570	.getname =	unix_getname,
571	.poll =		unix_dgram_poll,
572	.ioctl =	unix_ioctl,
573	.listen =	sock_no_listen,
574	.shutdown =	unix_shutdown,
575	.setsockopt =	sock_no_setsockopt,
576	.getsockopt =	sock_no_getsockopt,
577	.sendmsg =	unix_dgram_sendmsg,
578	.recvmsg =	unix_dgram_recvmsg,
579	.mmap =		sock_no_mmap,
580	.sendpage =	sock_no_sendpage,
581	.set_peek_off =	unix_set_peek_off,
582};
583
584static const struct proto_ops unix_seqpacket_ops = {
585	.family =	PF_UNIX,
586	.owner =	THIS_MODULE,
587	.release =	unix_release,
588	.bind =		unix_bind,
589	.connect =	unix_stream_connect,
590	.socketpair =	unix_socketpair,
591	.accept =	unix_accept,
592	.getname =	unix_getname,
593	.poll =		unix_dgram_poll,
594	.ioctl =	unix_ioctl,
595	.listen =	unix_listen,
596	.shutdown =	unix_shutdown,
597	.setsockopt =	sock_no_setsockopt,
598	.getsockopt =	sock_no_getsockopt,
599	.sendmsg =	unix_seqpacket_sendmsg,
600	.recvmsg =	unix_seqpacket_recvmsg,
601	.mmap =		sock_no_mmap,
602	.sendpage =	sock_no_sendpage,
603	.set_peek_off =	unix_set_peek_off,
604};
605
606static struct proto unix_proto = {
607	.name			= "UNIX",
608	.owner			= THIS_MODULE,
609	.obj_size		= sizeof(struct unix_sock),
610};
611
612/*
613 * AF_UNIX sockets do not interact with hardware, hence they
614 * don't trigger interrupts - so it's safe for them to have
615 * bh-unsafe locking for their sk_receive_queue.lock. Split off
616 * this special lock-class by reinitializing the spinlock key:
617 */
618static struct lock_class_key af_unix_sk_receive_queue_lock_key;
619
620static struct sock *unix_create1(struct net *net, struct socket *sock)
621{
622	struct sock *sk = NULL;
623	struct unix_sock *u;
624
625	atomic_long_inc(&unix_nr_socks);
626	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
627		goto out;
628
629	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
630	if (!sk)
631		goto out;
632
633	sock_init_data(sock, sk);
634	lockdep_set_class(&sk->sk_receive_queue.lock,
635				&af_unix_sk_receive_queue_lock_key);
636
637	sk->sk_write_space	= unix_write_space;
638	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
639	sk->sk_destruct		= unix_sock_destructor;
640	u	  = unix_sk(sk);
641	u->path.dentry = NULL;
642	u->path.mnt = NULL;
643	spin_lock_init(&u->lock);
644	atomic_long_set(&u->inflight, 0);
645	INIT_LIST_HEAD(&u->link);
646	mutex_init(&u->readlock); /* single task reading lock */
647	init_waitqueue_head(&u->peer_wait);
648	unix_insert_socket(unix_sockets_unbound, sk);
649out:
650	if (sk == NULL)
651		atomic_long_dec(&unix_nr_socks);
652	else {
653		local_bh_disable();
654		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
655		local_bh_enable();
656	}
657	return sk;
658}
659
660static int unix_create(struct net *net, struct socket *sock, int protocol,
661		       int kern)
662{
663	if (protocol && protocol != PF_UNIX)
664		return -EPROTONOSUPPORT;
665
666	sock->state = SS_UNCONNECTED;
667
668	switch (sock->type) {
669	case SOCK_STREAM:
670		sock->ops = &unix_stream_ops;
671		break;
672		/*
673		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
674		 *	nothing uses it.
675		 */
676	case SOCK_RAW:
677		sock->type = SOCK_DGRAM;
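		/* fall through */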
678	case SOCK_DGRAM:
679		sock->ops = &unix_dgram_ops;
680		break;
681	case SOCK_SEQPACKET:
682		sock->ops = &unix_seqpacket_ops;
683		break;
684	default:
685		return -ESOCKTNOSUPPORT;
686	}
687
688	return unix_create1(net, sock) ? 0 : -ENOMEM;
689}
690
691static int unix_release(struct socket *sock)
692{
693	struct sock *sk = sock->sk;
694
695	if (!sk)
696		return 0;
697
698	sock->sk = NULL;
699
700	return unix_release_sock(sk, 0);
701}
702
703static int unix_autobind(struct socket *sock)
704{
705	struct sock *sk = sock->sk;
706	struct net *net = sock_net(sk);
707	struct unix_sock *u = unix_sk(sk);
708	static u32 ordernum = 1;
709	struct unix_address *addr;
710	int err;
711	unsigned int retries = 0;
712
713	mutex_lock(&u->readlock);
714
715	err = 0;
716	if (u->addr)
717		goto out;
718
719	err = -ENOMEM;
720	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
721	if (!addr)
722		goto out;
723
724	addr->name->sun_family = AF_UNIX;
725	atomic_set(&addr->refcnt, 1);
726
727retry:
728	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
729	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
730
731	spin_lock(&unix_table_lock);
732	ordernum = (ordernum+1)&0xFFFFF;
733
734	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
735				      addr->hash)) {
736		spin_unlock(&unix_table_lock);
737		/*
738		 * __unix_find_socket_byname() may take a long time if many names
739		 * are already in use.
740		 */
741		cond_resched();
742		/* Give up if all names seem to be in use. */
743		if (retries++ == 0xFFFFF) {
744			err = -ENOSPC;
745			kfree(addr);
746			goto out;
747		}
748		goto retry;
749	}
750	addr->hash ^= sk->sk_type;
751
752	__unix_remove_socket(sk);
753	u->addr = addr;
754	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
755	spin_unlock(&unix_table_lock);
756	err = 0;
757
758out:	mutex_unlock(&u->readlock);
759	return err;
760}
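/*
 * Autobind sketch (userspace, illustrative): binding with only the
 * family, i.e. addr_len == sizeof(sa_family_t), takes the autobind
 * path above and leaves the socket with a kernel-chosen abstract name
 * of five hex digits, observable via getsockname().
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	bind(fd, (struct sockaddr *)&a, sizeof(sa_family_t));
 */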
761
762static struct sock *unix_find_other(struct net *net,
763				    struct sockaddr_un *sunname, int len,
764				    int type, unsigned int hash, int *error)
765{
766	struct sock *u;
767	struct path path;
768	int err = 0;
769
770	if (sunname->sun_path[0]) {
771		struct inode *inode;
772		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
773		if (err)
774			goto fail;
775		inode = path.dentry->d_inode;
776		err = inode_permission(inode, MAY_WRITE);
777		if (err)
778			goto put_fail;
779
780		err = -ECONNREFUSED;
781		if (!S_ISSOCK(inode->i_mode))
782			goto put_fail;
783		u = unix_find_socket_byinode(inode);
784		if (!u)
785			goto put_fail;
786
787		if (u->sk_type == type)
788			touch_atime(&path);
789
790		path_put(&path);
791
792		err = -EPROTOTYPE;
793		if (u->sk_type != type) {
794			sock_put(u);
795			goto fail;
796		}
797	} else {
798		err = -ECONNREFUSED;
799		u = unix_find_socket_byname(net, sunname, len, type, hash);
800		if (u) {
801			struct dentry *dentry;
802			dentry = unix_sk(u)->path.dentry;
803			if (dentry)
804				touch_atime(&unix_sk(u)->path);
805		} else
806			goto fail;
807	}
808	return u;
809
810put_fail:
811	path_put(&path);
812fail:
813	*error = err;
814	return NULL;
815}
816
817
818static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
819{
820	struct sock *sk = sock->sk;
821	struct net *net = sock_net(sk);
822	struct unix_sock *u = unix_sk(sk);
823	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
824	char *sun_path = sunaddr->sun_path;
825	struct dentry *dentry = NULL;
826	struct path path;
827	int err;
828	unsigned int hash;
829	struct unix_address *addr;
830	struct hlist_head *list;
831
832	err = -EINVAL;
833	if (sunaddr->sun_family != AF_UNIX)
834		goto out;
835
836	if (addr_len == sizeof(short)) {
837		err = unix_autobind(sock);
838		goto out;
839	}
840
841	err = unix_mkname(sunaddr, addr_len, &hash);
842	if (err < 0)
843		goto out;
844	addr_len = err;
845
846	mutex_lock(&u->readlock);
847
848	err = -EINVAL;
849	if (u->addr)
850		goto out_up;
851
852	err = -ENOMEM;
853	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
854	if (!addr)
855		goto out_up;
856
857	memcpy(addr->name, sunaddr, addr_len);
858	addr->len = addr_len;
859	addr->hash = hash ^ sk->sk_type;
860	atomic_set(&addr->refcnt, 1);
861
862	if (sun_path[0]) {
863		umode_t mode;
864		err = 0;
865		/*
866		 * Get the parent directory, calculate the hash for last
867		 * component.
868		 */
869		dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
870		err = PTR_ERR(dentry);
871		if (IS_ERR(dentry))
872			goto out_mknod_parent;
873
874		/*
875		 * All right, let's create it.
876		 */
877		mode = S_IFSOCK |
878		       (SOCK_INODE(sock)->i_mode & ~current_umask());
879		err = security_path_mknod(&path, dentry, mode, 0);
880		if (err)
881			goto out_mknod_drop_write;
882		err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
883out_mknod_drop_write:
884		if (err)
885			goto out_mknod_dput;
886		mntget(path.mnt);
887		dget(dentry);
888		done_path_create(&path, dentry);
889		path.dentry = dentry;
890
891		addr->hash = UNIX_HASH_SIZE;
892	}
893
894	spin_lock(&unix_table_lock);
895
896	if (!sun_path[0]) {
897		err = -EADDRINUSE;
898		if (__unix_find_socket_byname(net, sunaddr, addr_len,
899					      sk->sk_type, hash)) {
900			unix_release_addr(addr);
901			goto out_unlock;
902		}
903
904		list = &unix_socket_table[addr->hash];
905	} else {
906		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
907		u->path = path;
908	}
909
910	err = 0;
911	__unix_remove_socket(sk);
912	u->addr = addr;
913	__unix_insert_socket(list, sk);
914
915out_unlock:
916	spin_unlock(&unix_table_lock);
917out_up:
918	mutex_unlock(&u->readlock);
919out:
920	return err;
921
922out_mknod_dput:
923	done_path_create(&path, dentry);
924out_mknod_parent:
925	if (err == -EEXIST)
926		err = -EADDRINUSE;
927	unix_release_addr(addr);
928	goto out_up;
929}
930
931static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
932{
933	if (unlikely(sk1 == sk2) || !sk2) {
934		unix_state_lock(sk1);
935		return;
936	}
937	if (sk1 < sk2) {
938		unix_state_lock(sk1);
939		unix_state_lock_nested(sk2);
940	} else {
941		unix_state_lock(sk2);
942		unix_state_lock_nested(sk1);
943	}
944}
945
946static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
947{
948	if (unlikely(sk1 == sk2) || !sk2) {
949		unix_state_unlock(sk1);
950		return;
951	}
952	unix_state_unlock(sk1);
953	unix_state_unlock(sk2);
954}
955
956static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
957			      int alen, int flags)
958{
959	struct sock *sk = sock->sk;
960	struct net *net = sock_net(sk);
961	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
962	struct sock *other;
963	unsigned int hash;
964	int err;
965
966	if (addr->sa_family != AF_UNSPEC) {
967		err = unix_mkname(sunaddr, alen, &hash);
968		if (err < 0)
969			goto out;
970		alen = err;
971
972		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
973		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
974			goto out;
975
976restart:
977		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
978		if (!other)
979			goto out;
980
981		unix_state_double_lock(sk, other);
982
983		/* Apparently VFS overslept socket death. Retry. */
984		if (sock_flag(other, SOCK_DEAD)) {
985			unix_state_double_unlock(sk, other);
986			sock_put(other);
987			goto restart;
988		}
989
990		err = -EPERM;
991		if (!unix_may_send(sk, other))
992			goto out_unlock;
993
994		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
995		if (err)
996			goto out_unlock;
997
998	} else {
999		/*
1000		 *	1003.1g breaking connected state with AF_UNSPEC
1001		 */
1002		other = NULL;
1003		unix_state_double_lock(sk, other);
1004	}
1005
1006	/*
1007	 * If it was connected, reconnect.
1008	 */
1009	if (unix_peer(sk)) {
1010		struct sock *old_peer = unix_peer(sk);
1011		unix_peer(sk) = other;
1012		unix_state_double_unlock(sk, other);
1013
1014		if (other != old_peer)
1015			unix_dgram_disconnected(sk, old_peer);
1016		sock_put(old_peer);
1017	} else {
1018		unix_peer(sk) = other;
1019		unix_state_double_unlock(sk, other);
1020	}
1021	return 0;
1022
1023out_unlock:
1024	unix_state_double_unlock(sk, other);
1025	sock_put(other);
1026out:
1027	return err;
1028}
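/*
 * Illustration of the AF_UNSPEC branch above (userspace sketch): a
 * connected datagram socket is disassociated from its peer by
 * reconnecting with the unspecified family, per 1003.1g.
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));	fd is no longer connected
 */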
1029
1030static long unix_wait_for_peer(struct sock *other, long timeo)
1031{
1032	struct unix_sock *u = unix_sk(other);
1033	int sched;
1034	DEFINE_WAIT(wait);
1035
1036	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1037
1038	sched = !sock_flag(other, SOCK_DEAD) &&
1039		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1040		unix_recvq_full(other);
1041
1042	unix_state_unlock(other);
1043
1044	if (sched)
1045		timeo = schedule_timeout(timeo);
1046
1047	finish_wait(&u->peer_wait, &wait);
1048	return timeo;
1049}
1050
1051static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1052			       int addr_len, int flags)
1053{
1054	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1055	struct sock *sk = sock->sk;
1056	struct net *net = sock_net(sk);
1057	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1058	struct sock *newsk = NULL;
1059	struct sock *other = NULL;
1060	struct sk_buff *skb = NULL;
1061	unsigned int hash;
1062	int st;
1063	int err;
1064	long timeo;
1065
1066	err = unix_mkname(sunaddr, addr_len, &hash);
1067	if (err < 0)
1068		goto out;
1069	addr_len = err;
1070
1071	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1072	    (err = unix_autobind(sock)) != 0)
1073		goto out;
1074
1075	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1076
1077	/* First of all allocate resources.
1078	   If we do it after the state is locked,
1079	   we will have to recheck everything again in any case.
1080	 */
1081
1082	err = -ENOMEM;
1083
1084	/* create new sock for complete connection */
1085	newsk = unix_create1(sock_net(sk), NULL);
1086	if (newsk == NULL)
1087		goto out;
1088
1089	/* Allocate skb for sending to listening sock */
1090	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1091	if (skb == NULL)
1092		goto out;
1093
1094restart:
1095	/*  Find listening sock. */
1096	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1097	if (!other)
1098		goto out;
1099
1100	/* Latch state of peer */
1101	unix_state_lock(other);
1102
1103	/* Apparently VFS overslept socket death. Retry. */
1104	if (sock_flag(other, SOCK_DEAD)) {
1105		unix_state_unlock(other);
1106		sock_put(other);
1107		goto restart;
1108	}
1109
1110	err = -ECONNREFUSED;
1111	if (other->sk_state != TCP_LISTEN)
1112		goto out_unlock;
1113	if (other->sk_shutdown & RCV_SHUTDOWN)
1114		goto out_unlock;
1115
1116	if (unix_recvq_full(other)) {
1117		err = -EAGAIN;
1118		if (!timeo)
1119			goto out_unlock;
1120
1121		timeo = unix_wait_for_peer(other, timeo);
1122
1123		err = sock_intr_errno(timeo);
1124		if (signal_pending(current))
1125			goto out;
1126		sock_put(other);
1127		goto restart;
1128	}
1129
1130	/* Latch our state.
1131
1132	   It is a tricky place. We need to grab our state lock and cannot
1133	   drop the lock on the peer. It is dangerous because deadlock is
1134	   possible. The connect-to-self case and simultaneous
1135	   attempts to connect are eliminated by checking socket
1136	   state. other is TCP_LISTEN; if sk were TCP_LISTEN we would
1137	   have checked this before attempting to grab the lock.
1138
1139	   Well, and we have to recheck the state after the socket is locked.
1140	 */
1141	st = sk->sk_state;
1142
1143	switch (st) {
1144	case TCP_CLOSE:
1145		/* This is ok... continue with connect */
1146		break;
1147	case TCP_ESTABLISHED:
1148		/* Socket is already connected */
1149		err = -EISCONN;
1150		goto out_unlock;
1151	default:
1152		err = -EINVAL;
1153		goto out_unlock;
1154	}
1155
1156	unix_state_lock_nested(sk);
1157
1158	if (sk->sk_state != st) {
1159		unix_state_unlock(sk);
1160		unix_state_unlock(other);
1161		sock_put(other);
1162		goto restart;
1163	}
1164
1165	err = security_unix_stream_connect(sk, other, newsk);
1166	if (err) {
1167		unix_state_unlock(sk);
1168		goto out_unlock;
1169	}
1170
1171	/* The way is open! Quickly set all the necessary fields... */
1172
1173	sock_hold(sk);
1174	unix_peer(newsk)	= sk;
1175	newsk->sk_state		= TCP_ESTABLISHED;
1176	newsk->sk_type		= sk->sk_type;
1177	init_peercred(newsk);
1178	newu = unix_sk(newsk);
1179	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1180	otheru = unix_sk(other);
1181
1182	/* copy address information from listening to new sock*/
1183	if (otheru->addr) {
1184		atomic_inc(&otheru->addr->refcnt);
1185		newu->addr = otheru->addr;
1186	}
1187	if (otheru->path.dentry) {
1188		path_get(&otheru->path);
1189		newu->path = otheru->path;
1190	}
1191
1192	/* Set credentials */
1193	copy_peercred(sk, other);
1194
1195	sock->state	= SS_CONNECTED;
1196	sk->sk_state	= TCP_ESTABLISHED;
1197	sock_hold(newsk);
1198
1199	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
1200	unix_peer(sk)	= newsk;
1201
1202	unix_state_unlock(sk);
1203
1204	/* take ten and send info to the listening sock */
1205	spin_lock(&other->sk_receive_queue.lock);
1206	__skb_queue_tail(&other->sk_receive_queue, skb);
1207	spin_unlock(&other->sk_receive_queue.lock);
1208	unix_state_unlock(other);
1209	other->sk_data_ready(other, 0);
1210	sock_put(other);
1211	return 0;
1212
1213out_unlock:
1214	if (other)
1215		unix_state_unlock(other);
1216
1217out:
1218	kfree_skb(skb);
1219	if (newsk)
1220		unix_release_sock(newsk, 0);
1221	if (other)
1222		sock_put(other);
1223	return err;
1224}
1225
1226static int unix_socketpair(struct socket *socka, struct socket *sockb)
1227{
1228	struct sock *ska = socka->sk, *skb = sockb->sk;
1229
1230	/* Join our sockets back to back */
1231	sock_hold(ska);
1232	sock_hold(skb);
1233	unix_peer(ska) = skb;
1234	unix_peer(skb) = ska;
1235	init_peercred(ska);
1236	init_peercred(skb);
1237
1238	if (ska->sk_type != SOCK_DGRAM) {
1239		ska->sk_state = TCP_ESTABLISHED;
1240		skb->sk_state = TCP_ESTABLISHED;
1241		socka->state  = SS_CONNECTED;
1242		sockb->state  = SS_CONNECTED;
1243	}
1244	return 0;
1245}
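/*
 * The usual entry point into the function above (userspace sketch):
 *
 *	int sv[2];
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *
 * Both ends come back joined with peer credentials set; note that only
 * non-SOCK_DGRAM pairs are marked TCP_ESTABLISHED/SS_CONNECTED.
 */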
1246
1247static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1248{
1249	struct sock *sk = sock->sk;
1250	struct sock *tsk;
1251	struct sk_buff *skb;
1252	int err;
1253
1254	err = -EOPNOTSUPP;
1255	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1256		goto out;
1257
1258	err = -EINVAL;
1259	if (sk->sk_state != TCP_LISTEN)
1260		goto out;
1261
1262	/* If socket state is TCP_LISTEN it cannot change (for now...),
1263	 * so that no locks are necessary.
1264	 */
1265
1266	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1267	if (!skb) {
1268		/* This means receive shutdown. */
1269		if (err == 0)
1270			err = -EINVAL;
1271		goto out;
1272	}
1273
1274	tsk = skb->sk;
1275	skb_free_datagram(sk, skb);
1276	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1277
1278	/* attach accepted sock to socket */
1279	unix_state_lock(tsk);
1280	newsock->state = SS_CONNECTED;
1281	sock_graft(tsk, newsock);
1282	unix_state_unlock(tsk);
1283	return 0;
1284
1285out:
1286	return err;
1287}
1288
1289
1290static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1291{
1292	struct sock *sk = sock->sk;
1293	struct unix_sock *u;
1294	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1295	int err = 0;
1296
1297	if (peer) {
1298		sk = unix_peer_get(sk);
1299
1300		err = -ENOTCONN;
1301		if (!sk)
1302			goto out;
1303		err = 0;
1304	} else {
1305		sock_hold(sk);
1306	}
1307
1308	u = unix_sk(sk);
1309	unix_state_lock(sk);
1310	if (!u->addr) {
1311		sunaddr->sun_family = AF_UNIX;
1312		sunaddr->sun_path[0] = 0;
1313		*uaddr_len = sizeof(short);
1314	} else {
1315		struct unix_address *addr = u->addr;
1316
1317		*uaddr_len = addr->len;
1318		memcpy(sunaddr, addr->name, *uaddr_len);
1319	}
1320	unix_state_unlock(sk);
1321	sock_put(sk);
1322out:
1323	return err;
1324}
1325
1326static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1327{
1328	int i;
1329
1330	scm->fp = UNIXCB(skb).fp;
1331	UNIXCB(skb).fp = NULL;
1332
1333	for (i = scm->fp->count-1; i >= 0; i--)
1334		unix_notinflight(scm->fp->fp[i]);
1335}
1336
1337static void unix_destruct_scm(struct sk_buff *skb)
1338{
1339	struct scm_cookie scm;
1340	memset(&scm, 0, sizeof(scm));
1341	scm.pid  = UNIXCB(skb).pid;
1342	scm.cred = UNIXCB(skb).cred;
1343	if (UNIXCB(skb).fp)
1344		unix_detach_fds(&scm, skb);
1345
1346	/* Alas, it calls VFS */
1347	/* So fscking what? fput() has been SMP-safe since last summer */
1348	scm_destroy(&scm);
1349	sock_wfree(skb);
1350}
1351
1352#define MAX_RECURSION_LEVEL 4
1353
1354static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1355{
1356	int i;
1357	unsigned char max_level = 0;
1358	int unix_sock_count = 0;
1359
1360	for (i = scm->fp->count - 1; i >= 0; i--) {
1361		struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1362
1363		if (sk) {
1364			unix_sock_count++;
1365			max_level = max(max_level,
1366					unix_sk(sk)->recursion_level);
1367		}
1368	}
1369	if (unlikely(max_level > MAX_RECURSION_LEVEL))
1370		return -ETOOMANYREFS;
1371
1372	/*
1373	 * Need to duplicate file references for the sake of garbage
1374	 * collection.  Otherwise a socket in the fps might become a
1375	 * candidate for GC while the skb is not yet queued.
1376	 */
1377	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1378	if (!UNIXCB(skb).fp)
1379		return -ENOMEM;
1380
1381	if (unix_sock_count) {
1382		for (i = scm->fp->count - 1; i >= 0; i--)
1383			unix_inflight(scm->fp->fp[i]);
1384	}
1385	return max_level;
1386}
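/*
 * The fds accounted for above arrive via SCM_RIGHTS (userspace sketch
 * with hypothetical names; the normal msg_iov payload is omitted):
 *
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *	c->cmsg_level = SOL_SOCKET;
 *	c->cmsg_type  = SCM_RIGHTS;
 *	c->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(c), &fd_to_pass, sizeof(int));
 *	sendmsg(sock, &msg, 0);
 */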
1387
1388static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1389{
1390	int err = 0;
1391
1392	UNIXCB(skb).pid  = get_pid(scm->pid);
1393	if (scm->cred)
1394		UNIXCB(skb).cred = get_cred(scm->cred);
1395	UNIXCB(skb).fp = NULL;
1396	if (scm->fp && send_fds)
1397		err = unix_attach_fds(scm, skb);
1398
1399	skb->destructor = unix_destruct_scm;
1400	return err;
1401}
1402
1403/*
1404 * Some apps rely on write() giving SCM_CREDENTIALS.
1405 * We include credentials if the source or destination socket
1406 * asserted SOCK_PASSCRED.
1407 */
1408static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1409			    const struct sock *other)
1410{
1411	if (UNIXCB(skb).cred)
1412		return;
1413	if (test_bit(SOCK_PASSCRED, &sock->flags) ||
1414	    !other->sk_socket ||
1415	    test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
1416		UNIXCB(skb).pid  = get_pid(task_tgid(current));
1417		UNIXCB(skb).cred = get_current_cred();
1418	}
1419}
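/*
 * Receiving the credentials attached above (userspace sketch, error
 * handling elided): the reader enables SO_PASSCRED, after which
 * recvmsg() yields an SCM_CREDENTIALS control message carrying a
 * struct ucred { pid, uid, gid } in CMSG_DATA().
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 */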
1420
1421/*
1422 *	Send AF_UNIX data.
1423 */
1424
1425static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1426			      struct msghdr *msg, size_t len)
1427{
1428	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1429	struct sock *sk = sock->sk;
1430	struct net *net = sock_net(sk);
1431	struct unix_sock *u = unix_sk(sk);
1432	struct sockaddr_un *sunaddr = msg->msg_name;
1433	struct sock *other = NULL;
1434	int namelen = 0; /* initialized only to quiet a GCC warning */
1435	int err;
1436	unsigned int hash;
1437	struct sk_buff *skb;
1438	long timeo;
1439	struct scm_cookie tmp_scm;
1440	int max_level;
1441	int data_len = 0;
1442
1443	if (NULL == siocb->scm)
1444		siocb->scm = &tmp_scm;
1445	wait_for_unix_gc();
1446	err = scm_send(sock, msg, siocb->scm);
1447	if (err < 0)
1448		return err;
1449
1450	err = -EOPNOTSUPP;
1451	if (msg->msg_flags&MSG_OOB)
1452		goto out;
1453
1454	if (msg->msg_namelen) {
1455		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1456		if (err < 0)
1457			goto out;
1458		namelen = err;
1459	} else {
1460		sunaddr = NULL;
1461		err = -ENOTCONN;
1462		other = unix_peer_get(sk);
1463		if (!other)
1464			goto out;
1465	}
1466
1467	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1468	    && (err = unix_autobind(sock)) != 0)
1469		goto out;
1470
1471	err = -EMSGSIZE;
1472	if (len > sk->sk_sndbuf - 32)
1473		goto out;
1474
1475	if (len > SKB_MAX_ALLOC)
1476		data_len = min_t(size_t,
1477				 len - SKB_MAX_ALLOC,
1478				 MAX_SKB_FRAGS * PAGE_SIZE);
1479
1480	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1481				   msg->msg_flags & MSG_DONTWAIT, &err);
1482	if (skb == NULL)
1483		goto out;
1484
1485	err = unix_scm_to_skb(siocb->scm, skb, true);
1486	if (err < 0)
1487		goto out_free;
1488	max_level = err + 1;
1489	unix_get_secdata(siocb->scm, skb);
1490
1491	skb_put(skb, len - data_len);
1492	skb->data_len = data_len;
1493	skb->len = len;
1494	err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
1495	if (err)
1496		goto out_free;
1497
1498	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1499
1500restart:
1501	if (!other) {
1502		err = -ECONNRESET;
1503		if (sunaddr == NULL)
1504			goto out_free;
1505
1506		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1507					hash, &err);
1508		if (other == NULL)
1509			goto out_free;
1510	}
1511
1512	if (sk_filter(other, skb) < 0) {
1513		/* Toss the packet but do not return any error to the sender */
1514		err = len;
1515		goto out_free;
1516	}
1517
1518	unix_state_lock(other);
1519	err = -EPERM;
1520	if (!unix_may_send(sk, other))
1521		goto out_unlock;
1522
1523	if (sock_flag(other, SOCK_DEAD)) {
1524		/*
1525		 *	Check with 1003.1g - what should the
1526		 *	datagram error be?
1527		 */
1528		unix_state_unlock(other);
1529		sock_put(other);
1530
1531		err = 0;
1532		unix_state_lock(sk);
1533		if (unix_peer(sk) == other) {
1534			unix_peer(sk) = NULL;
1535			unix_state_unlock(sk);
1536
1537			unix_dgram_disconnected(sk, other);
1538			sock_put(other);
1539			err = -ECONNREFUSED;
1540		} else {
1541			unix_state_unlock(sk);
1542		}
1543
1544		other = NULL;
1545		if (err)
1546			goto out_free;
1547		goto restart;
1548	}
1549
1550	err = -EPIPE;
1551	if (other->sk_shutdown & RCV_SHUTDOWN)
1552		goto out_unlock;
1553
1554	if (sk->sk_type != SOCK_SEQPACKET) {
1555		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1556		if (err)
1557			goto out_unlock;
1558	}
1559
1560	if (unix_peer(other) != sk && unix_recvq_full(other)) {
1561		if (!timeo) {
1562			err = -EAGAIN;
1563			goto out_unlock;
1564		}
1565
1566		timeo = unix_wait_for_peer(other, timeo);
1567
1568		err = sock_intr_errno(timeo);
1569		if (signal_pending(current))
1570			goto out_free;
1571
1572		goto restart;
1573	}
1574
1575	if (sock_flag(other, SOCK_RCVTSTAMP))
1576		__net_timestamp(skb);
1577	maybe_add_creds(skb, sock, other);
1578	skb_queue_tail(&other->sk_receive_queue, skb);
1579	if (max_level > unix_sk(other)->recursion_level)
1580		unix_sk(other)->recursion_level = max_level;
1581	unix_state_unlock(other);
1582	other->sk_data_ready(other, len);
1583	sock_put(other);
1584	scm_destroy(siocb->scm);
1585	return len;
1586
1587out_unlock:
1588	unix_state_unlock(other);
1589out_free:
1590	kfree_skb(skb);
1591out:
1592	if (other)
1593		sock_put(other);
1594	scm_destroy(siocb->scm);
1595	return err;
1596}
1597
1598
1599static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1600			       struct msghdr *msg, size_t len)
1601{
1602	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1603	struct sock *sk = sock->sk;
1604	struct sock *other = NULL;
1605	int err, size;
1606	struct sk_buff *skb;
1607	int sent = 0;
1608	struct scm_cookie tmp_scm;
1609	bool fds_sent = false;
1610	int max_level;
1611
1612	if (NULL == siocb->scm)
1613		siocb->scm = &tmp_scm;
1614	wait_for_unix_gc();
1615	err = scm_send(sock, msg, siocb->scm);
1616	if (err < 0)
1617		return err;
1618
1619	err = -EOPNOTSUPP;
1620	if (msg->msg_flags&MSG_OOB)
1621		goto out_err;
1622
1623	if (msg->msg_namelen) {
1624		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1625		goto out_err;
1626	} else {
1627		err = -ENOTCONN;
1628		other = unix_peer(sk);
1629		if (!other)
1630			goto out_err;
1631	}
1632
1633	if (sk->sk_shutdown & SEND_SHUTDOWN)
1634		goto pipe_err;
1635
1636	while (sent < len) {
1637		/*
1638		 *	Optimisation for the fact that under 0.01% of X
1639		 *	messages typically need breaking up.
1640		 */
1641
1642		size = len-sent;
1643
1644		/* Keep two messages in the pipe so it schedules better */
1645		if (size > ((sk->sk_sndbuf >> 1) - 64))
1646			size = (sk->sk_sndbuf >> 1) - 64;
1647
1648		if (size > SKB_MAX_ALLOC)
1649			size = SKB_MAX_ALLOC;
1650
1651		/*
1652		 *	Grab a buffer
1653		 */
1654
1655		skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
1656					  &err);
1657
1658		if (skb == NULL)
1659			goto out_err;
1660
1661		/*
1662		 *	If you pass two values to sock_alloc_send_skb
1663		 *	it tries to grab the large buffer with GFP_NOFS
1664		 *	(which can fail easily), and if that fails it grabs the
1665		 *	fallback-size buffer, which is under a page and will
1666		 *	succeed. [Alan]
1667		 */
1668		size = min_t(int, size, skb_tailroom(skb));
1669
1670
1671		/* Only send the fds in the first buffer */
1672		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
1673		if (err < 0) {
1674			kfree_skb(skb);
1675			goto out_err;
1676		}
1677		max_level = err + 1;
1678		fds_sent = true;
1679
1680		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1681		if (err) {
1682			kfree_skb(skb);
1683			goto out_err;
1684		}
1685
1686		unix_state_lock(other);
1687
1688		if (sock_flag(other, SOCK_DEAD) ||
1689		    (other->sk_shutdown & RCV_SHUTDOWN))
1690			goto pipe_err_free;
1691
1692		maybe_add_creds(skb, sock, other);
1693		skb_queue_tail(&other->sk_receive_queue, skb);
1694		if (max_level > unix_sk(other)->recursion_level)
1695			unix_sk(other)->recursion_level = max_level;
1696		unix_state_unlock(other);
1697		other->sk_data_ready(other, size);
1698		sent += size;
1699	}
1700
1701	scm_destroy(siocb->scm);
1702	siocb->scm = NULL;
1703
1704	return sent;
1705
1706pipe_err_free:
1707	unix_state_unlock(other);
1708	kfree_skb(skb);
1709pipe_err:
1710	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1711		send_sig(SIGPIPE, current, 0);
1712	err = -EPIPE;
1713out_err:
1714	scm_destroy(siocb->scm);
1715	siocb->scm = NULL;
1716	return sent ? : err;
1717}
1718
1719static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1720				  struct msghdr *msg, size_t len)
1721{
1722	int err;
1723	struct sock *sk = sock->sk;
1724
1725	err = sock_error(sk);
1726	if (err)
1727		return err;
1728
1729	if (sk->sk_state != TCP_ESTABLISHED)
1730		return -ENOTCONN;
1731
1732	if (msg->msg_namelen)
1733		msg->msg_namelen = 0;
1734
1735	return unix_dgram_sendmsg(kiocb, sock, msg, len);
1736}
1737
1738static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
1739			      struct msghdr *msg, size_t size,
1740			      int flags)
1741{
1742	struct sock *sk = sock->sk;
1743
1744	if (sk->sk_state != TCP_ESTABLISHED)
1745		return -ENOTCONN;
1746
1747	return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
1748}
1749
1750static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1751{
1752	struct unix_sock *u = unix_sk(sk);
1753
1754	msg->msg_namelen = 0;
1755	if (u->addr) {
1756		msg->msg_namelen = u->addr->len;
1757		memcpy(msg->msg_name, u->addr->name, u->addr->len);
1758	}
1759}
1760
1761static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1762			      struct msghdr *msg, size_t size,
1763			      int flags)
1764{
1765	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1766	struct scm_cookie tmp_scm;
1767	struct sock *sk = sock->sk;
1768	struct unix_sock *u = unix_sk(sk);
1769	int noblock = flags & MSG_DONTWAIT;
1770	struct sk_buff *skb;
1771	int err;
1772	int peeked, skip;
1773
1774	err = -EOPNOTSUPP;
1775	if (flags&MSG_OOB)
1776		goto out;
1777
1778	msg->msg_namelen = 0;
1779
1780	err = mutex_lock_interruptible(&u->readlock);
1781	if (err) {
1782		err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
1783		goto out;
1784	}
1785
1786	skip = sk_peek_offset(sk, flags);
1787
1788	skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
1789	if (!skb) {
1790		unix_state_lock(sk);
1791		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1792		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1793		    (sk->sk_shutdown & RCV_SHUTDOWN))
1794			err = 0;
1795		unix_state_unlock(sk);
1796		goto out_unlock;
1797	}
1798
1799	wake_up_interruptible_sync_poll(&u->peer_wait,
1800					POLLOUT | POLLWRNORM | POLLWRBAND);
1801
1802	if (msg->msg_name)
1803		unix_copy_addr(msg, skb->sk);
1804
1805	if (size > skb->len - skip)
1806		size = skb->len - skip;
1807	else if (size < skb->len - skip)
1808		msg->msg_flags |= MSG_TRUNC;
1809
1810	err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
1811	if (err)
1812		goto out_free;
1813
1814	if (sock_flag(sk, SOCK_RCVTSTAMP))
1815		__sock_recv_timestamp(msg, sk, skb);
1816
1817	if (!siocb->scm) {
1818		siocb->scm = &tmp_scm;
1819		memset(&tmp_scm, 0, sizeof(tmp_scm));
1820	}
1821	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1822	unix_set_secdata(siocb->scm, skb);
1823
1824	if (!(flags & MSG_PEEK)) {
1825		if (UNIXCB(skb).fp)
1826			unix_detach_fds(siocb->scm, skb);
1827
1828		sk_peek_offset_bwd(sk, skb->len);
1829	} else {
1830		/* It is questionable: on PEEK we could:
1831		   - not return fds - good, but too simple 8)
1832		   - return fds, and do not return them on read (old strategy,
1833		     apparently wrong)
1834		   - clone fds (I chose it for now, it is the most universal
1835		     solution)
1836
1837		   POSIX 1003.1g does not actually define this clearly
1838		   at all. POSIX 1003.1g doesn't define a lot of things
1839		   clearly however!
1840
1841		*/
1842
1843		sk_peek_offset_fwd(sk, size);
1844
1845		if (UNIXCB(skb).fp)
1846			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1847	}
1848	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
1849
1850	scm_recv(sock, msg, siocb->scm, flags);
1851
1852out_free:
1853	skb_free_datagram(sk, skb);
1854out_unlock:
1855	mutex_unlock(&u->readlock);
1856out:
1857	return err;
1858}
1859
1860/*
1861 *	Sleep until data has arrived. But check for races.
1862 */
1863
1864static long unix_stream_data_wait(struct sock *sk, long timeo)
1865{
1866	DEFINE_WAIT(wait);
1867
1868	unix_state_lock(sk);
1869
1870	for (;;) {
1871		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1872
1873		if (!skb_queue_empty(&sk->sk_receive_queue) ||
1874		    sk->sk_err ||
1875		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
1876		    signal_pending(current) ||
1877		    !timeo)
1878			break;
1879
1880		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1881		unix_state_unlock(sk);
1882		timeo = schedule_timeout(timeo);
1883		unix_state_lock(sk);
1884		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1885	}
1886
1887	finish_wait(sk_sleep(sk), &wait);
1888	unix_state_unlock(sk);
1889	return timeo;
1890}
1891
1892
1893
1894static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1895			       struct msghdr *msg, size_t size,
1896			       int flags)
1897{
1898	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1899	struct scm_cookie tmp_scm;
1900	struct sock *sk = sock->sk;
1901	struct unix_sock *u = unix_sk(sk);
1902	struct sockaddr_un *sunaddr = msg->msg_name;
1903	int copied = 0;
1904	int check_creds = 0;
1905	int target;
1906	int err = 0;
1907	long timeo;
1908	int skip;
1909
1910	err = -EINVAL;
1911	if (sk->sk_state != TCP_ESTABLISHED)
1912		goto out;
1913
1914	err = -EOPNOTSUPP;
1915	if (flags&MSG_OOB)
1916		goto out;
1917
1918	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1919	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1920
1921	msg->msg_namelen = 0;
1922
1923	/* Lock the socket to prevent queue disordering
1924	 * while we sleep in memcpy_toiovec
1925	 */
1926
1927	if (!siocb->scm) {
1928		siocb->scm = &tmp_scm;
1929		memset(&tmp_scm, 0, sizeof(tmp_scm));
1930	}
1931
1932	err = mutex_lock_interruptible(&u->readlock);
1933	if (err) {
1934		err = sock_intr_errno(timeo);
1935		goto out;
1936	}
1937
1938	skip = sk_peek_offset(sk, flags);
1939
1940	do {
1941		int chunk;
1942		struct sk_buff *skb;
1943
1944		unix_state_lock(sk);
1945		skb = skb_peek(&sk->sk_receive_queue);
1946again:
1947		if (skb == NULL) {
1948			unix_sk(sk)->recursion_level = 0;
1949			if (copied >= target)
1950				goto unlock;
1951
1952			/*
1953			 *	POSIX 1003.1g mandates this order.
1954			 */
1955
1956			err = sock_error(sk);
1957			if (err)
1958				goto unlock;
1959			if (sk->sk_shutdown & RCV_SHUTDOWN)
1960				goto unlock;
1961
1962			unix_state_unlock(sk);
1963			err = -EAGAIN;
1964			if (!timeo)
1965				break;
1966			mutex_unlock(&u->readlock);
1967
1968			timeo = unix_stream_data_wait(sk, timeo);
1969
1970			if (signal_pending(current)
1971			    ||  mutex_lock_interruptible(&u->readlock)) {
1972				err = sock_intr_errno(timeo);
1973				goto out;
1974			}
1975
1976			continue;
1977 unlock:
1978			unix_state_unlock(sk);
1979			break;
1980		}
1981
1982		if (skip >= skb->len) {
1983			skip -= skb->len;
1984			skb = skb_peek_next(skb, &sk->sk_receive_queue);
1985			goto again;
1986		}
1987
1988		unix_state_unlock(sk);
1989
1990		if (check_creds) {
1991			/* Never glue messages from different writers */
1992			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
1993			    (UNIXCB(skb).cred != siocb->scm->cred))
1994				break;
1995		} else {
1996			/* Copy credentials */
1997			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1998			check_creds = 1;
1999		}
2000
2001		/* Copy address just once */
2002		if (sunaddr) {
2003			unix_copy_addr(msg, skb->sk);
2004			sunaddr = NULL;
2005		}
2006
2007		chunk = min_t(unsigned int, skb->len - skip, size);
2008		if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) {
2009			if (copied == 0)
2010				copied = -EFAULT;
2011			break;
2012		}
2013		copied += chunk;
2014		size -= chunk;
2015
2016		/* Mark read part of skb as used */
2017		if (!(flags & MSG_PEEK)) {
2018			skb_pull(skb, chunk);
2019
2020			sk_peek_offset_bwd(sk, chunk);
2021
2022			if (UNIXCB(skb).fp)
2023				unix_detach_fds(siocb->scm, skb);
2024
2025			if (skb->len)
2026				break;
2027
2028			skb_unlink(skb, &sk->sk_receive_queue);
2029			consume_skb(skb);
2030
2031			if (siocb->scm->fp)
2032				break;
2033		} else {
2034			/* It is questionable, see note in unix_dgram_recvmsg.
2035			 */
2036			if (UNIXCB(skb).fp)
2037				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
2038
2039			sk_peek_offset_fwd(sk, chunk);
2040
2041			break;
2042		}
2043	} while (size);
2044
2045	mutex_unlock(&u->readlock);
2046	scm_recv(sock, msg, siocb->scm, flags);
2047out:
2048	return copied ? : err;
2049}
2050
2051static int unix_shutdown(struct socket *sock, int mode)
2052{
2053	struct sock *sk = sock->sk;
2054	struct sock *other;
2055
2056	mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
2057
2058	if (!mode)
2059		return 0;
2060
2061	unix_state_lock(sk);
2062	sk->sk_shutdown |= mode;
2063	other = unix_peer(sk);
2064	if (other)
2065		sock_hold(other);
2066	unix_state_unlock(sk);
2067	sk->sk_state_change(sk);
2068
2069	if (other &&
2070		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2071
2072		int peer_mode = 0;
2073
2074		if (mode&RCV_SHUTDOWN)
2075			peer_mode |= SEND_SHUTDOWN;
2076		if (mode&SEND_SHUTDOWN)
2077			peer_mode |= RCV_SHUTDOWN;
2078		unix_state_lock(other);
2079		other->sk_shutdown |= peer_mode;
2080		unix_state_unlock(other);
2081		other->sk_state_change(other);
2082		if (peer_mode == SHUTDOWN_MASK)
2083			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2084		else if (peer_mode & RCV_SHUTDOWN)
2085			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2086	}
2087	if (other)
2088		sock_put(other);
2089
2090	return 0;
2091}
2092
2093long unix_inq_len(struct sock *sk)
2094{
2095	struct sk_buff *skb;
2096	long amount = 0;
2097
2098	if (sk->sk_state == TCP_LISTEN)
2099		return -EINVAL;
2100
2101	spin_lock(&sk->sk_receive_queue.lock);
2102	if (sk->sk_type == SOCK_STREAM ||
2103	    sk->sk_type == SOCK_SEQPACKET) {
2104		skb_queue_walk(&sk->sk_receive_queue, skb)
2105			amount += skb->len;
2106	} else {
2107		skb = skb_peek(&sk->sk_receive_queue);
2108		if (skb)
2109			amount = skb->len;
2110	}
2111	spin_unlock(&sk->sk_receive_queue.lock);
2112
2113	return amount;
2114}
2115EXPORT_SYMBOL_GPL(unix_inq_len);
2116
2117long unix_outq_len(struct sock *sk)
2118{
2119	return sk_wmem_alloc_get(sk);
2120}
2121EXPORT_SYMBOL_GPL(unix_outq_len);
2122
2123static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2124{
2125	struct sock *sk = sock->sk;
2126	long amount = 0;
2127	int err;
2128
2129	switch (cmd) {
2130	case SIOCOUTQ:
2131		amount = unix_outq_len(sk);
2132		err = put_user(amount, (int __user *)arg);
2133		break;
2134	case SIOCINQ:
2135		amount = unix_inq_len(sk);
2136		if (amount < 0)
2137			err = amount;
2138		else
2139			err = put_user(amount, (int __user *)arg);
2140		break;
2141	default:
2142		err = -ENOIOCTLCMD;
2143		break;
2144	}
2145	return err;
2146}
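
/*
 * Userspace sketch (illustrative): SIOCINQ reports unread queued bytes
 * (only the first datagram for SOCK_DGRAM, per unix_inq_len() above);
 * SIOCOUTQ reports bytes we sent that the peer has not yet consumed.
 *
 *	int in = 0, out = 0;
 *
 *	ioctl(fd, SIOCINQ, &in);
 *	ioctl(fd, SIOCOUTQ, &out);
 */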
2147
2148static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2149{
2150	struct sock *sk = sock->sk;
2151	unsigned int mask;
2152
2153	sock_poll_wait(file, sk_sleep(sk), wait);
2154	mask = 0;
2155
2156	/* exceptional events? */
2157	if (sk->sk_err)
2158		mask |= POLLERR;
2159	if (sk->sk_shutdown == SHUTDOWN_MASK)
2160		mask |= POLLHUP;
2161	if (sk->sk_shutdown & RCV_SHUTDOWN)
2162		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2163
2164	/* readable? */
2165	if (!skb_queue_empty(&sk->sk_receive_queue))
2166		mask |= POLLIN | POLLRDNORM;
2167
2168	/* Connection-based sockets need to check for termination and startup */
2169	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2170	    sk->sk_state == TCP_CLOSE)
2171		mask |= POLLHUP;
2172
2173	/*
2174	 * We also report the socket as writable when the other side has
2175	 * shut down, so a blocked writer sees EPIPE instead of hanging.
2176	 */
2177	if (unix_writable(sk))
2178		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2179
2180	return mask;
2181}
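
/*
 * Userspace sketch (illustrative): distinguishing a half-close from a
 * full close with the mask computed above.  POLLRDHUP (needs
 * _GNU_SOURCE) together with POLLIN means the peer shut down its send
 * side; POLLHUP means both directions are down.  The helpers named
 * below are hypothetical.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };
 *
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & POLLHUP)
 *		handle_full_close();
 *	else if (pfd.revents & POLLRDHUP)
 *		drain_remaining_data();
 */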
2182
2183static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2184				    poll_table *wait)
2185{
2186	struct sock *sk = sock->sk, *other;
2187	unsigned int mask, writable;
2188
2189	sock_poll_wait(file, sk_sleep(sk), wait);
2190	mask = 0;
2191
2192	/* exceptional events? */
2193	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2194		mask |= POLLERR;
2195	if (sk->sk_shutdown & RCV_SHUTDOWN)
2196		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2197	if (sk->sk_shutdown == SHUTDOWN_MASK)
2198		mask |= POLLHUP;
2199
2200	/* readable? */
2201	if (!skb_queue_empty(&sk->sk_receive_queue))
2202		mask |= POLLIN | POLLRDNORM;
2203
2204	/* Connection-based sockets need to check for termination and startup */
2205	if (sk->sk_type == SOCK_SEQPACKET) {
2206		if (sk->sk_state == TCP_CLOSE)
2207			mask |= POLLHUP;
2208		/* connection hasn't started yet? */
2209		if (sk->sk_state == TCP_SYN_SENT)
2210			return mask;
2211	}
2212
2213	/* No write status requested, avoid expensive OUT tests. */
2214	if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
2215		return mask;
2216
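	/* For a dgram socket connected to a peer that has not connected
	 * back to us, writability also depends on the peer's receive
	 * queue; poll the peer's peer_wait queue so a reader emptying it
	 * can wake this sender.
	 */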
2217	writable = unix_writable(sk);
2218	other = unix_peer_get(sk);
2219	if (other) {
2220		if (unix_peer(other) != sk) {
2221			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
2222			if (unix_recvq_full(other))
2223				writable = 0;
2224		}
2225		sock_put(other);
2226	}
2227
2228	if (writable)
2229		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2230	else
2231		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2232
2233	return mask;
2234}
2235
2236#ifdef CONFIG_PROC_FS
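/* Note (at this revision): the scans below run to *i <= UNIX_HASH_SIZE
 * because unix_socket_table carries one extra bucket past the hash
 * range (unix_sockets_unbound) for sockets that are not yet bound.
 */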
2237static struct sock *first_unix_socket(int *i)
2238{
2239	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
2240		if (!hlist_empty(&unix_socket_table[*i]))
2241			return __sk_head(&unix_socket_table[*i]);
2242	}
2243	return NULL;
2244}
2245
2246static struct sock *next_unix_socket(int *i, struct sock *s)
2247{
2248	struct sock *next = sk_next(s);
2249	/* More in this chain? */
2250	if (next)
2251		return next;
2252	/* Look for next non-empty chain. */
2253	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
2254		if (!hlist_empty(&unix_socket_table[*i]))
2255			return __sk_head(&unix_socket_table[*i]);
2256	}
2257	return NULL;
2258}
2259
2260struct unix_iter_state {
2261	struct seq_net_private p;
2262	int i;
2263};
2264
2265static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
2266{
2267	struct unix_iter_state *iter = seq->private;
2268	loff_t off = 0;
2269	struct sock *s;
2270
2271	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
2272		if (sock_net(s) != seq_file_net(seq))
2273			continue;
2274		if (off == pos)
2275			return s;
2276		++off;
2277	}
2278	return NULL;
2279}
2280
2281static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2282	__acquires(unix_table_lock)
2283{
2284	spin_lock(&unix_table_lock);
2285	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2286}
2287
2288static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2289{
2290	struct unix_iter_state *iter = seq->private;
2291	struct sock *sk = v;
2292	++*pos;
2293
2294	if (v == SEQ_START_TOKEN)
2295		sk = first_unix_socket(&iter->i);
2296	else
2297		sk = next_unix_socket(&iter->i, sk);
2298	while (sk && (sock_net(sk) != seq_file_net(seq)))
2299		sk = next_unix_socket(&iter->i, sk);
2300	return sk;
2301}
2302
2303static void unix_seq_stop(struct seq_file *seq, void *v)
2304	__releases(unix_table_lock)
2305{
2306	spin_unlock(&unix_table_lock);
2307}
2308
2309static int unix_seq_show(struct seq_file *seq, void *v)
2310{
2312	if (v == SEQ_START_TOKEN)
2313		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
2314			 "Inode Path\n");
2315	else {
2316		struct sock *s = v;
2317		struct unix_sock *u = unix_sk(s);
2318		unix_state_lock(s);
2319
2320		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2321			s,
2322			atomic_read(&s->sk_refcnt),
2323			0,
2324			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2325			s->sk_type,
2326			s->sk_socket ?
2327			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2328			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2329			sock_i_ino(s));
2330
2331		if (u->addr) {
2332			int i, len;
2333			seq_putc(seq, ' ');
2334
2335			i = 0;
2336			len = u->addr->len - sizeof(short);
2337			if (!UNIX_ABSTRACT(s))
2338				len--;
2339			else {
2340				seq_putc(seq, '@');
2341				i++;
2342			}
2343			for ( ; i < len; i++)
2344				seq_putc(seq, u->addr->name->sun_path[i]);
2345		}
2346		unix_state_unlock(s);
2347		seq_putc(seq, '\n');
2348	}
2349
2350	return 0;
2351}
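
/*
 * Example /proc/net/unix line as emitted above (field values are
 * illustrative, not from a real system); the leading '@' marks an
 * abstract address:
 *
 *	Num       RefCount Protocol Flags    Type St Inode Path
 *	ffff8800b56f7000: 00000002 00000000 00010000 0001 01 12345 @/tmp/.X11-unix/X0
 */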
2352
2353static const struct seq_operations unix_seq_ops = {
2354	.start  = unix_seq_start,
2355	.next   = unix_seq_next,
2356	.stop   = unix_seq_stop,
2357	.show   = unix_seq_show,
2358};
2359
2360static int unix_seq_open(struct inode *inode, struct file *file)
2361{
2362	return seq_open_net(inode, file, &unix_seq_ops,
2363			    sizeof(struct unix_iter_state));
2364}
2365
2366static const struct file_operations unix_seq_fops = {
2367	.owner		= THIS_MODULE,
2368	.open		= unix_seq_open,
2369	.read		= seq_read,
2370	.llseek		= seq_lseek,
2371	.release	= seq_release_net,
2372};
2373
2374#endif
2375
2376static const struct net_proto_family unix_family_ops = {
2377	.family = PF_UNIX,
2378	.create = unix_create,
2379	.owner	= THIS_MODULE,
2380};
2381
2382
2383static int __net_init unix_net_init(struct net *net)
2384{
2385	int error = -ENOMEM;
2386
2387	net->unx.sysctl_max_dgram_qlen = 10;
2388	if (unix_sysctl_register(net))
2389		goto out;
2390
2391#ifdef CONFIG_PROC_FS
2392	if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
2393		unix_sysctl_unregister(net);
2394		goto out;
2395	}
2396#endif
2397	error = 0;
2398out:
2399	return error;
2400}
2401
2402static void __net_exit unix_net_exit(struct net *net)
2403{
2404	unix_sysctl_unregister(net);
2405	proc_net_remove(net, "unix");
2406}
2407
2408static struct pernet_operations unix_net_ops = {
2409	.init = unix_net_init,
2410	.exit = unix_net_exit,
2411};
2412
2413static int __init af_unix_init(void)
2414{
2415	int rc;
2416	struct sk_buff *dummy_skb;
2417
2418	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
2419
2420	rc = proto_register(&unix_proto, 1);
2421	if (rc != 0) {
2422		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2423		       __func__);
2424		goto out;
2425	}
2426
2427	sock_register(&unix_family_ops);
2428	register_pernet_subsys(&unix_net_ops);
2429out:
2430	return rc;
2431}
2432
2433static void __exit af_unix_exit(void)
2434{
2435	sock_unregister(PF_UNIX);
2436	proto_unregister(&unix_proto);
2437	unregister_pernet_subsys(&unix_net_ops);
2438}
2439
2440/* Earlier than device_initcall() so that other drivers invoking
2441   request_module() don't end up in a loop when modprobe tries
2442   to use a UNIX socket. But later than subsys_initcall() because
2443   we depend on stuff initialised there. */
2444fs_initcall(af_unix_init);
2445module_exit(af_unix_exit);
2446
2447MODULE_LICENSE("GPL");
2448MODULE_ALIAS_NETPROTO(PF_UNIX);
2449