af_unix.c revision 44bb93633f57a55979f3c2589b10fd6a2bfc7c08
1/*
2 * NET4:	Implementation of BSD Unix domain sockets.
3 *
4 * Authors:	Alan Cox, <alan.cox@linux.org>
5 *
6 *		This program is free software; you can redistribute it and/or
7 *		modify it under the terms of the GNU General Public License
8 *		as published by the Free Software Foundation; either version
9 *		2 of the License, or (at your option) any later version.
10 *
11 * Version:	$Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $
12 *
13 * Fixes:
14 *		Linus Torvalds	:	Assorted bug cures.
15 *		Niibe Yutaka	:	async I/O support.
16 *		Carsten Paeth	:	PF_UNIX check, address fixes.
17 *		Alan Cox	:	Limit size of allocated blocks.
18 *		Alan Cox	:	Fixed the stupid socketpair bug.
19 *		Alan Cox	:	BSD compatibility fine tuning.
20 *		Alan Cox	:	Fixed a bug in connect when interrupted.
21 *		Alan Cox	:	Sorted out a proper draft version of
22 *					file descriptor passing hacked up from
23 *					Mike Shaver's work.
24 *		Marty Leisner	:	Fixes to fd passing
25 *		Nick Nevin	:	recvmsg bugfix.
26 *		Alan Cox	:	Started proper garbage collector
27 *		Heiko EiBfeldt	:	Missing verify_area check
28 *		Alan Cox	:	Started POSIXisms
29 *		Andreas Schwab	:	Replace inode by dentry for proper
30 *					reference counting
31 *		Kirk Petersen	:	Made this a module
32 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
33 *					Lots of bug fixes.
34 *	     Alexey Kuznetosv	:	Repaired (I hope) bugs introduces
35 *					by above two patches.
36 *	     Andrea Arcangeli	:	If possible we block in connect(2)
37 *					if the max backlog of the listen socket
38 *					is been reached. This won't break
39 *					old apps and it will avoid huge amount
40 *					of socks hashed (this for unix_gc()
41 *					performances reasons).
42 *					Security fix that limits the max
43 *					number of socks to 2*max_files and
44 *					the number of skb queueable in the
45 *					dgram receiver.
46 *		Artur Skawina   :	Hash function optimizations
47 *	     Alexey Kuznetsov   :	Full scale SMP. Lot of bugs are introduced 8)
48 *	      Malcolm Beattie   :	Set peercred for socketpair
49 *	     Michal Ostrowski   :       Module initialization cleanup.
50 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
51 *	     				the core infrastructure is doing that
52 *	     				for all net proto families now (2.5.69+)
53 *
54 *
55 * Known differences from reference BSD that was tested:
56 *
57 *	[TO FIX]
58 *	ECONNREFUSED is not returned from one end of a connected() socket to the
59 *		other the moment one end closes.
60 *	fstat() doesn't return st_dev=0, and give the blksize as high water mark
61 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
62 *	[NOT TO FIX]
63 *	accept() returns a path name even if the connecting socket has closed
64 *		in the meantime (BSD loses the path and gives up).
65 *	accept() returns 0 length path for an unbound connector. BSD returns 16
66 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
67 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
68 *	BSD af_unix apparently has connect forgetting to block properly.
69 *		(need to check this with the POSIX spec in detail)
70 *
71 * Differences from 2.0.0-11-... (ANK)
72 *	Bug fixes and improvements.
73 *		- client shutdown killed server socket.
74 *		- removed all useless cli/sti pairs.
75 *
76 *	Semantic changes/extensions.
77 *		- generic control message passing.
78 *		- SCM_CREDENTIALS control message.
79 *		- "Abstract" (not FS based) socket bindings.
80 *		  Abstract names are sequences of bytes (not zero terminated)
81 *		  started by 0, so that this name space does not intersect
82 *		  with BSD names.
83 */
84
85#include <linux/module.h>
86#include <linux/kernel.h>
87#include <linux/signal.h>
88#include <linux/sched.h>
89#include <linux/errno.h>
90#include <linux/string.h>
91#include <linux/stat.h>
92#include <linux/dcache.h>
93#include <linux/namei.h>
94#include <linux/socket.h>
95#include <linux/un.h>
96#include <linux/fcntl.h>
97#include <linux/termios.h>
98#include <linux/sockios.h>
99#include <linux/net.h>
100#include <linux/in.h>
101#include <linux/fs.h>
102#include <linux/slab.h>
103#include <asm/uaccess.h>
104#include <linux/skbuff.h>
105#include <linux/netdevice.h>
106#include <net/sock.h>
107#include <net/tcp_states.h>
108#include <net/af_unix.h>
109#include <linux/proc_fs.h>
110#include <linux/seq_file.h>
111#include <net/scm.h>
112#include <linux/init.h>
113#include <linux/poll.h>
114#include <linux/smp_lock.h>
115#include <linux/rtnetlink.h>
116#include <linux/mount.h>
117#include <net/checksum.h>
118#include <linux/security.h>
119
120int sysctl_unix_max_dgram_qlen __read_mostly = 10;
121
122struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
123DEFINE_SPINLOCK(unix_table_lock);
124static atomic_t unix_nr_socks = ATOMIC_INIT(0);
125
126#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])
127
128#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
129
#ifdef CONFIG_SECURITY_NETWORK
/* Copy the sending context's LSM security ID into the skb's UNIXSID area. */
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

/* Recover the security ID stored in the skb into the receiver's scm cookie. */
static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
/* Security labelling disabled: both helpers compile away to no-ops. */
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */
147
148/*
149 *  SMP locking strategy:
150 *    hash table is protected with spinlock unix_table_lock
151 *    each socket state is protected by separate rwlock.
152 */
153
154static inline unsigned unix_hash_fold(__wsum n)
155{
156	unsigned hash = (__force unsigned)n;
157	hash ^= hash>>16;
158	hash ^= hash>>8;
159	return hash&(UNIX_HASH_SIZE-1);
160}
161
162#define unix_peer(sk) (unix_sk(sk)->peer)
163
/* Nonzero when @osk considers @sk to be its peer. */
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return sk == unix_peer(osk);
}
168
169static inline int unix_may_send(struct sock *sk, struct sock *osk)
170{
171	return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
172}
173
/*
 * Return @s's peer with an extra reference held, or NULL when
 * unconnected.  The state read-lock makes reading the peer pointer and
 * taking the reference atomic with respect to disconnect.
 */
static struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_rlock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_runlock(s);
	return peer;
}
185
186static inline void unix_release_addr(struct unix_address *addr)
187{
188	if (atomic_dec_and_test(&addr->refcnt))
189		kfree(addr);
190}
191
192/*
193 *	Check unix socket name:
194 *		- should be not zero length.
195 *	        - if started by not zero, should be NULL terminated (FS object)
196 *		- if started by zero, it is abstract name.
197 */
198
/*
 * Validate @sunaddr of user-supplied length @len and return its
 * canonical length, or -EINVAL.  Filesystem names are NUL-terminated in
 * place (see comment below) and re-measured; *hashp is written only for
 * abstract names.
 */
static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesnt as such exist.  However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len]=0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
	return len;
}
221
/* Unhash @sk from its bucket.  Caller must hold unix_table_lock. */
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}
226
/* Hash @sk into @list.  Caller must hold unix_table_lock; @sk must not
 * already be hashed. */
static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	BUG_TRAP(sk_unhashed(sk));
	sk_add_node(sk, list);
}
232
/* Locked wrapper around __unix_remove_socket(). */
static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}
239
/* Locked wrapper around __unix_insert_socket(). */
static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}
246
247static struct sock *__unix_find_socket_byname(struct sockaddr_un *sunname,
248					      int len, int type, unsigned hash)
249{
250	struct sock *s;
251	struct hlist_node *node;
252
253	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
254		struct unix_sock *u = unix_sk(s);
255
256		if (u->addr->len == len &&
257		    !memcmp(u->addr->name, sunname, len))
258			goto found;
259	}
260	s = NULL;
261found:
262	return s;
263}
264
/*
 * Locked wrapper around __unix_find_socket_byname(); takes a reference
 * on the sock it returns.
 */
static inline struct sock *unix_find_socket_byname(struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}
278
279static struct sock *unix_find_socket_byinode(struct inode *i)
280{
281	struct sock *s;
282	struct hlist_node *node;
283
284	spin_lock(&unix_table_lock);
285	sk_for_each(s, node,
286		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
287		struct dentry *dentry = unix_sk(s)->dentry;
288
289		if(dentry && dentry->d_inode == i)
290		{
291			sock_hold(s);
292			goto found;
293		}
294	}
295	s = NULL;
296found:
297	spin_unlock(&unix_table_lock);
298	return s;
299}
300
/* Writable while in-flight write memory is at most a quarter of sndbuf. */
static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}
305
/*
 * sk->sk_write_space callback: once enough wmem has drained to make the
 * socket writable again, wake sleeping writers and post async POLL_OUT.
 */
static void unix_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (unix_writable(sk)) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);
		sk_wake_async(sk, 2, POLL_OUT);
	}
	read_unlock(&sk->sk_callback_lock);
}
316
/* When dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets arrived from previous peer. First, it allows to do
 * flow control based only on wmem_alloc; second, sk connected to peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		/* Wake anyone sleeping in unix_wait_for_peer() on us. */
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of bidirectional dgram pipe is disconnected,
		 * we signal error. Messages are lost. Do not make this,
		 * when peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}
337
338static void unix_sock_destructor(struct sock *sk)
339{
340	struct unix_sock *u = unix_sk(sk);
341
342	skb_queue_purge(&sk->sk_receive_queue);
343
344	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
345	BUG_TRAP(sk_unhashed(sk));
346	BUG_TRAP(!sk->sk_socket);
347	if (!sock_flag(sk, SOCK_DEAD)) {
348		printk("Attempt to release alive unix socket: %p\n", sk);
349		return;
350	}
351
352	if (u->addr)
353		unix_release_addr(u->addr);
354
355	atomic_dec(&unix_nr_socks);
356#ifdef UNIX_REFCNT_DEBUG
357	printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
358#endif
359}
360
/*
 * Tear down one AF_UNIX sock: unhash it, orphan it, notify the stream
 * peer, flush queued skbs (recursively releasing embryos when we were
 * listening), release the FS dentry/mnt pin, and finally run the fd
 * garbage collector if descriptors are still in flight.  @embrion is
 * nonzero when @sk is an unaccepted embryo from a listen queue.
 */
static int unix_release_sock (struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_wlock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	dentry	     = u->dentry;
	u->dentry    = NULL;
	mnt	     = u->mnt;
	u->mnt	     = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_wunlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair=unix_peer(sk);

	if (skpair!=NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_wlock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			/* Unread data, or an unaccepted embryo, means the
			 * peer sees a reset rather than a clean EOF. */
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_wunlock(skpair);
			skpair->sk_state_change(skpair);
			read_lock(&skpair->sk_callback_lock);
			sk_wake_async(skpair,1,POLL_HUP);
			read_unlock(&skpair->sk_callback_lock);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		/* A listener's queue holds embryonic socks; release them too. */
		if (state==TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook	      */
		kfree_skb(skb);
	}

	if (dentry) {
		dput(dentry);
		mntput(mnt);
	}

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to use get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What the above comment does talk about? --ANK(980817)
	 */

	if (atomic_read(&unix_tot_inflight))
		unix_gc();		/* Garbage collect fds */

	return 0;
}
439
/*
 * listen(2): only stream/seqpacket sockets, and only when already
 * bound.  Also records our credentials so connect() can copy them to
 * the connecting peer.
 */
static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);

	err = -EOPNOTSUPP;
	if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
		goto out;			/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;			/* No listens on an unbound socket */
	unix_state_wlock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	/* A raised backlog may let connectors sleeping in
	 * unix_wait_for_peer() proceed. */
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	sk->sk_peercred.pid	= current->tgid;
	sk->sk_peercred.uid	= current->euid;
	sk->sk_peercred.gid	= current->egid;
	err = 0;

out_unlock:
	unix_state_wunlock(sk);
out:
	return err;
}
470
/* Forward declarations for the proto_ops method tables below. */
static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);
493
/* Method table for SOCK_STREAM sockets. */
static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
514
/* Method table for SOCK_DGRAM sockets: no accept/listen. */
static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		datagram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
535
/* Method table for SOCK_SEQPACKET: stream-style connect/accept,
 * datagram-style poll/recvmsg. */
static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		datagram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
556
/* Shared proto; obj_size makes sk_alloc() carve out a struct unix_sock. */
static struct proto unix_proto = {
	.name	  = "UNIX",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct unix_sock),
};
562
563/*
564 * AF_UNIX sockets do not interact with hardware, hence they
565 * dont trigger interrupts - so it's safe for them to have
566 * bh-unsafe locking for their sk_receive_queue.lock. Split off
567 * this special lock-class by reinitializing the spinlock key:
568 */
569static struct lock_class_key af_unix_sk_receive_queue_lock_key;
570
/*
 * Allocate and initialise one AF_UNIX sock.  @sock is NULL for the
 * embryonic sock created on behalf of a connecting peer in
 * unix_stream_connect().  Returns NULL when the global limit
 * (2 * max files) is reached or allocation fails.
 */
static struct sock * unix_create1(struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	if (atomic_read(&unix_nr_socks) >= 2*get_max_files())
		goto out;

	sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1);
	if (!sk)
		goto out;

	atomic_inc(&unix_nr_socks);

	sock_init_data(sock,sk);
	/* See the comment above af_unix_sk_receive_queue_lock_key for why
	 * the receive queue gets its own lockdep class. */
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= sysctl_unix_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->dentry = NULL;
	u->mnt	  = NULL;
	spin_lock_init(&u->lock);
	/* Embryos (sock == NULL) start with inflight artificially at -1;
	 * unix_stream_connect() undoes this once the embryo is queued to
	 * the listening socket. */
	atomic_set(&u->inflight, sock ? 0 : -1);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound, sk);
out:
	return sk;
}
603
/*
 * socket(2) handler for PF_UNIX: select the proto_ops matching the
 * requested type, then allocate the underlying sock.
 */
static int unix_create(struct socket *sock, int protocol)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type=SOCK_DGRAM;
		/* fall through - RAW is served by the DGRAM ops */
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(sock) ? 0 : -ENOMEM;
}
633
634static int unix_release(struct socket *sock)
635{
636	struct sock *sk = sock->sk;
637
638	if (!sk)
639		return 0;
640
641	sock->sk = NULL;
642
643	return unix_release_sock (sk, 0);
644}
645
/*
 * Bind an unbound socket to an autogenerated abstract name "\0XXXXX"
 * (five hex digits), retrying until an unused name is found.
 * Serialised against concurrent binds on the same socket by
 * u->readlock; returns 0 silently if already bound.
 */
static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address * addr;
	int err;

	mutex_lock(&u->readlock);

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/* Sanity yield. It is unusual case, but yet... */
		if (!(ordernum&0xFF))
			yield();
		goto retry;
	}
	/* Fold the socket type in, matching __unix_find_socket_byname's
	 * bucket selection (hash ^ type). */
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
694
/*
 * Resolve @sunname to its target sock.  Filesystem paths go through the
 * VFS (write permission required, inode must be a socket) and are
 * matched by inode; abstract names are matched by (name, len, type,
 * hash).  Returns a referenced sock and touches its atime, or NULL with
 * *error set.
 */
static struct sock *unix_find_other(struct sockaddr_un *sunname, int len,
				    int type, unsigned hash, int *error)
{
	struct sock *u;
	struct nameidata nd;
	int err = 0;

	if (sunname->sun_path[0]) {
		err = path_lookup(sunname->sun_path, LOOKUP_FOLLOW, &nd);
		if (err)
			goto fail;
		err = vfs_permission(&nd, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
			goto put_fail;
		u=unix_find_socket_byinode(nd.dentry->d_inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(nd.mnt, nd.dentry);

		path_release(&nd);

		/* Socket exists but is of the wrong type. */
		err=-EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u=unix_find_socket_byname(sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->dentry;
			if (dentry)
				touch_atime(unix_sk(u)->mnt, dentry);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_release(&nd);
fail:
	*error=err;
	return NULL;
}
746
747
/*
 * bind(2): attach a name to the socket.  A bare sun_family triggers
 * autobind.  Filesystem names create the socket inode with vfs_mknod()
 * and hash by inode number; abstract names are checked for uniqueness
 * and hashed by name.
 */
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
	struct dentry * dentry = NULL;
	struct nameidata nd;
	int err;
	unsigned hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	/* Just the family, no name: autogenerate an abstract one. */
	if (addr_len==sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	mutex_lock(&u->readlock);

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sunaddr->sun_path[0]) {
		unsigned int mode;
		err = 0;
		/*
		 * Get the parent directory, calculate the hash for last
		 * component.
		 */
		err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
		if (err)
			goto out_mknod_parent;

		dentry = lookup_create(&nd, 0);
		err = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out_mknod_unlock;

		/*
		 * All right, let's create it.
		 */
		mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
		err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
		if (err)
			goto out_mknod_dput;
		mutex_unlock(&nd.dentry->d_inode->i_mutex);
		dput(nd.dentry);
		nd.dentry = dentry;

		/* Filesystem sockets are never found by name; mark the
		 * address as non-abstract (hash == UNIX_HASH_SIZE). */
		addr->hash = UNIX_HASH_SIZE;
	}

	spin_lock(&unix_table_lock);

	if (!sunaddr->sun_path[0]) {
		/* Abstract names must be unique per (name, type). */
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	} else {
		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
		u->dentry = nd.dentry;
		u->mnt    = nd.mnt;
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;

out_mknod_dput:
	dput(dentry);
out_mknod_unlock:
	mutex_unlock(&nd.dentry->d_inode->i_mutex);
	path_release(&nd);
out_mknod_parent:
	if (err==-EEXIST)
		err=-EADDRINUSE;
	unix_release_addr(addr);
	goto out_up;
}
861
/*
 * connect(2) for datagram sockets: set, change, or (with AF_UNSPEC)
 * clear the default peer.  On a peer change the stale receive queue is
 * purged and the old peer notified via unix_dgram_disconnected().
 */
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr;
	struct sock *other;
	unsigned hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		/* SOCK_PASSCRED requires a bound sender so creds can be
		 * attributed; autobind if necessary. */
		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

		other=unix_find_other(sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_wlock(sk);

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_wlock(sk);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk)=other;
		unix_state_wunlock(sk);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk)=other;
		unix_state_wunlock(sk);
	}
 	return 0;

out_unlock:
	unix_state_wunlock(sk);
	sock_put(other);
out:
	return err;
}
926
/*
 * Sleep on @other's peer_wait until its receive queue drains below the
 * backlog, the timeout expires, or a signal arrives.  Called with
 * @other's state lock read-held; the lock is dropped before sleeping
 * and NOT reacquired.  Returns the remaining timeout.
 */
static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	/* Only sleep while the reason to wait still holds: peer alive,
	 * not shut down for reading, and its queue still over backlog. */
	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		(skb_queue_len(&other->sk_receive_queue) >
		 other->sk_max_ack_backlog);

	unix_state_runlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}
948
/*
 * connect(2) for stream/seqpacket sockets.  Pre-allocates an embryonic
 * sock plus a zero-length skb, finds the listener, blocks while its
 * backlog is full (unless non-blocking), then queues the embryo on the
 * listener's receive queue for accept() to pick up.
 */
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	/* SOCK_PASSCRED needs a bound sender; autobind if required. */
	if (test_bit(SOCK_PASSCRED, &sock->flags)
		&& !u->addr && (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we will make it after state is locked,
	   we will have to recheck all again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_rlock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_runlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;

	/* Backlog full: block (unless non-blocking), then retry the
	 * whole lookup - the listener may have changed meanwhile. */
	if (skb_queue_len(&other->sk_receive_queue) >
	    other->sk_max_ack_backlog) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
        }

	/* Latch our state.

	   It is tricky place. We need to grab write lock and cannot
	   drop lock on peer. It is dangerous because deadlock is
	   possible. Connect to self case and simultaneous
	   attempt to connect are eliminated by checking socket
	   state. other is TCP_LISTEN, if sk is TCP_LISTEN we
	   check this before attempt to grab lock.

	   Well, and we have to recheck the state after socket locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_wlock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_wunlock(sk);
		unix_state_runlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sock, other->sk_socket, newsk);
	if (err) {
		unix_state_wunlock(sk);
		goto out_unlock;
	}

	/* The way is open! Fastly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	newsk->sk_peercred.pid	= current->tgid;
	newsk->sk_peercred.uid	= current->euid;
	newsk->sk_peercred.gid	= current->egid;
	newu = unix_sk(newsk);
	newsk->sk_sleep		= &newu->peer_wait;
	otheru = unix_sk(other);

	/* copy address information from listening to new sock*/
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->dentry) {
		newu->dentry	= dget(otheru->dentry);
		newu->mnt	= mntget(otheru->mnt);
	}

	/* Set credentials */
	sk->sk_peercred = other->sk_peercred;

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_wunlock(sk);

	/* take ten and and send info to listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	/* Undo artificially decreased inflight after embrion
	 * is installed to listening socket. */
	atomic_inc(&newu->inflight);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_runlock(other);
	other->sk_data_ready(other, 0);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_runlock(other);

out:
	if (skb)
		kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}
1127
/*
 * socketpair(2): join two freshly created sockets back to back and
 * stamp both with the caller's credentials.  Only non-datagram pairs
 * are moved to the ESTABLISHED/CONNECTED states.
 */
static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska=socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska)=skb;
	unix_peer(skb)=ska;
	ska->sk_peercred.pid = skb->sk_peercred.pid = current->tgid;
	ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
	ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}
1149
/*
 * Accept a connection on a listening stream/seqpacket socket.  The
 * listener's receive queue holds one "embryo" skb per pending connect;
 * the new sock is reachable through skb->sk.
 */
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	/* Dequeue one pending-connection skb; blocks unless O_NONBLOCK. */
	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	/* A backlog slot freed up: wake connectors waiting in connect(2). */
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_wlock(tsk);
	newsock->state = SS_CONNECTED;
	sock_graft(tsk, newsock);
	unix_state_wunlock(tsk);
	return 0;

out:
	return err;
}
1191
1192
1193static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1194{
1195	struct sock *sk = sock->sk;
1196	struct unix_sock *u;
1197	struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1198	int err = 0;
1199
1200	if (peer) {
1201		sk = unix_peer_get(sk);
1202
1203		err = -ENOTCONN;
1204		if (!sk)
1205			goto out;
1206		err = 0;
1207	} else {
1208		sock_hold(sk);
1209	}
1210
1211	u = unix_sk(sk);
1212	unix_state_rlock(sk);
1213	if (!u->addr) {
1214		sunaddr->sun_family = AF_UNIX;
1215		sunaddr->sun_path[0] = 0;
1216		*uaddr_len = sizeof(short);
1217	} else {
1218		struct unix_address *addr = u->addr;
1219
1220		*uaddr_len = addr->len;
1221		memcpy(sunaddr, addr->name, *uaddr_len);
1222	}
1223	unix_state_runlock(sk);
1224	sock_put(sk);
1225out:
1226	return err;
1227}
1228
/*
 * Move the SCM_RIGHTS fd array from @skb back into @scm (so the
 * receiver can install the descriptors) and drop the garbage
 * collector's in-flight accounting for each passed file.
 */
static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	/* Ownership of the fds leaves the skb: restore the plain
	 * write-space destructor. */
	skb->destructor = sock_wfree;
	UNIXCB(skb).fp = NULL;

	for (i=scm->fp->count-1; i>=0; i--)
		unix_notinflight(scm->fp->fp[i]);
}
1240
/*
 * skb destructor for messages carrying SCM_RIGHTS descriptors:
 * release the attached files, then do normal write-space accounting.
 */
static void unix_destruct_fds(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}
1252
1253static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1254{
1255	int i;
1256	for (i=scm->fp->count-1; i>=0; i--)
1257		unix_inflight(scm->fp->fp[i]);
1258	UNIXCB(skb).fp = scm->fp;
1259	skb->destructor = unix_destruct_fds;
1260	scm->fp = NULL;
1261}
1262
1263/*
1264 *	Send AF_UNIX data.
1265 */
1266
/*
 * Send one datagram.  The destination is either an explicit address in
 * msg_name or the connected peer.  Blocks (subject to the send timeout)
 * when the receiver's backlog is full, and restarts the lookup if the
 * receiver died under us.  Also used for SOCK_SEQPACKET via
 * unix_seqpacket_sendmsg().
 */
static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr=msg->msg_name;
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie tmp_scm;

	/* Collect control-message state (creds, fds) before touching data. */
	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		/* Explicit destination: validate and hash the address. */
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		/* No address: must be connected. */
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	/* Credential passing requires us to have an address of our own. */
	if (test_bit(SOCK_PASSCRED, &sock->flags)
		&& !u->addr && (err = unix_autobind(sock)) != 0)
		goto out;

	/* Cap message size below the send buffer (32 bytes of slack,
	 * presumably for overhead — see git history). */
	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
	if (skb==NULL)
		goto out;

	/* Stash sender credentials, passed fds and security data in the skb. */
	memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
	if (siocb->scm->fp)
		unix_attach_fds(siocb->scm, skb);
	unix_get_secdata(siocb->scm, skb);

	skb->h.raw = skb->data;
	err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		/* We lost the receiver (SOCK_DEAD path below); only an
		 * explicitly addressed send can retry the lookup. */
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other==NULL)
			goto out_free;
	}

	unix_state_rlock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should
		 *	datagram error
		 */
		unix_state_runlock(other);
		sock_put(other);

		/* If the dead sock was our connected peer, disconnect and
		 * fail with ECONNREFUSED; otherwise retry the lookup. */
		err = 0;
		unix_state_wlock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk)=NULL;
			unix_state_wunlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_wunlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	/* Receiver's backlog is full (and we are not its peer): wait for
	 * space, or fail with EAGAIN on a non-blocking send. */
	if (unix_peer(other) != sk &&
	    (skb_queue_len(&other->sk_receive_queue) >
	     other->sk_max_ack_backlog)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	skb_queue_tail(&other->sk_receive_queue, skb);
	unix_state_runlock(other);
	other->sk_data_ready(other, len);
	sock_put(other);
	scm_destroy(siocb->scm);
	return len;

out_unlock:
	unix_state_runlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(siocb->scm);
	return err;
}
1417
1418
/*
 * Send on a connected stream socket.  Large writes are split into
 * skb-sized chunks; each chunk carries the sender's credentials and any
 * passed fds.  Returns the number of bytes sent, or a negative error
 * only if nothing was sent at all.
 */
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	struct sockaddr_un *sunaddr=msg->msg_name;
	int err,size;
	struct sk_buff *skb;
	int sent=0;
	struct scm_cookie tmp_scm;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		/* Stream sockets reject explicit destinations. */
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while(sent < len)
	{
		/*
		 *	Optimisation for the fact that under 0.01% of X
		 *	messages typically need breaking up.
		 */

		size = len-sent;

		/* Keep two messages in the pipe so it schedules better */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		/*
		 *	Grab a buffer
		 */

		skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);

		if (skb==NULL)
			goto out_err;

		/*
		 *	If you pass two values to the sock_alloc_send_skb
		 *	it tries to grab the large buffer with GFP_NOFS
		 *	(which can fail easily), and if it fails grab the
		 *	fallback size buffer which is under a page and will
		 *	succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));

		/* Each chunk carries sender credentials (and fds once). */
		memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
		if (siocb->scm->fp)
			unix_attach_fds(siocb->scm, skb);

		if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_rlock(other);

		/* Receiver gone or closed for reading: SIGPIPE/EPIPE. */
		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		skb_queue_tail(&other->sk_receive_queue, skb);
		unix_state_runlock(other);
		other->sk_data_ready(other, size);
		sent+=size;
	}

	scm_destroy(siocb->scm);
	siocb->scm = NULL;

	return sent;

pipe_err_free:
	unix_state_runlock(other);
	kfree_skb(skb);
pipe_err:
	/* Partial writes still return the byte count; only a completely
	 * failed write raises SIGPIPE / -EPIPE. */
	if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE,current,0);
	err = -EPIPE;
out_err:
	scm_destroy(siocb->scm);
	siocb->scm = NULL;
	return sent ? : err;
}
1527
1528static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1529				  struct msghdr *msg, size_t len)
1530{
1531	int err;
1532	struct sock *sk = sock->sk;
1533
1534	err = sock_error(sk);
1535	if (err)
1536		return err;
1537
1538	if (sk->sk_state != TCP_ESTABLISHED)
1539		return -ENOTCONN;
1540
1541	if (msg->msg_namelen)
1542		msg->msg_namelen = 0;
1543
1544	return unix_dgram_sendmsg(kiocb, sock, msg, len);
1545}
1546
1547static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1548{
1549	struct unix_sock *u = unix_sk(sk);
1550
1551	msg->msg_namelen = 0;
1552	if (u->addr) {
1553		msg->msg_namelen = u->addr->len;
1554		memcpy(msg->msg_name, u->addr->name, u->addr->len);
1555	}
1556}
1557
/*
 * Receive one datagram.  Serialized against other readers by
 * u->readlock so messages are delivered whole and in order.  Passed
 * fds are consumed on a normal read but duplicated on MSG_PEEK.
 */
static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	mutex_lock(&u->readlock);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out_unlock;

	/* Queue space freed: wake senders blocked on our backlog. */
	wake_up_interruptible(&u->peer_wait);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	/* Datagram semantics: excess bytes are dropped and flagged. */
	if (size > skb->len)
		size = skb->len;
	else if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	/* Hand sender credentials and security data to the receiver. */
	siocb->scm->creds = *UNIXCREDS(skb);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK))
	{
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);
	}
	else
	{
		/* It is questionable: on PEEK we could:
		   - do not return fds - good, but too simple 8)
		   - return fds, and do not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

	           POSIX 1003.1g does not actually define this clearly
	           at all. POSIX 1003.1g doesn't define a lot of things
	           clearly however!

		*/
		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk,skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}
1636
1637/*
1638 *	Sleep until data has arrive. But check for races..
1639 */
1640
/*
 * Sleep until data arrives on @sk, returning the remaining timeout.
 * The state lock is held across the queue/error checks so a wakeup
 * between the check and the sleep cannot be lost: prepare_to_wait()
 * runs before the conditions are tested.
 */
static long unix_stream_data_wait(struct sock * sk, long timeo)
{
	DEFINE_WAIT(wait);

	unix_state_rlock(sk);

	for (;;) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		/* Stop on data, error, shutdown, signal or timeout expiry. */
		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_runlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_rlock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk->sk_sleep, &wait);
	unix_state_runlock(sk);
	return timeo;
}
1668
1669
1670
/*
 * Receive from a connected stream socket.  Consumes skbs from the
 * receive queue, gluing consecutive chunks together as long as they
 * came from the same writer (same credentials).  Partially consumed
 * skbs are requeued at the head.  Stops early when fds arrive so the
 * ancillary data stays attached to the right byte range.
 */
static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr=msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	/* target = minimum byte count before we may return (MSG_WAITALL). */
	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	msg->msg_namelen = 0;

	/* Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_tomsg
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	mutex_lock(&u->readlock);

	do
	{
		int chunk;
		struct sk_buff *skb;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb==NULL)
		{
			/* Queue empty: return what we have, or wait. */
			if (copied >= target)
				break;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			if ((err = sock_error(sk)) != 0)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			err = -EAGAIN;
			if (!timeo)
				break;
			/* Drop readlock while sleeping so writers of the
			 * ancillary state are not blocked. */
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			mutex_lock(&u->readlock);
			continue;
		}

		if (check_creds) {
			/* Never glue messages from different writers */
			if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
		} else {
			/* Copy credentials */
			siocb->scm->creds = *UNIXCREDS(skb);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr)
		{
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			/* Copy to userspace failed: put the skb back so no
			 * data is lost; report EFAULT only if nothing was
			 * copied yet. */
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK))
		{
			skb_pull(skb, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			/* put the skb back if we didn't use it up.. */
			if (skb->len)
			{
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}

			kfree_skb(skb);

			/* Stop once fds were collected so they map to the
			 * bytes read so far. */
			if (siocb->scm->fp)
				break;
		}
		else
		{
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}
1811
/*
 * shutdown(2): close one or both directions.  For connection-oriented
 * sockets the mirrored direction is also shut down on the peer and the
 * peer is notified/woken.
 */
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	/* Map SHUT_RD/SHUT_WR/SHUT_RDWR (0/1/2) onto the shutdown bits. */
	mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);

	if (mode) {
		unix_state_wlock(sk);
		sk->sk_shutdown |= mode;
		other=unix_peer(sk);
		/* Pin the peer before dropping our lock. */
		if (other)
			sock_hold(other);
		unix_state_wunlock(sk);
		sk->sk_state_change(sk);

		if (other &&
			(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

			int peer_mode = 0;

			/* Our RCV shutdown is the peer's SEND shutdown and
			 * vice versa. */
			if (mode&RCV_SHUTDOWN)
				peer_mode |= SEND_SHUTDOWN;
			if (mode&SEND_SHUTDOWN)
				peer_mode |= RCV_SHUTDOWN;
			unix_state_wlock(other);
			other->sk_shutdown |= peer_mode;
			unix_state_wunlock(other);
			other->sk_state_change(other);
			read_lock(&other->sk_callback_lock);
			if (peer_mode == SHUTDOWN_MASK)
				sk_wake_async(other,1,POLL_HUP);
			else if (peer_mode & RCV_SHUTDOWN)
				sk_wake_async(other,1,POLL_IN);
			read_unlock(&other->sk_callback_lock);
		}
		if (other)
			sock_put(other);
	}
	return 0;
}
1853
1854static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1855{
1856	struct sock *sk = sock->sk;
1857	long amount=0;
1858	int err;
1859
1860	switch(cmd)
1861	{
1862		case SIOCOUTQ:
1863			amount = atomic_read(&sk->sk_wmem_alloc);
1864			err = put_user(amount, (int __user *)arg);
1865			break;
1866		case SIOCINQ:
1867		{
1868			struct sk_buff *skb;
1869
1870			if (sk->sk_state == TCP_LISTEN) {
1871				err = -EINVAL;
1872				break;
1873			}
1874
1875			spin_lock(&sk->sk_receive_queue.lock);
1876			if (sk->sk_type == SOCK_STREAM ||
1877			    sk->sk_type == SOCK_SEQPACKET) {
1878				skb_queue_walk(&sk->sk_receive_queue, skb)
1879					amount += skb->len;
1880			} else {
1881				skb = skb_peek(&sk->sk_receive_queue);
1882				if (skb)
1883					amount=skb->len;
1884			}
1885			spin_unlock(&sk->sk_receive_queue.lock);
1886			err = put_user(amount, (int __user *)arg);
1887			break;
1888		}
1889
1890		default:
1891			err = -ENOIOCTLCMD;
1892			break;
1893	}
1894	return err;
1895}
1896
1897static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait)
1898{
1899	struct sock *sk = sock->sk;
1900	unsigned int mask;
1901
1902	poll_wait(file, sk->sk_sleep, wait);
1903	mask = 0;
1904
1905	/* exceptional events? */
1906	if (sk->sk_err)
1907		mask |= POLLERR;
1908	if (sk->sk_shutdown == SHUTDOWN_MASK)
1909		mask |= POLLHUP;
1910	if (sk->sk_shutdown & RCV_SHUTDOWN)
1911		mask |= POLLRDHUP;
1912
1913	/* readable? */
1914	if (!skb_queue_empty(&sk->sk_receive_queue) ||
1915	    (sk->sk_shutdown & RCV_SHUTDOWN))
1916		mask |= POLLIN | POLLRDNORM;
1917
1918	/* Connection-based need to check for termination and startup */
1919	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE)
1920		mask |= POLLHUP;
1921
1922	/*
1923	 * we set writable also when the other side has shut down the
1924	 * connection. This prevents stuck sockets.
1925	 */
1926	if (unix_writable(sk))
1927		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1928
1929	return mask;
1930}
1931
1932
1933#ifdef CONFIG_PROC_FS
1934static struct sock *unix_seq_idx(int *iter, loff_t pos)
1935{
1936	loff_t off = 0;
1937	struct sock *s;
1938
1939	for (s = first_unix_socket(iter); s; s = next_unix_socket(iter, s)) {
1940		if (off == pos)
1941			return s;
1942		++off;
1943	}
1944	return NULL;
1945}
1946
1947
/* seq_file start: hold the table lock for the whole dump; position 0
 * yields the magic header token (void *)1, consumed by unix_seq_show(). */
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq->private, *pos - 1) : ((void *) 1);
}
1953
1954static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1955{
1956	++*pos;
1957
1958	if (v == (void *)1)
1959		return first_unix_socket(seq->private);
1960	return next_unix_socket(seq->private, v);
1961}
1962
/* seq_file stop: release the lock taken in unix_seq_start(). */
static void unix_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock(&unix_table_lock);
}
1967
/*
 * Emit one line of /proc/net/unix: the column header for the magic
 * token, otherwise the socket's refcount, flags, type, state, inode
 * and bound path ('@'-prefixed for abstract addresses).
 */
static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == (void *)1)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_rlock(s);

		seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			/* Path length excludes the sun_family header... */
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;	/* ...and the trailing NUL for filesystem names */
			else {
				/* Abstract names start with a NUL byte,
				 * printed as '@'. */
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_runlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
2011
/* Iterator callbacks for /proc/net/unix. */
static struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};
2018
2019
2020static int unix_seq_open(struct inode *inode, struct file *file)
2021{
2022	struct seq_file *seq;
2023	int rc = -ENOMEM;
2024	int *iter = kmalloc(sizeof(int), GFP_KERNEL);
2025
2026	if (!iter)
2027		goto out;
2028
2029	rc = seq_open(file, &unix_seq_ops);
2030	if (rc)
2031		goto out_kfree;
2032
2033	seq	     = file->private_data;
2034	seq->private = iter;
2035	*iter = 0;
2036out:
2037	return rc;
2038out_kfree:
2039	kfree(iter);
2040	goto out;
2041}
2042
/* File operations for /proc/net/unix; release also frees the iterator
 * allocated in unix_seq_open(). */
static struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
2050
2051#endif
2052
/* Registration record for the PF_UNIX protocol family. */
static struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};
2058
2059static int __init af_unix_init(void)
2060{
2061	int rc = -1;
2062	struct sk_buff *dummy_skb;
2063
2064	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
2065
2066	rc = proto_register(&unix_proto, 1);
2067        if (rc != 0) {
2068                printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2069		       __FUNCTION__);
2070		goto out;
2071	}
2072
2073	sock_register(&unix_family_ops);
2074#ifdef CONFIG_PROC_FS
2075	proc_net_fops_create("unix", 0, &unix_seq_fops);
2076#endif
2077	unix_sysctl_register();
2078out:
2079	return rc;
2080}
2081
/* Module exit: undo af_unix_init() — unregister the family first so no
 * new sockets can be created, then the proc file, sysctls and proto. */
static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	unix_sysctl_unregister();
	proc_net_remove("unix");
	proto_unregister(&unix_proto);
}
2089
/* Hook init/exit into module load/unload. */
module_init(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);
2095