mqueue.c revision 04db0dde0ee1c29110642dff57fba9e438eb805c
/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 * 			    Manfred Spraul	    (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

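/*
 * Lifecycle of an ext_wait_queue entry during a pipelined hand-off (see
 * pipelined_send()/pipelined_receive() below): STATE_NONE while the task
 * sits on the wait list, STATE_PENDING while the waker is still writing
 * to the entry, STATE_READY once the sleeper may use it without taking
 * the queue spinlock again.
 */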
#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

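	/*
	 * Array of up to mq_maxmsg message pointers, kept sorted in
	 * ascending priority order by msg_insert()/msg_get().
	 */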
	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid* notify_owner;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header * mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

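/* Same as __get_ns_from_inode(), but takes and drops mq_lock itself. */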
static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, int mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;
			struct task_struct *p = current;
			unsigned long mq_bytes, mq_msg_tblsz;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->notify_owner = NULL;
			info->qsize = 0;
			info->user = NULL;	/* set when all is ok */
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
			info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
			if (attr) {
				info->attr.mq_maxmsg = attr->mq_maxmsg;
				info->attr.mq_msgsize = attr->mq_msgsize;
			}
			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
			if (!info->messages)
				goto out_inode;

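			/*
			 * Charge the worst-case footprint of the queue to the
			 * creating user: the pointer table plus mq_maxmsg
			 * messages of up to mq_msgsize bytes each, checked
			 * against RLIMIT_MSGQUEUE below.
			 */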
			mq_bytes = (mq_msg_tblsz +
				(info->attr.mq_maxmsg * info->attr.mq_msgsize));

			spin_lock(&mq_lock);
			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
			    u->mq_bytes + mq_bytes >
			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
				spin_unlock(&mq_lock);
				kfree(info->messages);
				goto out_inode;
			}
			u->mq_bytes += mq_bytes;
			spin_unlock(&mq_lock);

			/* all is ok */
			info->user = get_uid(u);
		} else if (S_ISDIR(mode)) {
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;
out_inode:
	make_bad_inode(inode);
	iput(inode);
	return NULL;
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;
	int error = 0;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO,
				NULL);
	if (!inode) {
		error = -ENOMEM;
		goto out;
	}

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		error = -ENOMEM;
	}

out:
	return error;
}

static int mqueue_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data, struct vfsmount *mnt)
{
	if (!(flags & MS_KERNMOUNT))
		data = current->nsproxy->ipc_ns;
	return get_sb_ns(fs_type, flags, data, mqueue_fill_super, mnt);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_delete_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;
	struct ipc_namespace *ipc_ns;

	if (S_ISDIR(inode->i_mode)) {
		clear_inode(inode);
		return;
	}
	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	clear_inode(inode);

	/* Total amount of bytes accounted for the mqueue */
	mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *)
	    + info->attr.mq_msgsize);
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				int mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}
	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
			!capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (!inode) {
		error = -ENOMEM;
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine implements read() on a queue file.  To avoid reinventing
 * mq_receive() here, it exposes only the queue size and notification
 * info: the only values that are interesting from the user's point of
 * view and aren't accessible through the standard routines.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep. The caller must hold the queue lock;
 * it is dropped before returning.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
			long timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_timeout(timeout);

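		/*
		 * A waker may have marked us STATE_PENDING and still be
		 * completing the hand-off; spin until it flips us to
		 * STATE_READY.
		 */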
		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

/* Auxiliary functions to manipulate the message array */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}

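/*
 * Remove and return the message at the end of the array: the highest
 * priority one, and the oldest among messages of equal priority.
 */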
static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}

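/*
 * The last byte of the notification cookie tells user space whether the
 * notification fired (NOTIFY_WOKENUP) or was removed (NOTIFY_REMOVED).
 */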
static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function exists only to split up an overly long
 * sys_mq_timedsend().
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* The notification is sent when a process has registered for it,
	 * no process is waiting synchronously for a message, AND the queue
	 * just changed from empty to non-empty. At this point we know that
	 * nobody is waiting synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = current_uid();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		info->notify_owner = NULL;
	}
	wake_up(&info->wait_q);
}

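/*
 * Convert an absolute timeout into a relative number of jiffies.
 * Returns MAX_SCHEDULE_TIMEOUT when no timeout was supplied, 0 when the
 * absolute time already lies in the past, and -EINVAL for malformed
 * timespec values.
 */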
static long prepare_timeout(struct timespec *p)
{
	struct timespec nowts;
	long timeout;

	if (p) {
		if (unlikely(p->tv_nsec < 0 || p->tv_sec < 0
			|| p->tv_nsec >= NSEC_PER_SEC))
			return -EINVAL;
		nowts = CURRENT_TIME;
		/* first subtract as jiffies can't be too big */
		p->tv_sec -= nowts.tv_sec;
		if (p->tv_nsec < nowts.tv_nsec) {
			p->tv_nsec += NSEC_PER_SEC;
			p->tv_sec--;
		}
		p->tv_nsec -= nowts.tv_nsec;
		if (p->tv_sec < 0)
			return 0;

		timeout = timespec_to_jiffies(p) + 1;
	} else
		return MAX_SCHEDULE_TIMEOUT;

	return timeout;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	info->notify_owner = NULL;
}

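/*
 * Validate attributes passed to mq_open(): mq_maxmsg and mq_msgsize must
 * be positive and within the per-namespace limits (a CAP_SYS_RESOURCE
 * caller is only bound by HARD_MSGMAX), and the worst-case memory
 * footprint must not overflow an unsigned long. Returns 1 if the
 * attributes are usable, 0 otherwise.
 */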
static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return 0;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX)
			return 0;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return 0;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return 0;
	if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize
	    + sizeof (struct msg_msg *))) <
	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
		return 0;
	return 1;
}

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
			struct dentry *dentry, int oflag, mode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	struct file *result;
	int ret;

	if (attr) {
		ret = -EINVAL;
		if (!mq_attr_ok(ipc_ns, attr))
			goto out;
		/* store for use during create */
		dentry->d_fsdata = attr;
	}

	mode &= ~current_umask();
	ret = mnt_want_write(ipc_ns->mq_mnt);
	if (ret)
		goto out;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out_drop_write;

	result = dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
	/*
	 * dentry_open() took a persistent mnt_want_write(),
	 * so we can now drop this one.
	 */
	mnt_drop_write(ipc_ns->mq_mnt);
	return result;

out_drop_write:
	mnt_drop_write(ipc_ns->mq_mnt);
out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}

/* Opens existing queue */
static struct file *do_open(struct ipc_namespace *ipc_ns,
				struct dentry *dentry, int oflag)
{
	int ret;
	const struct cred *cred = current_cred();

	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		ret = -EINVAL;
		goto err;
	}

	if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
		ret = -EACCES;
		goto err;
	}

	return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);

err:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	mutex_lock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_putfd;
	}
	mntget(ipc_ns->mq_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			audit_inode(name, dentry);
			error = -EEXIST;
			if (oflag & O_EXCL)
				goto out;
			filp = do_open(ipc_ns, dentry, oflag);
		} else {
			filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
						dentry, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		error = -ENOENT;
		if (!dentry->d_inode)
			goto out;
		audit_inode(name, dentry);
		filp = do_open(ipc_ns, dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
out_putfd:
	put_unused_fd(fd);
	fd = error;
out_upsem:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		atomic_inc(&inode->i_count);
	err = mnt_want_write(ipc_ns->mq_mnt);
	if (err)
		goto out_err;
	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
	mnt_drop_write(ipc_ns->mq_mnt);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting the message into the queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
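	/* make sure ->msg and the list removal are visible before STATE_READY */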
	smp_wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and insert it into the queue (we have one free slot for sure). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	struct timespec ts, *p = NULL;
	long timeout;
	int ret;

	if (u_abs_timeout) {
		if (copy_from_user(&ts, u_abs_timeout,
					sizeof(struct timespec)))
			return -EFAULT;
		p = &ts;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, p);
	timeout = prepare_timeout(p);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_WRITE)))
		goto out_fput;

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
		if (ret < 0)
			free_msg(msg_ptr);
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
		ret = 0;
	}
out_fput:
	fput(filp);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	long timeout;
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	struct timespec ts, *p = NULL;

	if (u_abs_timeout) {
		if (copy_from_user(&ts, u_abs_timeout,
					sizeof(struct timespec)))
			return -EFAULT;
		p = &ts;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, p);
	timeout = prepare_timeout(p);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_READ)))
		goto out_fput;

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
			msg_ptr = NULL;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
			msg_ptr = NULL;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}

/*
 * Note: if the caller asks to deregister (by passing a NULL pointer) but
 * is not the current owner of the notification, the request is silently
 * ignored. POSIX does not explicitly define this case.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

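			/*
			 * SIGEV_THREAD is implemented in cooperation with the
			 * C library: sigev_signo carries a netlink socket fd
			 * and sigev_value.sival_ptr a user-space cookie that
			 * is sent back on that socket when the notification
			 * fires.
			 */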
			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			ret = -ENOMEM;
			if (!nc)
				goto out;
			ret = -EFAULT;
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			ret = -EBADF;
			if (!filp)
				goto out;
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&filp->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;
		spin_unlock(&filp->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.statfs = simple_statfs,
	.delete_inode = mqueue_delete_inode,
	.drop_inode = generic_delete_inode,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.get_sb = mqueue_get_sb,
	.kill_sb = kill_litter_super,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	mntput(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
	if (IS_ERR(init_ipc_ns.mq_mnt)) {
		error = PTR_ERR(init_ipc_ns.mq_mnt);
		goto out_filesystem;
	}

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);