/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *			    Manfred Spraul	    (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate the per-priority message lists */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
		info->qsize += sizeof(*leaf);
	}
	leaf->priority = msg->m_type;
	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		/*
		 * During insert, low priorities go to the left and high to the
		 * right.  On receive, we want the highest priorities first, so
		 * walk all the way to the right.
		 */
		p = &(*p)->rb_right;
	}
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		rb_erase(&leaf->rb_node, &info->msg_tree);
		if (info->node_cache) {
			info->qsize -= sizeof(*leaf);
			kfree(leaf);
		} else {
			info->node_cache = leaf;
		}
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			rb_erase(&leaf->rb_node, &info->msg_tree);
			if (info->node_cache) {
				info->qsize -= sizeof(*leaf);
				kfree(leaf);
			} else {
				info->node_cache = leaf;
			}
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}
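
/*
 * Editor's illustration (comment only, nothing here is compiled): suppose
 * three messages arrive with priorities 5, 1, and 5 again.  msg_insert()
 * creates one tree node per distinct priority and appends equal-priority
 * messages in FIFO order:
 *
 *	[5] msg_list: msgA -> msgC
 *	 |
 *	[1] msg_list: msgB		(left child of [5])
 *
 * msg_get() walks to the rightmost node, here [5], so msgA (highest
 * priority, sent first) is delivered before msgC, and both before msgB.
 */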

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases the queued messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}
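
/*
 * Worked example of the accounting above (editor's note, comment only):
 * assuming the common defaults of mq_maxmsg = 10 and mq_msgsize = 8192
 * (the real values come from the per-namespace sysctls), the charge
 * against RLIMIT_MSGQUEUE is
 *
 *	mq_treesize = 10 * sizeof(struct msg_msg) +
 *		      min(10, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node);
 *	mq_bytes    = mq_treesize + 10 * 8192;
 *
 * i.e. a bit over 80 KiB of pinned memory per queue.
 */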

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static struct dentry *mqueue_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data)
{
	if (!(flags & MS_KERNMOUNT)) {
		struct ipc_namespace *ns = current->nsproxy->ipc_ns;
		/* Don't allow mounting unless the caller has CAP_SYS_ADMIN
		 * over the ipc namespace.
		 */
		if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
			return ERR_PTR(-EPERM);

		data = ns;
	}
	return mount_ns(fs_type, flags, data, mqueue_fill_super);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, mqueue_i_callback);
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes, mq_treesize;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg;

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		free_msg(msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	/* Total amount of bytes accounted for the mqueue */
	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);

	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
				  info->attr.mq_msgsize);

	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				umode_t mode, bool excl)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine is called when the queue file is read.  To avoid having to
 * reimplement some flavour of mq_receive() here, reads return only the
 * queue size and notification info: the only values that are interesting
 * from the user's point of view and that aren't accessible through the
 * standard routines.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = CURRENT_TIME;
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep.  The caller must hold the queue lock;
 * it is released before returning.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This helper was split out of sys_mq_timedsend() to keep that function
 * from growing too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * The notification is delivered when a process has registered for
	 * it, nobody is waiting synchronously for a message, and the state
	 * of the queue has just changed from empty to non-empty.  Here we
	 * can be sure that no one is waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   ktime_t *expires, struct timespec *ts)
{
	if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
		return -EFAULT;
	if (!timespec_valid(ts))
		return -EINVAL;

	*expires = timespec_to_ktime(*ts);
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	int mq_treesize;
	unsigned long total_size;

	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return -EINVAL;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX ||
		    attr->mq_msgsize > HARD_MSGSIZEMAX)
			return -EINVAL;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return -EINVAL;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return -EOVERFLOW;
	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);
	total_size = attr->mq_maxmsg * attr->mq_msgsize;
	if (total_size + mq_treesize < total_size)
		return -EOVERFLOW;
	return 0;
}

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
			struct path *path, int oflag, umode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	int ret;

	if (attr) {
		ret = mq_attr_ok(ipc_ns, attr);
		if (ret)
			return ERR_PTR(ret);
		/* store for use during create */
		path->dentry->d_fsdata = attr;
	} else {
		struct mq_attr def_attr;

		def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					 ipc_ns->mq_msg_default);
		def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					  ipc_ns->mq_msgsize_default);
		ret = mq_attr_ok(ipc_ns, &def_attr);
		if (ret)
			return ERR_PTR(ret);
	}

	mode &= ~current_umask();
	ret = vfs_create(dir, path->dentry, mode, true);
	path->dentry->d_fsdata = NULL;
	if (ret)
		return ERR_PTR(ret);
	return dentry_open(path, oflag, cred);
}

/* Opens existing queue */
static struct file *do_open(struct path *path, int oflag)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return ERR_PTR(-EINVAL);
	acc = oflag2acc[oflag & O_ACCMODE];
	if (inode_permission(path->dentry->d_inode, acc))
		return ERR_PTR(-EACCES);
	return dentry_open(path, oflag, current_cred());
}
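
/*
 * Userspace view of do_create()/do_open() (editor's sketch, assuming the
 * standard librt wrappers; not part of this file):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t mq = mq_open("/myqueue", O_CREAT | O_EXCL | O_RDWR,
 *			   0600, &attr);
 *	if (mq == (mqd_t)-1)
 *		perror("mq_open");
 *
 * The returned descriptor is an ordinary file descriptor referring to an
 * inode on this filesystem, installed O_CLOEXEC by sys_mq_open() below.
 */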

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct path path;
	struct file *filp;
	struct filename *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	int ro;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	error = 0;
	mutex_lock(&root->d_inode->i_mutex);
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);

	if (oflag & O_CREAT) {
		if (path.dentry->d_inode) {	/* entry already exists */
			audit_inode(name, path.dentry, 0);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(&path, oflag);
		} else {
			if (ro) {
				error = ro;
				goto out;
			}
			audit_inode_parent_hidden(name, root);
			filp = do_create(ipc_ns, root->d_inode,
						&path, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		if (!path.dentry->d_inode) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, path.dentry, 0);
		filp = do_open(&path, oflag);
	}

	if (!IS_ERR(filp))
		fd_install(fd, filp);
	else
		error = PTR_ERR(filp);
out:
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	mutex_unlock(&root->d_inode->i_mutex);
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = dentry->d_inode;
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(dentry->d_parent->d_inode, dentry, NULL);
	}
	dput(dentry);

out_unlock:
	mutex_unlock(&mnt->mnt_root->d_inode->i_mutex);
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message tree. If there is a waiting receiver, then it
 * bypasses the message tree and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */
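
/*
 * Editor's sketch of the lockless handoff (comment only).  Each waiter
 * moves through three states:
 *
 *	STATE_NONE    - queued on e_wait_q[], sleeping in wq_sleep()
 *	STATE_PENDING - the peer is handing the message over; the waiter
 *			spins in cpu_relax() until the handoff is complete
 *	STATE_READY   - handoff done; the waiter may return without
 *			retaking info->lock
 *
 * The smp_wmb() between storing the message and setting STATE_READY pairs
 * with the waiter's spin on STATE_PENDING in wq_sleep().
 */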

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting the message into the queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and insert it into the queue (a free slot is guaranteed). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, f.file->f_path.dentry, 0);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		info->qsize += sizeof(*new_leaf);
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
	}
out_unlock:
	spin_unlock(&info->lock);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}
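
/*
 * Userspace view (editor's sketch, assuming the standard librt wrappers;
 * not part of this file):
 *
 *	struct timespec abs = { .tv_sec = time(NULL) + 5 };
 *	if (mq_timedsend(mq, buf, len, 3, &abs) == -1)
 *		perror("mq_timedsend");
 *
 * On a full queue this blocks for about five seconds and then fails with
 * ETIMEDOUT; the timeout is an absolute CLOCK_REALTIME value, matching
 * the schedule_hrtimeout_range_clock() call in wq_sleep() above.
 */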

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, f.file->f_path.dentry, 0);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		info->qsize += sizeof(*new_leaf);
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}
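
/*
 * Userspace view (editor's sketch, assuming the standard librt wrappers;
 * not part of this file):
 *
 *	char buf[8192];		(must be at least the queue's mq_msgsize)
 *	unsigned int prio;
 *	ssize_t n = mq_receive(mq, buf, sizeof(buf), &prio);
 *
 * mq_receive() reaches sys_mq_timedreceive() with a NULL timeout, which
 * the code above treats as "block indefinitely".  A buffer smaller than
 * mq_msgsize fails with EMSGSIZE, per the check above.
 */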

/*
 * Note: a request to deregister (u_notification == NULL) from a caller
 * that is not the current owner of the notification is silently ignored.
 * POSIX does not explicitly define this case.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification.sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else if (nc)
		dev_kfree_skb(nc);

	return ret;
}
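
/*
 * Userspace view (editor's sketch, assuming the standard librt wrappers;
 * not part of this file):
 *
 *	static void handler(union sigval sv);	(runs in a new thread)
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_THREAD,
 *		.sigev_notify_function = handler,
 *	};
 *	if (mq_notify(mq, &sev) == -1)
 *		perror("mq_notify");
 *
 * glibc implements SIGEV_THREAD on top of the netlink cookie mechanism
 * seen above: it opens a netlink socket, passes that fd in sigev_signo,
 * and __do_notify() wakes its helper thread by sending the cookie skb.
 */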

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = f.file->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&f.file->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fdput(f);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.mount = mqueue_mount,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default  = DFLT_MSGSIZE;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);