mqueue.c revision f419a2e3b64def707e1384ee38abb77f99af5f6d
1/*
2 * POSIX message queues filesystem for Linux.
3 *
4 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
5 *                          Michal Wronski          (michal.wronski@gmail.com)
6 *
7 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
8 * Lockless receive & send, fd based notify:
9 * 			    Manfred Spraul	    (manfred@colorfullife.com)
10 *
11 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
12 *
13 * This file is released under the GPL.
14 */
15
16#include <linux/capability.h>
17#include <linux/init.h>
18#include <linux/pagemap.h>
19#include <linux/file.h>
20#include <linux/mount.h>
21#include <linux/namei.h>
22#include <linux/sysctl.h>
23#include <linux/poll.h>
24#include <linux/mqueue.h>
25#include <linux/msg.h>
26#include <linux/skbuff.h>
27#include <linux/netlink.h>
28#include <linux/syscalls.h>
29#include <linux/audit.h>
30#include <linux/signal.h>
31#include <linux/mutex.h>
32#include <linux/nsproxy.h>
33#include <linux/pid.h>
34
35#include <net/sock.h>
36#include "util.h"
37
38#define MQUEUE_MAGIC	0x19800202
39#define DIRENT_SIZE	20
40#define FILENT_SIZE	80
41
42#define SEND		0
43#define RECV		1
44
45#define STATE_NONE	0
46#define STATE_PENDING	1
47#define STATE_READY	2
48
49/* default values */
50#define DFLT_QUEUESMAX	256	/* max number of message queues */
51#define DFLT_MSGMAX 	10	/* max number of messages in each queue */
52#define HARD_MSGMAX 	(131072/sizeof(void*))
53#define DFLT_MSGSIZEMAX 8192	/* max message size */
54
55
56struct ext_wait_queue {		/* queue of sleeping tasks */
57	struct task_struct *task;
58	struct list_head list;
59	struct msg_msg *msg;	/* ptr of loaded message */
60	int state;		/* one of STATE_* values */
61};
62
63struct mqueue_inode_info {
64	spinlock_t lock;
65	struct inode vfs_inode;
66	wait_queue_head_t wait_q;
67
68	struct msg_msg **messages;
69	struct mq_attr attr;
70
71	struct sigevent notify;
72	struct pid* notify_owner;
73	struct user_struct *user;	/* user who created, for accounting */
74	struct sock *notify_sock;
75	struct sk_buff *notify_cookie;
76
77	/* for tasks waiting for free space and messages, respectively */
78	struct ext_wait_queue e_wait_q[2];
79
80	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
81};
82
83static const struct inode_operations mqueue_dir_inode_operations;
84static const struct file_operations mqueue_file_operations;
85static struct super_operations mqueue_super_ops;
86static void remove_notification(struct mqueue_inode_info *info);
87
88static spinlock_t mq_lock;
89static struct kmem_cache *mqueue_inode_cachep;
90static struct vfsmount *mqueue_mnt;
91
92static unsigned int queues_count;
93static unsigned int queues_max 	= DFLT_QUEUESMAX;
94static unsigned int msg_max 	= DFLT_MSGMAX;
95static unsigned int msgsize_max = DFLT_MSGSIZEMAX;
96
97static struct ctl_table_header * mq_sysctl_table;
98
99static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
100{
101	return container_of(inode, struct mqueue_inode_info, vfs_inode);
102}
103
104static struct inode *mqueue_get_inode(struct super_block *sb, int mode,
105							struct mq_attr *attr)
106{
107	struct inode *inode;
108
109	inode = new_inode(sb);
110	if (inode) {
111		inode->i_mode = mode;
112		inode->i_uid = current->fsuid;
113		inode->i_gid = current->fsgid;
114		inode->i_blocks = 0;
115		inode->i_mtime = inode->i_ctime = inode->i_atime =
116				CURRENT_TIME;
117
118		if (S_ISREG(mode)) {
119			struct mqueue_inode_info *info;
120			struct task_struct *p = current;
121			struct user_struct *u = p->user;
122			unsigned long mq_bytes, mq_msg_tblsz;
123
124			inode->i_fop = &mqueue_file_operations;
125			inode->i_size = FILENT_SIZE;
126			/* mqueue specific info */
127			info = MQUEUE_I(inode);
128			spin_lock_init(&info->lock);
129			init_waitqueue_head(&info->wait_q);
130			INIT_LIST_HEAD(&info->e_wait_q[0].list);
131			INIT_LIST_HEAD(&info->e_wait_q[1].list);
132			info->messages = NULL;
133			info->notify_owner = NULL;
134			info->qsize = 0;
135			info->user = NULL;	/* set when all is ok */
136			memset(&info->attr, 0, sizeof(info->attr));
137			info->attr.mq_maxmsg = DFLT_MSGMAX;
138			info->attr.mq_msgsize = DFLT_MSGSIZEMAX;
139			if (attr) {
140				info->attr.mq_maxmsg = attr->mq_maxmsg;
141				info->attr.mq_msgsize = attr->mq_msgsize;
142			}
143			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
144			mq_bytes = (mq_msg_tblsz +
145				(info->attr.mq_maxmsg * info->attr.mq_msgsize));
146
147			spin_lock(&mq_lock);
148			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
149		 	    u->mq_bytes + mq_bytes >
150			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
151				spin_unlock(&mq_lock);
152				goto out_inode;
153			}
154			u->mq_bytes += mq_bytes;
155			spin_unlock(&mq_lock);
156
157			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
158			if (!info->messages) {
159				spin_lock(&mq_lock);
160				u->mq_bytes -= mq_bytes;
161				spin_unlock(&mq_lock);
162				goto out_inode;
163			}
164			/* all is ok */
165			info->user = get_uid(u);
166		} else if (S_ISDIR(mode)) {
167			inc_nlink(inode);
168			/* Some things misbehave if size == 0 on a directory */
169			inode->i_size = 2 * DIRENT_SIZE;
170			inode->i_op = &mqueue_dir_inode_operations;
171			inode->i_fop = &simple_dir_operations;
172		}
173	}
174	return inode;
175out_inode:
176	make_bad_inode(inode);
177	iput(inode);
178	return NULL;
179}
180
181static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
182{
183	struct inode *inode;
184
185	sb->s_blocksize = PAGE_CACHE_SIZE;
186	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
187	sb->s_magic = MQUEUE_MAGIC;
188	sb->s_op = &mqueue_super_ops;
189
190	inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
191	if (!inode)
192		return -ENOMEM;
193
194	sb->s_root = d_alloc_root(inode);
195	if (!sb->s_root) {
196		iput(inode);
197		return -ENOMEM;
198	}
199
200	return 0;
201}
202
203static int mqueue_get_sb(struct file_system_type *fs_type,
204			 int flags, const char *dev_name,
205			 void *data, struct vfsmount *mnt)
206{
207	return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
208}
209
210static void init_once(void *foo)
211{
212	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
213
214	inode_init_once(&p->vfs_inode);
215}
216
217static struct inode *mqueue_alloc_inode(struct super_block *sb)
218{
219	struct mqueue_inode_info *ei;
220
221	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
222	if (!ei)
223		return NULL;
224	return &ei->vfs_inode;
225}
226
227static void mqueue_destroy_inode(struct inode *inode)
228{
229	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
230}
231
232static void mqueue_delete_inode(struct inode *inode)
233{
234	struct mqueue_inode_info *info;
235	struct user_struct *user;
236	unsigned long mq_bytes;
237	int i;
238
239	if (S_ISDIR(inode->i_mode)) {
240		clear_inode(inode);
241		return;
242	}
243	info = MQUEUE_I(inode);
244	spin_lock(&info->lock);
245	for (i = 0; i < info->attr.mq_curmsgs; i++)
246		free_msg(info->messages[i]);
247	kfree(info->messages);
248	spin_unlock(&info->lock);
249
250	clear_inode(inode);
251
252	mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
253		   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
254	user = info->user;
255	if (user) {
256		spin_lock(&mq_lock);
257		user->mq_bytes -= mq_bytes;
258		queues_count--;
259		spin_unlock(&mq_lock);
260		free_uid(user);
261	}
262}
263
264static int mqueue_create(struct inode *dir, struct dentry *dentry,
265				int mode, struct nameidata *nd)
266{
267	struct inode *inode;
268	struct mq_attr *attr = dentry->d_fsdata;
269	int error;
270
271	spin_lock(&mq_lock);
272	if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) {
273		error = -ENOSPC;
274		goto out_lock;
275	}
276	queues_count++;
277	spin_unlock(&mq_lock);
278
279	inode = mqueue_get_inode(dir->i_sb, mode, attr);
280	if (!inode) {
281		error = -ENOMEM;
282		spin_lock(&mq_lock);
283		queues_count--;
284		goto out_lock;
285	}
286
287	dir->i_size += DIRENT_SIZE;
288	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
289
290	d_instantiate(dentry, inode);
291	dget(dentry);
292	return 0;
293out_lock:
294	spin_unlock(&mq_lock);
295	return error;
296}
297
298static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
299{
300  	struct inode *inode = dentry->d_inode;
301
302	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
303	dir->i_size -= DIRENT_SIZE;
304  	drop_nlink(inode);
305  	dput(dentry);
306  	return 0;
307}
308
309/*
310*	This is the routine for reads from a queue file.
311*	To avoid implementing some sort of mq_receive here, we only allow
312*	reading the queue size & notification info (the only values that
313*	are interesting from the user's point of view and aren't accessible
314*	through the standard routines).
315*/
316static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
317				size_t count, loff_t *off)
318{
319	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
320	char buffer[FILENT_SIZE];
321	ssize_t ret;
322
323	spin_lock(&info->lock);
324	snprintf(buffer, sizeof(buffer),
325			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
326			info->qsize,
327			info->notify_owner ? info->notify.sigev_notify : 0,
328			(info->notify_owner &&
329			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
330				info->notify.sigev_signo : 0,
331			pid_vnr(info->notify_owner));
332	spin_unlock(&info->lock);
333	buffer[sizeof(buffer)-1] = '\0';
334
335	ret = simple_read_from_buffer(u_data, count, off, buffer,
336				strlen(buffer));
337	if (ret <= 0)
338		return ret;
339
340	filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
341	return ret;
342}
343
344static int mqueue_flush_file(struct file *filp, fl_owner_t id)
345{
346	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
347
348	spin_lock(&info->lock);
349	if (task_tgid(current) == info->notify_owner)
350		remove_notification(info);
351
352	spin_unlock(&info->lock);
353	return 0;
354}
355
356static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
357{
358	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
359	int retval = 0;
360
361	poll_wait(filp, &info->wait_q, poll_tab);
362
363	spin_lock(&info->lock);
364	if (info->attr.mq_curmsgs)
365		retval = POLLIN | POLLRDNORM;
366
367	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
368		retval |= POLLOUT | POLLWRNORM;
369	spin_unlock(&info->lock);
370
371	return retval;
372}
373
374/* Adds current to info->e_wait_q[sr] before element with smaller prio */
375static void wq_add(struct mqueue_inode_info *info, int sr,
376			struct ext_wait_queue *ewp)
377{
378	struct ext_wait_queue *walk;
379
380	ewp->task = current;
381
382	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
383		if (walk->task->static_prio <= current->static_prio) {
384			list_add_tail(&ewp->list, &walk->list);
385			return;
386		}
387	}
388	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
389}
390
391/*
392 * Puts the current task to sleep. The caller must hold the queue lock; the
393 * lock is no longer held on return.
394 * sr: SEND or RECV
395 */
396static int wq_sleep(struct mqueue_inode_info *info, int sr,
397			long timeout, struct ext_wait_queue *ewp)
398{
399	int retval;
400	signed long time;
401
402	wq_add(info, sr, ewp);
403
404	for (;;) {
405		set_current_state(TASK_INTERRUPTIBLE);
406
407		spin_unlock(&info->lock);
408		time = schedule_timeout(timeout);
409
410		while (ewp->state == STATE_PENDING)
411			cpu_relax();
412
413		if (ewp->state == STATE_READY) {
414			retval = 0;
415			goto out;
416		}
417		spin_lock(&info->lock);
418		if (ewp->state == STATE_READY) {
419			retval = 0;
420			goto out_unlock;
421		}
422		if (signal_pending(current)) {
423			retval = -ERESTARTSYS;
424			break;
425		}
426		if (time == 0) {
427			retval = -ETIMEDOUT;
428			break;
429		}
430	}
431	list_del(&ewp->list);
432out_unlock:
433	spin_unlock(&info->lock);
434out:
435	return retval;
436}
437
438/*
439 * Returns waiting task that should be serviced first or NULL if none exists
440 */
441static struct ext_wait_queue *wq_get_first_waiter(
442		struct mqueue_inode_info *info, int sr)
443{
444	struct list_head *ptr;
445
446	ptr = info->e_wait_q[sr].list.prev;
447	if (ptr == &info->e_wait_q[sr].list)
448		return NULL;
449	return list_entry(ptr, struct ext_wait_queue, list);
450}
451
452/* Auxiliary functions to manipulate messages' list */
453static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
454{
455	int k;
456
457	k = info->attr.mq_curmsgs - 1;
458	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
459		info->messages[k + 1] = info->messages[k];
460		k--;
461	}
462	info->attr.mq_curmsgs++;
463	info->qsize += ptr->m_ts;
464	info->messages[k + 1] = ptr;
465}
466
467static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
468{
469	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
470	return info->messages[info->attr.mq_curmsgs];
471}
472
473static inline void set_cookie(struct sk_buff *skb, char code)
474{
475	((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
476}
477
478/*
479 * The next function exists only to split up the overly long sys_mq_timedsend
480 */
481static void __do_notify(struct mqueue_inode_info *info)
482{
483	/* notification
484	 * invoked when a process is registered and no process is waiting
485	 * synchronously for a message AND the state of the queue changed
486	 * from empty to not empty. Here we are sure that no one is waiting
487	 * synchronously. */
488	if (info->notify_owner &&
489	    info->attr.mq_curmsgs == 1) {
490		struct siginfo sig_i;
491		switch (info->notify.sigev_notify) {
492		case SIGEV_NONE:
493			break;
494		case SIGEV_SIGNAL:
495			/* sends signal */
496
497			sig_i.si_signo = info->notify.sigev_signo;
498			sig_i.si_errno = 0;
499			sig_i.si_code = SI_MESGQ;
500			sig_i.si_value = info->notify.sigev_value;
501			sig_i.si_pid = task_tgid_vnr(current);
502			sig_i.si_uid = current->uid;
503
504			kill_pid_info(info->notify.sigev_signo,
505				      &sig_i, info->notify_owner);
506			break;
507		case SIGEV_THREAD:
508			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
509			netlink_sendskb(info->notify_sock, info->notify_cookie);
510			break;
511		}
512		/* after notification, unregister the process */
513		put_pid(info->notify_owner);
514		info->notify_owner = NULL;
515	}
516	wake_up(&info->wait_q);
517}
518
519static long prepare_timeout(const struct timespec __user *u_arg)
520{
521	struct timespec ts, nowts;
522	long timeout;
523
524	if (u_arg) {
525		if (unlikely(copy_from_user(&ts, u_arg,
526					sizeof(struct timespec))))
527			return -EFAULT;
528
529		if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0
530			|| ts.tv_nsec >= NSEC_PER_SEC))
531			return -EINVAL;
532		nowts = CURRENT_TIME;
533		/* first subtract as jiffies can't be too big */
534		ts.tv_sec -= nowts.tv_sec;
535		if (ts.tv_nsec < nowts.tv_nsec) {
536			ts.tv_nsec += NSEC_PER_SEC;
537			ts.tv_sec--;
538		}
539		ts.tv_nsec -= nowts.tv_nsec;
540		if (ts.tv_sec < 0)
541			return 0;
542
543		timeout = timespec_to_jiffies(&ts) + 1;
544	} else
545		return MAX_SCHEDULE_TIMEOUT;
546
547	return timeout;
548}
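/*
 * A worked example of the conversion above (illustrative numbers, assuming
 * HZ == 1000, not taken from this file): if the caller passes an absolute
 * timeout 1.5s in the future, the subtraction leaves
 * ts = { .tv_sec = 1, .tv_nsec = 500000000 }, timespec_to_jiffies() gives
 * 1500, and prepare_timeout() returns 1501; the extra jiffy guards against
 * timing out early within the partially elapsed current tick.
 */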
549
550static void remove_notification(struct mqueue_inode_info *info)
551{
552	if (info->notify_owner != NULL &&
553	    info->notify.sigev_notify == SIGEV_THREAD) {
554		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
555		netlink_sendskb(info->notify_sock, info->notify_cookie);
556	}
557	put_pid(info->notify_owner);
558	info->notify_owner = NULL;
559}
560
561static int mq_attr_ok(struct mq_attr *attr)
562{
563	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
564		return 0;
565	if (capable(CAP_SYS_RESOURCE)) {
566		if (attr->mq_maxmsg > HARD_MSGMAX)
567			return 0;
568	} else {
569		if (attr->mq_maxmsg > msg_max ||
570				attr->mq_msgsize > msgsize_max)
571			return 0;
572	}
573	/* check for overflow */
574	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
575		return 0;
576	if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
577	    (attr->mq_maxmsg * sizeof (struct msg_msg *)) <
578	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
579		return 0;
580	return 1;
581}
582
583/*
584 * Invoked when creating a new queue via sys_mq_open
585 */
586static struct file *do_create(struct dentry *dir, struct dentry *dentry,
587			int oflag, mode_t mode, struct mq_attr __user *u_attr)
588{
589	struct mq_attr attr;
590	struct file *result;
591	int ret;
592
593	if (u_attr) {
594		ret = -EFAULT;
595		if (copy_from_user(&attr, u_attr, sizeof(attr)))
596			goto out;
597		ret = -EINVAL;
598		if (!mq_attr_ok(&attr))
599			goto out;
600		/* store for use during create */
601		dentry->d_fsdata = &attr;
602	}
603
604	mode &= ~current->fs->umask;
605	ret = mnt_want_write(mqueue_mnt);
606	if (ret)
607		goto out;
608	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
609	dentry->d_fsdata = NULL;
610	if (ret)
611		goto out_drop_write;
612
613	result = dentry_open(dentry, mqueue_mnt, oflag);
614	/*
615	 * dentry_open() took a persistent mnt_want_write(),
616	 * so we can now drop this one.
617	 */
618	mnt_drop_write(mqueue_mnt);
619	return result;
620
621out_drop_write:
622	mnt_drop_write(mqueue_mnt);
623out:
624	dput(dentry);
625	mntput(mqueue_mnt);
626	return ERR_PTR(ret);
627}
628
629/* Opens existing queue */
630static struct file *do_open(struct dentry *dentry, int oflag)
631{
632static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
633					MAY_READ | MAY_WRITE };
634
635	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
636		dput(dentry);
637		mntput(mqueue_mnt);
638		return ERR_PTR(-EINVAL);
639	}
640
641	if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
642		dput(dentry);
643		mntput(mqueue_mnt);
644		return ERR_PTR(-EACCES);
645	}
646
647	return dentry_open(dentry, mqueue_mnt, oflag);
648}
649
650asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
651				struct mq_attr __user *u_attr)
652{
653	struct dentry *dentry;
654	struct file *filp;
655	char *name;
656	int fd, error;
657
658	error = audit_mq_open(oflag, mode, u_attr);
659	if (error != 0)
660		return error;
661
662	if (IS_ERR(name = getname(u_name)))
663		return PTR_ERR(name);
664
665	fd = get_unused_fd_flags(O_CLOEXEC);
666	if (fd < 0)
667		goto out_putname;
668
669	mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
670	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
671	if (IS_ERR(dentry)) {
672		error = PTR_ERR(dentry);
673		goto out_err;
674	}
675	mntget(mqueue_mnt);
676
677	if (oflag & O_CREAT) {
678		if (dentry->d_inode) {	/* entry already exists */
679			audit_inode(name, dentry);
680			error = -EEXIST;
681			if (oflag & O_EXCL)
682				goto out;
683			filp = do_open(dentry, oflag);
684		} else {
685			filp = do_create(mqueue_mnt->mnt_root, dentry,
686						oflag, mode, u_attr);
687		}
688	} else {
689		error = -ENOENT;
690		if (!dentry->d_inode)
691			goto out;
692		audit_inode(name, dentry);
693		filp = do_open(dentry, oflag);
694	}
695
696	if (IS_ERR(filp)) {
697		error = PTR_ERR(filp);
698		goto out_putfd;
699	}
700
701	fd_install(fd, filp);
702	goto out_upsem;
703
704out:
705	dput(dentry);
706	mntput(mqueue_mnt);
707out_putfd:
708	put_unused_fd(fd);
709out_err:
710	fd = error;
711out_upsem:
712	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
713out_putname:
714	putname(name);
715	return fd;
716}
717
718asmlinkage long sys_mq_unlink(const char __user *u_name)
719{
720	int err;
721	char *name;
722	struct dentry *dentry;
723	struct inode *inode = NULL;
724
725	name = getname(u_name);
726	if (IS_ERR(name))
727		return PTR_ERR(name);
728
729	mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex,
730			I_MUTEX_PARENT);
731	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
732	if (IS_ERR(dentry)) {
733		err = PTR_ERR(dentry);
734		goto out_unlock;
735	}
736
737	if (!dentry->d_inode) {
738		err = -ENOENT;
739		goto out_err;
740	}
741
742	inode = dentry->d_inode;
743	if (inode)
744		atomic_inc(&inode->i_count);
745	err = mnt_want_write(mqueue_mnt);
746	if (err)
747		goto out_err;
748	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
749	mnt_drop_write(mqueue_mnt);
750out_err:
751	dput(dentry);
752
753out_unlock:
754	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
755	putname(name);
756	if (inode)
757		iput(inode);
758
759	return err;
760}
761
762/* Pipelined send and receive functions.
763 *
764 * If a receiver finds no waiting message, then it registers itself in the
765 * list of waiting receivers. A sender checks that list before adding the new
766 * message into the message array. If there is a waiting receiver, then it
767 * bypasses the message array and directly hands the message over to the
768 * receiver.
769 * The receiver accepts the message and returns without grabbing the queue
770 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
771 * are necessary. The same algorithm is used for sysv semaphores, see
772 * ipc/sem.c for more details.
773 *
774 * The same algorithm is used for senders.
775 */
776
777/* pipelined_send() - send a message directly to the task waiting in
778 * sys_mq_timedreceive() (without inserting message into a queue).
779 */
780static inline void pipelined_send(struct mqueue_inode_info *info,
781				  struct msg_msg *message,
782				  struct ext_wait_queue *receiver)
783{
784	receiver->msg = message;
785	list_del(&receiver->list);
786	receiver->state = STATE_PENDING;
787	wake_up_process(receiver->task);
788	smp_wmb();
789	receiver->state = STATE_READY;
790}
791
792/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
793 * take its message and put it into the queue (we have one free slot for sure). */
794static inline void pipelined_receive(struct mqueue_inode_info *info)
795{
796	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);
797
798	if (!sender) {
799		/* for poll */
800		wake_up_interruptible(&info->wait_q);
801		return;
802	}
803	msg_insert(sender->msg, info);
804	list_del(&sender->list);
805	sender->state = STATE_PENDING;
806	wake_up_process(sender->task);
807	smp_wmb();
808	sender->state = STATE_READY;
809}
810
811asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
812	size_t msg_len, unsigned int msg_prio,
813	const struct timespec __user *u_abs_timeout)
814{
815	struct file *filp;
816	struct inode *inode;
817	struct ext_wait_queue wait;
818	struct ext_wait_queue *receiver;
819	struct msg_msg *msg_ptr;
820	struct mqueue_inode_info *info;
821	long timeout;
822	int ret;
823
824	ret = audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout);
825	if (ret != 0)
826		return ret;
827
828	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
829		return -EINVAL;
830
831	timeout = prepare_timeout(u_abs_timeout);
832
833	ret = -EBADF;
834	filp = fget(mqdes);
835	if (unlikely(!filp))
836		goto out;
837
838	inode = filp->f_path.dentry->d_inode;
839	if (unlikely(filp->f_op != &mqueue_file_operations))
840		goto out_fput;
841	info = MQUEUE_I(inode);
842	audit_inode(NULL, filp->f_path.dentry);
843
844	if (unlikely(!(filp->f_mode & FMODE_WRITE)))
845		goto out_fput;
846
847	if (unlikely(msg_len > info->attr.mq_msgsize)) {
848		ret = -EMSGSIZE;
849		goto out_fput;
850	}
851
852	/* First try to allocate memory, before doing anything with
853	 * existing queues. */
854	msg_ptr = load_msg(u_msg_ptr, msg_len);
855	if (IS_ERR(msg_ptr)) {
856		ret = PTR_ERR(msg_ptr);
857		goto out_fput;
858	}
859	msg_ptr->m_ts = msg_len;
860	msg_ptr->m_type = msg_prio;
861
862	spin_lock(&info->lock);
863
864	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
865		if (filp->f_flags & O_NONBLOCK) {
866			spin_unlock(&info->lock);
867			ret = -EAGAIN;
868		} else if (unlikely(timeout < 0)) {
869			spin_unlock(&info->lock);
870			ret = timeout;
871		} else {
872			wait.task = current;
873			wait.msg = (void *) msg_ptr;
874			wait.state = STATE_NONE;
875			ret = wq_sleep(info, SEND, timeout, &wait);
876		}
877		if (ret < 0)
878			free_msg(msg_ptr);
879	} else {
880		receiver = wq_get_first_waiter(info, RECV);
881		if (receiver) {
882			pipelined_send(info, msg_ptr, receiver);
883		} else {
884			/* adds message to the queue */
885			msg_insert(msg_ptr, info);
886			__do_notify(info);
887		}
888		inode->i_atime = inode->i_mtime = inode->i_ctime =
889				CURRENT_TIME;
890		spin_unlock(&info->lock);
891		ret = 0;
892	}
893out_fput:
894	fput(filp);
895out:
896	return ret;
897}
898
899asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
900	size_t msg_len, unsigned int __user *u_msg_prio,
901	const struct timespec __user *u_abs_timeout)
902{
903	long timeout;
904	ssize_t ret;
905	struct msg_msg *msg_ptr;
906	struct file *filp;
907	struct inode *inode;
908	struct mqueue_inode_info *info;
909	struct ext_wait_queue wait;
910
911	ret = audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout);
912	if (ret != 0)
913		return ret;
914
915	timeout = prepare_timeout(u_abs_timeout);
916
917	ret = -EBADF;
918	filp = fget(mqdes);
919	if (unlikely(!filp))
920		goto out;
921
922	inode = filp->f_path.dentry->d_inode;
923	if (unlikely(filp->f_op != &mqueue_file_operations))
924		goto out_fput;
925	info = MQUEUE_I(inode);
926	audit_inode(NULL, filp->f_path.dentry);
927
928	if (unlikely(!(filp->f_mode & FMODE_READ)))
929		goto out_fput;
930
931	/* checks if buffer is big enough */
932	if (unlikely(msg_len < info->attr.mq_msgsize)) {
933		ret = -EMSGSIZE;
934		goto out_fput;
935	}
936
937	spin_lock(&info->lock);
938	if (info->attr.mq_curmsgs == 0) {
939		if (filp->f_flags & O_NONBLOCK) {
940			spin_unlock(&info->lock);
941			ret = -EAGAIN;
942			msg_ptr = NULL;
943		} else if (unlikely(timeout < 0)) {
944			spin_unlock(&info->lock);
945			ret = timeout;
946			msg_ptr = NULL;
947		} else {
948			wait.task = current;
949			wait.state = STATE_NONE;
950			ret = wq_sleep(info, RECV, timeout, &wait);
951			msg_ptr = wait.msg;
952		}
953	} else {
954		msg_ptr = msg_get(info);
955
956		inode->i_atime = inode->i_mtime = inode->i_ctime =
957				CURRENT_TIME;
958
959		/* There is now free space in queue. */
960		pipelined_receive(info);
961		spin_unlock(&info->lock);
962		ret = 0;
963	}
964	if (ret == 0) {
965		ret = msg_ptr->m_ts;
966
967		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
968			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
969			ret = -EFAULT;
970		}
971		free_msg(msg_ptr);
972	}
973out_fput:
974	fput(filp);
975out:
976	return ret;
977}
978
979/*
980 * Notes: the case where the user asks us to deregister (with a NULL pointer)
981 * but isn't the current owner of the notification is silently discarded.
982 * This behaviour isn't explicitly defined by POSIX.
983 */
984asmlinkage long sys_mq_notify(mqd_t mqdes,
985				const struct sigevent __user *u_notification)
986{
987	int ret;
988	struct file *filp;
989	struct sock *sock;
990	struct inode *inode;
991	struct sigevent notification;
992	struct mqueue_inode_info *info;
993	struct sk_buff *nc;
994
995	ret = audit_mq_notify(mqdes, u_notification);
996	if (ret != 0)
997		return ret;
998
999	nc = NULL;
1000	sock = NULL;
1001	if (u_notification != NULL) {
1002		if (copy_from_user(&notification, u_notification,
1003					sizeof(struct sigevent)))
1004			return -EFAULT;
1005
1006		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
1007			     notification.sigev_notify != SIGEV_SIGNAL &&
1008			     notification.sigev_notify != SIGEV_THREAD))
1009			return -EINVAL;
1010		if (notification.sigev_notify == SIGEV_SIGNAL &&
1011			!valid_signal(notification.sigev_signo)) {
1012			return -EINVAL;
1013		}
1014		if (notification.sigev_notify == SIGEV_THREAD) {
1015			long timeo;
1016
1017			/* create the notify skb */
1018			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
1019			ret = -ENOMEM;
1020			if (!nc)
1021				goto out;
1022			ret = -EFAULT;
1023			if (copy_from_user(nc->data,
1024					notification.sigev_value.sival_ptr,
1025					NOTIFY_COOKIE_LEN)) {
1026				goto out;
1027			}
1028
1029			/* TODO: add a header? */
1030			skb_put(nc, NOTIFY_COOKIE_LEN);
1031			/* and attach it to the socket */
1032retry:
1033			filp = fget(notification.sigev_signo);
1034			ret = -EBADF;
1035			if (!filp)
1036				goto out;
1037			sock = netlink_getsockbyfilp(filp);
1038			fput(filp);
1039			if (IS_ERR(sock)) {
1040				ret = PTR_ERR(sock);
1041				sock = NULL;
1042				goto out;
1043			}
1044
1045			timeo = MAX_SCHEDULE_TIMEOUT;
1046			ret = netlink_attachskb(sock, nc, &timeo, NULL);
1047			if (ret == 1)
1048		       		goto retry;
1049			if (ret) {
1050				sock = NULL;
1051				nc = NULL;
1052				goto out;
1053			}
1054		}
1055	}
1056
1057	ret = -EBADF;
1058	filp = fget(mqdes);
1059	if (!filp)
1060		goto out;
1061
1062	inode = filp->f_path.dentry->d_inode;
1063	if (unlikely(filp->f_op != &mqueue_file_operations))
1064		goto out_fput;
1065	info = MQUEUE_I(inode);
1066
1067	ret = 0;
1068	spin_lock(&info->lock);
1069	if (u_notification == NULL) {
1070		if (info->notify_owner == task_tgid(current)) {
1071			remove_notification(info);
1072			inode->i_atime = inode->i_ctime = CURRENT_TIME;
1073		}
1074	} else if (info->notify_owner != NULL) {
1075		ret = -EBUSY;
1076	} else {
1077		switch (notification.sigev_notify) {
1078		case SIGEV_NONE:
1079			info->notify.sigev_notify = SIGEV_NONE;
1080			break;
1081		case SIGEV_THREAD:
1082			info->notify_sock = sock;
1083			info->notify_cookie = nc;
1084			sock = NULL;
1085			nc = NULL;
1086			info->notify.sigev_notify = SIGEV_THREAD;
1087			break;
1088		case SIGEV_SIGNAL:
1089			info->notify.sigev_signo = notification.sigev_signo;
1090			info->notify.sigev_value = notification.sigev_value;
1091			info->notify.sigev_notify = SIGEV_SIGNAL;
1092			break;
1093		}
1094
1095		info->notify_owner = get_pid(task_tgid(current));
1096		inode->i_atime = inode->i_ctime = CURRENT_TIME;
1097	}
1098	spin_unlock(&info->lock);
1099out_fput:
1100	fput(filp);
1101out:
1102	if (sock) {
1103		netlink_detachskb(sock, nc);
1104	} else if (nc) {
1105		dev_kfree_skb(nc);
1106	}
1107	return ret;
1108}
1109
1110asmlinkage long sys_mq_getsetattr(mqd_t mqdes,
1111			const struct mq_attr __user *u_mqstat,
1112			struct mq_attr __user *u_omqstat)
1113{
1114	int ret;
1115	struct mq_attr mqstat, omqstat;
1116	struct file *filp;
1117	struct inode *inode;
1118	struct mqueue_inode_info *info;
1119
1120	if (u_mqstat != NULL) {
1121		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
1122			return -EFAULT;
1123		if (mqstat.mq_flags & (~O_NONBLOCK))
1124			return -EINVAL;
1125	}
1126
1127	ret = -EBADF;
1128	filp = fget(mqdes);
1129	if (!filp)
1130		goto out;
1131
1132	inode = filp->f_path.dentry->d_inode;
1133	if (unlikely(filp->f_op != &mqueue_file_operations))
1134		goto out_fput;
1135	info = MQUEUE_I(inode);
1136
1137	spin_lock(&info->lock);
1138
1139	omqstat = info->attr;
1140	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
1141	if (u_mqstat) {
1142		ret = audit_mq_getsetattr(mqdes, &mqstat);
1143		if (ret != 0) {
1144			spin_unlock(&info->lock);
1145			goto out_fput;
1146		}
1147		if (mqstat.mq_flags & O_NONBLOCK)
1148			filp->f_flags |= O_NONBLOCK;
1149		else
1150			filp->f_flags &= ~O_NONBLOCK;
1151
1152		inode->i_atime = inode->i_ctime = CURRENT_TIME;
1153	}
1154
1155	spin_unlock(&info->lock);
1156
1157	ret = 0;
1158	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
1159						sizeof(struct mq_attr)))
1160		ret = -EFAULT;
1161
1162out_fput:
1163	fput(filp);
1164out:
1165	return ret;
1166}
1167
1168static const struct inode_operations mqueue_dir_inode_operations = {
1169	.lookup = simple_lookup,
1170	.create = mqueue_create,
1171	.unlink = mqueue_unlink,
1172};
1173
1174static const struct file_operations mqueue_file_operations = {
1175	.flush = mqueue_flush_file,
1176	.poll = mqueue_poll_file,
1177	.read = mqueue_read_file,
1178};
1179
1180static struct super_operations mqueue_super_ops = {
1181	.alloc_inode = mqueue_alloc_inode,
1182	.destroy_inode = mqueue_destroy_inode,
1183	.statfs = simple_statfs,
1184	.delete_inode = mqueue_delete_inode,
1185	.drop_inode = generic_delete_inode,
1186};
1187
1188static struct file_system_type mqueue_fs_type = {
1189	.name = "mqueue",
1190	.get_sb = mqueue_get_sb,
1191	.kill_sb = kill_litter_super,
1192};
1193
1194static int msg_max_limit_min = DFLT_MSGMAX;
1195static int msg_max_limit_max = HARD_MSGMAX;
1196
1197static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX;
1198static int msg_maxsize_limit_max = INT_MAX;
1199
1200static ctl_table mq_sysctls[] = {
1201	{
1202		.procname	= "queues_max",
1203		.data		= &queues_max,
1204		.maxlen		= sizeof(int),
1205		.mode		= 0644,
1206		.proc_handler	= &proc_dointvec,
1207	},
1208	{
1209		.procname	= "msg_max",
1210		.data		= &msg_max,
1211		.maxlen		= sizeof(int),
1212		.mode		= 0644,
1213		.proc_handler	= &proc_dointvec_minmax,
1214		.extra1		= &msg_max_limit_min,
1215		.extra2		= &msg_max_limit_max,
1216	},
1217	{
1218		.procname	= "msgsize_max",
1219		.data		= &msgsize_max,
1220		.maxlen		= sizeof(int),
1221		.mode		= 0644,
1222		.proc_handler	= &proc_dointvec_minmax,
1223		.extra1		= &msg_maxsize_limit_min,
1224		.extra2		= &msg_maxsize_limit_max,
1225	},
1226	{ .ctl_name = 0 }
1227};
1228
1229static ctl_table mq_sysctl_dir[] = {
1230	{
1231		.procname	= "mqueue",
1232		.mode		= 0555,
1233		.child		= mq_sysctls,
1234	},
1235	{ .ctl_name = 0 }
1236};
1237
1238static ctl_table mq_sysctl_root[] = {
1239	{
1240		.ctl_name	= CTL_FS,
1241		.procname	= "fs",
1242		.mode		= 0555,
1243		.child		= mq_sysctl_dir,
1244	},
1245	{ .ctl_name = 0 }
1246};
1247
1248static int __init init_mqueue_fs(void)
1249{
1250	int error;
1251
1252	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
1253				sizeof(struct mqueue_inode_info), 0,
1254				SLAB_HWCACHE_ALIGN, init_once);
1255	if (mqueue_inode_cachep == NULL)
1256		return -ENOMEM;
1257
1258	/* ignore failures - they are not fatal */
1259	mq_sysctl_table = register_sysctl_table(mq_sysctl_root);
1260
1261	error = register_filesystem(&mqueue_fs_type);
1262	if (error)
1263		goto out_sysctl;
1264
1265	if (IS_ERR(mqueue_mnt = kern_mount(&mqueue_fs_type))) {
1266		error = PTR_ERR(mqueue_mnt);
1267		goto out_filesystem;
1268	}
1269
1270	/* internal initialization - not common for vfs */
1271	queues_count = 0;
1272	spin_lock_init(&mq_lock);
1273
1274	return 0;
1275
1276out_filesystem:
1277	unregister_filesystem(&mqueue_fs_type);
1278out_sysctl:
1279	if (mq_sysctl_table)
1280		unregister_sysctl_table(mq_sysctl_table);
1281	kmem_cache_destroy(mqueue_inode_cachep);
1282	return error;
1283}
1284
1285__initcall(init_mqueue_fs);
1286
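For orientation, the syscalls implemented above (sys_mq_open, sys_mq_timedsend, sys_mq_timedreceive, sys_mq_unlink) are normally reached through the POSIX mq_* wrappers in librt. The sketch below is illustrative only: the queue name "/example", the attribute values and the minimal error handling are assumptions made for the example, not anything taken from mqueue.c. Build with -lrt.

#include <mqueue.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

int main(void)
{
	struct mq_attr attr = {
		.mq_maxmsg  = 10,	/* matches DFLT_MSGMAX above */
		.mq_msgsize = 8192,	/* matches DFLT_MSGSIZEMAX above */
	};
	char buf[8192];		/* must be >= mq_msgsize for mq_receive() */
	unsigned int prio;
	ssize_t n;

	/* ends up in sys_mq_open(), creating an inode in the mqueue fs */
	mqd_t mq = mq_open("/example", O_CREAT | O_RDWR, 0600, &attr);
	if (mq == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}

	/* blocking send/receive: with no timeout passed, the kernel side
	 * uses MAX_SCHEDULE_TIMEOUT (see prepare_timeout() above) */
	if (mq_send(mq, "hello", strlen("hello"), 1) == -1)
		perror("mq_send");
	n = mq_receive(mq, buf, sizeof(buf), &prio);
	if (n >= 0)
		printf("got %zd bytes at priority %u\n", n, prio);

	mq_close(mq);
	mq_unlink("/example");	/* ends up in sys_mq_unlink() */
	return 0;
}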