mqueue.c revision 50953fe9e00ebbeffa032a565ab2f08312d51a87
/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

/* used by sysctl */
#define FS_MQUEUE	1
#define CTL_QUEUESMAX	2
#define CTL_MSGMAX	3
#define CTL_MSGSIZEMAX	4

/* default values */
#define DFLT_QUEUESMAX	256	/* max number of message queues */
#define DFLT_MSGMAX	10	/* max number of messages in each queue */
#define HARD_MSGMAX	(131072/sizeof(void*))
#define DFLT_MSGSIZEMAX	8192	/* max message size */


struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid* notify_owner;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static spinlock_t mq_lock;
static struct kmem_cache *mqueue_inode_cachep;
static struct vfsmount *mqueue_mnt;

static unsigned int queues_count;
static unsigned int queues_max	= DFLT_QUEUESMAX;
static unsigned int msg_max	= DFLT_MSGMAX;
static unsigned int msgsize_max	= DFLT_MSGSIZEMAX;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}
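
/*
 * Sizing sketch: with the DFLT_* values above and 8-byte pointers (an
 * illustrative assumption; the real figure is arch dependent), the
 * per-queue charge computed in mqueue_get_inode() below works out to
 *
 *	mq_msg_tblsz = mq_maxmsg * sizeof(struct msg_msg *) = 10 * 8    = 80
 *	mq_bytes     = mq_msg_tblsz + mq_maxmsg * mq_msgsize = 80 + 81920 = 82000
 *
 * and is checked against the creator's RLIMIT_MSGQUEUE soft limit.
 */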

static struct inode *mqueue_get_inode(struct super_block *sb, int mode,
							struct mq_attr *attr)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blocks = 0;
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;
			struct task_struct *p = current;
			struct user_struct *u = p->user;
			unsigned long mq_bytes, mq_msg_tblsz;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->messages = NULL;
			info->notify_owner = NULL;
			info->qsize = 0;
			info->user = NULL;	/* set when all is ok */
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = DFLT_MSGMAX;
			info->attr.mq_msgsize = DFLT_MSGSIZEMAX;
			if (attr) {
				info->attr.mq_maxmsg = attr->mq_maxmsg;
				info->attr.mq_msgsize = attr->mq_msgsize;
			}
			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
			mq_bytes = (mq_msg_tblsz +
				(info->attr.mq_maxmsg * info->attr.mq_msgsize));

			spin_lock(&mq_lock);
			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
			    u->mq_bytes + mq_bytes >
			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			u->mq_bytes += mq_bytes;
			spin_unlock(&mq_lock);

			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
			if (!info->messages) {
				spin_lock(&mq_lock);
				u->mq_bytes -= mq_bytes;
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			/* all is ok */
			info->user = get_uid(u);
		} else if (S_ISDIR(mode)) {
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;
out_inode:
	make_bad_inode(inode);
	iput(inode);
	return NULL;
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (!inode)
		return -ENOMEM;

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		return -ENOMEM;
	}

	return 0;
}

static int mqueue_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
}

static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	if (flags & SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}
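
/*
 * Inode teardown: free every queued message and the pointer table under
 * info->lock, clear the inode, then return the mq_bytes charge taken in
 * mqueue_get_inode() to the owning user_struct under mq_lock.
 */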

static void mqueue_delete_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;

	if (S_ISDIR(inode->i_mode)) {
		clear_inode(inode);
		return;
	}
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	clear_inode(inode);

	mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
		   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				int mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;

	spin_lock(&mq_lock);
	if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_lock;
	}
	queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, mode, attr);
	if (!inode) {
		error = -ENOMEM;
		spin_lock(&mq_lock);
		queues_count--;
		goto out_lock;
	}

	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_lock:
	spin_unlock(&mq_lock);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}
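
/*
 * Reading a queue file yields one fixed-format line, e.g. (values are
 * made up for illustration):
 *
 *	QSIZE:129        NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 *
 * i.e. 129 bytes are queued and pid 8260 holds a SIGEV_THREAD
 * notification registration.  mqueue_read_file() below produces it.
 */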

/*
 * This is the routine that handles read(2) on a queue file.  Rather than
 * implementing some sort of mq_receive() here, we expose only the queue
 * size and notification info: the only values that are interesting from
 * the user's point of view and not accessible through the standard
 * message-queue calls.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	size_t slen;
	loff_t o;

	if (!count)
		return 0;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_nr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';
	slen = strlen(buffer)+1;

	o = *off;
	if (o > slen)
		return 0;

	if (o + count > slen)
		count = slen - o;

	if (copy_to_user(u_data, buffer + o, count))
		return -EFAULT;

	*off = o + count;
	filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return count;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
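
/*
 * Sleepers added by wq_add() are woken in one of three ways: a peer hands
 * them a message directly (pipelined_send()/pipelined_receive() set
 * STATE_PENDING, then STATE_READY), the timeout computed by
 * prepare_timeout() expires, or a signal arrives.  wq_sleep() below
 * distinguishes the three by checking ewp->state first.
 */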

/*
 * Puts current task to sleep.  Caller must hold queue lock.  On return
 * the lock is no longer held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
			long timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_timeout(timeout);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

/* Auxiliary functions to manipulate messages' list */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}
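
/*
 * msg_insert() keeps info->messages[] sorted ascending by priority
 * (m_type carries the priority here) via insertion sort, so msg_get()
 * always pops the highest-priority message.  A small worked example,
 * inserting priority 5 into priorities {1, 3, 7}:
 *
 *	before:	messages = {1, 3, 7}, mq_curmsgs = 3
 *	shift:	7 moves up one slot (7 >= 5)
 *	after:	messages = {1, 3, 5, 7}, mq_curmsgs = 4
 *
 * msg_get() then returns 7, the last (highest-priority) element.
 */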

/*
 * This function exists only to keep sys_mq_timedsend() from growing
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when a process is registered, no process is waiting
	 * synchronously for a message AND the state of the queue changed
	 * from empty to not empty.  At this point we know that nobody is
	 * waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = current->tgid;
			sig_i.si_uid = current->uid;

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock,
					info->notify_cookie, 0);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		info->notify_owner = NULL;
	}
	wake_up(&info->wait_q);
}

static long prepare_timeout(const struct timespec __user *u_arg)
{
	struct timespec ts, nowts;
	long timeout;

	if (u_arg) {
		if (unlikely(copy_from_user(&ts, u_arg,
					sizeof(struct timespec))))
			return -EFAULT;

		if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0
			|| ts.tv_nsec >= NSEC_PER_SEC))
			return -EINVAL;
		nowts = CURRENT_TIME;
		/* first subtract as jiffies can't be too big */
		ts.tv_sec -= nowts.tv_sec;
		if (ts.tv_nsec < nowts.tv_nsec) {
			ts.tv_nsec += NSEC_PER_SEC;
			ts.tv_sec--;
		}
		ts.tv_nsec -= nowts.tv_nsec;
		if (ts.tv_sec < 0)
			return 0;

		timeout = timespec_to_jiffies(&ts) + 1;
	} else
		return MAX_SCHEDULE_TIMEOUT;

	return timeout;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie, 0);
	}
	put_pid(info->notify_owner);
	info->notify_owner = NULL;
}

static int mq_attr_ok(struct mq_attr *attr)
{
	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return 0;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX)
			return 0;
	} else {
		if (attr->mq_maxmsg > msg_max ||
				attr->mq_msgsize > msgsize_max)
			return 0;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return 0;
	if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
	    (attr->mq_maxmsg * sizeof (struct msg_msg *)) <
	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
		return 0;
	return 1;
}
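
/*
 * Overflow-check rationale: mq_maxmsg * mq_msgsize must not wrap, and
 * neither may the later addition of the pointer-table size.  E.g. on a
 * 32-bit box (an illustrative assumption), mq_maxmsg = 3 with
 * mq_msgsize = 0x60000000 would pass a naive range check, yet
 * 3 * 0x60000000 wraps to 0x20000000; mq_attr_ok() above rejects it via
 * the ULONG_MAX / mq_maxmsg division.  This matters mainly for
 * CAP_SYS_RESOURCE holders, who bypass the msgsize_max cap.
 */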

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct dentry *dir, struct dentry *dentry,
			int oflag, mode_t mode, struct mq_attr __user *u_attr)
{
	struct mq_attr attr;
	int ret;

	if (u_attr) {
		ret = -EFAULT;
		if (copy_from_user(&attr, u_attr, sizeof(attr)))
			goto out;
		ret = -EINVAL;
		if (!mq_attr_ok(&attr))
			goto out;
		/* store for use during create */
		dentry->d_fsdata = &attr;
	}

	mode &= ~current->fs->umask;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out;

	return dentry_open(dentry, mqueue_mnt, oflag);

out:
	dput(dentry);
	mntput(mqueue_mnt);
	return ERR_PTR(ret);
}

/* Opens existing queue */
static struct file *do_open(struct dentry *dentry, int oflag)
{
	static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
					MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EINVAL);
	}

	if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL)) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EACCES);
	}

	return dentry_open(dentry, mqueue_mnt, oflag);
}

asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
				struct mq_attr __user *u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	int fd, error;

	error = audit_mq_open(oflag, mode, u_attr);
	if (error != 0)
		return error;

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd();
	if (fd < 0)
		goto out_putname;

	mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_err;
	}
	mntget(mqueue_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			error = -EEXIST;
			if (oflag & O_EXCL)
				goto out;
			filp = do_open(dentry, oflag);
		} else {
			filp = do_create(mqueue_mnt->mnt_root, dentry,
						oflag, mode, u_attr);
		}
	} else {
		error = -ENOENT;
		if (!dentry->d_inode)
			goto out;
		filp = do_open(dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	set_close_on_exec(fd, 1);
	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(mqueue_mnt);
out_putfd:
	put_unused_fd(fd);
out_err:
	fd = error;
out_upsem:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}

asmlinkage long sys_mq_unlink(const char __user *u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		atomic_inc(&inode->i_count);

	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}
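
/*
 * Illustrative userspace usage of the two entry points above (the queue
 * name and attributes are arbitrary examples; the glibc/librt wrappers
 * normally issue these syscalls):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 8192 };
 *	mqd_t q = mq_open("/example", O_CREAT | O_EXCL | O_RDWR,
 *			  0600, &attr);		// -> sys_mq_open()
 *	...
 *	mq_close(q);
 *	mq_unlink("/example");			// -> sys_mq_unlink()
 */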

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and put it on the queue (we know there is at least one free
 * slot). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}
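
/*
 * Why STATE_PENDING?  The woken task may return from wq_sleep() without
 * retaking info->lock, so the waker publishes the handoff in two steps:
 *
 *	waker				sleeper (wq_sleep)
 *	state = STATE_PENDING		while (state == STATE_PENDING)
 *	wake_up_process()			cpu_relax();
 *	smp_wmb()
 *	state = STATE_READY		sees STATE_READY, msg is visible
 *
 * The sleeper spins across the PENDING window instead of touching the
 * message before the barrier has made it visible.
 */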

asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
	size_t msg_len, unsigned int msg_prio,
	const struct timespec __user *u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	long timeout;
	int ret;

	ret = audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout);
	if (ret != 0)
		return ret;

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	timeout = prepare_timeout(u_abs_timeout);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	if (unlikely(!(filp->f_mode & FMODE_WRITE)))
		goto out_fput;

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
		if (ret < 0)
			free_msg(msg_ptr);
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
		ret = 0;
	}
out_fput:
	fput(filp);
out:
	return ret;
}

asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
	size_t msg_len, unsigned int __user *u_msg_prio,
	const struct timespec __user *u_abs_timeout)
{
	long timeout;
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;

	ret = audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout);
	if (ret != 0)
		return ret;

	timeout = prepare_timeout(u_abs_timeout);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	if (unlikely(!(filp->f_mode & FMODE_READ)))
		goto out_fput;

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
			msg_ptr = NULL;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
			msg_ptr = NULL;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}
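
/*
 * Illustrative userspace round trip through the two syscalls above
 * (queue name and sizes are arbitrary examples; the receive buffer must
 * be at least mq_msgsize bytes or the call fails with -EMSGSIZE):
 *
 *	mqd_t q = mq_open("/example", O_RDWR);
 *	mq_send(q, "ping", 4, 1);		// -> sys_mq_timedsend()
 *	char buf[8192];
 *	unsigned int prio;
 *	ssize_t n = mq_receive(q, buf, sizeof(buf), &prio);
 *						// -> sys_mq_timedreceive()
 */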

/*
 * Notes: if a user asks us to deregister (by passing a NULL pointer)
 * while not currently owning the notification, the request is silently
 * discarded.  POSIX does not explicitly define this case.
 */
asmlinkage long sys_mq_notify(mqd_t mqdes,
				const struct sigevent __user *u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	ret = audit_mq_notify(mqdes, u_notification);
	if (ret != 0)
		return ret;

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;

		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			ret = -ENOMEM;
			if (!nc)
				goto out;
			ret = -EFAULT;
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			ret = -EBADF;
			if (!filp)
				goto out;
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			ret = netlink_attachskb(sock, nc, 0,
					MAX_SCHEDULE_TIMEOUT, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}
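
/*
 * SIGEV_THREAD plumbing note: userspace (glibc's librt, for example)
 * passes a netlink socket fd in sigev_signo and a NOTIFY_COOKIE_LEN
 * cookie in sigev_value.sival_ptr.  The kernel parks the cookie skb on
 * that socket and later sends it back with its last byte set to
 * NOTIFY_WOKENUP (queue went non-empty) or NOTIFY_REMOVED (registration
 * dropped); see set_cookie() above.
 */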

asmlinkage long sys_mq_getsetattr(mqd_t mqdes,
			const struct mq_attr __user *u_mqstat,
			struct mq_attr __user *u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		ret = audit_mq_getsetattr(mqdes, &mqstat);
		if (ret != 0) {
			/* error path must drop the lock and the file ref */
			spin_unlock(&info->lock);
			goto out_fput;
		}
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
};

static struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.statfs = simple_statfs,
	.delete_inode = mqueue_delete_inode,
	.drop_inode = generic_delete_inode,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.get_sb = mqueue_get_sb,
	.kill_sb = kill_litter_super,
};

static int msg_max_limit_min = DFLT_MSGMAX;
static int msg_max_limit_max = HARD_MSGMAX;

static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX;
static int msg_maxsize_limit_max = INT_MAX;

static ctl_table mq_sysctls[] = {
	{
		.ctl_name	= CTL_QUEUESMAX,
		.procname	= "queues_max",
		.data		= &queues_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_MSGMAX,
		.procname	= "msg_max",
		.data		= &msg_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_max_limit_min,
		.extra2		= &msg_max_limit_max,
	},
	{
		.ctl_name	= CTL_MSGSIZEMAX,
		.procname	= "msgsize_max",
		.data		= &msgsize_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_maxsize_limit_min,
		.extra2		= &msg_maxsize_limit_max,
	},
	{ .ctl_name = 0 }
};

static ctl_table mq_sysctl_dir[] = {
	{
		.ctl_name	= FS_MQUEUE,
		.procname	= "mqueue",
		.mode		= 0555,
		.child		= mq_sysctls,
	},
	{ .ctl_name = 0 }
};

static ctl_table mq_sysctl_root[] = {
	{
		.ctl_name	= CTL_FS,
		.procname	= "fs",
		.mode		= 0555,
		.child		= mq_sysctl_dir,
	},
	{ .ctl_name = 0 }
};
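
/*
 * The tables above surface as /proc/sys/fs/mqueue/{queues_max,msg_max,
 * msgsize_max}.  For example (illustrative shell session):
 *
 *	# cat /proc/sys/fs/mqueue/msg_max
 *	10
 *	# echo 64 > /proc/sys/fs/mqueue/msg_max
 *
 * proc_dointvec_minmax clamps msg_max to [DFLT_MSGMAX, HARD_MSGMAX] and
 * msgsize_max to [DFLT_MSGSIZEMAX, INT_MAX]; queues_max uses plain
 * proc_dointvec and is unclamped.
 */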

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once, NULL);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = register_sysctl_table(mq_sysctl_root);

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	if (IS_ERR(mqueue_mnt = kern_mount(&mqueue_fs_type))) {
		error = PTR_ERR(mqueue_mnt);
		goto out_filesystem;
	}

	/* internal initialization - not common for vfs */
	queues_count = 0;
	spin_lock_init(&mq_lock);

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);