mqueue.c revision 7a434814c7a6500b08bf4419ba8712b152d08d08
/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak  (golbi@mat.uni.torun.pl)
 *                          Michal Wronski        (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas         (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul        (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson         (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

/* used by sysctl */
#define FS_MQUEUE	1
#define CTL_QUEUESMAX	2
#define CTL_MSGMAX	3
#define CTL_MSGSIZEMAX	4

/* default values */
#define DFLT_QUEUESMAX	256	/* max number of message queues */
#define DFLT_MSGMAX	10	/* max number of messages in each queue */
#define HARD_MSGMAX	(131072/sizeof(void*))
#define DFLT_MSGSIZEMAX	8192	/* max message size */


struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid* notify_owner;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static spinlock_t mq_lock;
static struct kmem_cache *mqueue_inode_cachep;
static struct vfsmount *mqueue_mnt;

static unsigned int queues_count;
static unsigned int queues_max	= DFLT_QUEUESMAX;
static unsigned int msg_max	= DFLT_MSGMAX;
static unsigned int msgsize_max	= DFLT_MSGSIZEMAX;

static struct ctl_table_header * mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

static struct inode *mqueue_get_inode(struct super_block *sb, int mode,
							struct mq_attr *attr)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blocks = 0;
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;
			struct task_struct *p = current;
			struct user_struct *u = p->user;
			unsigned long mq_bytes, mq_msg_tblsz;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->messages = NULL;
			info->notify_owner = NULL;
			info->qsize = 0;
			info->user = NULL;	/* set when all is ok */
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = DFLT_MSGMAX;
			info->attr.mq_msgsize = DFLT_MSGSIZEMAX;
			if (attr) {
				info->attr.mq_maxmsg = attr->mq_maxmsg;
				info->attr.mq_msgsize = attr->mq_msgsize;
			}
			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
			mq_bytes = (mq_msg_tblsz +
				(info->attr.mq_maxmsg * info->attr.mq_msgsize));

			spin_lock(&mq_lock);
			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
			    u->mq_bytes + mq_bytes >
			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			u->mq_bytes += mq_bytes;
			spin_unlock(&mq_lock);

			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
			if (!info->messages) {
				spin_lock(&mq_lock);
				u->mq_bytes -= mq_bytes;
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			/* all is ok */
			info->user = get_uid(u);
		} else if (S_ISDIR(mode)) {
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;
out_inode:
	make_bad_inode(inode);
	iput(inode);
	return NULL;
}

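/*
 * Editor's illustrative sketch (not part of this revision): the per-user
 * charge taken against RLIMIT_MSGQUEUE above is the worst-case footprint of
 * the queue - one pointer slot per message plus mq_maxmsg messages of
 * mq_msgsize bytes each.  A hypothetical helper mirroring that arithmetic:
 *
 *	static unsigned long mq_worst_case_bytes(const struct mq_attr *attr)
 *	{
 *		return attr->mq_maxmsg * sizeof(struct msg_msg *) +
 *		       attr->mq_maxmsg * attr->mq_msgsize;
 *	}
 *
 * With the defaults (mq_maxmsg = 10, mq_msgsize = 8192) each queue charges
 * 10 * sizeof(struct msg_msg *) + 81920 bytes against the creator's limit.
 */
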
static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (!inode)
		return -ENOMEM;

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		return -ENOMEM;
	}

	return 0;
}

static int mqueue_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
}

static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
		SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_delete_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;

	if (S_ISDIR(inode->i_mode)) {
		clear_inode(inode);
		return;
	}
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	clear_inode(inode);

	mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
		   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				int mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;

	spin_lock(&mq_lock);
	if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_lock;
	}
	queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, mode, attr);
	if (!inode) {
		error = -ENOMEM;
		spin_lock(&mq_lock);
		queues_count--;
		goto out_lock;
	}

	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_lock:
	spin_unlock(&mq_lock);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This is the read() handler for a queue file.  To avoid reimplementing
 * mq_receive() here, it exposes only the queue size and the notification
 * info - the only values that are interesting from the user's point of
 * view and aren't accessible through the standard routines.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t * off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	size_t slen;
	loff_t o;

	if (!count)
		return 0;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_nr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';
	slen = strlen(buffer)+1;

	o = *off;
	if (o > slen)
		return 0;

	if (o + count > slen)
		count = slen - o;

	if (copy_to_user(u_data, buffer + o, count))
		return -EFAULT;

	*off = o + count;
	filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return count;
}

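/*
 * Editor's illustrative sketch (not part of this revision): the status line
 * produced above can be read from user space by opening the queue's entry on
 * a mounted mqueue filesystem with the regular VFS calls.  The mount point
 * (/dev/mqueue here) is an assumption - it is whatever the administrator
 * chose for "mount -t mqueue none <dir>".
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[80];
 *		ssize_t n;
 *		int fd = open("/dev/mqueue/myqueue", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		n = read(fd, buf, sizeof(buf) - 1);
 *		if (n < 0)
 *			return 1;
 *		buf[n] = '\0';
 *		printf("%s", buf);	// e.g. "QSIZE:0 NOTIFY:0 SIGNO:0 NOTIFY_PID:0"
 *		close(fd);
 *		return 0;
 *	}
 */
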
static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
				long timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_timeout(timeout);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

/* Auxiliary functions to manipulate messages' list */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend() from growing too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* A notification is delivered when a process has registered for it,
	 * no process is waiting synchronously for a message, AND the queue
	 * changed from empty to non-empty.  At this point we are sure that
	 * nobody is waiting synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = current->tgid;
			sig_i.si_uid = current->uid;

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock,
					info->notify_cookie, 0);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		info->notify_owner = NULL;
	}
	wake_up(&info->wait_q);
}

static long prepare_timeout(const struct timespec __user *u_arg)
{
	struct timespec ts, nowts;
	long timeout;

	if (u_arg) {
		if (unlikely(copy_from_user(&ts, u_arg,
					sizeof(struct timespec))))
			return -EFAULT;

		if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0
			|| ts.tv_nsec >= NSEC_PER_SEC))
			return -EINVAL;
		nowts = CURRENT_TIME;
		/* first subtract as jiffies can't be too big */
		ts.tv_sec -= nowts.tv_sec;
		if (ts.tv_nsec < nowts.tv_nsec) {
			ts.tv_nsec += NSEC_PER_SEC;
			ts.tv_sec--;
		}
		ts.tv_nsec -= nowts.tv_nsec;
		if (ts.tv_sec < 0)
			return 0;

		timeout = timespec_to_jiffies(&ts) + 1;
	} else
		return MAX_SCHEDULE_TIMEOUT;

	return timeout;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie, 0);
	}
	put_pid(info->notify_owner);
	info->notify_owner = NULL;
}

static int mq_attr_ok(struct mq_attr *attr)
{
	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return 0;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX)
			return 0;
	} else {
		if (attr->mq_maxmsg > msg_max ||
				attr->mq_msgsize > msgsize_max)
			return 0;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return 0;
	if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
	    (attr->mq_maxmsg * sizeof (struct msg_msg *)) <
	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
		return 0;
	return 1;
}

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct dentry *dir, struct dentry *dentry,
			int oflag, mode_t mode, struct mq_attr __user *u_attr)
{
	struct mq_attr attr;
	int ret;

	if (u_attr) {
		ret = -EFAULT;
		if (copy_from_user(&attr, u_attr, sizeof(attr)))
			goto out;
		ret = -EINVAL;
		if (!mq_attr_ok(&attr))
			goto out;
		/* store for use during create */
		dentry->d_fsdata = &attr;
	}

	mode &= ~current->fs->umask;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out;

	return dentry_open(dentry, mqueue_mnt, oflag);

out:
	dput(dentry);
	mntput(mqueue_mnt);
	return ERR_PTR(ret);
}

/* Opens existing queue */
static struct file *do_open(struct dentry *dentry, int oflag)
{
static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
					MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EINVAL);
	}

	if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL)) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EACCES);
	}

	return dentry_open(dentry, mqueue_mnt, oflag);
}

asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
				struct mq_attr __user *u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	int fd, error;

	error = audit_mq_open(oflag, mode, u_attr);
	if (error != 0)
		return error;

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd();
	if (fd < 0)
		goto out_putname;

	mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_err;
	}
	mntget(mqueue_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			error = -EEXIST;
			if (oflag & O_EXCL)
				goto out;
			filp = do_open(dentry, oflag);
		} else {
			filp = do_create(mqueue_mnt->mnt_root, dentry,
						oflag, mode, u_attr);
		}
	} else {
		error = -ENOENT;
		if (!dentry->d_inode)
			goto out;
		filp = do_open(dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	set_close_on_exec(fd, 1);
	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(mqueue_mnt);
out_putfd:
	put_unused_fd(fd);
out_err:
	fd = error;
out_upsem:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}

asmlinkage long sys_mq_unlink(const char __user *u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		atomic_inc(&inode->i_count);

	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}

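/*
 * Editor's illustrative sketch (not part of this revision): user space
 * reaches sys_mq_open()/sys_mq_unlink() through the librt wrappers.  The
 * queue name "/editor-demo" and the attribute values are arbitrary; for an
 * unprivileged caller mq_maxmsg/mq_msgsize must pass mq_attr_ok() against
 * the msg_max and msgsize_max sysctls.  Build with -lrt.
 *
 *	#include <mqueue.h>
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *
 *	int main(void)
 *	{
 *		struct mq_attr attr = {
 *			.mq_maxmsg  = 10,
 *			.mq_msgsize = 128,
 *		};
 *		mqd_t q = mq_open("/editor-demo", O_CREAT | O_RDWR, 0600, &attr);
 *
 *		if (q == (mqd_t)-1)
 *			return 1;
 *		mq_close(q);
 *		mq_unlink("/editor-demo");
 *		return 0;
 *	}
 */
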
/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take its
 * message and put it into the queue (there is guaranteed to be a free slot). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}

asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
	size_t msg_len, unsigned int msg_prio,
	const struct timespec __user *u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	long timeout;
	int ret;

	ret = audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout);
	if (ret != 0)
		return ret;

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	timeout = prepare_timeout(u_abs_timeout);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	if (unlikely(!(filp->f_mode & FMODE_WRITE)))
		goto out_fput;

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
		if (ret < 0)
			free_msg(msg_ptr);
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
		ret = 0;
	}
out_fput:
	fput(filp);
out:
	return ret;
}

asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
	size_t msg_len, unsigned int __user *u_msg_prio,
	const struct timespec __user *u_abs_timeout)
{
	long timeout;
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;

	ret = audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout);
	if (ret != 0)
		return ret;

	timeout = prepare_timeout(u_abs_timeout);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	if (unlikely(!(filp->f_mode & FMODE_READ)))
		goto out_fput;

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
			msg_ptr = NULL;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
			msg_ptr = NULL;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}

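/*
 * Editor's illustrative sketch (not part of this revision): a minimal
 * user-space round trip through sys_mq_timedsend()/sys_mq_timedreceive().
 * The receive buffer must be at least mq_msgsize bytes (the kernel returns
 * -EMSGSIZE otherwise, as checked above), and the timeout is an absolute
 * CLOCK_REALTIME value, matching prepare_timeout().  Queue name and sizes
 * are arbitrary; build with -lrt.
 *
 *	#include <mqueue.h>
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <time.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		struct mq_attr attr = { .mq_maxmsg = 4, .mq_msgsize = 64 };
 *		char buf[64];		// must be >= mq_msgsize
 *		unsigned int prio;
 *		struct timespec abs;
 *		mqd_t q = mq_open("/editor-demo", O_CREAT | O_RDWR, 0600, &attr);
 *
 *		if (q == (mqd_t)-1)
 *			return 1;
 *		clock_gettime(CLOCK_REALTIME, &abs);
 *		abs.tv_sec += 5;	// give up after five seconds
 *		mq_timedsend(q, "hello", strlen("hello"), 1, &abs);
 *		mq_timedreceive(q, buf, sizeof(buf), &prio, &abs);
 *		mq_close(q);
 *		mq_unlink("/editor-demo");
 *		return 0;
 *	}
 */
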
/*
 * Notes: if the caller asks us to deregister (by passing a NULL pointer)
 * but is not currently the owner of the notification, the request is
 * silently ignored.  POSIX does not explicitly define this case.
 */
asmlinkage long sys_mq_notify(mqd_t mqdes,
				const struct sigevent __user *u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	ret = audit_mq_notify(mqdes, u_notification);
	if (ret != 0)
		return ret;

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;

		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			ret = -ENOMEM;
			if (!nc)
				goto out;
			ret = -EFAULT;
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			ret = -EBADF;
			if (!filp)
				goto out;
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			ret = netlink_attachskb(sock, nc, 0,
					MAX_SCHEDULE_TIMEOUT, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}

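/*
 * Editor's illustrative sketch (not part of this revision): registering a
 * SIGEV_SIGNAL notification from user space.  Exactly one process may own
 * the registration (a second registration gets -EBUSY above), and it is
 * dropped after a single notification fires, as __do_notify() shows.
 * Names are arbitrary; build with -lrt.
 *
 *	#include <mqueue.h>
 *	#include <signal.h>
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	static void on_msg(int sig) { (void)sig; }	// queue went non-empty
 *
 *	int main(void)
 *	{
 *		struct sigevent sev = {
 *			.sigev_notify = SIGEV_SIGNAL,
 *			.sigev_signo  = SIGUSR1,
 *		};
 *		mqd_t q = mq_open("/editor-demo", O_CREAT | O_RDONLY, 0600, NULL);
 *
 *		if (q == (mqd_t)-1)
 *			return 1;
 *		signal(SIGUSR1, on_msg);
 *		if (mq_notify(q, &sev) == -1)
 *			return 1;
 *		pause();		// wait for the first message to arrive
 *		mq_close(q);
 *		return 0;
 *	}
 */
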
asmlinkage long sys_mq_getsetattr(mqd_t mqdes,
			const struct mq_attr __user *u_mqstat,
			struct mq_attr __user *u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		ret = audit_mq_getsetattr(mqdes, &mqstat);
		if (ret != 0)
			goto out;
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}

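/*
 * Editor's illustrative sketch (not part of this revision): mq_getattr()
 * and mq_setattr() both funnel into sys_mq_getsetattr() above.  Only the
 * O_NONBLOCK bit of mq_flags is settable; the size fields in the returned
 * struct are read-only snapshots.  Build with -lrt.
 *
 *	#include <mqueue.h>
 *	#include <fcntl.h>
 *
 *	int set_nonblocking(mqd_t q)
 *	{
 *		struct mq_attr cur, want = { .mq_flags = O_NONBLOCK };
 *
 *		if (mq_getattr(q, &cur) == -1)
 *			return -1;
 *		// cur.mq_curmsgs now holds the current queue depth
 *		return mq_setattr(q, &want, NULL);
 *	}
 */
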
static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
};

static struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.statfs = simple_statfs,
	.delete_inode = mqueue_delete_inode,
	.drop_inode = generic_delete_inode,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.get_sb = mqueue_get_sb,
	.kill_sb = kill_litter_super,
};

static int msg_max_limit_min = DFLT_MSGMAX;
static int msg_max_limit_max = HARD_MSGMAX;

static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX;
static int msg_maxsize_limit_max = INT_MAX;

static ctl_table mq_sysctls[] = {
	{
		.ctl_name	= CTL_QUEUESMAX,
		.procname	= "queues_max",
		.data		= &queues_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_MSGMAX,
		.procname	= "msg_max",
		.data		= &msg_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_max_limit_min,
		.extra2		= &msg_max_limit_max,
	},
	{
		.ctl_name	= CTL_MSGSIZEMAX,
		.procname	= "msgsize_max",
		.data		= &msgsize_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_maxsize_limit_min,
		.extra2		= &msg_maxsize_limit_max,
	},
	{ .ctl_name = 0 }
};

static ctl_table mq_sysctl_dir[] = {
	{
		.ctl_name	= FS_MQUEUE,
		.procname	= "mqueue",
		.mode		= 0555,
		.child		= mq_sysctls,
	},
	{ .ctl_name = 0 }
};

static ctl_table mq_sysctl_root[] = {
	{
		.ctl_name	= CTL_FS,
		.procname	= "fs",
		.mode		= 0555,
		.child		= mq_sysctl_dir,
	},
	{ .ctl_name = 0 }
};

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once, NULL);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = register_sysctl_table(mq_sysctl_root);

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	if (IS_ERR(mqueue_mnt = kern_mount(&mqueue_fs_type))) {
		error = PTR_ERR(mqueue_mnt);
		goto out_filesystem;
	}

	/* internal initialization - not common for vfs */
	queues_count = 0;
	spin_lock_init(&mq_lock);

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);