mqueue.c revision f1eb1332b8f07e937add24c6fd2ac40b8737a2f4
/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl)
 *                         Michal Wronski (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid* notify_owner;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header * mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

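/*
 * Note: struct mqueue_inode_info embeds its struct inode (vfs_inode), so
 * MQUEUE_I() can map a VFS inode back to the queue state with container_of().
 * mqueue_alloc_inode()/mqueue_destroy_inode() below allocate and free both
 * objects together from mqueue_inode_cachep.
 */
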
/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, int mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;
			struct task_struct *p = current;
			unsigned long mq_bytes, mq_msg_tblsz;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->notify_owner = NULL;
			info->qsize = 0;
			info->user = NULL;	/* set when all is ok */
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
			info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
			if (attr) {
				info->attr.mq_maxmsg = attr->mq_maxmsg;
				info->attr.mq_msgsize = attr->mq_msgsize;
			}
			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
			if (!info->messages)
				goto out_inode;

			mq_bytes = (mq_msg_tblsz +
				(info->attr.mq_maxmsg * info->attr.mq_msgsize));

			spin_lock(&mq_lock);
			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
			    u->mq_bytes + mq_bytes >
			    task_rlimit(p, RLIMIT_MSGQUEUE)) {
				spin_unlock(&mq_lock);
				kfree(info->messages);
				goto out_inode;
			}
			u->mq_bytes += mq_bytes;
			spin_unlock(&mq_lock);

			/* all is ok */
			info->user = get_uid(u);
		} else if (S_ISDIR(mode)) {
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;
out_inode:
	make_bad_inode(inode);
	iput(inode);
	return NULL;
}

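/*
 * Accounting note: a queue created above charges
 *	mq_bytes = mq_maxmsg * sizeof(struct msg_msg *) + mq_maxmsg * mq_msgsize
 * against the creating user's RLIMIT_MSGQUEUE; the charge is dropped again
 * in mqueue_delete_inode().  For example, with mq_maxmsg = 10 and
 * mq_msgsize = 8192 this comes to 10 * 8 + 10 * 8192 = 82000 bytes on a
 * 64-bit build (illustrative figures only; the actual defaults come from
 * DFLT_MSGMAX and DFLT_MSGSIZEMAX via mq_init_ns() below).
 */
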
static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;
	int error;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO,
				NULL);
	if (!inode) {
		error = -ENOMEM;
		goto out;
	}

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		error = -ENOMEM;
		goto out;
	}
	error = 0;

out:
	return error;
}

static int mqueue_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data, struct vfsmount *mnt)
{
	if (!(flags & MS_KERNMOUNT))
		data = current->nsproxy->ipc_ns;
	return get_sb_ns(fs_type, flags, data, mqueue_fill_super, mnt);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_delete_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;
	struct ipc_namespace *ipc_ns;

	if (S_ISDIR(inode->i_mode)) {
		clear_inode(inode);
		return;
	}
	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	clear_inode(inode);

	/* Total amount of bytes accounted for the mqueue */
	mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *)
	    + info->attr.mq_msgsize);
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				int mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}
	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
			!capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (!inode) {
		error = -ENOMEM;
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

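/*
 * Unlinking only removes the name: roughly speaking, the inode (and with it
 * the queue and any pending messages) stays around until the last file
 * descriptor referring to it is closed, at which point the final iput()
 * reaches mqueue_delete_inode() above via generic_delete_inode().  This
 * matches the usual mq_unlink() semantics.
 */
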
/*
* This is routine for system read from queue file.
* To avoid mess with doing here some sort of mq_receive we allow
* to read only queue size & notification info (the only values
* that are interesting from user point of view and aren't accessible
* through std routines)
*/
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return ret;
}

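/*
 * Reading a queue file (e.g. "cat /dev/mqueue/foo" on a mounted mqueue
 * filesystem) therefore returns a single line in the format built above,
 * for example (illustrative values only):
 *
 *	QSIZE:129 NOTIFY:2 SIGNO:0 NOTIFY_PID:8260
 *
 * where QSIZE is the number of message bytes currently queued and the
 * remaining fields describe any mq_notify() registration.
 */
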
static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
				long timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_timeout(timeout);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

/* Auxiliary functions to manipulate messages' list */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}

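/*
 * The messages[] array is kept sorted by priority (m_type), lowest first,
 * so msg_get() pops the highest-priority message from the tail in O(1),
 * while msg_insert() does an O(mq_curmsgs) insertion.  Because msg_insert()
 * shifts entries of equal or higher priority towards the tail, messages of
 * equal priority are delivered oldest-first, as POSIX requires.
 */
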
static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function is only to split too long sys_mq_timedsend
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is registered process and there isn't process
	 * waiting synchronously for message AND state of queue changed from
	 * empty to not empty. Here we are sure that no one is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = current_uid();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		info->notify_owner = NULL;
	}
	wake_up(&info->wait_q);
}

static long prepare_timeout(struct timespec *p)
{
	struct timespec nowts;
	long timeout;

	if (p) {
		if (unlikely(p->tv_nsec < 0 || p->tv_sec < 0
			|| p->tv_nsec >= NSEC_PER_SEC))
			return -EINVAL;
		nowts = CURRENT_TIME;
		/* first subtract as jiffies can't be too big */
		p->tv_sec -= nowts.tv_sec;
		if (p->tv_nsec < nowts.tv_nsec) {
			p->tv_nsec += NSEC_PER_SEC;
			p->tv_sec--;
		}
		p->tv_nsec -= nowts.tv_nsec;
		if (p->tv_sec < 0)
			return 0;

		timeout = timespec_to_jiffies(p) + 1;
	} else
		return MAX_SCHEDULE_TIMEOUT;

	return timeout;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	info->notify_owner = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return 0;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX)
			return 0;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return 0;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return 0;
	if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize
	    + sizeof (struct msg_msg *))) <
	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
		return 0;
	return 1;
}

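/*
 * The two overflow checks above guard the later size computations: the
 * division check rejects attributes where mq_maxmsg * mq_msgsize would not
 * fit in an unsigned long, and the second comparison rejects the case where
 * adding the per-slot sizeof(struct msg_msg *) overhead wraps the total.
 * Both products feed the RLIMIT_MSGQUEUE accounting in mqueue_get_inode().
 */
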
/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
			struct dentry *dentry, int oflag, mode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	struct file *result;
	int ret;

	if (attr) {
		if (!mq_attr_ok(ipc_ns, attr)) {
			ret = -EINVAL;
			goto out;
		}
		/* store for use during create */
		dentry->d_fsdata = attr;
	}

	mode &= ~current_umask();
	ret = mnt_want_write(ipc_ns->mq_mnt);
	if (ret)
		goto out;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out_drop_write;

	result = dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
	/*
	 * dentry_open() took a persistent mnt_want_write(),
	 * so we can now drop this one.
	 */
	mnt_drop_write(ipc_ns->mq_mnt);
	return result;

out_drop_write:
	mnt_drop_write(ipc_ns->mq_mnt);
out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}

/* Opens existing queue */
static struct file *do_open(struct ipc_namespace *ipc_ns,
				struct dentry *dentry, int oflag)
{
	int ret;
	const struct cred *cred = current_cred();

	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		ret = -EINVAL;
		goto err;
	}

	if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
		ret = -EACCES;
		goto err;
	}

	return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);

err:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	mutex_lock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_putfd;
	}
	mntget(ipc_ns->mq_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			audit_inode(name, dentry);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(ipc_ns, dentry, oflag);
		} else {
			filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
						dentry, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		if (!dentry->d_inode) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, dentry);
		filp = do_open(ipc_ns, dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
out_putfd:
	put_unused_fd(fd);
	fd = error;
out_upsem:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		atomic_inc(&inode->i_count);
	err = mnt_want_write(ipc_ns->mq_mnt);
	if (err)
		goto out_err;
	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
	mnt_drop_write(ipc_ns->mq_mnt);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}

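/*
 * The STATE_PENDING/STATE_READY handshake above pairs with the loop in
 * wq_sleep(): the woken task spins with cpu_relax() while its state is still
 * STATE_PENDING, so it never reads ->msg before the sender has published it,
 * and the smp_wmb() orders the message and list updates ahead of the final
 * STATE_READY store.  The receive fast path thus completes without retaking
 * info->lock.
 */
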
/* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
 * gets its message and put to the queue (we have one free place for sure).
 */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	struct timespec ts, *p = NULL;
	long timeout;
	int ret;

	if (u_abs_timeout) {
		if (copy_from_user(&ts, u_abs_timeout,
					sizeof(struct timespec)))
			return -EFAULT;
		p = &ts;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, p);
	timeout = prepare_timeout(p);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
		if (ret < 0)
			free_msg(msg_ptr);
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
		ret = 0;
	}
out_fput:
	fput(filp);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	long timeout;
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	struct timespec ts, *p = NULL;

	if (u_abs_timeout) {
		if (copy_from_user(&ts, u_abs_timeout,
					sizeof(struct timespec)))
			return -EFAULT;
		p = &ts;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, p);
	timeout = prepare_timeout(p);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
			msg_ptr = NULL;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
			msg_ptr = NULL;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}

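/*
 * Illustrative userspace use of the two syscalls above, via the usual librt
 * wrappers (sketch only, not part of this file):
 *
 *	#include <mqueue.h>
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t q = mq_open("/myq", O_CREAT | O_RDWR, 0600, &attr);
 *	mq_send(q, "hi", 3, 1);			(reaches sys_mq_timedsend)
 *	char buf[128];
 *	unsigned int prio;
 *	mq_receive(q, buf, sizeof(buf), &prio);	(reaches sys_mq_timedreceive)
 *	mq_close(q);
 *	mq_unlink("/myq");
 *
 * The receive buffer must be at least mq_msgsize bytes, matching the
 * -EMSGSIZE check above.
 */
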
/*
 * Notes: the case when user wants us to deregister (with NULL as pointer)
 * and he isn't currently owner of notification, will be silently discarded.
 * It isn't explicitly defined in the POSIX.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			if (!filp) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}

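/*
 * Background on the SIGEV_THREAD branch above: the kernel does not start a
 * thread itself.  Userspace (e.g. glibc) passes the fd of an AF_NETLINK
 * socket in sigev_signo and a NOTIFY_COOKIE_LEN byte cookie in
 * sigev_value.sival_ptr; __do_notify() later stamps the cookie with
 * NOTIFY_WOKENUP (or remove_notification() with NOTIFY_REMOVED) and sends it
 * back over that socket, and the library's helper thread reacts to it.
 */
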
SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&filp->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;
		spin_unlock(&filp->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
					sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.statfs = simple_statfs,
	.delete_inode = mqueue_delete_inode,
	.drop_inode = generic_delete_inode,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.get_sb = mqueue_get_sb,
	.kill_sb = kill_litter_super,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count = 0;
	ns->mq_queues_max = DFLT_QUEUESMAX;
	ns->mq_msg_max = DFLT_MSGMAX;
	ns->mq_msgsize_max = DFLT_MSGSIZEMAX;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	mntput(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
	if (IS_ERR(init_ipc_ns.mq_mnt)) {
		error = PTR_ERR(init_ipc_ns.mq_mnt);
		goto out_filesystem;
	}

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);
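
/*
 * The filesystem registered above is also user-mountable, which is how the
 * queue directory usually becomes visible:
 *
 *	mount -t mqueue none /dev/mqueue
 *
 * Each IPC namespace additionally gets its own internal mount through
 * mq_init_ns()/kern_mount_data(), so queue names are per-namespace.
 */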