mqueue.c revision 4294a8eedb17bbc45e1e7447c2a4d05332943248
/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}
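
/*
 * Allocate and set up an inode on the mqueue superblock.  For regular
 * files (the queues themselves) this initializes the embedded
 * mqueue_inode_info, charges the worst-case memory footprint of the
 * queue (message pointer table plus mq_maxmsg * mq_msgsize) against the
 * creating user's RLIMIT_MSGQUEUE under mq_lock, and allocates the
 * message pointer table.  Returns NULL on failure.
 */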
static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, int mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;
			struct task_struct *p = current;
			unsigned long mq_bytes, mq_msg_tblsz;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->messages = NULL;
			info->notify_owner = NULL;
			info->qsize = 0;
			info->user = NULL;	/* set when all is ok */
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
			info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
			if (attr) {
				info->attr.mq_maxmsg = attr->mq_maxmsg;
				info->attr.mq_msgsize = attr->mq_msgsize;
			}
			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
			mq_bytes = (mq_msg_tblsz +
				(info->attr.mq_maxmsg * info->attr.mq_msgsize));

			spin_lock(&mq_lock);
			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
			    u->mq_bytes + mq_bytes >
			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			u->mq_bytes += mq_bytes;
			spin_unlock(&mq_lock);

			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
			if (!info->messages) {
				spin_lock(&mq_lock);
				u->mq_bytes -= mq_bytes;
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			/* all is ok */
			info->user = get_uid(u);
		} else if (S_ISDIR(mode)) {
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;
out_inode:
	make_bad_inode(inode);
	iput(inode);
	return NULL;
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;
	int error = 0;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO,
				NULL);
	if (!inode) {
		error = -ENOMEM;
		goto out;
	}

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		error = -ENOMEM;
	}

out:
	return error;
}

static int mqueue_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data, struct vfsmount *mnt)
{
	if (!(flags & MS_KERNMOUNT))
		data = current->nsproxy->ipc_ns;
	return get_sb_ns(fs_type, flags, data, mqueue_fill_super, mnt);
}
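
/*
 * Slab constructor: runs when a mqueue_inode_info object is first set up
 * in the cache; only the embedded VFS inode needs one-time initialization.
 */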
static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_delete_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;
	struct ipc_namespace *ipc_ns;

	if (S_ISDIR(inode->i_mode)) {
		clear_inode(inode);
		return;
	}
	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	clear_inode(inode);

	mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
		   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				int mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}
	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
			!capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (!inode) {
		error = -ENOMEM;
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}
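
/*
 * Unlinking only removes the name: the directory entry goes away and the
 * inode's link count drops.  Messages and the RLIMIT_MSGQUEUE accounting
 * are released in mqueue_delete_inode() once the last opener drops its
 * reference to the inode.
 */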
static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
* This is routine for system read from queue file.
* To avoid mess with doing here some sort of mq_receive we allow
* to read only queue size & notification info (the only values
* that are interesting from user point of view and aren't accessible
* through std routines)
*/
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	filp->f_path.dentry->d_inode->i_atime =
		filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
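
/*
 * A waiter woken by pipelined_send()/pipelined_receive() may briefly
 * observe its state as STATE_PENDING while the waker finishes the
 * handoff, which is why wq_sleep() below spins with cpu_relax() until
 * the state becomes STATE_READY.
 */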
/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
			long timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_timeout(timeout);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}
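
/*
 * info->messages is kept sorted by priority (m_type) in ascending order:
 * msg_insert() walks back from the tail, shifting messages of higher or
 * equal priority up and placing the new one below them, so a new message
 * never overtakes an older one of the same priority.  msg_get() can then
 * pop the highest-priority, oldest message from the end in O(1).
 */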
/* Auxiliary functions to manipulate messages' list */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function is only to split too long sys_mq_timedsend
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is registered process and there isn't process
	 * waiting synchronously for message AND state of queue changed from
	 * empty to not empty. Here we are sure that no one is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = current_uid();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		info->notify_owner = NULL;
	}
	wake_up(&info->wait_q);
}

static long prepare_timeout(struct timespec *p)
{
	struct timespec nowts;
	long timeout;

	if (p) {
		if (unlikely(p->tv_nsec < 0 || p->tv_sec < 0
			|| p->tv_nsec >= NSEC_PER_SEC))
			return -EINVAL;
		nowts = CURRENT_TIME;
		/* first subtract as jiffies can't be too big */
		p->tv_sec -= nowts.tv_sec;
		if (p->tv_nsec < nowts.tv_nsec) {
			p->tv_nsec += NSEC_PER_SEC;
			p->tv_sec--;
		}
		p->tv_nsec -= nowts.tv_nsec;
		if (p->tv_sec < 0)
			return 0;

		timeout = timespec_to_jiffies(p) + 1;
	} else
		return MAX_SCHEDULE_TIMEOUT;

	return timeout;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	info->notify_owner = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return 0;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX)
			return 0;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return 0;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return 0;
	if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
	    (attr->mq_maxmsg * sizeof (struct msg_msg *)) <
	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
		return 0;
	return 1;
}

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
			struct dentry *dentry, int oflag, mode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	struct file *result;
	int ret;

	if (attr) {
		ret = -EINVAL;
		if (!mq_attr_ok(ipc_ns, attr))
			goto out;
		/* store for use during create */
		dentry->d_fsdata = attr;
	}

	mode &= ~current_umask();
	ret = mnt_want_write(ipc_ns->mq_mnt);
	if (ret)
		goto out;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out_drop_write;

	result = dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
	/*
	 * dentry_open() took a persistent mnt_want_write(),
	 * so we can now drop this one.
	 */
	mnt_drop_write(ipc_ns->mq_mnt);
	return result;

out_drop_write:
	mnt_drop_write(ipc_ns->mq_mnt);
out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}

/* Opens existing queue */
static struct file *do_open(struct ipc_namespace *ipc_ns,
				struct dentry *dentry, int oflag)
{
	const struct cred *cred = current_cred();

	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		dput(dentry);
		mntput(ipc_ns->mq_mnt);
		return ERR_PTR(-EINVAL);
	}

	if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
		dput(dentry);
		mntput(ipc_ns->mq_mnt);
		return ERR_PTR(-EACCES);
	}

	return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	mutex_lock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_putfd;
	}
	mntget(ipc_ns->mq_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			audit_inode(name, dentry);
			error = -EEXIST;
			if (oflag & O_EXCL)
				goto out;
			filp = do_open(ipc_ns, dentry, oflag);
		} else {
			filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
						dentry, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		error = -ENOENT;
		if (!dentry->d_inode)
			goto out;
		audit_inode(name, dentry);
		filp = do_open(ipc_ns, dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
out_putfd:
	put_unused_fd(fd);
	fd = error;
out_upsem:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		atomic_inc(&inode->i_count);
	err = mnt_want_write(ipc_ns->mq_mnt);
	if (err)
		goto out_err;
	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
	mnt_drop_write(ipc_ns->mq_mnt);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}
/* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
 * gets its message and put to the queue (we have one free place for sure).
 */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	struct timespec ts, *p = NULL;
	long timeout;
	int ret;

	if (u_abs_timeout) {
		if (copy_from_user(&ts, u_abs_timeout,
					sizeof(struct timespec)))
			return -EFAULT;
		p = &ts;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, p);
	timeout = prepare_timeout(p);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_WRITE)))
		goto out_fput;

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
		if (ret < 0)
			free_msg(msg_ptr);
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
		ret = 0;
	}
out_fput:
	fput(filp);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	long timeout;
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	struct timespec ts, *p = NULL;

	if (u_abs_timeout) {
		if (copy_from_user(&ts, u_abs_timeout,
					sizeof(struct timespec)))
			return -EFAULT;
		p = &ts;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, p);
	timeout = prepare_timeout(p);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_READ)))
		goto out_fput;

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
			msg_ptr = NULL;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
			msg_ptr = NULL;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}

/*
 * Notes: the case when user wants us to deregister (with NULL as pointer)
 * and he isn't currently owner of notification, will be silently discarded.
 * It isn't explicitly defined in the POSIX.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			ret = -ENOMEM;
			if (!nc)
				goto out;
			ret = -EFAULT;
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			ret = -EBADF;
			if (!filp)
				goto out;
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&filp->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;
		spin_unlock(&filp->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
					sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.statfs = simple_statfs,
	.delete_inode = mqueue_delete_inode,
	.drop_inode = generic_delete_inode,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.get_sb = mqueue_get_sb,
	.kill_sb = kill_litter_super,
};
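
/*
 * Set up the per-namespace mqueue state: default queue limits and a
 * private kern_mount of the mqueue filesystem, with the ipc namespace
 * stashed in the superblock's s_fs_info (see __get_ns_from_inode()).
 */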
int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count = 0;
	ns->mq_queues_max = DFLT_QUEUESMAX;
	ns->mq_msg_max = DFLT_MSGMAX;
	ns->mq_msgsize_max = DFLT_MSGSIZEMAX;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	mntput(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
	if (IS_ERR(init_ipc_ns.mq_mnt)) {
		error = PTR_ERR(init_ipc_ns.mq_mnt);
		goto out_filesystem;
	}

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);