mqueue.c revision c8308b1c91056b09e96d40dbde4880ea685c377e
/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl)
 *                         Michal Wronski (michal.wronski@gmail.com)
 *
 * Spinlocks:              Mohamed Abbas (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                         Manfred Spraul (manfred@colorfullife.com)
 *
 * Audit:                  George Wilson (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid* notify_owner;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header * mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, int mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;
			struct task_struct *p = current;
			unsigned long mq_bytes, mq_msg_tblsz;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->notify_owner = NULL;
			info->qsize = 0;
			info->user = NULL;	/* set when all is ok */
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
			info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
			if (attr) {
				info->attr.mq_maxmsg = attr->mq_maxmsg;
				info->attr.mq_msgsize = attr->mq_msgsize;
			}
			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
			if (!info->messages)
				goto out_inode;

			mq_bytes = (mq_msg_tblsz +
				(info->attr.mq_maxmsg * info->attr.mq_msgsize));

			spin_lock(&mq_lock);
			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
			    u->mq_bytes + mq_bytes >
			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
				spin_unlock(&mq_lock);
				kfree(info->messages);
				goto out_inode;
			}
			u->mq_bytes += mq_bytes;
			spin_unlock(&mq_lock);

			/* all is ok */
			info->user = get_uid(u);
		} else if (S_ISDIR(mode)) {
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;
out_inode:
	make_bad_inode(inode);
	iput(inode);
	return NULL;
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;
	int error = 0;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO,
				NULL);
	if (!inode) {
		error = -ENOMEM;
		goto out;
	}

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		error = -ENOMEM;
	}

out:
	return error;
}

static int mqueue_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data, struct vfsmount *mnt)
{
	if (!(flags & MS_KERNMOUNT))
		data = current->nsproxy->ipc_ns;
	return get_sb_ns(fs_type, flags, data, mqueue_fill_super, mnt);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_delete_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;
	struct ipc_namespace *ipc_ns;

	if (S_ISDIR(inode->i_mode)) {
		clear_inode(inode);
		return;
	}
	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	clear_inode(inode);

	mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
		   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				int mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}
	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
			!capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (!inode) {
		error = -ENOMEM;
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
* This is routine for system read from queue file.
* To avoid mess with doing here some sort of mq_receive we allow
* to read only queue size & notification info (the only values
* that are interesting from user point of view and aren't accessible
* through std routines)
*/
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
			long timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_timeout(timeout);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

/* Auxiliary functions to manipulate messages' list */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function is only to split too long sys_mq_timedsend
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is registered process and there isn't process
	 * waiting synchronously for message AND state of queue changed from
	 * empty to not empty. Here we are sure that no one is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = current_uid();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		info->notify_owner = NULL;
	}
	wake_up(&info->wait_q);
}

static long prepare_timeout(struct timespec *p)
{
	struct timespec nowts;
	long timeout;

	if (p) {
		if (unlikely(p->tv_nsec < 0 || p->tv_sec < 0
			|| p->tv_nsec >= NSEC_PER_SEC))
			return -EINVAL;
		nowts = CURRENT_TIME;
		/* first subtract as jiffies can't be too big */
		p->tv_sec -= nowts.tv_sec;
		if (p->tv_nsec < nowts.tv_nsec) {
			p->tv_nsec += NSEC_PER_SEC;
			p->tv_sec--;
		}
		p->tv_nsec -= nowts.tv_nsec;
		if (p->tv_sec < 0)
			return 0;

		timeout = timespec_to_jiffies(p) + 1;
	} else
		return MAX_SCHEDULE_TIMEOUT;

	return timeout;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	info->notify_owner = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return 0;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX)
			return 0;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return 0;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return 0;
	if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
	    (attr->mq_maxmsg * sizeof (struct msg_msg *)) <
	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
		return 0;
	return 1;
}

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
			struct dentry *dentry, int oflag, mode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	struct file *result;
	int ret;

	if (attr) {
		ret = -EINVAL;
		if (!mq_attr_ok(ipc_ns, attr))
			goto out;
		/* store for use during create */
		dentry->d_fsdata = attr;
	}

	mode &= ~current_umask();
	ret = mnt_want_write(ipc_ns->mq_mnt);
	if (ret)
		goto out;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out_drop_write;

	result = dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
	/*
	 * dentry_open() took a persistent mnt_want_write(),
	 * so we can now drop this one.
	 */
	mnt_drop_write(ipc_ns->mq_mnt);
	return result;

out_drop_write:
	mnt_drop_write(ipc_ns->mq_mnt);
out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}

/* Opens existing queue */
static struct file *do_open(struct ipc_namespace *ipc_ns,
				struct dentry *dentry, int oflag)
{
	const struct cred *cred = current_cred();

	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		dput(dentry);
		mntput(ipc_ns->mq_mnt);
		return ERR_PTR(-EINVAL);
	}

	if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
		dput(dentry);
		mntput(ipc_ns->mq_mnt);
		return ERR_PTR(-EACCES);
	}

	return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	mutex_lock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_putfd;
	}
	mntget(ipc_ns->mq_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			audit_inode(name, dentry);
			error = -EEXIST;
			if (oflag & O_EXCL)
				goto out;
			filp = do_open(ipc_ns, dentry, oflag);
		} else {
			filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
						dentry, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		error = -ENOENT;
		if (!dentry->d_inode)
			goto out;
		audit_inode(name, dentry);
		filp = do_open(ipc_ns, dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
out_putfd:
	put_unused_fd(fd);
	fd = error;
out_upsem:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		atomic_inc(&inode->i_count);
	err = mnt_want_write(ipc_ns->mq_mnt);
	if (err)
		goto out_err;
	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
	mnt_drop_write(ipc_ns->mq_mnt);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
 * gets its message and put to the queue (we have one free place for sure). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	struct timespec ts, *p = NULL;
	long timeout;
	int ret;

	if (u_abs_timeout) {
		if (copy_from_user(&ts, u_abs_timeout,
				sizeof(struct timespec)))
			return -EFAULT;
		p = &ts;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, p);
	timeout = prepare_timeout(p);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_WRITE)))
		goto out_fput;

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
		if (ret < 0)
			free_msg(msg_ptr);
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
		ret = 0;
	}
out_fput:
	fput(filp);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	long timeout;
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	struct timespec ts, *p = NULL;

	if (u_abs_timeout) {
		if (copy_from_user(&ts, u_abs_timeout,
				sizeof(struct timespec)))
			return -EFAULT;
		p = &ts;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, p);
	timeout = prepare_timeout(p);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_READ)))
		goto out_fput;

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
			msg_ptr = NULL;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
			msg_ptr = NULL;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}

/*
 * Notes: the case when user wants us to deregister (with NULL as pointer)
 * and he isn't currently owner of notification, will be silently discarded.
 * It isn't explicitly defined in the POSIX.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			ret = -ENOMEM;
			if (!nc)
				goto out;
			ret = -EFAULT;
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			ret = -EBADF;
			if (!filp)
				goto out;
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&filp->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;
		spin_unlock(&filp->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
					sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.statfs = simple_statfs,
	.delete_inode = mqueue_delete_inode,
	.drop_inode = generic_delete_inode,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.get_sb = mqueue_get_sb,
	.kill_sb = kill_litter_super,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count = 0;
	ns->mq_queues_max = DFLT_QUEUESMAX;
	ns->mq_msg_max = DFLT_MSGMAX;
	ns->mq_msgsize_max = DFLT_MSGSIZEMAX;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	mntput(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failues - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
	if (IS_ERR(init_ipc_ns.mq_mnt)) {
		error = PTR_ERR(init_ipc_ns.mq_mnt);
		goto out_filesystem;
	}

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);
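
For reference, a minimal userspace sketch (not part of mqueue.c) that exercises the syscall paths above through the POSIX wrappers in librt. The queue name "/demo" and the attribute values are arbitrary examples; build with -lrt on glibc systems.

/* Illustrative only: mq_open() with O_CREAT reaches do_create(), plain opens
 * reach do_open(), mq_send()/mq_receive() reach the timedsend/timedreceive
 * syscalls, and mq_unlink() removes the dentry via mqueue_unlink(). */
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 64 };
	char buf[64];
	unsigned int prio;
	ssize_t n;
	mqd_t q;

	q = mq_open("/demo", O_CREAT | O_RDWR, 0600, &attr);
	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}

	/* the priority argument becomes msg_ptr->m_type and drives the
	 * insertion-sort ordering in msg_insert() */
	if (mq_send(q, "hello", 5, 3) == -1)
		perror("mq_send");

	/* the receive buffer must be at least mq_msgsize bytes, matching the
	 * msg_len < mq_msgsize check in the kernel */
	n = mq_receive(q, buf, sizeof(buf), &prio);
	if (n >= 0)
		printf("got %zd bytes at priority %u\n", n, prio);

	mq_close(q);
	mq_unlink("/demo");
	return 0;
}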