file.c revision dcfadfa4ec5a12404a99ad6426871a6b03a62b37
/*
 * linux/fs/file.c
 *
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
	spinlock_t lock;
	struct work_struct wq;
	struct fdtable *next;
};

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
int sysctl_nr_open_max = 1024 * 1024; /* raised later */

/*
 * We use this list to defer freeing fdtables that have vmalloced
 * sets/arrays. By keeping a per-cpu list, we avoid having to embed
 * the work_struct in fdtable itself, which avoids a 64 byte (i386)
 * increase in this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);

static void *alloc_fdmem(size_t size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by
	 * the VM.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}

static void free_fdmem(void *ptr)
{
	is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
}

static void __free_fdtable(struct fdtable *fdt)
{
	free_fdmem(fdt->fd);
	free_fdmem(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_work(struct work_struct *work)
{
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	spin_lock_bh(&f->lock);
	fdt = f->next;
	f->next = NULL;
	spin_unlock_bh(&f->lock);
	while (fdt) {
		struct fdtable *next = fdt->next;

		__free_fdtable(fdt);
		fdt = next;
	}
}

void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);

	if (fdt->max_fds <= NR_OPEN_DEFAULT) {
		/*
		 * This fdtable is embedded in the files structure and that
		 * structure itself is getting destroyed.
		 */
		kmem_cache_free(files_cachep,
				container_of(fdt, struct files_struct, fdtab));
		return;
	}
	if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}
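/*
 * free_fdtable_rcu() above is an RCU callback: lookups walk files->fdt
 * under rcu_read_lock(), so a replaced table may only be freed after a
 * grace period has elapsed. The companion helper that registers the
 * callback lives in <linux/fdtable.h>, not in this file; around this
 * revision its shape is roughly (a sketch, not verified against the
 * exact header):
 *
 *	static inline void free_fdtable(struct fdtable *fdt)
 *	{
 *		call_rcu(&fdt->rcu, free_fdtable_rcu);
 *	}
 */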
/*
 * Expand the fdset in the files_struct. Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this
	 * fdtable. Allocation steps are keyed to the size of the fdarray,
	 * since it grows far faster than any of the other dynamic data.
	 * We try to fit the fdarray into comfortable page-tuned chunks:
	 * starting at 1024B and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if
	 * sysctl_nr_open had been set lower between the check in
	 * expand_files() and here. Deal with that in the caller; it's
	 * cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG -
	 * otherwise the bitmap handling below becomes unpleasant, to put
	 * it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	/* a single allocation holds both bitmaps, back to back */
	data = alloc_fdmem(max_t(size_t,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	fdt->next = NULL;

	return fdt;

out_arr:
	free_fdmem(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}
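/*
 * A worked example of the sizing in alloc_fdtable() above, assuming a
 * 64-bit machine (sizeof(struct file *) == 8, so 1024B holds 128 slots)
 * and L1_CACHE_BYTES == 64: for nr == 350, nr /= 128 gives 2,
 * roundup_pow_of_two(3) gives 4, and nr *= 128 gives 512. That means a
 * 512 * 8 == 4096-byte (one page) fd array, and max_t picks
 * 2 * 512 / 8 == 128 bytes for the two bitmaps carved out of the single
 * allocation above.
 */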
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the
	 * check in caller and alloc_fdtable(). Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table
	 * while we dropped the lock.
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
			free_fdtable(cur_fdt);
	} else {
		/* Somebody else expanded, so undo our attempt */
		__free_fdtable(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}
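/*
 * Example of count_open_files(), assuming BITS_PER_LONG == 64: with
 * max_fds == 256 and the highest open descriptor being fd 70, words 3
 * and 2 of open_fds are zero, word 1 is not, so the scan stops at
 * i == 1 and the function returns (1 + 1) * 64 == 128. The result is
 * thus rounded up to a whole multiple of BITS_PER_LONG covering the
 * last open fd, which keeps the open_files / 8 bitmap copies in
 * dup_fd() below whole-word sized.
 */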
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->fd = &newf->fd_array[0];
	new_fdt->next = NULL;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * it may have grown again in the meantime, and we need
		 * the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
	memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open(). So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / BITS_PER_LONG;

		memset(&new_fdt->open_fds[start], 0, left);
		memset(&new_fdt->close_on_exec[start], 0, left);
	}

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static void __devinit fdtable_defer_list_init(int cpu)
{
	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
	spin_lock_init(&fddef->lock);
	INIT_WORK(&fddef->wq, free_fdtable_work);
	fddef->next = NULL;
}

void __init files_defer_init(void)
{
	int i;
	for_each_possible_cpu(i)
		fdtable_defer_list_init(i);
	sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
			     -BITS_PER_LONG;
}
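/*
 * The sysctl_nr_open_max expression above caps the table size at the
 * largest value whose fd array still fits in a size_t-sized allocation,
 * rounded down to a multiple of BITS_PER_LONG. Worked through (assuming
 * a 64-bit machine): ~(size_t)0 / sizeof(void *) is far above INT_MAX,
 * so the min picks INT_MAX (0x7fffffff), and the & -BITS_PER_LONG mask
 * yields 0x7fffffc0. On 32-bit, ~(size_t)0 / 4 == 0x3fffffff wins
 * instead, masked down to 0x3fffffc0.
 */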
struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_task.file_lock),
};

/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);
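/*
 * A sketch of the typical caller pattern for the allocators above; the
 * publish/cleanup side (fd_install(), put_unused_fd()) lives elsewhere
 * in the kernel, not in this file:
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = ...;			// obtain a struct file somehow
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);	// release the reserved slot
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);		// make the fd visible to userspace
 *	return fd;
 *
 * Between __alloc_fd() and fd_install() the bit is set in open_fds but
 * fdt->fd[fd] is still NULL - exactly the window the dup_fd() comment
 * about a sibling thread partway through open() refers to.
 */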