kmemleak.c revision bab4a34afc301fdb81b6ea0e3098d96fc356e03a
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed
 * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs
 *   file together with modifications to the memory scanning parameters
 *   including the scan_thread pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
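
/*
 * Illustrative sketch (not part of the original code) of the use_count
 * pattern described above, as implemented by find_and_get_object() further
 * down. The lookup step is assumed to happen under kmemleak_lock:
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);
 *	if (object && !get_object(object))
 *		object = NULL;		(RCU freeing already scheduled)
 *	rcu_read_unlock();
 *	if (object) {
 *		... read or update the metadata under object->lock ...
 *		put_object(object);
 *	}
 */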

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define REPORTS_NR		50	/* maximum number of reported leaks */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define MSECS_SCAN_YIELD	10	/* CPU yielding period */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long offset;
	size_t length;
};

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* used for yielding the CPU to other tasks during scanning */
static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
static unsigned long jiffies_scan_yield;
static unsigned long jiffies_min_age;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* mutex protecting the memory scanning */
static DEFINE_MUTEX(scan_mutex);
/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */
static DEFINE_MUTEX(kmemleak_mutex);

/* number of leaks reported (for limitation purposes) */
static int reported_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */
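
/*
 * Example of the early logging flow sketched above ("p" is a hypothetical
 * pointer; the real callers are the kernel allocators):
 *
 *	kmemleak_alloc(p, 32, 1, GFP_KERNEL);	before kmemleak_init()
 *		-> log_early(KMEMLEAK_ALLOC, p, 32, 1, 0, 0)
 *	...
 *	kmemleak_init();
 *		-> replays early_log[] and calls kmemleak_alloc(p, 32, 1,
 *		   GFP_KERNEL) again, this time creating the metadata
 */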

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long offset;		/* scan area offset */
	size_t length;			/* scan area length */
};

/* early logging buffer and current position */
static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int crt_early_log;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as a false positive (min_count == 0) or
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *	     (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static int color_white(const struct kmemleak_object *object)
{
	return object->count != -1 && object->count < object->min_count;
}

static int color_gray(const struct kmemleak_object *object)
{
	return object->min_count != -1 && object->count >= object->min_count;
}
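
/*
 * Worked example of the color encoding above (hypothetical values): an
 * object created with min_count == 1 starts with count == -1 and thus has
 * no color. After a scan pass that found no pointers to it, count == 0 is
 * below min_count, so it is white and becomes a leak candidate once old
 * enough. If the scan finds two pointers instead, count == 2 >= min_count
 * and the object is gray. A block passed to kmemleak_ignore() gets
 * min_count == -1 and is black: it is never reported and its contents are
 * never scanned for references to other blocks.
 */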

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static int unreferenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
		time_is_before_eq_jiffies(object->jiffies + jiffies_min_age);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
		   object->comm, object->pid, object->jiffies);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look up a memory block's metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias\n");
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}
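
/*
 * Example of the alias semantics (hypothetical addresses): for a block
 * tracked as [0x1000, 0x1000 + 128), the tree_node spans start == 0x1000
 * and last == 0x107f, so lookup_object(0x1040, 1) returns its metadata (an
 * interior "alias" pointer), while lookup_object(0x1040, 0) warns and
 * returns NULL. Scanning uses alias == 1 since any pointer into a block
 * keeps it referenced; freeing uses alias == 0 since only the start
 * address may be freed.
 */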

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static void create_object(unsigned long ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;
	struct stack_trace trace;

	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!object) {
		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
		return;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = -1;			/* no color initially */
	object->jiffies = jiffies;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	trace.max_entries = MAX_TRACE;
	trace.nr_entries = 0;
	trace.entries = object->trace;
	trace.skip = 1;
	save_stack_trace(&trace);
	object->trace_len = trace.nr_entries;

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);
	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it. However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		unsigned long flags;

		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock_irqsave(&object->lock, flags);
		dump_object_info(object);
		spin_unlock_irqrestore(&object->lock, flags);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
}
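
/*
 * Sketch of how an allocator is expected to pair the callbacks defined
 * further below (hypothetical caller; the real hooks live in the
 * slab/vmalloc code):
 *
 *	obj = alloc_from_somewhere(size);
 *	kmemleak_alloc(obj, size, 1, gfp);	min_count == 1: at least one
 *						pointer must reference obj
 *	...
 *	kmemleak_free(obj);
 *	free_to_somewhere(obj);
 */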

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void delete_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
		write_unlock_irqrestore(&kmemleak_lock, flags);
		return;
	}
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = 0;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark the object as black-colored so that it is ignored during scanning
 * and leak reporting.
 */
static void make_black_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = -1;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, unsigned long offset,
			  size_t length, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!area) {
		kmemleak_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (offset + length > object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->offset = offset;
	area->length = length;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}
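
/*
 * Example of restricting the scanned range (hypothetical structure): if
 * only one member of a large object can legitimately hold pointers, the
 * owner can register just that range via kmemleak_scan_area() below so
 * that stale values in the rest of the block are not misread as
 * references:
 *
 *	struct foo {
 *		char data[512];			(opaque payload)
 *		struct foo *next;		(the only real pointer)
 *	};
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	kmemleak_scan_area(f, offsetof(struct foo, next),
 *			   sizeof(f->next), GFP_KERNEL);
 */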

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void log_early(int op_type, const void *ptr, size_t size,
		      int min_count, unsigned long offset, size_t length)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		pr_warning("Early log buffer exceeded\n");
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->offset = offset;
	log->length = length;
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
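
/*
 * Example use of kmemleak_not_leak() (hypothetical device driver; register
 * names are made up): a buffer whose only reference was handed to hardware
 * is unreachable from the scanned memory and would otherwise be reported:
 *
 *	buf = kmalloc(size, GFP_KERNEL);
 *	writel(virt_to_phys(buf), base + DMA_ADDR_REG);
 *	kmemleak_not_leak(buf);		gray: never reported, still scanned
 */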

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length,
			gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, offset, length, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/*
 * Inform kmemleak not to scan the given memory block.
 */
void kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
 * Yield the CPU so that other tasks get a chance to run. The yielding is
 * rate-limited to avoid an excessive number of calls to the schedule()
 * function during memory scanning.
 */
static void scan_yield(void)
{
	might_sleep();

	if (time_is_before_eq_jiffies(next_scan_yield)) {
		schedule();
		next_scan_yield = jiffies + jiffies_scan_yield;
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		unsigned long flags;
		unsigned long pointer = *ptr;
		struct kmemleak_object *object;

		if (scan_should_stop())
			break;

		/*
		 * When scanning a memory block with a corresponding
		 * kmemleak_object, the CPU yielding is handled in the calling
		 * code since it holds the object->lock to avoid the block
		 * being freed.
		 */
		if (!scanned)
			scan_yield();

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object))
			list_add_tail(&object->gray_list, &gray_list);
		else
			put_object(object);
		spin_unlock_irqrestore(&object->lock, flags);
	}
}
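
/*
 * Example of the word-by-word walk above (hypothetical 32-bit layout):
 * scanning the 16-byte block at 0x2000 reads the words at 0x2000, 0x2004,
 * 0x2008 and 0x200c and treats each value as a candidate pointer. Values
 * outside the [min_addr, max_addr) range are rejected cheaply by
 * find_and_get_object() before the tree lookup.
 */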

/*
 * Scan a memory block corresponding to a kmemleak_object. This function
 * assumes that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list))
		scan_block((void *)object->pointer,
			   (void *)(object->pointer + object->size), object);
	else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)(object->pointer + area->offset),
				   (void *)(object->pointer + area->offset
					    + area->length), object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object, *tmp;
	struct task_struct *task;
	int i;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL);
	scan_block(__bss_start, __bss_stop, NULL);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL);
#endif

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}

	/*
	 * Scanning the task stacks may introduce false negatives and it is
	 * not enabled by default.
	 */
	if (kmemleak_stack_scan) {
		read_lock(&tasklist_lock);
		for_each_process(task)
			scan_block(task_stack_page(task),
				   task_stack_page(task) + THREAD_SIZE, NULL);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above. More objects will be referenced and, if there are no memory
	 * leaks, all the objects will be scanned. The list traversal is safe
	 * for both tail additions and removals from inside the loop. The
	 * kmemleak objects cannot be freed from outside the loop because their
	 * use_count was increased.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		scan_yield();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		struct kmemleak_object *object;
		signed long timeout = jiffies_scan_wait;
		int new_leaks = 0;

		mutex_lock(&scan_mutex);

		kmemleak_scan();

		rcu_read_lock();
		list_for_each_entry_rcu(object, &object_list, object_list) {
			unsigned long flags;

			spin_lock_irqsave(&object->lock, flags);
			if (unreferenced_object(object) &&
			    !(object->flags & OBJECT_REPORTED)) {
				object->flags |= OBJECT_REPORTED;
				new_leaks++;
			}
			spin_unlock_irqrestore(&object->lock, flags);
		}
		rcu_read_unlock();

		if (new_leaks)
			pr_info("%d new suspected memory leaks (see "
				"/sys/kernel/debug/kmemleak)\n", new_leaks);

		mutex_unlock(&scan_mutex);
		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}
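
/*
 * Worked example of the scan above (hypothetical objects): A is pointed to
 * from .data, B only from inside A, and C from nowhere. The section scan
 * grays A and queues it on gray_list; scanning A finds the pointer to B,
 * which becomes gray and is queued in turn; scanning B finds nothing new.
 * When the loop drains the list, A and B are referenced while C stayed
 * white and will be reported once it is older than jiffies_min_age.
 */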

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the kmemleak_mutex held.
 */
void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the kmemleak_mutex held.
 */
void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scanning when the pos argument points to the first position.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;

	if (!n) {
		kmemleak_scan();
		reported_leaks = 0;
	}
	if (reported_leaks >= REPORTS_NR)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	rcu_read_unlock();
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);
	if (reported_leaks >= REPORTS_NR)
		goto out;

	rcu_read_lock();
	list_for_each_continue_rcu(n, &object_list) {
		next_obj = list_entry(n, struct kmemleak_object, object_list);
		if (get_object(next_obj))
			break;
	}
	rcu_read_unlock();
out:
	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		put_object(v);
}
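
/*
 * Illustrative output when reading /sys/kernel/debug/kmemleak (all values
 * and symbols are made up; the format follows print_unreferenced() above):
 *
 *	unreferenced object 0xc92d6a60 (size 64):
 *	  comm "modprobe", pid 720, jiffies 4294902173
 *	  backtrace:
 *	    [<c016fe24>] some_driver_init+0x24/0x5c
 *	    [<c0101234>] do_one_initcall+0x34/0x1c0
 */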

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if (!unreferenced_object(object))
		goto out;
	print_unreferenced(seq, object);
	reported_leaks++;
out:
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	ret = mutex_lock_interruptible(&kmemleak_mutex);
	if (ret < 0)
		goto out;
	if (file->f_mode & FMODE_READ) {
		ret = mutex_lock_interruptible(&scan_mutex);
		if (ret < 0)
			goto kmemleak_unlock;
		ret = seq_open(file, &kmemleak_seq_ops);
		if (ret < 0)
			goto scan_unlock;
	}
	return ret;

scan_unlock:
	mutex_unlock(&scan_mutex);
kmemleak_unlock:
	mutex_unlock(&kmemleak_mutex);
out:
	return ret;
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (file->f_mode & FMODE_READ) {
		seq_release(inode, file);
		mutex_unlock(&scan_mutex);
	}
	mutex_unlock(&kmemleak_mutex);

	return ret;
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;
		int err;

		err = strict_strtoul(buf + 5, 0, &secs);
		if (err < 0)
			return err;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else
		return -EINVAL;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static int kmemleak_cleanup_thread(void *arg)
{
	struct kmemleak_object *object;

	mutex_lock(&kmemleak_mutex);
	stop_scan_thread();
	mutex_unlock(&kmemleak_mutex);

	mutex_lock(&scan_mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);

	return 0;
}

/*
 * Start the clean-up thread.
 */
static void kmemleak_cleanup(void)
{
	struct task_struct *cleanup_thread;

	cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
				     "kmemleak-clean");
	if (IS_ERR(cleanup_thread))
		pr_warning("Failed to create the clean-up thread\n");
}

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		kmemleak_cleanup();

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") != 0)
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
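
/*
 * Example run-time usage of the interface above (the debugfs file is
 * created in kmemleak_late_init() below):
 *
 *	echo scan=on > /sys/kernel/debug/kmemleak	start the scan thread
 *	echo scan=30 > /sys/kernel/debug/kmemleak	scan every 30 seconds
 *	echo stack=off > /sys/kernel/debug/kmemleak	skip task stacks
 *	echo off > /sys/kernel/debug/kmemleak		disable (irreversible)
 *
 * Booting with "kmemleak=off" on the kernel command line disables kmemleak
 * via kmemleak_boot_config() above.
 */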

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

	jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD);
	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			kmemleak_alloc(log->ptr, log->size, log->min_count,
				       GFP_KERNEL);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->offset, log->length,
					   GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		kmemleak_cleanup();
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&kmemleak_mutex);
	start_scan_thread();
	mutex_unlock(&kmemleak_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);