kmemleak.c revision 0494e08281d08f0a3dc442eb5e5cecc125b53b27
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
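/*
 * Illustrative sketch (not part of the original file) of the reference
 * counting and locking contract described above. find_and_get_object() and
 * put_object() are defined further down in this file; the block is disabled
 * and only shows the expected calling pattern.
 */
#if 0
static void example_inspect_object(unsigned long ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	/* takes rcu_read_lock() internally and increments use_count */
	object = find_and_get_object(ptr, 0);
	if (!object)
		return;

	/* holding object->lock also pins the corresponding memory block */
	spin_lock_irqsave(&object->lock, flags);
	/* ... inspect or modify the metadata ... */
	spin_unlock_irqrestore(&object->lock, flags);

	/* once use_count drops to 0, freeing is deferred to an RCU callback */
	put_object(object);
}
#endif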
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define GRAY_LIST_PASSES	25	/* maximum number of gray list scans */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long offset;
	size_t length;
};

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set on newly allocated objects */
#define OBJECT_NEW		(1 << 3)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long offset;		/* scan area offset */
	size_t length;			/* scan area length */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	int i, len, remaining;
	unsigned char linebuf[HEX_ROW_SIZE * 5];

	/* limit the number of lines to HEX_MAX_LINES */
	remaining = len =
		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));

	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
	for (i = 0; i < len; i += HEX_ROW_SIZE) {
		int linelen = min(remaining, HEX_ROW_SIZE);

		remaining -= HEX_ROW_SIZE;
		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
				   HEX_ASCII);
		seq_printf(seq, "    %s\n", linebuf);
	}
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as a false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static int color_white(const struct kmemleak_object *object)
{
	return object->count != -1 && object->count < object->min_count;
}

static int color_gray(const struct kmemleak_object *object)
{
	return object->min_count != -1 && object->count >= object->min_count;
}

static int color_black(const struct kmemleak_object *object)
{
	return object->min_count == -1;
}
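/*
 * Worked example of the color encoding above (illustrative, not part of the
 * original file; the block is disabled and only exercises the predicates).
 */
#if 0
static void color_example(void)
{
	struct kmemleak_object ex = { .min_count = 1, .count = -1 };

	/* freshly created: count == -1, no color yet */
	ex.count = 0;			/* a scan found no references */
	BUG_ON(!color_white(&ex));	/* leak candidate */

	ex.min_count = 0;		/* e.g. after kmemleak_not_leak() */
	BUG_ON(!color_gray(&ex));	/* still scanned, never reported */

	ex.min_count = -1;		/* e.g. after kmemleak_ignore() */
	BUG_ON(!color_black(&ex));	/* ignored during reporting */
}
#endif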
/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static int unreferenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}
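/*
 * Worked example for the age check above (illustrative): with
 * MSECS_MIN_AGE == 5000, a white object allocated less than five seconds
 * before jiffies_last_scan is not yet reported; this filters out pointers
 * that may still be in flight in CPU registers when the scan starts.
 */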
/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
		   object->comm, object->pid, object->jiffies);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases in kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias");
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise.
 * Note that once an object's use_count reaches 0, the RCU freeing has
 * already been registered and the object should no longer be used. This
 * function must be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}
/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;

	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!object) {
		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = -1;			/* no color initially */
	object->jiffies = jiffies;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);
	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it. However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		unsigned long flags;

		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock_irqsave(&object->lock, flags);
		dump_object_info(object);
		spin_unlock_irqrestore(&object->lock, flags);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	write_lock_irqsave(&kmemleak_lock, flags);
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}
	__delete_object(object);

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}
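/*
 * Worked example for the split above (illustrative, not from the original
 * source): if an object covers [0x1000, 0x1400) and the range
 * [0x1100, 0x1200) is partially freed, the original object is deleted and
 * two new ones are created for the remainders [0x1000, 0x1100) and
 * [0x1200, 0x1400), inheriting the old min_count.
 */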
/*
 * Mark the object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = 0;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = -1;
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, unsigned long offset,
			  size_t length, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!area) {
		kmemleak_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (offset + length > object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->offset = offset;
	area->length = length;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count, unsigned long offset, size_t length)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		pr_warning("Early log buffer exceeded\n");
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->offset = offset;
	log->length = length;
	if (op_type == KMEMLEAK_ALLOC)
		log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_KERNEL);
	/* create_object() returns NULL on allocation failure */
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}
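/*
 * Illustrative sequence for the early logging machinery above (not from the
 * original source): a kmalloc() issued before kmemleak_init() runs is
 * recorded by log_early() as a KMEMLEAK_ALLOC entry together with its stack
 * trace; kmemleak_init() later replays the entry through early_alloc(),
 * which creates the metadata with the originally recorded trace.
 */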
/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/*
 * Partial memory freeing function callback. This function is usually called
 * from the bootmem allocator when (part of) a memory block is freed.
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
			      size_t length, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, offset, length, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
}
EXPORT_SYMBOL(kmemleak_scan_area);
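/*
 * Illustrative sketch of how a custom allocator would pair the callbacks
 * above (not part of the original file; my_alloc/my_free and the backend_*
 * helpers are hypothetical names).
 */
#if 0
static void *my_alloc(size_t size)
{
	void *ptr = backend_alloc(size);

	if (ptr)
		/* min_count == 1: report a leak if no pointer is found */
		kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
	return ptr;
}

static void my_free(void *ptr)
{
	kmemleak_free(ptr);	/* drop the metadata before the block */
	backend_free(ptr);
}
#endif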
/*
 * Inform kmemleak not to scan the given memory block.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		unsigned long flags;
		unsigned long pointer = *ptr;
		struct kmemleak_object *object;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object))
			list_add_tail(&object->gray_list, &gray_list);
		else
			put_object(object);
		spin_unlock_irqrestore(&object->lock, flags);
	}
}
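/*
 * Worked example for the range adjustment above (illustrative): with 8-byte
 * pointers, scanning the block [0x1003, 0x1023) rounds start up to 0x1008
 * and pulls end back to 0x101c, so only the words at 0x1008, 0x1010 and
 * 0x1018 are read; solely fully contained, aligned words are treated as
 * candidate pointers.
 */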
/*
 * Scan a memory block corresponding to a kmemleak_object. The caller must
 * ensure that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
		       !(object->flags & OBJECT_NO_SCAN)) {
			scan_block(start, min(start + MAX_SCAN_SIZE, end),
				   object, 0);
			start += MAX_SCAN_SIZE;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		}
	} else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)(object->pointer + area->offset),
				   (void *)(object->pointer + area->offset
					    + area->length), object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object, *tmp;
	struct task_struct *task;
	int i;
	int new_leaks = 0;
	int gray_list_pass = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		object->flags &= ~OBJECT_NEW;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}
	/*
	 * Scanning the task stacks may introduce false negatives and it is
	 * not enabled by default.
	 */
	if (kmemleak_stack_scan) {
		read_lock(&tasklist_lock);
		for_each_process(task)
			scan_block(task_stack_page(task),
				   task_stack_page(task) + THREAD_SIZE,
				   NULL, 0);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above. More objects will be referenced and, if there are no memory
	 * leaks, all the objects will be scanned. The list traversal is safe
	 * for both tail additions and removals from inside the loop. The
	 * kmemleak objects cannot be freed from outside the loop because their
	 * use_count was increased.
	 */
repeat:
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}

	if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
		goto scan_end;

	/*
	 * Check for new objects allocated during this scanning and add them
	 * to the gray list.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_NEW) && !color_black(object) &&
		    get_object(object)) {
			object->flags &= ~OBJECT_NEW;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (!list_empty(&gray_list))
		goto repeat;

scan_end:
	WARN_ON(!list_empty(&gray_list));

	/*
	 * If scanning was stopped or new objects were being allocated at a
	 * higher rate than gray list scanning, do not report any new
	 * unreferenced objects.
	 */
	if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
}
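/*
 * Illustrative walk-through of the gray list traversal above (not from the
 * original source): if the data section points to block A and A points to
 * block B, the initial pass grays A; scanning A then appends B to the
 * gray_list, so B is scanned within the same traversal. The extra passes
 * (bounded by GRAY_LIST_PASSES) only exist to pick up OBJECT_NEW blocks
 * allocated while the scan was running.
 */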
/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);

	list_for_each_continue_rcu(n, &object_list) {
		next_obj = list_entry(n, struct kmemleak_object, object_list);
		if (get_object(next_obj))
			break;
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}
/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	return seq_open(file, &kmemleak_seq_ops);
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	addr = simple_strtoul(str, NULL, 0);
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = strict_strtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};
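/*
 * Example usage from user space (illustrative; the dump address is made up):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak	(trigger a scan now)
 *	# cat /sys/kernel/debug/kmemleak		(read the leak reports)
 *	# echo scan=300 > /sys/kernel/debug/kmemleak	(rescan every 5 minutes)
 *	# echo dump=0xc1234567 > /sys/kernel/debug/kmemleak
 */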
/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static int kmemleak_cleanup_thread(void *arg)
{
	struct kmemleak_object *object;

	mutex_lock(&scan_mutex);
	stop_scan_thread();

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);

	return 0;
}

/*
 * Start the clean-up thread.
 */
static void kmemleak_cleanup(void)
{
	struct task_struct *cleanup_thread;

	cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
				     "kmemleak-clean");
	if (IS_ERR(cleanup_thread))
		pr_warning("Failed to create the clean-up thread\n");
}

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		kmemleak_cleanup();

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") != 0)
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->offset, log->length,
					   GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		kmemleak_cleanup();
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);