kmemleak.c revision 288c857d66a400ca4846dd700eb1c4820d738bb9
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look-up
 *   metadata based on a pointer to the corresponding memory block.  The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)
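
/*
 * Note on usage (added for clarity): the allocation sites below apply
 * "gfp & GFP_KMEMLEAK_MASK" so that kmemleak's internal allocations keep at
 * most the GFP_KERNEL and GFP_ATOMIC bits of the caller's flags. As an
 * illustrative (hypothetical) example, a caller passing
 * GFP_ATOMIC | __GFP_NOWARN would have kmemleak allocate its metadata with
 * plain GFP_ATOMIC, the __GFP_NOWARN bit being masked out.
 */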

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long offset;
	size_t length;
};

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long offset;		/* scan area offset */
	size_t length;			/* scan area length */
};

/* early logging buffer and current position */
static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int crt_early_log;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static int color_white(const struct kmemleak_object *object)
{
	return object->count != -1 && object->count < object->min_count;
}

static int color_gray(const struct kmemleak_object *object)
{
	return object->min_count != -1 && object->count >= object->min_count;
}
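
/*
 * Illustrative values (not from the original source): an object created
 * with min_count == 1 is whitened to count == 0 at the start of a scan and
 * stays white (a leak candidate) until at least one pointer to it is found,
 * at which point count reaches min_count and color_gray() holds. An object
 * with min_count == 0 turns gray as soon as its count is reset to 0, and an
 * object with min_count == -1 is black: neither test above matches it.
 */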

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static int unreferenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}
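
/*
 * Worked example (hypothetical numbers): with MSECS_MIN_AGE == 5000, an
 * object allocated at jiffies J only satisfies the age check above for a
 * scan with jiffies_last_scan >= J + msecs_to_jiffies(5000), i.e. the
 * block must have survived roughly five seconds before the scan started.
 */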

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
		   object->comm, object->pid, object->jiffies);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases during kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias");
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}
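
/*
 * Typical usage pattern for the helpers above (a sketch distilled from the
 * functions later in this file, not additional API): callers pair
 * find_and_get_object() with put_object() and take object->lock while
 * touching the metadata:
 *
 *	object = find_and_get_object(ptr, 0);
 *	if (object) {
 *		spin_lock_irqsave(&object->lock, flags);
 *		... read or modify the metadata ...
 *		spin_unlock_irqrestore(&object->lock, flags);
 *		put_object(object);
 *	}
 */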

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static void create_object(unsigned long ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;
	struct stack_trace trace;

	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!object) {
		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
		return;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = -1;			/* no color initially */
	object->jiffies = jiffies;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	trace.max_entries = MAX_TRACE;
	trace.nr_entries = 0;
	trace.entries = object->trace;
	trace.skip = 1;
	save_stack_trace(&trace);
	object->trace_len = trace.nr_entries;

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);
	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it.  However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		unsigned long flags;

		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock_irqsave(&object->lock, flags);
		dump_object_info(object);
		spin_unlock_irqrestore(&object->lock, flags);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void delete_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		write_unlock_irqrestore(&kmemleak_lock, flags);
		return;
	}
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark an object as permanently gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = 0;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = -1;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, unsigned long offset,
			  size_t length, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!area) {
		kmemleak_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (offset + length > object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->offset = offset;
	area->length = length;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak, but references
 * to it are still searched for.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void log_early(int op_type, const void *ptr, size_t size,
		      int min_count, unsigned long offset, size_t length)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		pr_warning("Early log buffer exceeded\n");
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->offset = offset;
	log->length = length;
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
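
/*
 * Hypothetical call site (for illustration only; the real hooks live in the
 * slab and vmalloc allocators): after a successful allocation, the allocator
 * reports the new block so that kmemleak starts tracking it:
 *
 *	ptr = my_alloc(size);		(assumed helper, not a real API)
 *	if (ptr)
 *		kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
 *
 * A min_count of 1 means the block is reported as a leak unless at least one
 * pointer to it is found during scanning.
 */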

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
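
/*
 * Illustrative use (hypothetical scenario): a block whose only reference is
 * kept somewhere kmemleak cannot see, e.g. in a hardware register, would be
 * a false positive, so its owner can gray it right after allocation:
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_not_leak(obj);
 */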

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length,
			gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, offset, length, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
}
EXPORT_SYMBOL(kmemleak_scan_area);
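
/*
 * Illustrative use (hypothetical offsets): if only the first 128 bytes of a
 * block can hold pointers, scanning may be restricted to that range:
 *
 *	kmemleak_scan_area(ptr, 0, 128, GFP_KERNEL);
 *
 * Later scans then read ptr..ptr+128 instead of the whole block.
 */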

/*
 * Inform kmemleak not to scan the given memory block.
 */
void kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
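	/*
	 * Example of the range adjustment above (illustrative values): on a
	 * 64-bit machine with _start == 0x1003 and _end == 0x1020, start is
	 * rounded up to 0x1008 and end becomes 0x1019, so the loop below
	 * reads the aligned words at 0x1008, 0x1010 and 0x1018; the final
	 * load ends exactly at _end and nothing past it is dereferenced.
	 */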

	for (ptr = start; ptr < end; ptr++) {
		unsigned long flags;
		unsigned long pointer = *ptr;
		struct kmemleak_object *object;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object))
			list_add_tail(&object->gray_list, &gray_list);
		else
			put_object(object);
		spin_unlock_irqrestore(&object->lock, flags);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. The caller must
 * ensure that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list))
		scan_block((void *)object->pointer,
			   (void *)(object->pointer + object->size), object, 0);
	else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)(object->pointer + area->offset),
				   (void *)(object->pointer + area->offset
					    + area->length), object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object, *tmp;
	struct task_struct *task;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object structures */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}

	/*
	 * Scanning the task stacks may introduce false negatives and it is
	 * not enabled by default.
	 */
	if (kmemleak_stack_scan) {
		read_lock(&tasklist_lock);
		for_each_process(task)
			scan_block(task_stack_page(task),
				   task_stack_page(task) + THREAD_SIZE,
				   NULL, 0);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above. More objects will be referenced and, if there are no memory
	 * leaks, all the objects will be scanned. The list traversal is safe
	 * for both tail additions and removals from inside the loop. The
	 * kmemleak objects cannot be freed from outside the loop because their
	 * use_count was increased.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	rcu_read_unlock();
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);

	rcu_read_lock();
	list_for_each_continue_rcu(n, &object_list) {
		next_obj = list_entry(n, struct kmemleak_object, object_list);
		if (get_object(next_obj))
			break;
	}
	rcu_read_unlock();

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		put_object(v);
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		goto out;
	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &kmemleak_seq_ops);
		if (ret < 0)
			goto scan_unlock;
	}
	return ret;

scan_unlock:
	mutex_unlock(&scan_mutex);
out:
	return ret;
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);
	mutex_unlock(&scan_mutex);

	return ret;
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 */
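/*
 * Example session (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo scan=30 > /sys/kernel/debug/kmemleak	(scan every 30 seconds)
 *	# echo scan > /sys/kernel/debug/kmemleak	(trigger a scan now)
 *	# cat /sys/kernel/debug/kmemleak		(dump suspected leaks)
 */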
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;
		int err;

		err = strict_strtoul(buf + 5, 0, &secs);
		if (err < 0)
			return err;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else
		return -EINVAL;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static int kmemleak_cleanup_thread(void *arg)
{
	struct kmemleak_object *object;

	mutex_lock(&scan_mutex);
	stop_scan_thread();

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);

	return 0;
}

/*
 * Start the clean-up thread.
 */
static void kmemleak_cleanup(void)
{
	struct task_struct *cleanup_thread;

	cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
				     "kmemleak-clean");
	if (IS_ERR(cleanup_thread))
		pr_warning("Failed to create the clean-up thread\n");
}

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		kmemleak_cleanup();

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") != 0)
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
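
/*
 * Example: booting with "kmemleak=off" on the kernel command line disables
 * the detector before any tracking starts; "kmemleak=on" (the default
 * behaviour) leaves it enabled.
 */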

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			kmemleak_alloc(log->ptr, log->size, log->min_count,
				       GFP_KERNEL);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->offset, log->length,
					   GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		kmemleak_cleanup();
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);
1434