/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   serializes the users of the "kmemleak" debugfs file and protects
 *   modifications to the memory scanning parameters, including the
 *   scan_thread pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
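
/*
 * Illustrative sketch (not code from this file, but built only from the
 * functions defined below): the pattern for safely taking a reference to
 * a kmemleak_object, as implemented by find_and_get_object():
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);	with kmemleak_lock held
 *	if (object && !get_object(object))	use_count already 0,
 *		object = NULL;			RCU freeing in flight
 *	rcu_read_unlock();
 *	...
 *	put_object(object);			may schedule free_object_rcu()
 */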

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define REPORTS_NR		50	/* maximum number of reported leaks */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define MSECS_SCAN_YIELD	10	/* CPU yielding period */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long offset;
	size_t length;
};

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* memory ranges to be scanned inside an object (if empty, scan all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* used for yielding the CPU to other tasks during scanning */
static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
static unsigned long jiffies_scan_yield;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scans */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);

/* number of leaks reported (to limit the output size) */
static int reported_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long offset;		/* scan area offset */
	size_t length;			/* scan area length */
};

/* early logging buffer and current position */
static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int crt_early_log;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static int color_white(const struct kmemleak_object *object)
{
	return object->count != -1 && object->count < object->min_count;
}

static int color_gray(const struct kmemleak_object *object)
{
	return object->min_count != -1 && object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static int unreferenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
		   object->comm, object->pid, object->jiffies);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}
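
/*
 * The resulting report looks as follows (the values are made up for
 * illustration; the format is fixed by the seq_printf() calls above):
 *
 *	unreferenced object 0xc28f1a00 (size 64):
 *	  comm "insmod", pid 1122, jiffies 4294902467
 *	  backtrace:
 *	    [<c01756de>] kmem_cache_alloc+0x11e/0x130
 *	    ...
 */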

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static void create_object(unsigned long ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;
	struct stack_trace trace;

	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!object) {
		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
		return;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = -1;			/* no color initially */
	object->jiffies = jiffies;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	trace.max_entries = MAX_TRACE;
	trace.nr_entries = 0;
	trace.entries = object->trace;
	trace.skip = 1;
	save_stack_trace(&trace);
	object->trace_len = trace.nr_entries;

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);
	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it. However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		unsigned long flags;

		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock_irqsave(&object->lock, flags);
		dump_object_info(object);
		spin_unlock_irqrestore(&object->lock, flags);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void delete_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		write_unlock_irqrestore(&kmemleak_lock, flags);
		return;
	}
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = 0;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = -1;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, unsigned long offset,
			  size_t length, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!area) {
		kmemleak_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (offset + length > object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->offset = offset;
	area->length = length;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references
 * to it are still searched for.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void log_early(int op_type, const void *ptr, size_t size,
		      int min_count, unsigned long offset, size_t length)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		pr_warning("Early log buffer exceeded\n");
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->offset = offset;
	log->length = length;
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
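
/*
 * For example (illustrative only), a kmalloc() in a driver reaches this
 * callback via the slab allocator with min_count == 1, i.e. at least one
 * pointer to the block must be found during scanning, otherwise the
 * block is reported as a leak:
 *
 *	buf = kmalloc(64, GFP_KERNEL);	triggers kmemleak_alloc(buf, 64, 1, ...)
 *	kfree(buf);			triggers kmemleak_free(buf)
 */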

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
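
/*
 * Typical (hypothetical) use: a block whose only reference is kept
 * somewhere kmemleak does not scan, e.g. a physical address programmed
 * into a hardware register, would otherwise be reported as a leak:
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	hw_set_buffer(virt_to_phys(obj));	hw_set_buffer() is made up
 *	kmemleak_not_leak(obj);			known false positive
 */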

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length,
			gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, offset, length, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
}
EXPORT_SYMBOL(kmemleak_scan_area);
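
/*
 * Illustrative use, assuming a hypothetical struct foo whose trailing
 * part holds raw data: restricting the scan to the pointer-bearing range
 * avoids treating random data as pointers (a source of false negatives):
 *
 *	foo = kmalloc(sizeof(*foo), GFP_KERNEL);
 *	kmemleak_scan_area(foo, offsetof(struct foo, ptrs),
 *			   sizeof(foo->ptrs), GFP_KERNEL);
 */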

/*
 * Inform kmemleak not to scan the given memory block.
 */
void kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
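
/*
 * Illustrative use: a buffer known to hold no pointers (e.g. a data-only
 * DMA buffer) need not be scanned. References *to* the block are still
 * counted, so it remains leak-checked:
 *
 *	data = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *	kmemleak_no_scan(data);
 */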

/*
 * Yield the CPU so that other tasks get a chance to run. The yielding is
 * rate-limited to avoid an excessive number of calls to the schedule()
 * function during memory scanning.
 */
static void scan_yield(void)
{
	might_sleep();

	if (time_is_before_eq_jiffies(next_scan_yield)) {
		schedule();
		next_scan_yield = jiffies + jiffies_scan_yield;
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		unsigned long flags;
		unsigned long pointer = *ptr;
		struct kmemleak_object *object;

		if (scan_should_stop())
			break;

		/*
		 * When scanning a memory block with a corresponding
		 * kmemleak_object, the CPU yielding is handled in the calling
		 * code since it holds the object->lock to avoid the block
		 * freeing.
		 */
		if (!scanned)
			scan_yield();

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object))
			list_add_tail(&object->gray_list, &gray_list);
		else
			put_object(object);
		spin_unlock_irqrestore(&object->lock, flags);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. The caller must
 * ensure that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list))
		scan_block((void *)object->pointer,
			   (void *)(object->pointer + object->size), object);
	else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)(object->pointer + area->offset),
				   (void *)(object->pointer + area->offset
					    + area->length), object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object, *tmp;
	struct task_struct *task;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object structures */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL);
	scan_block(__bss_start, __bss_stop, NULL);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL);
#endif

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}

	/*
	 * Scanning the task stacks may introduce false negatives and it is
	 * not enabled by default.
	 */
	if (kmemleak_stack_scan) {
		read_lock(&tasklist_lock);
		for_each_process(task)
			scan_block(task_stack_page(task),
				   task_stack_page(task) + THREAD_SIZE, NULL);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above. More objects will be referenced and, if there are no memory
	 * leaks, all the objects will be scanned. The list traversal is safe
	 * for both tail additions and removals from inside the loop. The
	 * kmemleak objects cannot be freed from outside the loop because their
	 * use_count was increased.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		scan_yield();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. Reading the first
 * position also resets the counter of leaks reported during this pass.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;

	if (!n)
		reported_leaks = 0;
	if (reported_leaks >= REPORTS_NR)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	rcu_read_unlock();
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);
	if (reported_leaks >= REPORTS_NR)
		goto out;

	rcu_read_lock();
	list_for_each_continue_rcu(n, &object_list) {
		next_obj = list_entry(n, struct kmemleak_object, object_list);
		if (get_object(next_obj))
			break;
	}
	rcu_read_unlock();
out:
	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		put_object(v);
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) {
		print_unreferenced(seq, object);
		reported_leaks++;
	}
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		goto out;
	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &kmemleak_seq_ops);
		if (ret < 0)
			goto scan_unlock;
	}
	return ret;

scan_unlock:
	mutex_unlock(&scan_mutex);
out:
	return ret;
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);
	mutex_unlock(&scan_mutex);

	return ret;
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;
		int err;

		err = strict_strtoul(buf + 5, 0, &secs);
		if (err < 0)
			return err;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else
		return -EINVAL;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
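
/*
 * Example user-space session (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo scan > /sys/kernel/debug/kmemleak		trigger a scan now
 *	echo scan=0 > /sys/kernel/debug/kmemleak	stop automatic scanning
 *	cat /sys/kernel/debug/kmemleak			list suspected leaks
 */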

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static int kmemleak_cleanup_thread(void *arg)
{
	struct kmemleak_object *object;

	mutex_lock(&scan_mutex);
	stop_scan_thread();

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);

	return 0;
}

/*
 * Start the clean-up thread.
 */
static void kmemleak_cleanup(void)
{
	struct task_struct *cleanup_thread;

	cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
				     "kmemleak-clean");
	if (IS_ERR(cleanup_thread))
		pr_warning("Failed to create the clean-up thread\n");
}

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		kmemleak_cleanup();

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") != 0)
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
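
/*
 * Example kernel command line usage:
 *
 *	kmemleak=off	disable kmemleak at boot (irreversible)
 */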

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

	jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD);
	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			kmemleak_alloc(log->ptr, log->size, log->min_count,
				       GFP_KERNEL);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->offset, log->length,
					   GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		kmemleak_cleanup();
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);