kmemleak.c revision a1084c8779f5efa771c6896a0a4184900b4ab736
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from
 *   the kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   serializes the users of the "kmemleak" debugfs file with modifications
 *   to the memory scanning parameters, including the scan_thread pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

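/*
 * A minimal sketch of the look-up pattern implied by the notes above; this
 * is what find_and_get_object() below implements:
 *
 *	rcu_read_lock();
 *	read_lock_irqsave(&kmemleak_lock, flags);
 *	object = lookup_object(ptr, alias);
 *	read_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))	(use_count already 0)
 *		object = NULL;
 *	rcu_read_unlock();
 *	...
 *	if (object)
 *		put_object(object);
 */
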
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define GRAY_LIST_PASSES	25	/* maximum number of gray list scans */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)

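/*
 * Only the GFP_KERNEL and GFP_ATOMIC bits of a caller's gfp flags survive
 * this mask; see create_object() and add_scan_area(), which allocate with
 * (gfp & GFP_KMEMLEAK_MASK) so the metadata allocation context matches the
 * context of the traced allocation.
 */
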
/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long offset;
	size_t length;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

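/*
 * Note: tree_node spans [pointer, pointer + size - 1] (see create_object()),
 * so a look-up with a non-zero alias argument finds the object from any
 * address inside the block, not just from its start.
 */
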
/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set on newly allocated objects */
#define OBJECT_NEW		(1 << 3)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

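/*
 * Each value corresponds to one kmemleak_* callback; the recorded entries
 * are replayed in order by kmemleak_init() once the slab allocator (and
 * hence object_cache) is available.
 */
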
/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long offset;		/* scan area offset */
	size_t length;			/* scan area length */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	int i, len, remaining;
	unsigned char linebuf[HEX_ROW_SIZE * 5];

	/* limit the number of lines to HEX_MAX_LINES */
	remaining = len =
		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));

	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
	for (i = 0; i < len; i += HEX_ROW_SIZE) {
		int linelen = min(remaining, HEX_ROW_SIZE);

		remaining -= HEX_ROW_SIZE;
		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
				   HEX_ASCII);
		seq_printf(seq, "    %s\n", linebuf);
	}
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

static bool color_black(const struct kmemleak_object *object)
{
	return object->min_count == KMEMLEAK_BLACK;
}

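/*
 * Worked example of the color encoding above: an object created with
 * min_count == 1 for which a scan found no pointers (count == 0) is white
 * and may be reported; once one pointer is found (count == 1) it turns
 * gray. kmemleak_ignore() sets min_count to KMEMLEAK_BLACK (-1), making
 * the object black regardless of count.
 */
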
/*
 * Objects are considered unreferenced only if their color is white, they
 * have not been deleted and have a minimum age to avoid false positives
 * caused by pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
		   object->comm, object->pid, object->jiffies);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias\n");
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reaches 0, the RCU freeing has already been
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;

	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!object) {
		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = -1;			/* no color initially */
	object->jiffies = jiffies;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);
	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it. However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		unsigned long flags;

		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock_irqsave(&object->lock, flags);
		dump_object_info(object);
		spin_unlock_irqrestore(&object->lock, flags);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	write_lock_irqsave(&kmemleak_lock, flags);
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

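	/*
	 * use_count is at least 2 at this point: the initial reference from
	 * create_object() plus the one taken by the caller (typically via
	 * find_and_get_object()), hence the WARN_ON below.
	 */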
	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}
	__delete_object(object);

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
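	/*
	 * For example, freeing [ptr, ptr + size) out of a tracked block
	 * [start, end) leaves up to two remainders, [start, ptr) and
	 * [ptr + size, end), each of which gets its own metadata below.
	 */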
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object "
			      "at 0x%08lx as %s\n", ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, unsigned long offset,
			  size_t length, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!area) {
		kmemleak_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (offset + length > object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->offset = offset;
	area->length = length;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references
 * to it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count, unsigned long offset, size_t length)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		pr_warning("Early log buffer exceeded\n");
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->offset = offset;
	log->length = length;
	if (op_type == KMEMLEAK_ALLOC)
		log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_KERNEL);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/*
 * Partial memory freeing function callback. This function is usually called
 * from the bootmem allocator when (part of) a memory block is freed.
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
			      size_t length, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, offset, length, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/*
 * Inform kmemleak not to scan the given memory block.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

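/*
 * A minimal usage sketch of the callbacks above, for hypothetical driver
 * code that allocates memory behind the standard allocators' backs (the
 * low_level_alloc()/low_level_free() names are placeholders):
 *
 *	obj = low_level_alloc(size);
 *	kmemleak_alloc(obj, size, 1, GFP_KERNEL);
 *	...
 *	kmemleak_free(obj);
 *	low_level_free(obj);
 */
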
/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
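	/*
	 * Round the start up to the pointer size and pull the end back so
	 * that only whole, aligned words that fit entirely inside the
	 * [_start, _end) range are dereferenced.
	 */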
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long flags;
		unsigned long pointer;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		pointer = *ptr;

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object))
			list_add_tail(&object->gray_list, &gray_list);
		else
			put_object(object);
		spin_unlock_irqrestore(&object->lock, flags);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
		       !(object->flags & OBJECT_NO_SCAN)) {
			scan_block(start, min(start + MAX_SCAN_SIZE, end),
				   object, 0);
			start += MAX_SCAN_SIZE;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		}
	} else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)(object->pointer + area->offset),
				   (void *)(object->pointer + area->offset
					    + area->length), object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object, *tmp;
	int i;
	int new_leaks = 0;
	int gray_list_pass = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object structures */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		object->flags &= ~OBJECT_NEW;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL, 0);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above. More objects will be referenced and, if there are no memory
	 * leaks, all the objects will be scanned. The list traversal is safe
	 * for both tail additions and removals from inside the loop. The
	 * kmemleak objects cannot be freed from outside the loop because their
	 * use_count was increased.
	 */
repeat:
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}

	if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
		goto scan_end;

	/*
	 * Check for new objects allocated during this scanning and add them
	 * to the gray list.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_NEW) && !color_black(object) &&
		    get_object(object)) {
			object->flags &= ~OBJECT_NEW;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (!list_empty(&gray_list))
		goto repeat;

scan_end:
	WARN_ON(!list_empty(&gray_list));

	/*
	 * If scanning was stopped or new objects were being allocated at a
	 * higher rate than gray list scanning, do not report any new
	 * unreferenced objects.
	 */
	if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);

	list_for_each_continue_rcu(n, &object_list) {
		next_obj = list_entry(n, struct kmemleak_object, object_list);
		if (get_object(next_obj))
			break;
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	return seq_open(file, &kmemleak_seq_ops);
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	addr = simple_strtoul(str, NULL, 0);
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future
 * and we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects as
 *		  grey so that they are no longer printed
 *   dump=...	- dump information about the object found at the given address
 */
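/*
 * Example (from a shell, with debugfs mounted at /sys/kernel/debug):
 *
 *	echo scan > /sys/kernel/debug/kmemleak		(trigger a scan)
 *	echo scan=300 > /sys/kernel/debug/kmemleak	(scan every 5 minutes)
 *	echo dump=0xdeadbeef > /sys/kernel/debug/kmemleak
 *
 * The 0xdeadbeef address above is only a placeholder for an object address
 * previously reported by kmemleak.
 */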
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = strict_strtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "clear", 5) == 0)
		kmemleak_clear();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	struct kmemleak_object *object;

	mutex_lock(&scan_mutex);
	stop_scan_thread();

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		schedule_work(&cleanup_work);

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") != 0)
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

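/* e.g. booting with "kmemleak=off" on the kernel command line disables it */
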
/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->offset, log->length,
					   GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);