kmemleak.c revision 4a558dd6f93d419cd318958577e25492bd09e960
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
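
/*
 * Illustrative nesting of the locks described above (a sketch, not code
 * called anywhere in this file): during a scanning episode the scan_mutex
 * is taken first, kmemleak_lock only for short metadata look-ups and
 * object->lock while an individual object or its memory block is examined:
 *
 *	mutex_lock(&scan_mutex);
 *	read_lock_irqsave(&kmemleak_lock, flags);
 *	object = lookup_object(ptr, 1);
 *	read_unlock_irqrestore(&kmemleak_lock, flags);
 *	spin_lock_irqsave(&object->lock, flags);
 *	... scan the corresponding memory block ...
 *	spin_unlock_irqrestore(&object->lock, flags);
 *	mutex_unlock(&scan_mutex);
 */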

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define GRAY_LIST_PASSES	25	/* maximum number of gray list scans */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long offset;
	size_t length;
};

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set on newly allocated objects */
#define OBJECT_NEW		(1 << 3)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scans */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long offset;		/* scan area offset */
	size_t length;			/* scan area length */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};
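
/*
 * Illustrative early-boot sequence (a sketch): an allocation that happens
 * before kmemleak_init() is recorded rather than tracked directly, e.g.
 *
 *	ptr = kmalloc(32, GFP_ATOMIC);
 *
 * ends up in the kmemleak_alloc() callback while kmemleak_early_log is
 * still set, so log_early(KMEMLEAK_ALLOC, ptr, 32, 1, 0, 0) stores the
 * operation in the early_log buffer below; kmemleak_init() later replays
 * the entry via early_alloc() to create the real kmemleak_object.
 */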

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	int i, len, remaining;
	unsigned char linebuf[HEX_ROW_SIZE * 5];

	/* limit the number of lines to HEX_MAX_LINES */
	remaining = len =
		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));

	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
	for (i = 0; i < len; i += HEX_ROW_SIZE) {
		int linelen = min(remaining, HEX_ROW_SIZE);

		remaining -= HEX_ROW_SIZE;
		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
				   HEX_ASCII);
		seq_printf(seq, "    %s\n", linebuf);
	}
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != -1 && object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != -1 && object->count >= object->min_count;
}

static bool color_black(const struct kmemleak_object *object)
{
	return object->min_count == -1;
}
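
/*
 * Worked example of the color encoding above: an object created with
 * min_count == 1 starts with count == -1 (no color). A scan first whitens
 * it (count = 0); if no pointer to it is then found, count < min_count and
 * the object stays white, i.e. a leak candidate. If at least one pointer
 * is found, count becomes 1 >= min_count and the object turns gray. An
 * object passed to kmemleak_ignore() gets min_count == -1 and is black.
 */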

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
		   object->comm, object->pid, object->jiffies);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases during kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look up a memory block's metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias");
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;

	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!object) {
		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = -1;			/* no color initially */
	object->jiffies = jiffies;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);
	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it.  However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		unsigned long flags;

		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock_irqsave(&object->lock, flags);
		dump_object_info(object);
		spin_unlock_irqrestore(&object->lock, flags);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	write_lock_irqsave(&kmemleak_lock, flags);
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}
	__delete_object(object);

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}

/*
 * Mark an object as permanently gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = 0;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark the object as black-colored so that it is ignored during memory scans
 * and leak reporting.
 */
static void make_black_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = -1;
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, unsigned long offset,
			  size_t length, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!area) {
		kmemleak_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (offset + length > object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->offset = offset;
	area->length = length;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count, unsigned long offset, size_t length)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		pr_warning("Early log buffer exceeded\n");
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->offset = offset;
	log->length = length;
	if (op_type == KMEMLEAK_ALLOC)
		log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_KERNEL);
	if (!object)
		/* create_object() may fail and return NULL */
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
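
/*
 * Illustrative pairing of the two callbacks above for an allocator that is
 * not covered by the standard hooks (my_pool_alloc()/my_pool_free() and the
 * pool helpers are hypothetical, a sketch only):
 *
 *	void *my_pool_alloc(size_t size, gfp_t gfp)
 *	{
 *		void *ptr = grab_from_pool(size);
 *
 *		if (ptr)
 *			kmemleak_alloc(ptr, size, 1, gfp);
 *		return ptr;
 *	}
 *
 *	void my_pool_free(void *ptr)
 *	{
 *		kmemleak_free(ptr);
 *		return_to_pool(ptr);
 *	}
 */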

/*
 * Partial memory freeing function callback. This function is usually called
 * from the bootmem allocator when (part of) a memory block is freed.
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
			      size_t length, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, offset, length, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
}
EXPORT_SYMBOL(kmemleak_scan_area);
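
/*
 * Illustrative use of kmemleak_scan_area() (struct my_obj is hypothetical):
 * limit scanning to the pointer-bearing head of a block whose tail holds
 * raw data whose random contents could look like pointers and hide leaks:
 *
 *	struct my_obj {
 *		struct list_head list;	(real pointers, worth scanning)
 *		char raw[512];		(payload, may hold random values)
 *	};
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_scan_area(obj, offsetof(struct my_obj, list),
 *			   sizeof(obj->list), GFP_KERNEL);
 */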

/*
 * Inform kmemleak not to scan the given memory block.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long flags;
		unsigned long pointer;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		pointer = *ptr;

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object))
			list_add_tail(&object->gray_list, &gray_list);
		else
			put_object(object);
		spin_unlock_irqrestore(&object->lock, flags);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
		       !(object->flags & OBJECT_NO_SCAN)) {
			scan_block(start, min(start + MAX_SCAN_SIZE, end),
				   object, 0);
			start += MAX_SCAN_SIZE;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		}
	} else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)(object->pointer + area->offset),
				   (void *)(object->pointer + area->offset
					    + area->length), object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object, *tmp;
	int i;
	int new_leaks = 0;
	int gray_list_pass = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object structures */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		object->flags &= ~OBJECT_NEW;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL, 0);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above. More objects will be referenced and, if there are no memory
	 * leaks, all the objects will be scanned. The list traversal is safe
	 * for both tail additions and removals from inside the loop. The
	 * kmemleak objects cannot be freed from outside the loop because their
	 * use_count was increased.
	 */
repeat:
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}

	if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
		goto scan_end;

	/*
	 * Check for new objects allocated during this scanning and add them
	 * to the gray list.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_NEW) && !color_black(object) &&
		    get_object(object)) {
			object->flags &= ~OBJECT_NEW;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (!list_empty(&gray_list))
		goto repeat;

scan_end:
	WARN_ON(!list_empty(&gray_list));

	/*
	 * If scanning was stopped or new objects were being allocated at a
	 * higher rate than gray list scanning, do not report any new
	 * unreferenced objects.
	 */
	if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);

	list_for_each_continue_rcu(n, &object_list) {
		struct kmemleak_object *obj =
			list_entry(n, struct kmemleak_object, object_list);

		/* only hand out an object whose use_count was obtained */
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	return seq_open(file, &kmemleak_seq_ops);
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	addr = simple_strtoul(str, NULL, 0);
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = strict_strtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};
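
/*
 * Example usage from user space of the commands handled by kmemleak_write()
 * (assuming debugfs is mounted at /sys/kernel/debug; the dump address below
 * is made up):
 *
 *	echo scan > /sys/kernel/debug/kmemleak		(trigger a scan now)
 *	echo scan=300 > /sys/kernel/debug/kmemleak	(scan every 300 secs)
 *	echo stack=off > /sys/kernel/debug/kmemleak
 *	echo dump=0xdf4a3b80 > /sys/kernel/debug/kmemleak
 *	cat /sys/kernel/debug/kmemleak			(read the leak report)
 */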

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	struct kmemleak_object *object;

	mutex_lock(&scan_mutex);
	stop_scan_thread();

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		schedule_work(&cleanup_work);

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") != 0)
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
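
/*
 * Example: booting with "kmemleak=off" on the kernel command line invokes
 * kmemleak_disable() before any tracking starts; "kmemleak=on" keeps the
 * default (enabled) behaviour.
 */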

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->offset, log->length,
					   GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);