#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/kmemcheck.h>

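/*
 * Allocate one shadow page for each of the 2^order data pages starting at
 * @page and attach it via page->shadow. kmemcheck uses the shadow to track
 * the initialization state of every byte of the data page. Note that the
 * shadow itself is allocated with __GFP_NOTRACK so it is never tracked in
 * turn.
 */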
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
	struct page *shadow;
	int pages;
	int i;

	pages = 1 << order;

	/*
	 * With kmemcheck enabled, we need to allocate a memory area for the
	 * shadow bits as well.
	 */
	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
	if (!shadow) {
		if (printk_ratelimit())
			printk(KERN_ERR "kmemcheck: failed to allocate "
				"shadow bitmap\n");
		return;
	}

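	/*
	 * A non-NULL page->shadow is what marks a page as tracked from
	 * here on; on allocation failure above, the pages are simply
	 * left untracked.
	 */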
	for (i = 0; i < pages; ++i)
		page[i].shadow = page_address(&shadow[i]);

	/*
	 * Mark it as non-present for the MMU so that our accesses to
	 * this memory will trigger a page fault and let us analyze
	 * the memory accesses.
	 */
	kmemcheck_hide_pages(page, pages);
}

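/*
 * Undo kmemcheck_alloc_shadow(): make the data pages ordinarily accessible
 * again and release the shadow pages. A sketch of the expected pairing
 * (assumed call order; the real call site lives in the page allocator's
 * free path, not in this file):
 *
 *	kmemcheck_free_shadow(page, order);
 *	__free_pages(page, order);
 */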
void kmemcheck_free_shadow(struct page *page, int order)
{
	struct page *shadow;
	int pages;
	int i;

	if (!kmemcheck_page_is_tracked(page))
		return;

	pages = 1 << order;

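	/*
	 * Make the data pages present again before the shadow goes away
	 * so that later accesses no longer fault into kmemcheck.
	 */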
	kmemcheck_show_pages(page, pages);

	shadow = virt_to_page(page[0].shadow);

	for (i = 0; i < pages; ++i)
		page[i].shadow = NULL;

	__free_pages(shadow, order);
}

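/*
 * Set up the shadow for a freshly allocated slab object according to the
 * cache's flags and the allocation's gfp flags.
 */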
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size)
{
	/*
	 * The object has already been memset(), which initializes the
	 * shadow for us as well.
	 */
	if (gfpflags & __GFP_ZERO)
		return;

	/* No need to initialize the shadow of a non-tracked slab. */
	if (s->flags & SLAB_NOTRACK)
		return;

	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
		/*
		 * Allow non-tracked objects to be allocated from
		 * tracked caches. Note however that these objects
		 * will still get page faults on access; they just
		 * won't ever be flagged as uninitialized. If page
		 * faults are not acceptable, the slab cache itself
		 * should be marked NOTRACK.
		 */
		kmemcheck_mark_initialized(object, size);
	} else if (!s->ctor) {
		/*
		 * New objects should be marked uninitialized before
		 * they're returned to the caller. Objects with a
		 * constructor keep their constructed state across
		 * free/alloc cycles, so their shadow is left alone.
		 */
		kmemcheck_mark_uninitialized(object, size);
	}
}
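
/*
 * Illustration (a sketch, not code from this file) of the resulting
 * shadow state for an object from a tracked, constructor-less cache:
 *
 *	kmalloc(16, GFP_KERNEL);			shadow: uninitialized
 *	kmalloc(16, GFP_KERNEL | __GFP_ZERO);		shadow: initialized
 *	kmalloc(16, GFP_KERNEL | __GFP_NOTRACK);	shadow: initialized
 */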

void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
	/*
	 * TODO: RCU freeing is unsupported for now; hide false positives.
	 * With SLAB_DESTROY_BY_RCU, lockless readers may legitimately
	 * touch an object until the grace period has expired, so marking
	 * it freed here would produce bogus use-after-free reports.
	 * Constructed objects must likewise keep their valid state while
	 * sitting on the freelist.
	 */
	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
		kmemcheck_mark_freed(object, size);
}

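/*
 * Called for fresh page-allocator pages: attach a shadow and seed its
 * state according to __GFP_ZERO.
 */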
void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
			       gfp_t gfpflags)
{
	int pages;

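	/*
	 * Highmem pages have no permanent kernel mapping for kmemcheck
	 * to watch, and __GFP_NOTRACK is an explicit opt-out; neither
	 * gets a shadow.
	 */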
	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
		return;

	pages = 1 << order;

	/*
	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
	 * can become uninitialized by copying uninitialized memory
	 * into them.
	 */

	/* XXX: Can use zone->node for node? */
	kmemcheck_alloc_shadow(page, order, gfpflags, -1);

	if (gfpflags & __GFP_ZERO)
		kmemcheck_mark_initialized_pages(page, pages);
	else
		kmemcheck_mark_uninitialized_pages(page, pages);
}
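
/*
 * Runtime control (see Documentation/kmemcheck.txt): the checker is built
 * with CONFIG_KMEMCHECK, selected at boot with the kmemcheck= parameter
 * (0 = off, 1 = on, 2 = one-shot), and toggled afterwards through the
 * kmemcheck_enabled sysctl.
 */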