/* Copyright (C) 2007-2010 The Android Open Source Project
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/

/*
 * Contains the implementation of routines that maintain a red-black tree of
 * memory blocks allocated by the guest system.
 */

#include "memcheck_malloc_map.h"
#include "memcheck_util.h"
#include "memcheck_logging.h"

/* Global flag indicating whether or not __ld/__stx_mmu routines should be
 * instrumented to check for memory read / write access violations.
 * Defined in memcheck.c.
 */
extern int memcheck_instrument_mmu;

/* Allocation descriptor stored in the map. */
typedef struct AllocMapEntry {
    /* R-B tree entry. */
    RB_ENTRY(AllocMapEntry) rb_entry;

    /* Allocation descriptor for this entry. */
    MallocDescEx            desc;
} AllocMapEntry;

// =============================================================================
// Inlines
// =============================================================================

/* Gets address of the beginning of an allocation block for the given entry in
 * the map.
 * Param:
 *  adesc - Entry in the allocation descriptors map.
 * Return:
 *  Address of the beginning of an allocation block for the given entry in the
 *  map.
 */
static inline target_ulong
allocmapentry_alloc_begins(const AllocMapEntry* adesc)
{
    return adesc->desc.malloc_desc.ptr;
}

/* Gets address of the end of an allocation block for the given entry in
 * the map.
 * Param:
 *  adesc - Entry in the allocation descriptors map.
 * Return:
 *  Address of the end of an allocation block for the given entry in the map.
 */
static inline target_ulong
allocmapentry_alloc_ends(const AllocMapEntry* adesc)
{
    return mallocdesc_get_alloc_end(&adesc->desc.malloc_desc);
}

// =============================================================================
// R-B Tree implementation
// =============================================================================

/* Compare routine for the allocation descriptors map.
 * Param:
 *  d1 - First map entry to compare.
 *  d2 - Second map entry to compare.
 * Return:
 *  0 - Descriptors are equal. Note that descriptors are considered to be
 *      equal iff memory blocks they describe intersect in any part.
 *  1 - d1 is greater than d2.
 *  -1 - d1 is less than d2.
 */
static inline int
cmp_rb(AllocMapEntry* d1, AllocMapEntry* d2)
{
    const target_ulong start1 = allocmapentry_alloc_begins(d1);
    const target_ulong start2 = allocmapentry_alloc_begins(d2);

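    /* Descriptors are "equal" whenever the blocks they describe overlap. When
     * d1 starts first, it is "less" only if it ends before d2 begins; the
     * mirror case applies when d2 starts first. */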
    if (start1 < start2) {
        return (allocmapentry_alloc_ends(d1) - 1) < start2 ? -1 : 0;
    }
    return (allocmapentry_alloc_ends(d2) - 1) < start1 ? 1 : 0;
}

/* Expands RB macros here. */
RB_GENERATE(AllocMap, AllocMapEntry, rb_entry, cmp_rb);

// =============================================================================
// Static routines
// =============================================================================

/* Inserts new (or replaces existing) entry into allocation descriptors map.
 * See comments on allocmap_insert routine in the header file for details
 * about this routine.
 */
static RBTMapResult
allocmap_insert_desc(AllocMap* map,
                     AllocMapEntry* adesc,
                     MallocDescEx* replaced)
{
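    /* AllocMap_RB_INSERT returns NULL when the new entry has been inserted,
     * or the address of an existing (i.e. intersecting) entry that kept the
     * new one out of the map. */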
    AllocMapEntry* existing = AllocMap_RB_INSERT(map, adesc);
    if (existing == NULL) {
        return RBT_MAP_RESULT_ENTRY_INSERTED;
    }

    // A matching entry exists. Let's see if we need to replace it.
    if (replaced == NULL) {
        return RBT_MAP_RESULT_ENTRY_ALREADY_EXISTS;
    }

    /* Copy existing entry to the provided buffer and replace it
     * with the new one. */
    memcpy(replaced, &existing->desc, sizeof(MallocDescEx));
    AllocMap_RB_REMOVE(map, existing);
    qemu_free(existing);
    AllocMap_RB_INSERT(map, adesc);
    return RBT_MAP_RESULT_ENTRY_REPLACED;
}

/* Finds an entry in the allocation descriptors map that matches the given
 * address range.
 * Param:
 *  map - Allocation descriptors map where to search for an entry.
 *  address - Virtual address in the guest's user space to find a matching
 *      entry for.
 *  block_size - Size (in bytes) of the address range to match.
 * Return:
 *  Address of an allocation descriptors map entry that matches the given
 *  address range, or NULL if no such entry has been found.
 */
static inline AllocMapEntry*
allocmap_find_entry(const AllocMap* map,
                    target_ulong address,
                    uint32_t block_size)
{
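    /* Build a temporary key entry describing the [address, address + block_size)
     * range. Since cmp_rb treats intersecting blocks as equal, RB_FIND returns
     * the descriptor of any allocation that overlaps this range. */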
    AllocMapEntry adesc;
    adesc.desc.malloc_desc.ptr = address;
    adesc.desc.malloc_desc.requested_bytes = block_size;
    adesc.desc.malloc_desc.prefix_size = 0;
    adesc.desc.malloc_desc.suffix_size = 0;
    return AllocMap_RB_FIND((AllocMap*)map, &adesc);
}

// =============================================================================
// Map API
// =============================================================================

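/* A minimal usage sketch of this map (guest_ptr and the field values below
 * are purely illustrative; see the memcheck headers for the full MallocDescEx
 * layout):
 *
 *      AllocMap map;
 *      MallocDescEx desc;
 *      memset(&desc, 0, sizeof(desc));
 *      desc.malloc_desc.ptr = guest_ptr;        // hypothetical guest address
 *      desc.malloc_desc.requested_bytes = 32;
 *      allocmap_init(&map);
 *      allocmap_insert(&map, &desc, NULL);
 *      MallocDescEx* found = allocmap_find(&map, guest_ptr, 1);
 */
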
void
allocmap_init(AllocMap* map)
{
    RB_INIT(map);
}

RBTMapResult
allocmap_insert(AllocMap* map, const MallocDescEx* desc, MallocDescEx* replaced)
{
    RBTMapResult ret;

    // Allocate and initialize new map entry.
    AllocMapEntry* adesc = qemu_malloc(sizeof(AllocMapEntry));
    if (adesc == NULL) {
        ME("memcheck: Unable to allocate new AllocMapEntry on insert.");
        return RBT_MAP_RESULT_ERROR;
    }
    memcpy(&adesc->desc, desc, sizeof(MallocDescEx));

    // Insert new entry into the map.
    ret = allocmap_insert_desc(map, adesc, replaced);
    if (ret == RBT_MAP_RESULT_ENTRY_ALREADY_EXISTS ||
        ret == RBT_MAP_RESULT_ERROR) {
        /* Another descriptor already exists for this block, or an error
         * occurred. We have to free the new descriptor, as it wasn't inserted. */
        qemu_free(adesc);
    }
    return ret;
}

MallocDescEx*
allocmap_find(const AllocMap* map, target_ulong address, uint32_t block_size)
{
    AllocMapEntry* adesc = allocmap_find_entry(map, address, block_size);
    return adesc != NULL ? &adesc->desc : NULL;
}

int
allocmap_pull(AllocMap* map, target_ulong address, MallocDescEx* pulled)
{
    AllocMapEntry* adesc = allocmap_find_entry(map, address, 1);
    if (adesc != NULL) {
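        /* Hand the descriptor back to the caller and free the map entry. Note
         * that the descriptor's call_stack buffer is not freed here: ownership
         * of that buffer moves to the caller (see allocmap_empty). */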
        memcpy(pulled, &adesc->desc, sizeof(MallocDescEx));
        AllocMap_RB_REMOVE(map, adesc);
        qemu_free(adesc);
        return 0;
    } else {
        return -1;
    }
}

int
allocmap_pull_first(AllocMap* map, MallocDescEx* pulled)
{
    AllocMapEntry* first = RB_MIN(AllocMap, map);
    if (first != NULL) {
        memcpy(pulled, &first->desc, sizeof(MallocDescEx));
        AllocMap_RB_REMOVE(map, first);
        qemu_free(first);
        return 0;
    } else {
        return -1;
    }
}

int
allocmap_copy(AllocMap* to,
              const AllocMap* from,
              uint32_t set_flags,
              uint32_t clear_flags)
{
    AllocMapEntry* entry;
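    /* RB_FOREACH only reads the tree, so casting away the const qualifier on
     * the source map is safe here. */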
    RB_FOREACH(entry, AllocMap, (AllocMap*)from) {
        RBTMapResult ins_res;
        AllocMapEntry* new_entry =
                (AllocMapEntry*)qemu_malloc(sizeof(AllocMapEntry));
        if (new_entry == NULL) {
            ME("memcheck: Unable to allocate new AllocMapEntry on copy.");
            return -1;
        }
        memcpy(new_entry, entry, sizeof(AllocMapEntry));
        new_entry->desc.flags &= ~clear_flags;
        new_entry->desc.flags |= set_flags;
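        /* Deep-copy the saved call stack (if any), so the source and
         * destination maps never share the same buffer. */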
        if (entry->desc.call_stack_count) {
            new_entry->desc.call_stack =
               qemu_malloc(entry->desc.call_stack_count * sizeof(target_ulong));
            memcpy(new_entry->desc.call_stack, entry->desc.call_stack,
                   entry->desc.call_stack_count * sizeof(target_ulong));
        } else {
            new_entry->desc.call_stack = NULL;
        }
        new_entry->desc.call_stack_count = entry->desc.call_stack_count;
        ins_res = allocmap_insert_desc(to, new_entry, NULL);
        if (ins_res == RBT_MAP_RESULT_ENTRY_INSERTED) {
            if (memcheck_instrument_mmu) {
                // Invalidate TLB cache for inserted entry.
                invalidate_tlb_cache(new_entry->desc.malloc_desc.ptr,
                        mallocdesc_get_alloc_end(&new_entry->desc.malloc_desc));
            }
        } else {
            ME("memcheck: Unable to insert new map entry on copy. Insert returned %u",
               ins_res);
            if (new_entry->desc.call_stack != NULL) {
                qemu_free(new_entry->desc.call_stack);
            }
            qemu_free(new_entry);
            return -1;
        }
    }

    return 0;
}

int
allocmap_empty(AllocMap* map)
{
    MallocDescEx pulled;
    int removed = 0;

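    /* allocmap_pull_first returns zero while entries remain, so keep pulling
     * until the map is empty, freeing the call stack buffer owned by each
     * pulled descriptor. */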
    while (!allocmap_pull_first(map, &pulled)) {
        removed++;
        if (pulled.call_stack != NULL) {
            qemu_free(pulled.call_stack);
        }
    }

    return removed;
}