1/*
2 * Copyright (C) 2007 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "MemoryDealer"
18
19#include <binder/MemoryDealer.h>
20#include <binder/IPCThreadState.h>
21#include <binder/MemoryBase.h>
22
23#include <utils/Log.h>
24#include <utils/SortedVector.h>
25#include <utils/String8.h>
26#include <utils/threads.h>
27
28#include <stdint.h>
29#include <stdio.h>
30#include <stdlib.h>
31#include <fcntl.h>
32#include <unistd.h>
33#include <errno.h>
34#include <string.h>
35
36#include <sys/stat.h>
37#include <sys/types.h>
38#include <sys/mman.h>
39#include <sys/file.h>
40
41namespace android {
42// ----------------------------------------------------------------------------
43
44/*
45 * A simple templatized doubly linked-list implementation
46 */
47
48template <typename NODE>
49class LinkedList
50{
51    NODE*  mFirst;
52    NODE*  mLast;
53
54public:
55                LinkedList() : mFirst(0), mLast(0) { }
56    bool        isEmpty() const { return mFirst == 0; }
57    NODE const* head() const { return mFirst; }
58    NODE*       head() { return mFirst; }
59    NODE const* tail() const { return mLast; }
60    NODE*       tail() { return mLast; }
61
62    void insertAfter(NODE* node, NODE* newNode) {
63        newNode->prev = node;
64        newNode->next = node->next;
65        if (node->next == 0) mLast = newNode;
66        else                 node->next->prev = newNode;
67        node->next = newNode;
68    }
69
70    void insertBefore(NODE* node, NODE* newNode) {
71         newNode->prev = node->prev;
72         newNode->next = node;
73         if (node->prev == 0)   mFirst = newNode;
74         else                   node->prev->next = newNode;
75         node->prev = newNode;
76    }
77
78    void insertHead(NODE* newNode) {
79        if (mFirst == 0) {
80            mFirst = mLast = newNode;
81            newNode->prev = newNode->next = 0;
82        } else {
83            newNode->prev = 0;
84            newNode->next = mFirst;
85            mFirst->prev = newNode;
86            mFirst = newNode;
87        }
88    }
89
90    void insertTail(NODE* newNode) {
91        if (mLast == 0) {
92            insertHead(newNode);
93        } else {
94            newNode->prev = mLast;
95            newNode->next = 0;
96            mLast->next = newNode;
97            mLast = newNode;
98        }
99    }
100
101    NODE* remove(NODE* node) {
102        if (node->prev == 0)    mFirst = node->next;
103        else                    node->prev->next = node->next;
104        if (node->next == 0)    mLast = node->prev;
105        else                    node->next->prev = node->prev;
106        return node;
107    }
108};
109
110// ----------------------------------------------------------------------------
111
// An Allocation is a MemoryBase whose destructor automatically returns
// its range to the owning MemoryDealer (and releases unused pages back
// to the kernel).
class Allocation : public MemoryBase {
public:
    Allocation(const sp<MemoryDealer>& dealer,
            const sp<IMemoryHeap>& heap, ssize_t offset, size_t size);
    virtual ~Allocation();
private:
    // Strong reference keeps the dealer (and therefore its heap and
    // allocator bookkeeping) alive for the lifetime of this allocation.
    sp<MemoryDealer> mDealer;
};
120
121// ----------------------------------------------------------------------------
122
// Best-fit free-list allocator that hands out byte offsets within a
// fixed-size region.  It only does the bookkeeping; the actual memory
// lives in the MemoryDealer's heap.  All public entry points take
// mLock, so the allocator may be used from multiple threads.
class SimpleBestFitAllocator
{
    enum {
        PAGE_ALIGNED = 0x00000001   // allocate() flag: result starts on a page boundary
    };
public:
    SimpleBestFitAllocator(size_t size);
    ~SimpleBestFitAllocator();

    // Returns the byte offset of the new block.
    // NOTE(review): alloc() reports failure as a negative ssize_t
    // (NO_MEMORY) but this returns size_t; callers cast back to
    // ssize_t before testing the sign — confirm before treating the
    // value as an unsigned size.
    size_t      allocate(size_t size, uint32_t flags = 0);
    status_t    deallocate(size_t offset);
    size_t      size() const;
    void        dump(const char* what) const;
    void        dump(String8& res, const char* what) const;

private:

    // One node of the chunk list.  'start' and 'size' are expressed in
    // kMemoryAlign units, not bytes.
    struct chunk_t {
        chunk_t(size_t start, size_t size)
        : start(start), size(size), free(1), prev(0), next(0) {
        }
        size_t              start;
        size_t              size : 28;  // length in kMemoryAlign units
        int                 free : 4;   // non-zero while the chunk is unallocated
        mutable chunk_t*    prev;
        mutable chunk_t*    next;
    };

    // Unlocked implementations; callers must hold mLock.
    ssize_t  alloc(size_t size, uint32_t flags);
    chunk_t* dealloc(size_t start);
    void     dump_l(const char* what) const;
    void     dump_l(String8& res, const char* what) const;

    static const int    kMemoryAlign;
    mutable Mutex       mLock;
    LinkedList<chunk_t> mList;      // chunks ordered by start offset
    size_t              mHeapSize;  // total managed size in bytes, page-rounded
};
161
162// ----------------------------------------------------------------------------
163
// Wraps [offset, offset+size) of 'heap' and pins 'dealer' so the range
// can be returned to it on destruction.
Allocation::Allocation(
        const sp<MemoryDealer>& dealer,
        const sp<IMemoryHeap>& heap, ssize_t offset, size_t size)
    : MemoryBase(heap, offset, size), mDealer(dealer)
{
#ifndef NDEBUG
    // Debug builds scribble a known pattern over fresh allocations so
    // use of uninitialized memory is easier to spot.
    void* const start_ptr = (void*)(intptr_t(heap->base()) + offset);
    memset(start_ptr, 0xda, size);
#endif
}
174
// Returns the range to the allocator, then asks the kernel to discard
// any whole pages that fell entirely inside the freed range.
Allocation::~Allocation()
{
    size_t freedOffset = getOffset();
    size_t freedSize   = getSize();
    if (freedSize) {
        /* NOTE: it's VERY important to not free allocations of size 0 because
         * they're special as they don't have any record in the allocator
         * and could alias some real allocation (their offset is zero). */
        mDealer->deallocate(freedOffset);

        // keep the size to unmap in excess
        // Round the freed range outward to page boundaries.
        size_t pagesize = getpagesize();
        size_t start = freedOffset;
        size_t end = start + freedSize;
        start &= ~(pagesize-1);
        end = (end + pagesize-1) & ~(pagesize-1);

        // give back to the kernel the pages we don't need
        // Clamp back to the freed range, then round inward so only
        // pages wholly contained in the freed range are discarded.
        // NOTE(review): the clamp below undoes the outward rounding
        // above, which therefore looks like dead code — confirm before
        // removing.
        size_t free_start = freedOffset;
        size_t free_end = free_start + freedSize;
        if (start < free_start)
            start = free_start;
        if (end > free_end)
            end = free_end;
        start = (start + pagesize-1) & ~(pagesize-1);
        end &= ~(pagesize-1);

        if (start < end) {
            void* const start_ptr = (void*)(intptr_t(getHeap()->base()) + start);
            size_t size = end-start;

#ifndef NDEBUG
            // Debug builds mark discarded memory with a distinct pattern.
            memset(start_ptr, 0xdf, size);
#endif

            // MADV_REMOVE is not defined on Dapper based Goobuntu
#ifdef MADV_REMOVE
            if (size) {
                // MADV_REMOVE frees the backing pages of the shared
                // mapping; failure is only logged, never fatal.
                // NOTE(review): "%u" with a size_t argument assumes a
                // 32-bit build — verify on LP64.
                int err = madvise(start_ptr, size, MADV_REMOVE);
                LOGW_IF(err, "madvise(%p, %u, MADV_REMOVE) returned %s",
                        start_ptr, size, err<0 ? strerror(errno) : "Ok");
            }
#endif
        }
    }
}
221
222// ----------------------------------------------------------------------------
223
// Creates a shared-memory heap of 'size' bytes (named 'name' for
// debugging) plus the best-fit allocator that parcels it out.
MemoryDealer::MemoryDealer(size_t size, const char* name)
    : mHeap(new MemoryHeapBase(size, 0, name)),
    mAllocator(new SimpleBestFitAllocator(size))
{
}
229
MemoryDealer::~MemoryDealer()
{
    // mAllocator is a raw owning pointer; mHeap is an sp<> and is
    // released automatically.
    delete mAllocator;
}
234
235sp<IMemory> MemoryDealer::allocate(size_t size)
236{
237    sp<IMemory> memory;
238    const ssize_t offset = allocator()->allocate(size);
239    if (offset >= 0) {
240        memory = new Allocation(this, heap(), offset, size);
241    }
242    return memory;
243}
244
// Returns the block starting at 'offset' to the allocator.  Called by
// Allocation::~Allocation() rather than by clients directly.
void MemoryDealer::deallocate(size_t offset)
{
    allocator()->deallocate(offset);
}
249
// Logs the allocator's chunk map, tagged with 'what', for debugging.
void MemoryDealer::dump(const char* what) const
{
    allocator()->dump(what);
}
254
// Accessor for the shared memory heap backing every allocation.
const sp<IMemoryHeap>& MemoryDealer::heap() const {
    return mHeap;
}
258
// Accessor for the bookkeeping allocator (raw pointer owned by this
// MemoryDealer and deleted in its destructor).
SimpleBestFitAllocator* MemoryDealer::allocator() const {
    return mAllocator;
}
262
263// ----------------------------------------------------------------------------
264
// align all the memory blocks on a cache-line boundary
// (kMemoryAlign is also the unit in which chunk_t start/size are kept)
const int SimpleBestFitAllocator::kMemoryAlign = 32;
267
// Rounds the heap size up to whole pages, then seeds the chunk list
// with a single free chunk spanning the entire heap (sizes in
// kMemoryAlign units).
SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size)
{
    size_t pagesize = getpagesize();
    mHeapSize = ((size + pagesize-1) & ~(pagesize-1));

    chunk_t* node = new chunk_t(0, mHeapSize / kMemoryAlign);
    mList.insertHead(node);
}
276
277SimpleBestFitAllocator::~SimpleBestFitAllocator()
278{
279    while(!mList.isEmpty()) {
280        delete mList.remove(mList.head());
281    }
282}
283
// Total managed size in bytes (page-rounded).  No lock needed:
// mHeapSize is written only in the constructor.
size_t SimpleBestFitAllocator::size() const
{
    return mHeapSize;
}
288
// Thread-safe wrapper around alloc().
// NOTE(review): alloc() signals failure with a negative ssize_t
// (NO_MEMORY), yet this function returns size_t; the caller
// (MemoryDealer::allocate) casts back to ssize_t before checking the
// sign — confirm before interpreting the value as an unsigned size.
size_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags)
{
    Mutex::Autolock _l(mLock);
    ssize_t offset = alloc(size, flags);
    return offset;
}
295
296status_t SimpleBestFitAllocator::deallocate(size_t offset)
297{
298    Mutex::Autolock _l(mLock);
299    chunk_t const * const freed = dealloc(offset);
300    if (freed) {
301        return NO_ERROR;
302    }
303    return NAME_NOT_FOUND;
304}
305
// Best-fit search over the chunk list.  Returns the byte offset of the
// new block, or NO_MEMORY if no free chunk is large enough.  Caller
// must hold mLock.
ssize_t SimpleBestFitAllocator::alloc(size_t size, uint32_t flags)
{
    if (size == 0) {
        // Zero-sized requests succeed at offset 0 but are never
        // recorded in the list (see the NOTE in Allocation's dtor).
        return 0;
    }
    // From here on, 'size' is in kMemoryAlign units, not bytes.
    size = (size + kMemoryAlign-1) / kMemoryAlign;
    chunk_t* free_chunk = 0;
    chunk_t* cur = mList.head();

    size_t pagesize = getpagesize();
    while (cur) {
        int extra = 0;
        if (flags & PAGE_ALIGNED)
            // Units needed to push cur->start up to the next page boundary.
            extra = ( -cur->start & ((pagesize/kMemoryAlign)-1) ) ;

        // best fit
        if (cur->free && (cur->size >= (size+extra))) {
            // Remember the smallest chunk that fits.
            if ((!free_chunk) || (cur->size < free_chunk->size)) {
                free_chunk = cur;
            }
            if (cur->size == size) {
                // Exact fit: no better candidate can exist.
                break;
            }
        }
        cur = cur->next;
    }

    if (free_chunk) {
        const size_t free_size = free_chunk->size;
        free_chunk->free = 0;
        free_chunk->size = size;
        if (free_size > size) {
            int extra = 0;
            if (flags & PAGE_ALIGNED)
                extra = ( -free_chunk->start & ((pagesize/kMemoryAlign)-1) ) ;
            if (extra) {
                // Carve the leading padding into its own free chunk so
                // the returned block begins exactly on a page boundary.
                chunk_t* split = new chunk_t(free_chunk->start, extra);
                free_chunk->start += extra;
                mList.insertBefore(free_chunk, split);
            }

            LOGE_IF((flags&PAGE_ALIGNED) &&
                    ((free_chunk->start*kMemoryAlign)&(pagesize-1)),
                    "PAGE_ALIGNED requested, but page is not aligned!!!");

            // Whatever remains past the allocated span becomes a new
            // free chunk immediately after it.
            const ssize_t tail_free = free_size - (size+extra);
            if (tail_free > 0) {
                chunk_t* split = new chunk_t(
                        free_chunk->start + free_chunk->size, tail_free);
                mList.insertAfter(free_chunk, split);
            }
        }
        // Convert back from kMemoryAlign units to a byte offset.
        return (free_chunk->start)*kMemoryAlign;
    }
    return NO_MEMORY;
}
362
// Marks the chunk starting at byte offset 'start' free and coalesces it
// with adjacent free chunks.  Returns the resulting merged chunk, or 0
// if no chunk starts at that offset.  Caller must hold mLock.
SimpleBestFitAllocator::chunk_t* SimpleBestFitAllocator::dealloc(size_t start)
{
    // Convert the byte offset into kMemoryAlign units to match
    // chunk_t::start.
    start = start / kMemoryAlign;
    chunk_t* cur = mList.head();
    while (cur) {
        if (cur->start == start) {
            // NOTE(review): "%08lX" with these size_t/bit-field
            // operands assumes a 32-bit build — verify on LP64.
            LOG_FATAL_IF(cur->free,
                "block at offset 0x%08lX of size 0x%08lX already freed",
                cur->start*kMemoryAlign, cur->size*kMemoryAlign);

            // merge freed blocks together
            // Absorb 'cur' into its predecessor when the predecessor
            // is free (or 'cur' is empty), then keep walking forward
            // while successors are also free.
            chunk_t* freed = cur;
            cur->free = 1;
            do {
                chunk_t* const p = cur->prev;
                chunk_t* const n = cur->next;
                if (p && (p->free || !cur->size)) {
                    freed = p;
                    p->size += cur->size;
                    mList.remove(cur);
                    delete cur;
                }
                cur = n;
            } while (cur && cur->free);

            #ifndef NDEBUG
                if (!freed->free) {
                    // Dump the whole list before the fatal check below
                    // fires, to aid post-mortem debugging.
                    dump_l("dealloc (!freed->free)");
                }
            #endif
            LOG_FATAL_IF(!freed->free,
                "freed block at offset 0x%08lX of size 0x%08lX is not free!",
                freed->start * kMemoryAlign, freed->size * kMemoryAlign);

            return freed;
        }
        cur = cur->next;
    }
    // No chunk starts at this offset.
    return 0;
}
403
// Thread-safe: logs the chunk map tagged with 'what'.
void SimpleBestFitAllocator::dump(const char* what) const
{
    Mutex::Autolock _l(mLock);
    dump_l(what);
}
409
// Formats the chunk map into a temporary String8 and sends it to the
// debug log.  Caller must hold mLock.
void SimpleBestFitAllocator::dump_l(const char* what) const
{
    String8 result;
    dump_l(result, what);
    LOGD("%s", result.string());
}
416
// Thread-safe: appends the chunk map, tagged with 'what', to 'result'.
void SimpleBestFitAllocator::dump(String8& result,
        const char* what) const
{
    Mutex::Autolock _l(mLock);
    dump_l(result, what);
}
423
// Appends one line per chunk (node address, byte offset, byte size,
// Free/Allocated flag, and any prev/next link inconsistencies) plus a
// total of allocated bytes.  Caller must hold mLock.
void SimpleBestFitAllocator::dump_l(String8& result,
        const char* what) const
{
    size_t size = 0;    // running total of allocated bytes
    int32_t i = 0;      // chunk index, for display only
    chunk_t const* cur = mList.head();

    const size_t SIZE = 256;
    char buffer[SIZE];
    snprintf(buffer, SIZE, "  %s (%p, size=%u)\n",
            what, this, (unsigned int)mHeapSize);

    result.append(buffer);

    while (cur) {
        // Sanity-check the doubly-linked list; a broken link is
        // reported inline rather than asserted.
        const char* errs[] = {"", "| link bogus NP",
                            "| link bogus PN", "| link bogus NP+PN" };
        int np = ((cur->next) && cur->next->prev != cur) ? 1 : 0;
        int pn = ((cur->prev) && cur->prev->next != cur) ? 2 : 0;

        // NOTE(review): int(cur) truncates the pointer on 64-bit
        // builds — this formatting assumes a 32-bit target; verify
        // before reusing on LP64.
        snprintf(buffer, SIZE, "  %3u: %08x | 0x%08X | 0x%08X | %s %s\n",
            i, int(cur), int(cur->start*kMemoryAlign),
            int(cur->size*kMemoryAlign),
                    int(cur->free) ? "F" : "A",
                    errs[np|pn]);

        result.append(buffer);

        if (!cur->free)
            size += cur->size*kMemoryAlign;

        i++;
        cur = cur->next;
    }
    snprintf(buffer, SIZE,
            "  size allocated: %u (%u KB)\n", int(size), int(size/1024));
    result.append(buffer);
}
462
463
464}; // namespace android
465