/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "wtf/PartitionAlloc.h"

#include "wtf/BitwiseOperations.h"
#include "wtf/OwnPtr.h"
#include "wtf/PassOwnPtr.h"
#include <gtest/gtest.h>
#include <stdlib.h>
#include <string.h>

#if OS(POSIX)
#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
#endif // OS(POSIX)

#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)

namespace {

static const size_t kTestMaxAllocation = 4096;
static PartitionAllocator<kTestMaxAllocation> allocator;

static const size_t kTestAllocSize = sizeof(void*);
#ifdef NDEBUG
static const size_t kPointerOffset = 0;
static const size_t kExtraAllocSize = 0;
#else
static const size_t kPointerOffset = sizeof(uintptr_t);
static const size_t kExtraAllocSize = sizeof(uintptr_t) * 2;
#endif
static const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
static const size_t kTestBucketIndex = kRealAllocSize >> WTF::kBucketShift;
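// A worked example of the sizing above, assuming a 64-bit debug build
// (sizeof(void*) == 8, so kExtraAllocSize == 16):
//   kRealAllocSize   == 8 + 16 == 24
//   kTestBucketIndex == 24 >> WTF::kBucketShift
// The extra bytes appear to be the per-allocation debug cookies implied by
// kPointerOffset (cf. the partitionCookieFreePointerAdjust() calls below).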

static void TestSetup()
{
    allocator.init();
}

static void TestShutdown()
{
    // We expect no leaks in the general case. We have a test for leak
    // detection.
    EXPECT_TRUE(allocator.shutdown());
}

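// Allocates every slot in the active page of the bucket for |size| and
// returns that (now full) page, checking along the way that all the slots
// land in the same partition page. Typical usage in the tests below:
//   WTF::PartitionPage* page = GetFullPage(kTestAllocSize);
//   ... assertions on bucket / page state ...
//   FreeFullPage(page);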
static WTF::PartitionPage* GetFullPage(size_t size)
{
    size_t realSize = size + kExtraAllocSize;
    size_t bucketIdx = realSize >> WTF::kBucketShift;
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
    size_t numSlots = bucket->pageSize / realSize;
    void* first = 0;
    void* last = 0;
    size_t i;
    for (i = 0; i < numSlots; ++i) {
        void* ptr = partitionAlloc(allocator.root(), size);
        EXPECT_TRUE(ptr);
        if (!i)
            first = ptr;
        else if (i == numSlots - 1)
            last = ptr;
    }
    EXPECT_EQ(reinterpret_cast<size_t>(first) & WTF::kPartitionPageBaseMask, reinterpret_cast<size_t>(last) & WTF::kPartitionPageBaseMask);
    EXPECT_EQ(numSlots, static_cast<size_t>(bucket->activePagesHead->numAllocatedSlots));
    EXPECT_EQ(0, partitionPageFreelistHead(bucket->activePagesHead));
    EXPECT_TRUE(bucket->activePagesHead);
    EXPECT_TRUE(bucket->activePagesHead != &allocator.root()->seedPage);
    return bucket->activePagesHead;
}

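// Frees every slot in |page|, walking the slots in address order from the
// start of the page.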
static void FreeFullPage(WTF::PartitionPage* page)
{
    size_t size = partitionBucketSize(page->bucket);
    size_t numSlots = page->bucket->pageSize / size;
    EXPECT_EQ(numSlots, static_cast<size_t>(abs(page->numAllocatedSlots)));
    char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
    size_t i;
    for (i = 0; i < numSlots; ++i) {
        partitionFree(ptr + kPointerOffset);
        ptr += size;
    }
}

// Check that the most basic allocate / free pair works.
TEST(WTF_PartitionAlloc, Basic)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_EQ(&bucket->root->seedPage, bucket->activePagesHead);
    EXPECT_EQ(0, bucket->activePagesHead->activePageNext);

    void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
    EXPECT_TRUE(ptr);
    EXPECT_EQ(kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kPartitionPageOffsetMask);
    // Check that the offset appears to include a guard page.
    EXPECT_EQ(WTF::kPartitionPageSize + kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kSuperPageOffsetMask);

    partitionFree(ptr);
    // Expect that the last active page does not get tossed to the freelist.
    EXPECT_FALSE(bucket->freePagesHead);

    TestShutdown();
}

// Check that we can detect a memory leak.
TEST(WTF_PartitionAlloc, SimpleLeak)
{
    TestSetup();
    void* leakedPtr = partitionAlloc(allocator.root(), kTestAllocSize);
    (void)leakedPtr;
    EXPECT_FALSE(allocator.shutdown());
}

// Test multiple allocations and freelist handling.
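// The "re-use the just-freed slot" checks rely on the bucket freelist handing
// the most recently freed slot straight back to the next allocation.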
TEST(WTF_PartitionAlloc, MultiAlloc)
{
    TestSetup();

    char* ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    char* ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr1);
    EXPECT_TRUE(ptr2);
    ptrdiff_t diff = ptr2 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);

    // Check that we re-use the just-freed slot.
    partitionFree(ptr2);
    ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr2);
    diff = ptr2 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
    partitionFree(ptr1);
    ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr1);
    diff = ptr2 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);

    char* ptr3 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr3);
    diff = ptr3 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff);

    partitionFree(ptr1);
    partitionFree(ptr2);
    partitionFree(ptr3);

    TestShutdown();
}

// Test a bucket with multiple pages.
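// Note on the sign of numAllocatedSlots below: the page that remains the
// active page ends up at 0 after FreeFullPage(), while a fully freed page
// that is no longer the active page is observed at -1 (hence the abs() in
// FreeFullPage above).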
TEST(WTF_PartitionAlloc, MultiPages)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    WTF::PartitionPage* page = GetFullPage(kTestAllocSize);
    FreeFullPage(page);
    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_EQ(page, bucket->activePagesHead);
    EXPECT_EQ(0, page->activePageNext);
    EXPECT_EQ(0, page->numAllocatedSlots);

    page = GetFullPage(kTestAllocSize);
    WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);

    EXPECT_EQ(page2, bucket->activePagesHead);
    EXPECT_EQ(0, page2->activePageNext);
    EXPECT_EQ(reinterpret_cast<uintptr_t>(partitionPageToPointer(page)) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(page2)) & WTF::kSuperPageBaseMask);

    // Fully free the non-current page. It should not be freelisted because
    // there is no other immediately usable page; the other page is full.
    FreeFullPage(page);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_EQ(page, bucket->activePagesHead);

    // Allocate a new page; it should pull from the freelist.
    page = GetFullPage(kTestAllocSize);
    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_EQ(page, bucket->activePagesHead);

    FreeFullPage(page);
    FreeFullPage(page2);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_EQ(-1, page2->numAllocatedSlots);

    TestShutdown();
}


// Test some finer aspects of internal page transitions.
TEST(WTF_PartitionAlloc, PageTransitions)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize);
    EXPECT_EQ(page1, bucket->activePagesHead);
    EXPECT_EQ(0, page1->activePageNext);
    WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
    EXPECT_EQ(page2, bucket->activePagesHead);
    EXPECT_EQ(0, page2->activePageNext);

    // Bounce page1 back into the non-full list then fill it up again.
    char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
    partitionFree(ptr);
    EXPECT_EQ(page1, bucket->activePagesHead);
    (void) partitionAlloc(allocator.root(), kTestAllocSize);
    EXPECT_EQ(page1, bucket->activePagesHead);
    EXPECT_EQ(page2, bucket->activePagesHead->activePageNext);

    // Allocating another page at this point should cause us to scan over page1
    // (which is both full and NOT our current page), and evict it from the
    // active page list. Older code had an O(n^2) condition due to failure to
    // do this.
    WTF::PartitionPage* page3 = GetFullPage(kTestAllocSize);
    EXPECT_EQ(page3, bucket->activePagesHead);
    EXPECT_EQ(0, page3->activePageNext);

    // Work out a pointer into page2 and free it.
    ptr = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset;
    partitionFree(ptr);
    // Trying to allocate at this time should cause us to cycle around to page2
    // and find the recently freed slot.
    char* newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_EQ(ptr, newPtr);
    EXPECT_EQ(page2, bucket->activePagesHead);
    EXPECT_EQ(page3, page2->activePageNext);

    // Work out a pointer into page1 and free it. This should pull the page
    // back into the list of available pages.
    ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
    partitionFree(ptr);
    // This allocation should be satisfied by page1.
    newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_EQ(ptr, newPtr);
    EXPECT_EQ(page1, bucket->activePagesHead);
    EXPECT_EQ(page2, page1->activePageNext);

    FreeFullPage(page3);
    FreeFullPage(page2);
    FreeFullPage(page1);

    // Allocating whilst in this state exposed a bug, so keep the test.
    ptr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    partitionFree(ptr);

    TestShutdown();
}

// Test some corner cases relating to page transitions in the internal
// free page list metadata bucket.
TEST(WTF_PartitionAlloc, FreePageListPageTransitions)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    size_t numToFillFreeListPage = WTF::kPartitionPageSize / (sizeof(WTF::PartitionPage) + kExtraAllocSize);
    // The +1 is because we need to account for the fact that the current page
    // never gets thrown on the freelist.
    ++numToFillFreeListPage;
    OwnPtr<WTF::PartitionPage*[]> pages = adoptArrayPtr(new WTF::PartitionPage*[numToFillFreeListPage]);

    size_t i;
    for (i = 0; i < numToFillFreeListPage; ++i) {
        pages[i] = GetFullPage(kTestAllocSize);
    }
    EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
    for (i = 0; i < numToFillFreeListPage; ++i)
        FreeFullPage(pages[i]);
    EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
    EXPECT_EQ(0, bucket->activePagesHead->activePageNext);

    // Allocate / free in a different bucket size so we get control of a
    // different free page list. We need two pages because one will be the last
    // active page and not get freed.
    WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize * 2);
    WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize * 2);
    FreeFullPage(page1);
    FreeFullPage(page2);

    // If we re-allocate all kTestAllocSize allocations, we'll pull all the
    // free pages and end up freeing the first page for free page objects.
    // It gets a bit tricky, but a nice re-entrancy is going on:
    // alloc(kTestAllocSize) -> pulls page from free page list ->
    // free(PartitionFreepagelistEntry) -> last entry in page freed ->
    // alloc(PartitionFreepagelistEntry).
    for (i = 0; i < numToFillFreeListPage; ++i) {
        pages[i] = GetFullPage(kTestAllocSize);
    }
    EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);

    // As part of the final free-up, we'll test another re-entrancy:
    // free(kTestAllocSize) -> last entry in page freed ->
    // alloc(PartitionFreepagelistEntry) -> pulls page from free page list ->
    // free(PartitionFreepagelistEntry)
    for (i = 0; i < numToFillFreeListPage; ++i)
        FreeFullPage(pages[i]);
    EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
    EXPECT_EQ(0, bucket->activePagesHead->activePageNext);

    TestShutdown();
}

// Test a large series of allocations that cross more than one underlying
// 64KB super page allocation.
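// (Layout reminder: a super page begins and ends with a guard partition page,
// with the usable partition pages in between.)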
TEST(WTF_PartitionAlloc, MultiPageAllocs)
{
    TestSetup();
    // This is guaranteed to cross a super page boundary because the first
    // partition page "slot" will be taken up by a guard page.
    size_t numPagesNeeded = WTF::kNumPartitionPagesPerSuperPage;
    // The super page should begin and end in a guard, so we need one fewer
    // page in order to allocate a single page in the new super page.
    --numPagesNeeded;

    EXPECT_GT(numPagesNeeded, 1u);
    OwnPtr<WTF::PartitionPage*[]> pages;
    pages = adoptArrayPtr(new WTF::PartitionPage*[numPagesNeeded]);
    uintptr_t firstSuperPageBase = 0;
    size_t i;
    for (i = 0; i < numPagesNeeded; ++i) {
        pages[i] = GetFullPage(kTestAllocSize);
        void* storagePtr = partitionPageToPointer(pages[i]);
        if (!i)
            firstSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask;
        if (i == numPagesNeeded - 1) {
            uintptr_t secondSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask;
            uintptr_t secondSuperPageOffset = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageOffsetMask;
            EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase);
            // Check that we allocated a guard page for the second super page.
            EXPECT_EQ(WTF::kPartitionPageSize, secondSuperPageOffset);
        }
    }
    for (i = 0; i < numPagesNeeded; ++i)
        FreeFullPage(pages[i]);

    TestShutdown();
}

// Test the generic allocation functions that can handle arbitrary sizes and
// reallocation, etc.
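// The realloc sequence below walks a size ladder: within the same bucket,
// then into a larger bucket, then past kMaxAllocation (outside the partition),
// and finally back down into the partition, verifying at each step that the
// stored byte survived the copy.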
TEST(WTF_PartitionAlloc, GenericAlloc)
{
    TestSetup();

    void* ptr = partitionAllocGeneric(allocator.root(), 1);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(allocator.root(), ptr);
    ptr = partitionAllocGeneric(allocator.root(), PartitionAllocator<4096>::kMaxAllocation + 1);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(allocator.root(), ptr);

    ptr = partitionAllocGeneric(allocator.root(), 1);
    EXPECT_TRUE(ptr);
    void* origPtr = ptr;
    char* charPtr = static_cast<char*>(ptr);
    *charPtr = 'A';

    // Change the size of the realloc, remaining inside the same bucket.
    void* newPtr = partitionReallocGeneric(allocator.root(), ptr, 2);
    EXPECT_EQ(ptr, newPtr);
    newPtr = partitionReallocGeneric(allocator.root(), ptr, 1);
    EXPECT_EQ(ptr, newPtr);
    newPtr = partitionReallocGeneric(allocator.root(), ptr, WTF::QuantizedAllocation::kMinRounding);
    EXPECT_EQ(ptr, newPtr);

    // Change the size of the realloc, switching buckets.
    newPtr = partitionReallocGeneric(allocator.root(), ptr, WTF::QuantizedAllocation::kMinRounding + 1);
    EXPECT_NE(newPtr, ptr);
    // Check that the realloc copied correctly.
    char* newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'A');
#ifndef NDEBUG
    // Subtle: this checks for an old bug where we copied too much from the
    // source of the realloc. That bug would show up as trashing of the
    // uninitialized value in the space of the upsized allocation.
    EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(*(newCharPtr + WTF::QuantizedAllocation::kMinRounding)));
#endif
    *newCharPtr = 'B';
    // The realloc moved. To check that the old allocation was freed, we can
    // do an alloc of the old allocation size and check that the old allocation
    // address is at the head of the freelist and reused.
    void* reusedPtr = partitionAllocGeneric(allocator.root(), 1);
    EXPECT_EQ(reusedPtr, origPtr);
    partitionFreeGeneric(allocator.root(), reusedPtr);

    // Downsize the realloc.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(allocator.root(), ptr, 1);
    EXPECT_EQ(newPtr, origPtr);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'B');
    *newCharPtr = 'C';

    // Upsize the realloc to outside the partition.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(allocator.root(), ptr, PartitionAllocator<4096>::kMaxAllocation + 1);
    EXPECT_NE(newPtr, ptr);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'C');
    *newCharPtr = 'D';

    // Upsize and downsize the realloc, remaining outside the partition.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(allocator.root(), ptr, PartitionAllocator<4096>::kMaxAllocation * 10);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'D');
    *newCharPtr = 'E';
    ptr = newPtr;
    newPtr = partitionReallocGeneric(allocator.root(), ptr, PartitionAllocator<4096>::kMaxAllocation * 2);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'E');
    *newCharPtr = 'F';

    // Downsize the realloc to inside the partition.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(allocator.root(), ptr, 1);
    EXPECT_NE(newPtr, ptr);
    EXPECT_EQ(newPtr, origPtr);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'F');

    partitionFreeGeneric(allocator.root(), newPtr);
    TestShutdown();
}

// Tests the handing out of freelists for partial pages.
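// bigSize is chosen so that a single slot occupies almost an entire system
// page; slots therefore get provisioned one system page at a time, which is
// why the page freelist is expected to stay empty after each allocation.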
TEST(WTF_PartitionAlloc, PartialPageFreelists)
{
    TestSetup();

    size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
    EXPECT_EQ(WTF::kSystemPageSize - WTF::kAllocationGranularity, bigSize + kExtraAllocSize);
    size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift;
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->freePagesHead);

    void* ptr = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr);

    WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    // The freelist should be empty as only one slot could be allocated without
    // touching more system pages.
    EXPECT_EQ(0, partitionPageFreelistHead(page));
    EXPECT_EQ(1, page->numAllocatedSlots);

    void* ptr2 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr2);
    EXPECT_EQ(0, partitionPageFreelistHead(page));
    EXPECT_EQ(2, page->numAllocatedSlots);

    void* ptr3 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr3);
    EXPECT_EQ(0, partitionPageFreelistHead(page));
    EXPECT_EQ(3, page->numAllocatedSlots);

    void* ptr4 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr4);
    EXPECT_EQ(0, partitionPageFreelistHead(page));
    EXPECT_EQ(4, page->numAllocatedSlots);

    void* ptr5 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr5);

    WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr5));
    EXPECT_EQ(1, page2->numAllocatedSlots);

    // Churn things a little whilst there's a partial page freelist.
    partitionFree(ptr);
    ptr = partitionAlloc(allocator.root(), bigSize);
    void* ptr6 = partitionAlloc(allocator.root(), bigSize);

    partitionFree(ptr);
    partitionFree(ptr2);
    partitionFree(ptr3);
    partitionFree(ptr4);
    partitionFree(ptr5);
    partitionFree(ptr6);
    EXPECT_TRUE(bucket->freePagesHead);
    EXPECT_EQ(page, bucket->freePagesHead);
    EXPECT_TRUE(partitionPageFreelistHead(page2));
    EXPECT_EQ(0, page2->numAllocatedSlots);

    // And test a couple of sizes that do not cross kSystemPageSize with a single allocation.
    size_t mediumSize = WTF::kSystemPageSize / 2;
    bucketIdx = (mediumSize + kExtraAllocSize) >> WTF::kBucketShift;
    bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->freePagesHead);

    ptr = partitionAlloc(allocator.root(), mediumSize);
    EXPECT_TRUE(ptr);
    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    size_t totalSlots = page->bucket->pageSize / (mediumSize + kExtraAllocSize);
    size_t firstPageSlots = WTF::kSystemPageSize / (mediumSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);

    partitionFree(ptr);

    size_t smallSize = WTF::kSystemPageSize / 4;
    bucketIdx = (smallSize + kExtraAllocSize) >> WTF::kBucketShift;
    bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->freePagesHead);

    ptr = partitionAlloc(allocator.root(), smallSize);
    EXPECT_TRUE(ptr);
    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    totalSlots = page->bucket->pageSize / (smallSize + kExtraAllocSize);
    firstPageSlots = WTF::kSystemPageSize / (smallSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);

    partitionFree(ptr);
    EXPECT_TRUE(partitionPageFreelistHead(page));
    EXPECT_EQ(0, page->numAllocatedSlots);

    size_t verySmallSize = WTF::kAllocationGranularity;
    bucketIdx = (verySmallSize + kExtraAllocSize) >> WTF::kBucketShift;
    bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->freePagesHead);

    ptr = partitionAlloc(allocator.root(), verySmallSize);
    EXPECT_TRUE(ptr);
    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    totalSlots = page->bucket->pageSize / (verySmallSize + kExtraAllocSize);
    firstPageSlots = WTF::kSystemPageSize / (verySmallSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);

    partitionFree(ptr);
    EXPECT_TRUE(partitionPageFreelistHead(page));
    EXPECT_EQ(0, page->numAllocatedSlots);

    TestShutdown();
}

// Test some of the fragmentation-resistant properties of the allocator.
TEST(WTF_PartitionAlloc, PageRefilling)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    // Grab two full pages and a non-full page.
    WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize);
    WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
    void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
    EXPECT_TRUE(ptr);
    EXPECT_NE(page1, bucket->activePagesHead);
    EXPECT_NE(page2, bucket->activePagesHead);
    WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);

    // Work out a pointer into page1 and free it; and then page2 and free it.
    char* ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page1)) + kPointerOffset;
    partitionFree(ptr2);
    ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page2)) + kPointerOffset;
    partitionFree(ptr2);

    // If we perform two allocations from the same bucket now, we expect to
    // refill both the nearly full pages.
    (void) partitionAlloc(allocator.root(), kTestAllocSize);
    (void) partitionAlloc(allocator.root(), kTestAllocSize);
    EXPECT_EQ(1, page->numAllocatedSlots);

    FreeFullPage(page2);
    FreeFullPage(page1);
    partitionFree(ptr);

    TestShutdown();
}

// Basic tests to ensure that allocations work for partial page buckets.
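// A "partial page" bucket is one whose pageSize is smaller than a full
// partition page; the loop below scans candidate sizes until it finds one.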
TEST(WTF_PartitionAlloc, PartialPages)
{
    TestSetup();

    // Find a size that is backed by a partial partition page.
    size_t size = sizeof(void*);
    WTF::PartitionBucket* bucket = 0;
    while (size < kTestMaxAllocation) {
        bucket = &allocator.root()->buckets()[size >> WTF::kBucketShift];
        if (bucket->pageSize < WTF::kPartitionPageSize)
            break;
        size += sizeof(void*);
    }
    EXPECT_LT(size, kTestMaxAllocation);

    WTF::PartitionPage* page1 = GetFullPage(size);
    WTF::PartitionPage* page2 = GetFullPage(size);
    FreeFullPage(page2);
    FreeFullPage(page1);

    TestShutdown();
}

// Test correct handling if our mapping collides with another.
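// The allocPages() calls below deliberately occupy the address space just
// before and after the existing super page, so that the next super-page
// reservation cannot go where the allocator would first try to place it and
// the collision-handling path gets exercised.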
TEST(WTF_PartitionAlloc, MappingCollision)
{
    TestSetup();
    // The -2 is because the first and last partition pages in a super page are
    // guard pages.
    size_t numPartitionPagesNeeded = WTF::kNumPartitionPagesPerSuperPage - 2;
    OwnPtr<WTF::PartitionPage*[]> firstSuperPagePages = adoptArrayPtr(new WTF::PartitionPage*[numPartitionPagesNeeded]);
    OwnPtr<WTF::PartitionPage*[]> secondSuperPagePages = adoptArrayPtr(new WTF::PartitionPage*[numPartitionPagesNeeded]);

    size_t i;
    for (i = 0; i < numPartitionPagesNeeded; ++i)
        firstSuperPagePages[i] = GetFullPage(kTestAllocSize);

    char* pageBase = reinterpret_cast<char*>(WTF::partitionPageToPointer(firstSuperPagePages[0]));
    EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & WTF::kSuperPageOffsetMask);
    pageBase -= WTF::kPartitionPageSize;
    // Map a single system page on either side of the mapping for our
    // allocations, with the goal of tripping up alignment of the next mapping.
    void* map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
    EXPECT_TRUE(map1);
    void* map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
    EXPECT_TRUE(map2);
    WTF::setSystemPagesInaccessible(map1, WTF::kPageAllocationGranularity);
    WTF::setSystemPagesInaccessible(map2, WTF::kPageAllocationGranularity);

    for (i = 0; i < numPartitionPagesNeeded; ++i)
        secondSuperPagePages[i] = GetFullPage(kTestAllocSize);

    WTF::freePages(map1, WTF::kPageAllocationGranularity);
    WTF::freePages(map2, WTF::kPageAllocationGranularity);

    pageBase = reinterpret_cast<char*>(partitionPageToPointer(secondSuperPagePages[0]));
    EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & WTF::kSuperPageOffsetMask);
    pageBase -= WTF::kPartitionPageSize;
    // Map a single system page on either side of the mapping for our
    // allocations, with the goal of tripping up alignment of the next mapping.
    map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
    EXPECT_TRUE(map1);
    map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
    EXPECT_TRUE(map2);
    WTF::setSystemPagesInaccessible(map1, WTF::kPageAllocationGranularity);
    WTF::setSystemPagesInaccessible(map2, WTF::kPageAllocationGranularity);

    WTF::PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize);
    WTF::freePages(map1, WTF::kPageAllocationGranularity);
    WTF::freePages(map2, WTF::kPageAllocationGranularity);

    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kPartitionPageOffsetMask);

    // And make sure we really did get a page in a new super page.
    EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(firstSuperPagePages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask);
    EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(secondSuperPagePages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask);

    FreeFullPage(pageInThirdSuperPage);
    for (i = 0; i < numPartitionPagesNeeded; ++i) {
        FreeFullPage(firstSuperPagePages[i]);
        FreeFullPage(secondSuperPagePages[i]);
    }

    TestShutdown();
}

// Tests that the countLeadingZeros() functions work to our satisfaction.
// It doesn't seem worth the overhead of a whole new file for these tests, so
// we'll put them here since partitionAllocGeneric will depend heavily on these
// functions working correctly.
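// countLeadingZeros32(x) / countLeadingZerosSizet(x) return the number of
// zero bits above the most significant set bit, and the full bit width when
// x == 0; e.g. countLeadingZeros32(1 << 30) == 1, as asserted below.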
TEST(WTF_PartitionAlloc, CLZWorks)
{
    EXPECT_EQ(32u, WTF::countLeadingZeros32(0));
    EXPECT_EQ(31u, WTF::countLeadingZeros32(1));
    EXPECT_EQ(1u, WTF::countLeadingZeros32(1 << 30));
    EXPECT_EQ(0u, WTF::countLeadingZeros32(1 << 31));

#if CPU(64BIT)
    EXPECT_EQ(64u, WTF::countLeadingZerosSizet(0ull));
    EXPECT_EQ(63u, WTF::countLeadingZerosSizet(1ull));
    EXPECT_EQ(32u, WTF::countLeadingZerosSizet(1ull << 31));
    EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1ull << 62));
    EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1ull << 63));
#else
    EXPECT_EQ(32u, WTF::countLeadingZerosSizet(0));
    EXPECT_EQ(31u, WTF::countLeadingZerosSizet(1));
    EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1 << 30));
    EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1 << 31));
#endif
}

} // namespace

#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
