/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef WTF_PartitionAlloc_h
#define WTF_PartitionAlloc_h

// DESCRIPTION
// partitionAlloc() / partitionAllocGeneric() and partitionFree() /
// partitionFreeGeneric() are approximately analogous to malloc() and free().
//
// The main difference is that a PartitionRoot / PartitionRootGeneric object
// must be supplied to these functions, representing a specific "heap partition"
// that will be used to satisfy the allocation. Different partitions are
// guaranteed to exist in separate address spaces, including being separate from
// the main system heap. If the contained objects are all freed, physical memory
// is returned to the system but the address space remains reserved.
//
// THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
// SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To
// minimize the instruction count to the fullest extent possible, the
// PartitionRoot is really just a header adjacent to other data areas provided
// by the allocator class.
//
// The partitionAlloc() variant of the API has the following caveats:
// - Allocations and frees against a single partition must be single threaded.
// - Allocations must not exceed a max size, chosen at compile-time via a
// templated parameter to SizeSpecificPartitionAllocator.
// - Allocation sizes must be aligned to the system pointer size.
// - Allocations are bucketed exactly according to size.
//
// And for partitionAllocGeneric():
// - Multi-threaded use against a single partition is ok; locking is handled.
// - Allocations of any arbitrary size can be handled (subject to a limit of
// INT_MAX bytes for security reasons).
// - Bucketing is by approximate size, for example an allocation of 4000 bytes
// might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and
// keep worst-case waste to ~10%.
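//
// Example usage (an illustrative sketch only; the allocator classes are
// defined at the bottom of this header, the size 1024 is arbitrary, and the
// partition objects would normally be long-lived rather than locals):
//
//   // Size-specific partition: single-threaded, sizes up to ~1024 bytes.
//   SizeSpecificPartitionAllocator<1024> allocator;
//   allocator.init();
//   void* p = partitionAlloc(allocator.root(), 64);
//   partitionFree(p);
//   allocator.shutdown();
//
//   // Generic partition: thread-safe, arbitrary sizes.
//   PartitionAllocatorGeneric genericAllocator;
//   genericAllocator.init();
//   void* q = partitionAllocGeneric(genericAllocator.root(), 4000);
//   q = partitionReallocGeneric(genericAllocator.root(), q, 8000);
//   partitionFreeGeneric(genericAllocator.root(), q);
//   genericAllocator.shutdown();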
//
// The allocators are designed to be extremely fast, thanks to the following
// properties and design:
// - Just a single (reasonably predictable) branch in the hot / fast path for
// both allocating and (significantly) freeing.
// - A minimal number of operations in the hot / fast path, with the slow paths
// in separate functions, leading to the possibility of inlining.
// - Each partition page (which is usually multiple physical pages) has a
// metadata structure which allows fast mapping of free() address to an
// underlying bucket.
// - Supports a lock-free API for fast performance in single-threaded cases.
// - The freelist for a given bucket is split across a number of partition
// pages, enabling various simple tricks to try and minimize fragmentation.
// - Fine-grained bucket sizes leading to less waste and better packing.
//
// The following security properties are provided at this time:
// - Linear overflows cannot corrupt into the partition.
// - Linear overflows cannot corrupt out of the partition.
// - Freed pages will only be re-used within the partition.
//   (exception: large allocations > ~1MB)
// - Freed pages will only hold same-sized objects when re-used.
// - Dereference of freelist pointer should fault.
// - Out-of-line main metadata: linear over or underflow cannot corrupt it.
// - Partial pointer overwrite of freelist pointer should fault.
// - Rudimentary double-free detection.
// - Large allocations (> ~1MB) are guard-paged at the beginning and end.
//
// The following security properties could be investigated in the future:
// - Per-object bucketing (instead of per-size) is mostly available at the API,
// but not used yet.
// - No randomness of freelist entries or bucket position.
// - Better checking for wild pointers in free().
// - Better freelist masking function to guarantee fault on 32-bit.

#include "wtf/Assertions.h"
#include "wtf/BitwiseOperations.h"
#include "wtf/ByteSwap.h"
#include "wtf/CPU.h"
#include "wtf/PageAllocator.h"
#include "wtf/SpinLock.h"

#include <limits.h>

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#include <stdlib.h>
#endif

#if ENABLE(ASSERT)
#include <string.h>
#endif

namespace WTF {

// Maximum size of a partition's mappings. 2046MB. Note that the total amount of
// bytes allocatable at the API will be smaller. This is because things like
// guard pages, metadata, page headers and wasted space come out of the total.
// The 2GB is not necessarily contiguous in virtual address space.
static const size_t kMaxPartitionSize = 2046u * 1024u * 1024u;

// Allocation granularity of sizeof(void*) bytes.
static const size_t kAllocationGranularity = sizeof(void*);
static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;

// Underlying partition storage pages are a power-of-two size. It is typical
// for a partition page to be based on multiple system pages. Most references to
// "page" refer to partition pages.
// We also have the concept of "super pages" -- these are the underlying system
// allocations we make. Super pages contain multiple partition pages inside them
// and include space for a small amount of metadata per partition page.
// Inside super pages, we store "slot spans". A slot span is a contiguous range
// of one or more partition pages that stores allocations of the same size.
// Slot span sizes are adjusted depending on the allocation size, to make sure
// the packing does not lead to unused (wasted) space at the end of the last
// system page of the span. For our current maximum slot span size of 64KB and
// other constant values, we pack _all_ partitionAllocGeneric() sizes perfectly
// up against the end of a system page.
static const size_t kPartitionPageShift = 14; // 16KB
static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
static const size_t kMaxPartitionPagesPerSlotSpan = 4;

// To avoid fragmentation via never-used freelist entries, we hand out partition
// freelist sections gradually, in units of the dominant system page size.
// What we're actually doing is avoiding filling the full partition page
// (typically 16KB) with freelist pointers right away. Writing freelist
// pointers will fault and dirty a private page, which is very wasteful if we
// never actually store objects there.
static const size_t kNumSystemPagesPerPartitionPage = kPartitionPageSize / kSystemPageSize;
static const size_t kMaxSystemPagesPerSlotSpan = kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;

// We reserve virtual address space in 2MB chunks (aligned to 2MB as well).
// These chunks are called "super pages". We do this so that we can store
// metadata in the first few pages of each 2MB aligned section. This leads to
// a very fast free(). We specifically choose 2MB because this virtual address
// block represents a full but single PTE allocation on ARM, ia32 and x64.
static const size_t kSuperPageShift = 21; // 2MB
static const size_t kSuperPageSize = 1 << kSuperPageShift;
static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
static const size_t kNumPartitionPagesPerSuperPage = kSuperPageSize / kPartitionPageSize;

static const size_t kPageMetadataShift = 5; // 32 bytes per partition page.
static const size_t kPageMetadataSize = 1 << kPageMetadataShift;
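
// A rough sketch of the super page layout implied by these constants and the
// helper functions below (illustrative; it assumes 4KB system pages, giving
// 128 partition pages of 16KB each per 2MB super page):
//
//   partition page 0:          | guard page (4KB) | metadata (128 x 32 bytes) | unused |
//   partition pages 1 .. 126:  slot spans holding actual allocations
//   partition page 127:        guard region
//
// Partition page 0 never holds allocations: its first system page is an
// inaccessible guard page and the per-partition-page metadata array begins
// immediately after it (see partitionSuperPageToMetadataArea()). The last
// partition page index is likewise treated as a guard region, which is why
// partitionPointerToPageNoAlignmentCheck() asserts that the index is neither
// 0 nor kNumPartitionPagesPerSuperPage - 1.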

// The following kGeneric* constants apply to the generic variants of the API.
// The "order" of an allocation is closely related to the power-of-two size of
// the allocation. More precisely, the order is the bit index of the
// most-significant bit in the allocation size, where bit numbering starts at
// index 1 for the least-significant bit.
// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
static const size_t kGenericMinBucketedOrder = 4; // 8 bytes.
static const size_t kGenericMaxBucketedOrder = 20; // Largest bucketed order is 1<<(20-1) (storing 512KB -> almost 1MB).
static const size_t kGenericNumBucketedOrders = (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
static const size_t kGenericNumBucketsPerOrderBits = 3; // Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144, 160, ..., 240.
static const size_t kGenericNumBucketsPerOrder = 1 << kGenericNumBucketsPerOrderBits;
static const size_t kGenericSmallestBucket = 1 << (kGenericMinBucketedOrder - 1);
static const size_t kGenericMaxBucketSpacing = 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
static const size_t kGenericMaxBucketed = (1 << (kGenericMaxBucketedOrder - 1)) + ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
static const size_t kGenericMinDirectMappedDownsize = kGenericMaxBucketed + 1; // Limit when downsizing a direct mapping using realloc().
static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize;
static const size_t kBitsPerSizet = sizeof(void*) * CHAR_BIT;
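
// Worked example (illustrative, for a 64-bit build where kBitsPerSizet == 64):
// the most-significant bit of a 4000-byte request is bit 12 when bits are
// numbered from 1 as above, so its order is 12 and that order spans sizes
// 2048..4095. With kGenericNumBucketsPerOrder == 8, the buckets in that order
// are spaced 2048 / 8 == 256 bytes apart: 2048, 2304, ..., 3840. Since
// 4000 > 3840, partitionGenericSizeToBucket() bumps the request up to the next
// bucket, 4096 -- matching the "4000 bytes might be placed into a 4096-byte
// bucket" example in the description above.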

// Constants for the memory reclaim logic.
static const size_t kMaxFreeableSpans = 16;

#if ENABLE(ASSERT)
// These two byte values match tcmalloc.
static const unsigned char kUninitializedByte = 0xAB;
static const unsigned char kFreedByte = 0xCD;
static const uint32_t kCookieValue = 0xDEADBEEFu;
static const size_t kCookieSize = 16; // Handles alignment up to XMM instructions on Intel.
#endif

struct PartitionBucket;
struct PartitionRootBase;

struct PartitionFreelistEntry {
    PartitionFreelistEntry* next;
};

// Some notes on page states. A page can be in one of three major states:
// 1) Active.
// 2) Full.
// 3) Free.
// An active page has available free slots. A full page has no free slots. A
// free page has had its backing memory released back to the system.
// There are two linked lists tracking the pages. The "active page" list is an
// approximation of a list of active pages. It is an approximation because both
// free and full pages may briefly be present in the list until we next do a
// scan over it. The "free page" list is an accurate list of pages which have
// been returned back to the system.
// The significant page transitions are:
// - free() will detect when a full page has a slot free()'d and immediately
// return the page to the head of the active list.
// - free() will detect when a page is fully emptied. It _may_ add it to the
// free list and it _may_ leave it on the active list until a future list scan.
// - malloc() _may_ scan the active page list in order to fulfil the request.
// If it does this, full and free pages encountered will be booted out of the
// active list. If there are no suitable active pages found, a free page (if one
// exists) will be pulled from the free list on to the active list.
struct PartitionPage {
    PartitionFreelistEntry* freelistHead;
    PartitionPage* nextPage;
    PartitionBucket* bucket;
    int16_t numAllocatedSlots; // Deliberately signed, -1 for free page, -n for full pages.
    uint16_t numUnprovisionedSlots;
    uint16_t pageOffset;
    int16_t freeCacheIndex; // -1 if not in the free cache.
};

struct PartitionBucket {
    PartitionPage* activePagesHead; // Accessed most in hot path => goes first.
    PartitionPage* freePagesHead;
    uint32_t slotSize;
    uint16_t numSystemPagesPerSlotSpan;
    uint16_t numFullPages;
};

// An "extent" is a span of consecutive superpages. We link to the partition's
// next extent (if there is one) at the very start of a superpage's metadata
// area.
struct PartitionSuperPageExtentEntry {
    PartitionRootBase* root;
    char* superPageBase;
    char* superPagesEnd;
    PartitionSuperPageExtentEntry* next;
};

struct WTF_EXPORT PartitionRootBase {
    size_t totalSizeOfCommittedPages;
    size_t totalSizeOfSuperPages;
    unsigned numBuckets;
    unsigned maxAllocation;
    bool initialized;
    char* nextSuperPage;
    char* nextPartitionPage;
    char* nextPartitionPageEnd;
    PartitionSuperPageExtentEntry* currentExtent;
    PartitionSuperPageExtentEntry* firstExtent;
    PartitionPage* globalEmptyPageRing[kMaxFreeableSpans];
    size_t globalEmptyPageRingIndex;
    uintptr_t invertedSelf;

    static int gInitializedLock;
    static bool gInitialized;
    static PartitionPage gSeedPage;
    static PartitionBucket gPagedBucket;
};

// Never instantiate a PartitionRoot directly, instead use SizeSpecificPartitionAllocator.
struct PartitionRoot : public PartitionRootBase {
    // The SizeSpecificPartitionAllocator templated class ensures the following is correct.
    ALWAYS_INLINE PartitionBucket* buckets() { return reinterpret_cast<PartitionBucket*>(this + 1); }
    ALWAYS_INLINE const PartitionBucket* buckets() const { return reinterpret_cast<const PartitionBucket*>(this + 1); }
};

// Never instantiate a PartitionRootGeneric directly, instead use PartitionAllocatorGeneric.
struct PartitionRootGeneric : public PartitionRootBase {
    int lock;
    // Some pre-computed constants.
    size_t orderIndexShifts[kBitsPerSizet + 1];
    size_t orderSubIndexMasks[kBitsPerSizet + 1];
    // The bucket lookup table lets us map a size_t to a bucket quickly.
    // The trailing +1 caters for the overflow case for very large allocation sizes.
    // It is one flat array instead of a 2D array because in the 2D world, we'd
    // need to index array[blah][max+1] which risks undefined behavior.
    PartitionBucket* bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1];
    PartitionBucket buckets[kGenericNumBucketedOrders * kGenericNumBucketsPerOrder];
};

// Flags for partitionAllocGenericFlags.
enum PartitionAllocFlags {
    PartitionAllocReturnNull = 1 << 0,
};

WTF_EXPORT void partitionAllocInit(PartitionRoot*, size_t numBuckets, size_t maxAllocation);
WTF_EXPORT bool partitionAllocShutdown(PartitionRoot*);
WTF_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*);
WTF_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*);

WTF_EXPORT NEVER_INLINE void* partitionAllocSlowPath(PartitionRootBase*, int, size_t, PartitionBucket*);
WTF_EXPORT NEVER_INLINE void partitionFreeSlowPath(PartitionPage*);
WTF_EXPORT NEVER_INLINE void* partitionReallocGeneric(PartitionRootGeneric*, void*, size_t);

#ifndef NDEBUG
WTF_EXPORT void partitionDumpStats(const PartitionRoot&);
#endif

ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask(PartitionFreelistEntry* ptr)
{
    // We use bswap on little endian as a fast mask for two reasons:
    // 1) If an object is freed and its vtable used where the attacker doesn't
    // get the chance to run allocations between the free and use, the vtable
    // dereference is likely to fault.
    // 2) If the attacker has a linear buffer overflow and elects to try and
    // corrupt a freelist pointer, partial pointer overwrite attacks are
    // thwarted.
    // For big endian, similar guarantees are arrived at with a negation.
#if CPU(BIG_ENDIAN)
    uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
#else
    uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr));
#endif
    return reinterpret_cast<PartitionFreelistEntry*>(masked);
}
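
// As an illustrative example (assuming a typical 64-bit little-endian
// user-space pointer), masking 0x00007fff12345678 with the byte swap above
// yields 0x78563412ff7f0000. An attacker dereferencing that value directly,
// or overwriting only its low bytes before it is unmasked, ends up with an
// address that is very unlikely to be mapped, so the access should fault.
// Note that the mask is its own inverse: applying partitionFreelistMask()
// twice returns the original pointer, which is why the same function is used
// on both the store and load paths below.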

ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size)
{
#if ENABLE(ASSERT)
    // Add space for cookies, checking for integer overflow.
    ASSERT(size + (2 * kCookieSize) > size);
    size += 2 * kCookieSize;
#endif
    return size;
}

ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size)
{
#if ENABLE(ASSERT)
    // Remove space for cookies.
    ASSERT(size >= 2 * kCookieSize);
    size -= 2 * kCookieSize;
#endif
    return size;
}

ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr)
{
#if ENABLE(ASSERT)
    // The value given to the application is actually just after the cookie.
    ptr = static_cast<char*>(ptr) - kCookieSize;
#endif
    return ptr;
}

ALWAYS_INLINE void partitionCookieWriteValue(void* ptr)
{
#if ENABLE(ASSERT)
    uint32_t* cookiePtr = reinterpret_cast<uint32_t*>(ptr);
    for (size_t i = 0; i < kCookieSize / sizeof(kCookieValue); ++i, ++cookiePtr)
        *cookiePtr = kCookieValue;
#endif
}

ALWAYS_INLINE void partitionCookieCheckValue(void* ptr)
{
#if ENABLE(ASSERT)
    uint32_t* cookiePtr = reinterpret_cast<uint32_t*>(ptr);
    for (size_t i = 0; i < kCookieSize / sizeof(kCookieValue); ++i, ++cookiePtr)
        ASSERT(*cookiePtr == kCookieValue);
#endif
}

ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr)
{
    uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
    ASSERT(!(pointerAsUint & kSuperPageOffsetMask));
    // The metadata area is exactly one system page (the guard page) into the
    // super page.
    return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize);
}

ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr)
{
    uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
    char* superPagePtr = reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask);
    uintptr_t partitionPageIndex = (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift;
    // Index 0 is invalid because it is the metadata area and the last index is invalid because it is a guard page.
    ASSERT(partitionPageIndex);
    ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
    PartitionPage* page = reinterpret_cast<PartitionPage*>(partitionSuperPageToMetadataArea(superPagePtr) + (partitionPageIndex << kPageMetadataShift));
    // Many partition pages can share the same page object. Adjust for that.
    size_t delta = page->pageOffset << kPageMetadataShift;
    page = reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
    return page;
}

ALWAYS_INLINE void* partitionPageToPointer(PartitionPage* page)
{
    uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page);
    uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask);
    ASSERT(superPageOffset > kSystemPageSize);
    ASSERT(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * kPageMetadataSize));
    uintptr_t partitionPageIndex = (superPageOffset - kSystemPageSize) >> kPageMetadataShift;
    // Index 0 is invalid because it is the metadata area and the last index is invalid because it is a guard page.
    ASSERT(partitionPageIndex);
    ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
    uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask);
    void* ret = reinterpret_cast<void*>(superPageBase + (partitionPageIndex << kPartitionPageShift));
    return ret;
}

ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr)
{
    PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr);
    // Checks that the pointer's offset from the page base is a multiple of the slot size.
    ASSERT(!((reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % page->bucket->slotSize));
    return page;
}

ALWAYS_INLINE PartitionRootBase* partitionPageToRoot(PartitionPage* page)
{
    PartitionSuperPageExtentEntry* extentEntry = reinterpret_cast<PartitionSuperPageExtentEntry*>(reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
    return extentEntry->root;
}

ALWAYS_INLINE bool partitionPointerIsValid(void* ptr)
{
    PartitionPage* page = partitionPointerToPage(ptr);
    PartitionRootBase* root = partitionPageToRoot(page);
    return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root);
}

ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket)
{
    PartitionPage* page = bucket->activePagesHead;
    ASSERT(page->numAllocatedSlots >= 0);
    void* ret = page->freelistHead;
    if (LIKELY(ret != 0)) {
        // If these asserts fire, you probably corrupted memory.
        ASSERT(partitionPointerIsValid(ret));
        PartitionFreelistEntry* newHead = partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
        page->freelistHead = newHead;
        ASSERT(!ret || partitionPointerIsValid(ret));
        page->numAllocatedSlots++;
    } else {
        ret = partitionAllocSlowPath(root, flags, size, bucket);
    }
#if ENABLE(ASSERT)
    if (!ret)
        return 0;
    // Fill the uninitialized pattern, and write the cookies.
    page = partitionPointerToPage(ret);
    size_t bucketSize = page->bucket->slotSize;
    memset(ret, kUninitializedByte, bucketSize);
    partitionCookieWriteValue(ret);
    partitionCookieWriteValue(reinterpret_cast<char*>(ret) + bucketSize - kCookieSize);
    // The value given to the application is actually just after the cookie.
    ret = static_cast<char*>(ret) + kCookieSize;
#endif
    return ret;
}

ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, size_t size)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    void* result = malloc(size);
    RELEASE_ASSERT(result);
    return result;
#else
    size = partitionCookieSizeAdjustAdd(size);
    ASSERT(root->initialized);
    size_t index = size >> kBucketShift;
    ASSERT(index < root->numBuckets);
    ASSERT(size == index << kBucketShift);
    PartitionBucket* bucket = &root->buckets()[index];
    return partitionBucketAlloc(root, 0, size, bucket);
#endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
}

ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page)
{
    // If these asserts fire, you probably corrupted memory.
#if ENABLE(ASSERT)
    size_t bucketSize = page->bucket->slotSize;
    partitionCookieCheckValue(ptr);
    partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + bucketSize - kCookieSize);
    memset(ptr, kFreedByte, bucketSize);
#endif
    ASSERT(page->numAllocatedSlots);
    PartitionFreelistEntry* freelistHead = page->freelistHead;
    ASSERT(!freelistHead || partitionPointerIsValid(freelistHead));
    RELEASE_ASSERT(ptr != freelistHead); // Catches an immediate double free.
    ASSERT(!freelistHead || ptr != partitionFreelistMask(freelistHead->next)); // Look for double free one level deeper in debug.
    PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
    entry->next = partitionFreelistMask(freelistHead);
    page->freelistHead = entry;
    --page->numAllocatedSlots;
    if (UNLIKELY(page->numAllocatedSlots <= 0))
        partitionFreeSlowPath(page);
}

ALWAYS_INLINE void partitionFree(void* ptr)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    free(ptr);
#else
    ptr = partitionCookieFreePointerAdjust(ptr);
    ASSERT(partitionPointerIsValid(ptr));
    PartitionPage* page = partitionPointerToPage(ptr);
    partitionFreeWithPage(ptr, page);
#endif
}

ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket(PartitionRootGeneric* root, size_t size)
{
    size_t order = kBitsPerSizet - countLeadingZerosSizet(size);
    // The order index is simply the next few bits after the most significant bit.
    size_t orderIndex = (size >> root->orderIndexShifts[order]) & (kGenericNumBucketsPerOrder - 1);
    // And if the remaining bits are non-zero we must bump the bucket up.
    size_t subOrderIndex = size & root->orderSubIndexMasks[order];
    PartitionBucket* bucket = root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + orderIndex + !!subOrderIndex];
    ASSERT(!bucket->slotSize || bucket->slotSize >= size);
    ASSERT(!(bucket->slotSize % kGenericSmallestBucket));
    return bucket;
}

ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, int flags, size_t size)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    void* result = malloc(size);
    RELEASE_ASSERT(result);
    return result;
#else
    ASSERT(root->initialized);
    size = partitionCookieSizeAdjustAdd(size);
    PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
    spinLockLock(&root->lock);
    void* ret = partitionBucketAlloc(root, flags, size, bucket);
    spinLockUnlock(&root->lock);
    return ret;
#endif
}

ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, size_t size)
{
    return partitionAllocGenericFlags(root, 0, size);
}

ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    free(ptr);
#else
    ASSERT(root->initialized);

    if (UNLIKELY(!ptr))
        return;

    ptr = partitionCookieFreePointerAdjust(ptr);
    ASSERT(partitionPointerIsValid(ptr));
    PartitionPage* page = partitionPointerToPage(ptr);
    spinLockLock(&root->lock);
    partitionFreeWithPage(ptr, page);
    spinLockUnlock(&root->lock);
#endif
}

ALWAYS_INLINE bool partitionBucketIsDirectMapped(PartitionBucket* bucket)
{
    return !bucket->numSystemPagesPerSlotSpan;
}

ALWAYS_INLINE size_t partitionDirectMapSize(size_t size)
{
    // Caller must check that the size is not above the kGenericMaxDirectMapped
    // limit before calling. This also guards against integer overflow in the
    // calculation here.
    ASSERT(size <= kGenericMaxDirectMapped);
    return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
}

ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, size_t size)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    return size;
#else
    ASSERT(root->initialized);
    size = partitionCookieSizeAdjustAdd(size);
    PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
    if (LIKELY(!partitionBucketIsDirectMapped(bucket))) {
        size = bucket->slotSize;
    } else if (size > kGenericMaxDirectMapped) {
        // Too large to allocate => return the size unchanged.
    } else {
        ASSERT(bucket == &PartitionRootBase::gPagedBucket);
        size = partitionDirectMapSize(size);
    }
    return partitionCookieSizeAdjustSubtract(size);
#endif
}

ALWAYS_INLINE bool partitionAllocSupportsGetSize()
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    return false;
#else
    return true;
#endif
}

ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr)
{
    // No need to lock here. Only 'ptr' being freed by another thread could
    // cause trouble, and the caller is responsible for that not happening.
    ASSERT(partitionAllocSupportsGetSize());
    ptr = partitionCookieFreePointerAdjust(ptr);
    ASSERT(partitionPointerIsValid(ptr));
    PartitionPage* page = partitionPointerToPage(ptr);
    size_t size = page->bucket->slotSize;
    return partitionCookieSizeAdjustSubtract(size);
}

// N (or more accurately, N - sizeof(void*)) represents the largest size in
// bytes that will be handled by a SizeSpecificPartitionAllocator.
// Attempts to partitionAlloc() more than this amount will fail.
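// For example (illustrative arithmetic, assuming a 64-bit build where
// kAllocationGranularity == 8): SizeSpecificPartitionAllocator<1024> provides
// kNumBuckets == 128 exact-size buckets at 8-byte granularity and accepts
// allocations up to kMaxAllocation == 1016 bytes; partitionAlloc() picks the
// bucket by indexing with size >> kBucketShift.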
template <size_t N>
class SizeSpecificPartitionAllocator {
public:
    static const size_t kMaxAllocation = N - kAllocationGranularity;
    static const size_t kNumBuckets = N / kAllocationGranularity;
    void init() { partitionAllocInit(&m_partitionRoot, kNumBuckets, kMaxAllocation); }
    bool shutdown() { return partitionAllocShutdown(&m_partitionRoot); }
    ALWAYS_INLINE PartitionRoot* root() { return &m_partitionRoot; }
private:
    PartitionRoot m_partitionRoot;
    PartitionBucket m_actualBuckets[kNumBuckets];
};

class PartitionAllocatorGeneric {
public:
    void init() { partitionAllocGenericInit(&m_partitionRoot); }
    bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); }
    ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; }
private:
    PartitionRootGeneric m_partitionRoot;
};

} // namespace WTF

using WTF::SizeSpecificPartitionAllocator;
using WTF::PartitionAllocatorGeneric;
using WTF::PartitionRoot;
using WTF::partitionAllocInit;
using WTF::partitionAllocShutdown;
using WTF::partitionAlloc;
using WTF::partitionFree;
using WTF::partitionAllocGeneric;
using WTF::partitionFreeGeneric;
using WTF::partitionReallocGeneric;
using WTF::partitionAllocActualSize;
using WTF::partitionAllocSupportsGetSize;
using WTF::partitionAllocGetSize;

#endif // WTF_PartitionAlloc_h