/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef Heap_h
#define Heap_h

#include "platform/PlatformExport.h"
#include "platform/heap/AddressSanitizer.h"
#include "platform/heap/ThreadState.h"
#include "platform/heap/Visitor.h"

#include "wtf/Assertions.h"
#include "wtf/HashCountedSet.h"
#include "wtf/LinkedHashSet.h"
#include "wtf/ListHashSet.h"
#include "wtf/OwnPtr.h"
#include "wtf/PassRefPtr.h"
#include "wtf/ThreadSafeRefCounted.h"

#include <stdint.h>

namespace WebCore {

const size_t blinkPageSizeLog2 = 17;
const size_t blinkPageSize = 1 << blinkPageSizeLog2;
const size_t blinkPageOffsetMask = blinkPageSize - 1;
const size_t blinkPageBaseMask = ~blinkPageOffsetMask;
// Double precision floats are more efficient when 8-byte aligned, so we
// 8-byte align all allocations even on 32-bit platforms.
const size_t allocationGranularity = 8;
const size_t allocationMask = allocationGranularity - 1;
const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity);
const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask);
const size_t maxHeapObjectSize = 1 << 27;

const size_t markBitMask = 1;
const size_t freeListMask = 2;
const size_t debugBitMask = 4;
const size_t sizeMask = ~7;
const uint8_t freelistZapValue = 42;
const uint8_t finalizedZapValue = 24;

class HeapStats;
class PageMemory;
template<ThreadAffinity affinity> class ThreadLocalPersistents;
template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent;
template<typename T> class CrossThreadPersistent;

PLATFORM_EXPORT size_t osPageSize();

// Blink heap pages are set up with a guard page before and after the
// payload.
inline size_t blinkPagePayloadSize()
{
    return blinkPageSize - 2 * osPageSize();
}

// Blink heap pages are aligned to the Blink heap page size.
// Therefore, the start of a Blink page can be obtained by
// rounding down to the Blink page size.
inline Address roundToBlinkPageStart(Address address)
{
    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
}

inline Address roundToBlinkPageEnd(Address address)
{
    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize;
}
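
// Worked example (illustrative): with blinkPageSize = 1 << 17 = 0x20000,
// roundToBlinkPageStart(0x25000) yields 0x20000, while
// roundToBlinkPageEnd(0x25000) yields 0x40000. An address that is already
// page aligned is its own page end: roundToBlinkPageEnd(0x40000) = 0x40000,
// because the rounding is computed from address - 1.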

// Compute the amount of padding we have to add to a header to make
// the size of the header plus the padding a multiple of 8 bytes.
template<typename Header>
inline size_t headerPadding()
{
    return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity;
}
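
// Worked example (illustrative): for a 20-byte header, headerPadding()
// returns (8 - (20 % 8)) % 8 = 4, so header plus padding is 24 bytes. For a
// header whose size is already a multiple of 8, the outer modulo makes the
// padding 0 rather than 8.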

// Masks an address down to the enclosing blink page base address.
inline Address blinkPageAddress(Address address)
{
    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
}

#ifndef NDEBUG

// Sanity check for a page header address: the address of the page
// header should be OS page size away from being Blink page size
// aligned.
inline bool isPageHeaderAddress(Address address)
{
    return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize());
}
#endif

// Mask an address down to the enclosing oilpan heap page base address.
// All oilpan heap pages are aligned at blinkPageBase plus an OS page size.
// FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps.
// This is only exported to enable tests in HeapTest.cpp.
PLATFORM_EXPORT inline Address pageHeaderAddress(Address address)
{
    return blinkPageAddress(address) + osPageSize();
}

// Common header for heap pages.
class BaseHeapPage {
public:
    BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
        : m_storage(storage)
        , m_gcInfo(gcInfo)
        , m_threadState(state)
        , m_padding(0)
    {
        ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
    }

    // Check if the given address points to an object in this
    // heap page. If so, find the start of that object and mark it
    // using the given Visitor. Otherwise do nothing. The pointer must
    // be within the same blinkPageSize-aligned region as this page.
    //
    // This is used during conservative stack scanning to
    // conservatively mark all objects that could be referenced from
    // the stack.
    virtual void checkAndMarkPointer(Visitor*, Address) = 0;

#if ENABLE(GC_TRACING)
    virtual const GCInfo* findGCInfo(Address) = 0;
#endif

    Address address() { return reinterpret_cast<Address>(this); }
    PageMemory* storage() const { return m_storage; }
    ThreadState* threadState() const { return m_threadState; }
    const GCInfo* gcInfo() { return m_gcInfo; }
    virtual bool isLargeObject() { return false; }

private:
    // Accessor to silence unused warnings for the m_padding field.
    intptr_t padding() const { return m_padding; }

    PageMemory* m_storage;
    const GCInfo* m_gcInfo;
    ThreadState* m_threadState;
    // Pointer sized integer to ensure proper alignment of the
    // HeapPage header. This can be used as a bit field if we need
    // to associate more information with pages.
    intptr_t m_padding;
};

// Large allocations are allocated as separate objects and linked in a
// list.
//
// In order to use the same memory allocation routines for everything
// allocated in the heap, large objects are considered heap pages
// containing only one object.
//
// The layout of a large heap object is as follows:
//
// | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload |
template<typename Header>
class LargeHeapObject : public BaseHeapPage {
public:
    LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) : BaseHeapPage(storage, gcInfo, state)
    {
        COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned);
    }

    virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE;
    virtual bool isLargeObject() OVERRIDE { return true; }

#if ENABLE(GC_TRACING)
    virtual const GCInfo* findGCInfo(Address address)
    {
        if (!objectContains(address))
            return 0;
        return gcInfo();
    }
#endif

    void link(LargeHeapObject<Header>** previousNext)
    {
        m_next = *previousNext;
        *previousNext = this;
    }

    void unlink(LargeHeapObject<Header>** previousNext)
    {
        *previousNext = m_next;
    }

    // The LargeHeapObject pseudo-page contains one actual object. Determine
    // whether the pointer is within that object.
    bool objectContains(Address object)
    {
        return (payload() <= object) && (object < address() + size());
    }

    // Returns true for any address that is on one of the pages that this
    // large object uses. That ensures that we can use a negative result to
    // populate the negative page cache.
    bool contains(Address object)
    {
        return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size());
    }

    LargeHeapObject<Header>* next()
    {
        return m_next;
    }

    size_t size()
    {
        return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
    }

    Address payload() { return heapObjectHeader()->payload(); }
    size_t payloadSize() { return heapObjectHeader()->payloadSize(); }

    Header* heapObjectHeader()
    {
        Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
        return reinterpret_cast<Header*>(headerAddress);
    }

    bool isMarked();
    void unmark();
    void getStats(HeapStats&);
    void mark(Visitor*);
    void finalize();

private:
    friend class ThreadHeap<Header>;

    LargeHeapObject<Header>* m_next;
};

// The BasicObjectHeader is the minimal object header. It is used when
// encountering heap space of size allocationGranularity to mark it as
// a freelist entry.
class PLATFORM_EXPORT BasicObjectHeader {
public:
    NO_SANITIZE_ADDRESS
    explicit BasicObjectHeader(size_t encodedSize)
        : m_size(encodedSize) { }

    static size_t freeListEncodedSize(size_t size) { return size | freeListMask; }

    NO_SANITIZE_ADDRESS
    bool isFree() { return m_size & freeListMask; }

    NO_SANITIZE_ADDRESS
    size_t size() const { return m_size & sizeMask; }

protected:
    size_t m_size;
};
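
// Worked example (illustrative): freeListEncodedSize(48) sets the free-list
// bit: 48 | 2 = 50. On such a header isFree() is true, and size() masks the
// low bits back off, returning 48.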

// Our heap object layout is layered with the HeapObjectHeader closest
// to the payload. It can be wrapped in a FinalizedObjectHeader if the
// object is on the GeneralHeap and not on a specific TypedHeap.
// Finally, if the object is a large object (> blinkPageSize/2), it is
// wrapped with a LargeObjectHeader.
//
// Object memory layout:
// [ LargeObjectHeader | ] [ FinalizedObjectHeader | ] HeapObjectHeader | payload
// The [ ] notation denotes that the LargeObjectHeader and the FinalizedObjectHeader
// are independently optional.
class PLATFORM_EXPORT HeapObjectHeader : public BasicObjectHeader {
public:
    NO_SANITIZE_ADDRESS
    explicit HeapObjectHeader(size_t encodedSize)
        : BasicObjectHeader(encodedSize)
#ifndef NDEBUG
        , m_magic(magic)
#endif
    { }

    NO_SANITIZE_ADDRESS
    HeapObjectHeader(size_t encodedSize, const GCInfo*)
        : BasicObjectHeader(encodedSize)
#ifndef NDEBUG
        , m_magic(magic)
#endif
    { }

    inline void checkHeader() const;
    inline bool isMarked() const;

    inline void mark();
    inline void unmark();

    inline const GCInfo* gcInfo() { return 0; }

    inline Address payload();
    inline size_t payloadSize();
    inline Address payloadEnd();

    inline void setDebugMark();
    inline void clearDebugMark();
    inline bool hasDebugMark() const;

    // Zap magic number with a new magic number that means there was once an
    // object allocated here, but it was freed because nobody marked it during
    // GC.
    void zapMagic();

    static void finalize(const GCInfo*, Address, size_t);
    static HeapObjectHeader* fromPayload(const void*);

    static const intptr_t magic = 0xc0de247;
    static const intptr_t zappedMagic = 0xC0DEdead;
    // The zap value for vtables should be < 4K to ensure it cannot be
    // used for dispatch.
    static const intptr_t zappedVTable = 0xd0d;

private:
#ifndef NDEBUG
    intptr_t m_magic;
#endif
};

const size_t objectHeaderSize = sizeof(HeapObjectHeader);

// Each object on the GeneralHeap needs to carry a pointer to its
// own GCInfo structure for tracing and potential finalization.
class PLATFORM_EXPORT FinalizedHeapObjectHeader : public HeapObjectHeader {
public:
    NO_SANITIZE_ADDRESS
    FinalizedHeapObjectHeader(size_t encodedSize, const GCInfo* gcInfo)
        : HeapObjectHeader(encodedSize)
        , m_gcInfo(gcInfo)
    {
    }

    inline Address payload();
    inline size_t payloadSize();

    NO_SANITIZE_ADDRESS
    const GCInfo* gcInfo() { return m_gcInfo; }

    NO_SANITIZE_ADDRESS
    TraceCallback traceCallback() { return m_gcInfo->m_trace; }

    void finalize();

    NO_SANITIZE_ADDRESS
    inline bool hasFinalizer() { return m_gcInfo->hasFinalizer(); }

    static FinalizedHeapObjectHeader* fromPayload(const void*);

    NO_SANITIZE_ADDRESS
    bool hasVTable() { return m_gcInfo->hasVTable(); }

private:
    const GCInfo* m_gcInfo;
};

const size_t finalizedHeaderSize = sizeof(FinalizedHeapObjectHeader);

class FreeListEntry : public HeapObjectHeader {
public:
    NO_SANITIZE_ADDRESS
    explicit FreeListEntry(size_t size)
        : HeapObjectHeader(freeListEncodedSize(size))
        , m_next(0)
    {
#if !defined(NDEBUG) && !ASAN
        // Zap free area with asterisks, aka 0x2a2a2a2a.
        // For ASAN don't zap since we keep accounting in the freelist entry.
        for (size_t i = sizeof(*this); i < size; i++)
            reinterpret_cast<Address>(this)[i] = freelistZapValue;
        ASSERT(size >= objectHeaderSize);
        zapMagic();
#endif
    }

    Address address() { return reinterpret_cast<Address>(this); }

    NO_SANITIZE_ADDRESS
    void unlink(FreeListEntry** prevNext)
    {
        *prevNext = m_next;
        m_next = 0;
    }

    NO_SANITIZE_ADDRESS
    void link(FreeListEntry** prevNext)
    {
        m_next = *prevNext;
        *prevNext = this;
    }

#if defined(ADDRESS_SANITIZER)
    NO_SANITIZE_ADDRESS
    bool shouldAddToFreeList()
    {
        // Init if not already magic.
        if ((m_asanMagic & ~asanDeferMemoryReuseMask) != asanMagic) {
            m_asanMagic = asanMagic | asanDeferMemoryReuseCount;
            return false;
        }
        // Decrement if count part of asanMagic > 0.
        if (m_asanMagic & asanDeferMemoryReuseMask)
            m_asanMagic--;
        return !(m_asanMagic & asanDeferMemoryReuseMask);
    }
#endif

private:
    FreeListEntry* m_next;
#if defined(ADDRESS_SANITIZER)
    unsigned m_asanMagic;
#endif
};

// Representation of Blink heap pages.
//
// Pages are specialized on the type of header on the object they
// contain. If a heap page only contains a certain type of object, all
// of the objects will have the same GCInfo pointer and therefore that
// pointer can be stored in the HeapPage instead of in the header of
// each object. In that case objects have only a HeapObjectHeader and
// not a FinalizedHeapObjectHeader, saving a word per object.
template<typename Header>
class HeapPage : public BaseHeapPage {
public:
    HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*);

    void link(HeapPage**);
    static void unlink(HeapPage*, HeapPage**);

    bool isEmpty();

    // Returns true for all addresses in the whole blinkPageSize page that
    // this page is on, including the header and the unmapped guard page at
    // the start. That ensures the result can be used to populate the
    // negative page cache.
    bool contains(Address addr)
    {
        Address blinkPageStart = roundToBlinkPageStart(address());
        ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size.
        return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize;
    }

    HeapPage* next() { return m_next; }

    Address payload()
    {
        return address() + sizeof(*this) + headerPadding<Header>();
    }

    static size_t payloadSize()
    {
        return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask;
    }

    Address end() { return payload() + payloadSize(); }

    void getStats(HeapStats&);
    void clearMarks();
    void sweep();
    void clearObjectStartBitMap();
    void finalize(Header*);
    virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE;
#if ENABLE(GC_TRACING)
    const GCInfo* findGCInfo(Address) OVERRIDE;
#endif
    ThreadHeap<Header>* heap() { return m_heap; }
#if defined(ADDRESS_SANITIZER)
    void poisonUnmarkedObjects();
#endif

protected:
    Header* findHeaderFromAddress(Address);
    void populateObjectStartBitMap();
    bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
    TraceCallback traceCallback(Header*);
    bool hasVTable(Header*);

    HeapPage<Header>* m_next;
    ThreadHeap<Header>* m_heap;
    bool m_objectStartBitMapComputed;
    uint8_t m_objectStartBitMap[reservedForObjectBitMap];

    friend class ThreadHeap<Header>;
};

class AddressEntry {
public:
    AddressEntry() : m_address(0) { }

    explicit AddressEntry(Address address) : m_address(address) { }

    Address address() const { return m_address; }

private:
    Address m_address;
};

class PositiveEntry : public AddressEntry {
public:
    PositiveEntry()
        : AddressEntry()
        , m_containingPage(0)
    {
    }

    PositiveEntry(Address address, BaseHeapPage* containingPage)
        : AddressEntry(address)
        , m_containingPage(containingPage)
    {
    }

    BaseHeapPage* result() const { return m_containingPage; }

    typedef BaseHeapPage* LookupResult;

private:
    BaseHeapPage* m_containingPage;
};

class NegativeEntry : public AddressEntry {
public:
    NegativeEntry() : AddressEntry() { }

    NegativeEntry(Address address, bool) : AddressEntry(address) { }

    bool result() const { return true; }

    typedef bool LookupResult;
};

// A HeapExtentCache provides a fast way of taking an arbitrary
// pointer-sized word, and determining whether it can be interpreted
// as a pointer to an area that is managed by the garbage collected
// Blink heap. There is a cache of 'pages' that have previously been
// determined to be wholly inside the heap. The size of these pages must be
// smaller than the allocation alignment of the heap pages. We determine
// on-heap-ness by rounding down the pointer to the nearest page and looking up
// the page in the cache. If there is a miss in the cache we can ask the heap
// to determine the status of the pointer by iterating over all of the heap.
// The result is then cached in the two-way associative page cache.
//
// A HeapContainsCache is a positive cache. Therefore, it must be flushed when
// memory is removed from the Blink heap. The HeapDoesNotContainCache is a
// negative cache, so it must be flushed when memory is added to the heap.
template<typename Entry>
class HeapExtentCache {
public:
    HeapExtentCache()
        : m_entries(adoptArrayPtr(new Entry[HeapExtentCache::numberOfEntries]))
        , m_hasEntries(false)
    {
    }

    void flush();
    bool contains(Address);
    bool isEmpty() { return !m_hasEntries; }

    // Perform a lookup in the cache.
    //
    // If lookup returns null/false the argument address was not found in
    // the cache and it is unknown if the address is in the Blink
    // heap.
    //
    // If lookup returns true/a page, the argument address was found in the
    // cache. For the HeapContainsCache this means the address is in the heap.
    // For the HeapDoesNotContainCache this means the address is not in the
    // heap.
    PLATFORM_EXPORT typename Entry::LookupResult lookup(Address);

    // Add an entry to the cache.
    PLATFORM_EXPORT void addEntry(Address, typename Entry::LookupResult);

private:
    static const int numberOfEntriesLog2 = 12;
    static const int numberOfEntries = 1 << numberOfEntriesLog2;

    static size_t hash(Address);

    WTF::OwnPtr<Entry[]> m_entries;
    bool m_hasEntries;

    friend class ThreadState;
};

// Normally these would be typedefs instead of subclasses, but that makes them
// very hard to forward declare.
class HeapContainsCache : public HeapExtentCache<PositiveEntry> {
public:
    BaseHeapPage* lookup(Address);
    void addEntry(Address, BaseHeapPage*);
};

class HeapDoesNotContainCache : public HeapExtentCache<NegativeEntry> { };
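
// Usage sketch (illustrative): a caller consults the cache before falling
// back to a slow heap walk, then records the answer. findPageSlowly() is a
// hypothetical stand-in for the slow path.
//
//   BaseHeapPage* page = containsCache->lookup(address);
//   if (!page && (page = findPageSlowly(address)))
//       containsCache->addEntry(address, page);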

// FIXME: This is currently used by the WebAudio code.
// We should attempt to restructure the WebAudio code so that the main thread
// alone determines lifetime and receives messages about lifetime from the
// audio thread.
template<typename T>
class ThreadSafeRefCountedGarbageCollected : public GarbageCollectedFinalized<T>, public WTF::ThreadSafeRefCountedBase {
    WTF_MAKE_NONCOPYABLE(ThreadSafeRefCountedGarbageCollected);

public:
    ThreadSafeRefCountedGarbageCollected()
    {
        m_keepAlive = adoptPtr(new CrossThreadPersistent<T>(static_cast<T*>(this)));
    }

    // Override ref to deal with a case where a reference count goes up
    // from 0 to 1. This can happen in the following scenario:
    // (1) The reference count becomes 0, but on-stack pointers keep references to the object.
    // (2) The on-stack pointer is assigned to a RefPtr. The reference count becomes 1.
    // In this case, we have to resurrect m_keepAlive.
    void ref()
    {
        MutexLocker lock(m_mutex);
        if (UNLIKELY(!refCount())) {
            ASSERT(!m_keepAlive);
            m_keepAlive = adoptPtr(new CrossThreadPersistent<T>(static_cast<T*>(this)));
        }
        WTF::ThreadSafeRefCountedBase::ref();
    }

    // Override deref to deal with our own deallocation based on ref counting.
    void deref()
    {
        MutexLocker lock(m_mutex);
        if (derefBase()) {
            ASSERT(m_keepAlive);
            m_keepAlive.clear();
        }
    }

    using GarbageCollectedFinalized<T>::operator new;
    using GarbageCollectedFinalized<T>::operator delete;

protected:
    ~ThreadSafeRefCountedGarbageCollected() { }

private:
    OwnPtr<CrossThreadPersistent<T> > m_keepAlive;
    mutable Mutex m_mutex;
};
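
// Usage sketch (illustrative; ExampleAudioBus is hypothetical): the object is
// constructed with a reference count of 1 and a keep-alive handle, so it can
// be handed to RefPtr-based code while living on the managed heap.
//
//   class ExampleAudioBus : public ThreadSafeRefCountedGarbageCollected<ExampleAudioBus> { ... };
//
//   RefPtr<ExampleAudioBus> bus = adoptRef(new ExampleAudioBus);
//   // When the last RefPtr drops, m_keepAlive is cleared and the garbage
//   // collector reclaims the object once no heap references remain.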

// The CallbackStack contains all the visitor callbacks used to trace and mark
// objects. A specific CallbackStack instance contains at most bufferSize elements.
// If more space is needed a new CallbackStack instance is created and chained
// together with the former instance, i.e. a logical CallbackStack can be made
// up of multiple chained CallbackStack object instances.
// There are two logical callback stacks: one containing all the marking
// callbacks and one containing the weak pointer callbacks.
class CallbackStack {
public:
    CallbackStack(CallbackStack** first)
        : m_limit(&(m_buffer[bufferSize]))
        , m_current(&(m_buffer[0]))
        , m_next(*first)
    {
#ifndef NDEBUG
        clearUnused();
#endif
        *first = this;
    }

    ~CallbackStack();
    void clearUnused();

    void assertIsEmpty();

    class Item {
    public:
        Item() { }
        Item(void* object, VisitorCallback callback)
            : m_object(object)
            , m_callback(callback)
        {
        }
        void* object() { return m_object; }
        VisitorCallback callback() { return m_callback; }

    private:
        void* m_object;
        VisitorCallback m_callback;
    };

    static void init(CallbackStack** first);
    static void shutdown(CallbackStack** first);
    bool popAndInvokeCallback(CallbackStack** first, Visitor*);

    Item* allocateEntry(CallbackStack** first)
    {
        if (m_current < m_limit)
            return m_current++;
        return (new CallbackStack(first))->allocateEntry(first);
    }

private:
    static const size_t bufferSize = 8000;
    Item m_buffer[bufferSize];
    Item* m_limit;
    Item* m_current;
    CallbackStack* m_next;
};

// Non-template super class used to pass a heap around to other classes.
class BaseHeap {
public:
    virtual ~BaseHeap() { }

    // Find the page in this thread heap containing the given
    // address. Returns 0 if the address is not contained in any
    // page in this thread heap.
    virtual BaseHeapPage* heapPageFromAddress(Address) = 0;

#if ENABLE(GC_TRACING)
    virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0;
#endif

    // Sweep this part of the Blink heap. This finalizes dead objects
    // and builds freelists for all the unused memory.
    virtual void sweep() = 0;

    // Forcefully finalize all objects in this part of the Blink heap
    // (potentially with the exception of one object). This is used
    // during thread termination to make sure that all objects for the
    // dying thread are finalized.
    virtual void assertEmpty() = 0;

    virtual void clearFreeLists() = 0;
    virtual void clearMarks() = 0;
#ifndef NDEBUG
    virtual void getScannedStats(HeapStats&) = 0;
#endif

    virtual void makeConsistentForGC() = 0;
    virtual bool isConsistentForGC() = 0;

    // Returns a bucket number for inserting a FreeListEntry of a
    // given size. All FreeListEntries in the given bucket, n, have
    // size >= 2^n.
    static int bucketIndexForSize(size_t);
};
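
// Worked example (illustrative): under the invariant above, a 48-byte
// FreeListEntry belongs in bucket 5, since 2^5 = 32 <= 48 < 64 = 2^6, while
// a 64-byte entry belongs in bucket 6.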

// Thread heaps represent a part of the per-thread Blink heap.
//
// Each Blink thread has a number of thread heaps: one general heap
// that contains any type of object and a number of heaps specialized
// for specific object types (such as Node).
//
// Each thread heap contains the functionality to allocate new objects
// (potentially adding new pages to the heap), to find and mark
// objects during conservative stack scanning and to sweep the set of
// pages after a GC.
template<typename Header>
class ThreadHeap : public BaseHeap {
public:
    ThreadHeap(ThreadState*);
    virtual ~ThreadHeap();

    virtual BaseHeapPage* heapPageFromAddress(Address);
#if ENABLE(GC_TRACING)
    virtual const GCInfo* findGCInfoOfLargeHeapObject(Address);
#endif
    virtual void sweep();
    virtual void assertEmpty();
    virtual void clearFreeLists();
    virtual void clearMarks();
#ifndef NDEBUG
    virtual void getScannedStats(HeapStats&);
#endif

    virtual void makeConsistentForGC();
    virtual bool isConsistentForGC();

    ThreadState* threadState() { return m_threadState; }
    HeapStats& stats() { return m_threadState->stats(); }
    void flushHeapContainsCache()
    {
        m_threadState->heapContainsCache()->flush();
    }

    inline Address allocate(size_t, const GCInfo*);
    void addToFreeList(Address, size_t);
    void addPageToPool(HeapPage<Header>*);
    inline static size_t roundedAllocationSize(size_t size)
    {
        return allocationSizeFromSize(size) - sizeof(Header);
    }

private:
    // Once pages have been used for one thread heap they will never
    // be reused for another thread heap. Instead of unmapping, we add
    // the pages to a pool of pages to be reused later by this thread
    // heap. This is done as a security feature to avoid type
    // confusion. The heap is type segregated by having separate
    // thread heaps for various types of objects. Holding on to pages
    // ensures that the same virtual address space cannot be used for
    // objects of another type than the type contained in this thread
    // heap.
    class PagePoolEntry {
    public:
        PagePoolEntry(PageMemory* storage, PagePoolEntry* next)
            : m_storage(storage)
            , m_next(next)
        { }

        PageMemory* storage() { return m_storage; }
        PagePoolEntry* next() { return m_next; }

    private:
        PageMemory* m_storage;
        PagePoolEntry* m_next;
    };

    PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*);
    static size_t allocationSizeFromSize(size_t);
    void addPageToHeap(const GCInfo*);
    PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
    Address currentAllocationPoint() const { return m_currentAllocationPoint; }
    size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
    bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
    void setAllocationPoint(Address point, size_t size)
    {
        ASSERT(!point || heapPageFromAddress(point));
        ASSERT(size <= HeapPage<Header>::payloadSize());
        m_currentAllocationPoint = point;
        m_remainingAllocationSize = size;
    }
    void ensureCurrentAllocation(size_t, const GCInfo*);
    bool allocateFromFreeList(size_t);

    void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**);

    void allocatePage(const GCInfo*);
    PageMemory* takePageFromPool();
    void clearPagePool();
    void deletePages();

    Address m_currentAllocationPoint;
    size_t m_remainingAllocationSize;

    HeapPage<Header>* m_firstPage;
    LargeHeapObject<Header>* m_firstLargeHeapObject;

    int m_biggestFreeListIndex;
    ThreadState* m_threadState;

    // All FreeListEntries in the nth list have size >= 2^n.
    FreeListEntry* m_freeLists[blinkPageSizeLog2];

    // List of pages that have been previously allocated, but are now
    // unused.
    PagePoolEntry* m_pagePool;
};

class PLATFORM_EXPORT Heap {
public:
    static void init();
    static void shutdown();
    static void doShutdown();

    static BaseHeapPage* contains(Address);
    static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
    static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }

    // Push a trace callback on the marking stack.
    static void pushTraceCallback(void* containerObject, TraceCallback);

    // Add a weak pointer callback to the weak callback work list. General
    // object pointer callbacks are added to a thread local weak callback work
    // list and the callback is called on the thread that owns the object, with
    // the closure pointer as an argument. Most of the time, the closure and
    // the containerObject can be the same thing, but the containerObject is
    // constrained to be on the heap, since the heap is used to identify the
    // correct thread.
    static void pushWeakObjectPointerCallback(void* closure, void* containerObject, WeakPointerCallback);

    // Similar to the more general pushWeakObjectPointerCallback, but cell
    // pointer callbacks are added to a static callback work list and the weak
    // callback is performed on the thread performing garbage collection. This
    // is OK because cells are just cleared and no deallocation can happen.
    static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback);

    // Pop the top of the marking stack and call the callback with the visitor
    // and the object. Returns false when there is nothing more to do.
    static bool popAndInvokeTraceCallback(Visitor*);

    // Remove an item from the weak callback work list and call the callback
    // with the visitor and the closure pointer. Returns false when there is
    // nothing more to do.
    static bool popAndInvokeWeakPointerCallback(Visitor*);

    template<typename T> static Address allocate(size_t);
    template<typename T> static Address reallocate(void* previous, size_t);

    static void collectGarbage(ThreadState::StackState);
    static void collectAllGarbage();
    static void setForcePreciseGCForTesting();

    static void prepareForGC();

    // Conservatively checks whether an address is a pointer into any of the
    // thread heaps. If so, marks the object pointed to as live.
    static Address checkAndMarkPointer(Visitor*, Address);

#if ENABLE(GC_TRACING)
    // Dump the path to specified object on the next GC. This method is to be invoked from GDB.
    static void dumpPathToObjectOnNextGC(void* p);

    // Forcibly find GCInfo of the object at Address.
    // This is slow and should only be used for debug purposes.
    // It involves finding the heap page and scanning the heap page for an object header.
    static const GCInfo* findGCInfo(Address);

    static String createBacktraceString();
#endif

    // Collect heap stats for all threads attached to the Blink
    // garbage collector. Should only be called during garbage
    // collection where threads are known to be at safe points.
    static void getStats(HeapStats*);

    static void getHeapSpaceSize(uint64_t*, uint64_t*);

    static bool isConsistentForGC();
    static void makeConsistentForGC();

    static void flushHeapDoesNotContainCache();
    static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); }

    // Return true if the last GC found a pointer into a heap page
    // during conservative scanning.
    static bool lastGCWasConservative() { return s_lastGCWasConservative; }

private:
    static Visitor* s_markingVisitor;

    static CallbackStack* s_markingStack;
    static CallbackStack* s_weakCallbackStack;
    static HeapDoesNotContainCache* s_heapDoesNotContainCache;
    static bool s_shutdownCalled;
    static bool s_lastGCWasConservative;
    friend class ThreadState;
};

// The NoAllocationScope class is used in debug mode to catch unwanted
// allocations, e.g. allocations during GC.
template<ThreadAffinity Affinity>
class NoAllocationScope {
public:
    NoAllocationScope() : m_active(true) { enter(); }

    explicit NoAllocationScope(bool active) : m_active(active) { enter(); }

    NoAllocationScope(const NoAllocationScope& other) : m_active(other.m_active) { enter(); }

    NoAllocationScope& operator=(const NoAllocationScope& other)
    {
        release();
        m_active = other.m_active;
        enter();
        return *this;
    }

    ~NoAllocationScope() { release(); }

    void release()
    {
        if (m_active) {
            ThreadStateFor<Affinity>::state()->leaveNoAllocationScope();
            m_active = false;
        }
    }

private:
    void enter() const
    {
        if (m_active)
            ThreadStateFor<Affinity>::state()->enterNoAllocationScope();
    }

    bool m_active;
};
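
// Usage sketch (illustrative): allocation is forbidden for the lifetime of
// the scope, assuming AnyThread is the affinity of the current thread:
//
//   {
//       NoAllocationScope<AnyThread> scope;
//       // ... code that must not allocate from the managed heap ...
//   } // The scope is released here and allocation is allowed again.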

// Base class for objects allocated in the Blink garbage-collected
// heap.
//
// Defines a 'new' operator that allocates the memory in the
// heap. 'delete' should not be called on objects that inherit from
// GarbageCollected.
//
// Instances of GarbageCollected will *NOT* get finalized. Their
// destructor will not be called. Therefore, only classes that have
// trivial destructors with no semantic meaning (including all their
// subclasses) should inherit from GarbageCollected. If there are
// non-trivial destructors in a given class or any of its subclasses,
// GarbageCollectedFinalized should be used, which guarantees that the
// destructor is called on an instance when the garbage collector
// determines that it is no longer reachable.
template<typename T>
class GarbageCollected {
    WTF_MAKE_NONCOPYABLE(GarbageCollected);

    // For now direct allocation of arrays on the heap is not allowed.
    void* operator new[](size_t size);
#if OS(WIN) && COMPILER(MSVC)
    // Due to some quirkiness in the MSVC compiler we have to provide
    // the delete[] operator in the GarbageCollected subclasses as it
    // is called when a class is exported in a DLL.
protected:
    void operator delete[](void* p)
    {
        ASSERT_NOT_REACHED();
    }
#else
    void operator delete[](void* p);
#endif
public:
    typedef T GarbageCollectedBase;

    void* operator new(size_t size)
    {
        return Heap::allocate<T>(size);
    }

    void operator delete(void* p)
    {
        ASSERT_NOT_REACHED();
    }

protected:
    GarbageCollected()
    {
    }
};
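
// Usage sketch (illustrative; ExampleNode is hypothetical): a trivially
// destructible class allocated with the heap-backed operator new and traced
// via Member fields:
//
//   class ExampleNode : public GarbageCollected<ExampleNode> {
//   public:
//       void trace(Visitor* visitor) { visitor->trace(m_next); }
//   private:
//       Member<ExampleNode> m_next;
//   };
//
//   ExampleNode* node = new ExampleNode; // lives on the Blink heap; never delete it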

// Base class for objects allocated in the Blink garbage-collected
// heap.
//
// Defines a 'new' operator that allocates the memory in the
// heap. 'delete' should not be called on objects that inherit from
// GarbageCollected.
//
// Instances of GarbageCollectedFinalized will have their destructor
// called when the garbage collector determines that the object is no
// longer reachable.
template<typename T>
class GarbageCollectedFinalized : public GarbageCollected<T> {
    WTF_MAKE_NONCOPYABLE(GarbageCollectedFinalized);

protected:
    // finalizeGarbageCollectedObject is called when the object is
    // freed from the heap. By default finalization means calling the
    // destructor on the object. finalizeGarbageCollectedObject can be
    // overridden to support calling the destructor of a
    // subclass. This is useful for objects without vtables that
    // require explicit dispatching. The name is intentionally a bit
    // long to make name conflicts less likely.
    void finalizeGarbageCollectedObject()
    {
        static_cast<T*>(this)->~T();
    }

    GarbageCollectedFinalized() { }
    ~GarbageCollectedFinalized() { }

    template<typename U> friend struct HasFinalizer;
    template<typename U, bool> friend struct FinalizerTraitImpl;
};
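
// Usage sketch (illustrative; ExampleResource and releaseHandle() are
// hypothetical): a class with a non-trivial destructor must derive from
// GarbageCollectedFinalized so the destructor runs when the object is swept:
//
//   class ExampleResource : public GarbageCollectedFinalized<ExampleResource> {
//   public:
//       ~ExampleResource() { releaseHandle(); } // runs at finalization
//       void trace(Visitor*) { }
//   };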

// Base class for objects that are in the Blink garbage-collected heap
// and are still reference counted.
//
// This class should be used sparingly and only to gradually move
// objects from being reference counted to being managed by the Blink
// garbage collector.
//
// While the current reference counting keeps one of these objects
// alive it will have a Persistent handle to itself allocated so we
// will not reclaim the memory. When the reference count reaches 0 the
// persistent handle will be deleted. When the garbage collector
// determines that there are no other references to the object it will
// be reclaimed and the destructor of the reclaimed object will be
// called at that time.
template<typename T>
class RefCountedGarbageCollected : public GarbageCollectedFinalized<T> {
    WTF_MAKE_NONCOPYABLE(RefCountedGarbageCollected);

public:
    RefCountedGarbageCollected()
        : m_refCount(1)
    {
        makeKeepAlive();
    }

    // Implement method to increase reference count for use with
    // RefPtrs.
    //
    // In contrast to the normal WTF::RefCounted, the reference count
    // can reach 0 and increase again. This happens in the following
    // scenario:
    //
    // (1) The reference count becomes 0, but members, persistents, or
    //     on-stack pointers keep references to the object.
    //
    // (2) The pointer is assigned to a RefPtr again and the reference
    //     count becomes 1.
    //
    // In this case, we have to resurrect m_keepAlive.
    void ref()
    {
        if (UNLIKELY(!m_refCount)) {
            ASSERT(ThreadStateFor<ThreadingTrait<T>::Affinity>::state()->contains(reinterpret_cast<Address>(this)));
            makeKeepAlive();
        }
        ++m_refCount;
    }

    // Implement method to decrease reference count for use with
    // RefPtrs.
    //
    // In contrast to the normal WTF::RefCounted implementation, the
    // object itself is not deleted when the reference count reaches
    // 0. Instead, the keep-alive persistent handle is deallocated so
    // that the object can be reclaimed when the garbage collector
    // determines that there are no other references to the object.
    void deref()
    {
        ASSERT(m_refCount > 0);
        if (!--m_refCount) {
            delete m_keepAlive;
            m_keepAlive = 0;
        }
    }

    bool hasOneRef()
    {
        return m_refCount == 1;
    }

protected:
    ~RefCountedGarbageCollected() { }

private:
    void makeKeepAlive()
    {
        ASSERT(!m_keepAlive);
        m_keepAlive = new Persistent<T>(static_cast<T*>(this));
    }

    int m_refCount;
    Persistent<T>* m_keepAlive;
};

template<typename T>
T* adoptRefCountedGarbageCollected(T* ptr)
{
    ASSERT(ptr->hasOneRef());
    ptr->deref();
    WTF::adopted(ptr);
    return ptr;
}
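
// Usage sketch (illustrative; ExampleObject is hypothetical): the initial
// reference from the constructor is adopted away, leaving the object managed
// purely by the garbage collector until someone refs it again:
//
//   ExampleObject* object = adoptRefCountedGarbageCollected(new ExampleObject);
//   // m_refCount is now 0 and the keep-alive handle is gone; storing the
//   // pointer in a RefPtr refs it back to 1 and resurrects the handle.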

// Classes that contain heap references but aren't themselves heap
// allocated have some extra macros available which allow their use
// to be restricted to cases where the garbage collector is able
// to discover their heap references.
//
// STACK_ALLOCATED(): Use if the object is only stack allocated. Heap objects
// should be in Members but you do not need the trace method as they are on
// the stack. (Down the line these might turn into raw pointers, but for
// now Members indicates that we have thought about them and explicitly
// taken care of them.)
//
// DISALLOW_ALLOCATION(): Cannot be allocated with new operators but can
// be a part object. If it has Members you need a trace method and the
// containing object needs to call that trace method.
//
// ALLOW_ONLY_INLINE_ALLOCATION(): Allows only placement new operator.
// This disallows general allocation of this object but allows the object
// to be placed as a value object in collections. If these have Members you
// need to have a trace method. That trace method will be called
// automatically by the Heap collections.
//
#if COMPILER_SUPPORTS(CXX_DELETED_FUNCTIONS)
#define DISALLOW_ALLOCATION()                                   \
    private:                                                    \
        void* operator new(size_t) = delete;                    \
        void* operator new(size_t, NotNullTag, void*) = delete; \
        void* operator new(size_t, void*) = delete;

#define ALLOW_ONLY_INLINE_ALLOCATION()                                              \
    public:                                                                         \
        void* operator new(size_t, NotNullTag, void* location) { return location; } \
        void* operator new(size_t, void* location) { return location; }             \
    private:                                                                        \
        void* operator new(size_t) = delete;

#define STATIC_ONLY(Type) \
    private:              \
        Type() = delete;

#else

#define DISALLOW_ALLOCATION()                          \
    private:                                           \
        void* operator new(size_t);                    \
        void* operator new(size_t, NotNullTag, void*); \
        void* operator new(size_t, void*)

#define ALLOW_ONLY_INLINE_ALLOCATION()                                              \
    public:                                                                         \
        void* operator new(size_t, NotNullTag, void* location) { return location; } \
        void* operator new(size_t, void* location) { return location; }             \
    private:                                                                        \
        void* operator new(size_t);

#define STATIC_ONLY(Type)  \
    private:               \
        Type();

#endif


// These macros insert annotations that the Blink GC plugin for clang uses for
// verification. STACK_ALLOCATED is used to declare that objects of this type
// are always stack allocated. GC_PLUGIN_IGNORE is used to make the plugin
// ignore a particular class or field when checking for proper usage. When using
// GC_PLUGIN_IGNORE a bug-number should be provided as an argument where the
// bug describes what needs to happen to remove the GC_PLUGIN_IGNORE again.
#if COMPILER(CLANG)
#define STACK_ALLOCATED()                                       \
    private:                                                    \
        __attribute__((annotate("blink_stack_allocated")))      \
        void* operator new(size_t) = delete;                    \
        void* operator new(size_t, NotNullTag, void*) = delete; \
        void* operator new(size_t, void*) = delete;

#define GC_PLUGIN_IGNORE(bug)                           \
    __attribute__((annotate("blink_gc_plugin_ignore")))
#else
#define STACK_ALLOCATED() DISALLOW_ALLOCATION()
#define GC_PLUGIN_IGNORE(bug)
#endif
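
// Usage sketch (illustrative; ExampleScope and ExampleNode are hypothetical):
//
//   class ExampleScope {
//       STACK_ALLOCATED(); // may only live on the stack; Members need no trace
//   public:
//       Member<ExampleNode> m_current;
//   };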

NO_SANITIZE_ADDRESS
void HeapObjectHeader::checkHeader() const
{
    ASSERT(m_magic == magic);
}

Address HeapObjectHeader::payload()
{
    return reinterpret_cast<Address>(this) + objectHeaderSize;
}

size_t HeapObjectHeader::payloadSize()
{
    return size() - objectHeaderSize;
}

Address HeapObjectHeader::payloadEnd()
{
    return reinterpret_cast<Address>(this) + size();
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::mark()
{
    checkHeader();
    m_size |= markBitMask;
}

Address FinalizedHeapObjectHeader::payload()
{
    return reinterpret_cast<Address>(this) + finalizedHeaderSize;
}

size_t FinalizedHeapObjectHeader::payloadSize()
{
    return size() - finalizedHeaderSize;
}

template<typename Header>
size_t ThreadHeap<Header>::allocationSizeFromSize(size_t size)
{
    // Check the size before computing the actual allocation size. The
    // allocation size calculation can overflow for large sizes and
    // the check therefore has to happen before any calculation on the
    // size.
    RELEASE_ASSERT(size < maxHeapObjectSize);

    // Add space for header.
    size_t allocationSize = size + sizeof(Header);
    // Align size with allocation granularity.
    allocationSize = (allocationSize + allocationMask) & ~allocationMask;
    return allocationSize;
}
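
// Worked example (illustrative, assuming an 8-byte Header): a request for 13
// bytes becomes 13 + 8 = 21, which rounds up to 24 with allocationMask = 7.
// The payload available to the caller is 24 - 8 = 16 bytes.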

template<typename Header>
Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo)
{
    size_t allocationSize = allocationSizeFromSize(size);
    bool isLargeObject = allocationSize > blinkPageSize / 2;
    if (isLargeObject)
        return allocateLargeObject(allocationSize, gcInfo);
    if (m_remainingAllocationSize < allocationSize)
        return outOfLineAllocate(size, gcInfo);
    Address headerAddress = m_currentAllocationPoint;
    m_currentAllocationPoint += allocationSize;
    m_remainingAllocationSize -= allocationSize;
    Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo);
    size_t payloadSize = allocationSize - sizeof(Header);
    stats().increaseObjectSpace(payloadSize);
    Address result = headerAddress + sizeof(*header);
    ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
    // Unpoison the memory used for the object (payload).
    ASAN_UNPOISON_MEMORY_REGION(result, payloadSize);
    memset(result, 0, payloadSize);
    ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1));
    return result;
}

// FIXME: Allocate objects that do not need finalization separately
// and use separate sweeping to not have to check for finalizers.
template<typename T>
Address Heap::allocate(size_t size)
{
    ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
    ASSERT(state->isAllocationAllowed());
    BaseHeap* heap = state->heap(HeapTrait<T>::index);
    Address addr =
        static_cast<typename HeapTrait<T>::HeapType*>(heap)->allocate(size, GCInfoTrait<T>::get());
    return addr;
}

// FIXME: Allocate objects that do not need finalization separately
// and use separate sweeping to not have to check for finalizers.
template<typename T>
Address Heap::reallocate(void* previous, size_t size)
{
    if (!size) {
        // If the new size is 0 this is equivalent to either
        // free(previous) or malloc(0). In both cases we do
        // nothing and return 0.
        return 0;
    }
    ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
    ASSERT(state->isAllocationAllowed());
    // FIXME: Currently only supports raw allocation on the
    // GeneralHeap. Hence we assume the header is a
    // FinalizedHeapObjectHeader.
    ASSERT(HeapTrait<T>::index == GeneralHeap);
    BaseHeap* heap = state->heap(HeapTrait<T>::index);
    Address address = static_cast<typename HeapTrait<T>::HeapType*>(heap)->allocate(size, GCInfoTrait<T>::get());
    if (!previous) {
        // This is equivalent to malloc(size).
        return address;
    }
    FinalizedHeapObjectHeader* previousHeader = FinalizedHeapObjectHeader::fromPayload(previous);
    ASSERT(!previousHeader->hasFinalizer());
    ASSERT(previousHeader->gcInfo() == GCInfoTrait<T>::get());
    size_t copySize = previousHeader->payloadSize();
    if (copySize > size)
        copySize = size;
    memcpy(address, previous, copySize);
    return address;
}
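
// Usage sketch (illustrative; ExampleBacking is hypothetical): growing a raw
// backing store on the general heap. The old memory is not freed eagerly; it
// is reclaimed by a later GC.
//
//   Address buffer = Heap::allocate<ExampleBacking>(64);
//   buffer = Heap::reallocate<ExampleBacking>(buffer, 128); // copies the first 64 bytes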
1397
1398class HeapAllocatorQuantizer {
1399public:
1400    template<typename T>
1401    static size_t quantizedSize(size_t count)
1402    {
1403        RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T));
1404        return HeapTrait<T>::HeapType::roundedAllocationSize(count * sizeof(T));
1405    }
1406    static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize;
1407};
1408
1409// This is a static-only class used as a trait on collections to make them heap allocated.
1410// However see also HeapListHashSetAllocator.
1411class HeapAllocator {
1412public:
1413    typedef HeapAllocatorQuantizer Quantizer;
1414    typedef WebCore::Visitor Visitor;
1415    static const bool isGarbageCollected = true;
1416
1417    template <typename Return, typename Metadata>
1418    static Return backingMalloc(size_t size)
1419    {
1420        return malloc<Return, Metadata>(size);
1421    }
1422    template <typename Return, typename Metadata>
1423    static Return zeroedBackingMalloc(size_t size)
1424    {
1425        return malloc<Return, Metadata>(size);
1426    }
1427    template <typename Return, typename Metadata>
1428    static Return malloc(size_t size)
1429    {
1430        return reinterpret_cast<Return>(Heap::allocate<Metadata>(size));
1431    }
1432    static void backingFree(void* address) { }
1433    static void free(void* address) { }
1434    template<typename T>
1435    static void* newArray(size_t bytes)
1436    {
1437        ASSERT_NOT_REACHED();
1438        return 0;
1439    }
1440
1441    static void deleteArray(void* ptr)
1442    {
1443        ASSERT_NOT_REACHED();
1444    }
1445
1446    static void markUsingGCInfo(Visitor* visitor, const void* buffer)
1447    {
1448        visitor->mark(buffer, FinalizedHeapObjectHeader::fromPayload(buffer)->traceCallback());
1449    }
1450
1451    static void markNoTracing(Visitor* visitor, const void* t) { visitor->markNoTracing(t); }
1452
1453    template<typename T, typename Traits>
1454    static void trace(Visitor* visitor, T& t)
1455    {
1456        CollectionBackingTraceTrait<WTF::ShouldBeTraced<Traits>::value, Traits::weakHandlingFlag, WeakPointersActWeak, T, Traits>::trace(visitor, t);
1457    }
1458
1459    static void registerWeakMembers(Visitor* visitor, const void* closure, const void* object, WeakPointerCallback callback)
1460    {
1461        visitor->registerWeakMembers(closure, object, callback);
1462    }
1463
1464    template<typename T>
1465    struct ResultType {
1466        typedef T* Type;
1467    };
1468
1469    // The WTF classes use Allocator::VectorBackingHelper in order to find a
1470    // class to template their backing allocation operation on. For off-heap
1471    // allocations the VectorBackingHelper is a dummy class, since the class is
1472    // not used during allocation of backing. For on-heap allocations this
1473    // typedef ensures that the allocation is done with the correct templated
1474    // instantiation of the allocation function. This ensures the correct GC
1475    // map is written when backing allocations take place.
1476    template<typename T, typename Traits>
1477    struct VectorBackingHelper {
1478        typedef HeapVectorBacking<T, Traits> Type;
1479    };
1480
1481    // Like the VectorBackingHelper, but this type is used for HashSet and
1482    // HashMap, both of which are implemented using HashTable.
1483    template<typename Table>
1484    struct HashTableBackingHelper {
1485        typedef HeapHashTableBacking<Table> Type;
1486    };
1487
1488    template<typename T>
1489    struct OtherType {
1490        typedef T* Type;
1491    };
1492
1493    template<typename T>
1494    static T& getOther(T* other)
1495    {
1496        return *other;
1497    }
1498
1499    static bool isAlive(Visitor* visitor, void* pointer) { return visitor->isAlive(pointer); }
1500
1501private:
1502    template<typename T, size_t u, typename V> friend class WTF::Vector;
1503    template<typename T, typename U, typename V, typename W> friend class WTF::HashSet;
1504    template<typename T, typename U, typename V, typename W, typename X, typename Y> friend class WTF::HashMap;
1505};
1506
1507template<typename Value>
1508static void traceListHashSetValue(Visitor* visitor, Value& value)
1509{
1510    // We use the default hash traits for the value in the node, because
1511    // ListHashSet does not let you specify any specific ones.
1512    // We don't allow ListHashSet of WeakMember, so we set that one false
1513    // (there's an assert elsewhere), but we have to specify some value for the
1514    // strongify template argument, so we specify WeakPointersActWeak,
1515    // arbitrarily.
    CollectionBackingTraceTrait<WTF::ShouldBeTraced<WTF::HashTraits<Value> >::value, WTF::NoWeakHandlingInCollections, WeakPointersActWeak, Value, WTF::HashTraits<Value> >::trace(visitor, value);
}

// The inline capacity is just a dummy template argument to match the off-heap
// allocator.
// This inherits from the static-only HeapAllocator trait class, but we do
// declare pointers to instances. These pointers are always null, and no
// objects are instantiated.
template<typename ValueArg, size_t inlineCapacity>
struct HeapListHashSetAllocator : public HeapAllocator {
    typedef HeapAllocator TableAllocator;
    typedef WTF::ListHashSetNode<ValueArg, HeapListHashSetAllocator> Node;

public:
    class AllocatorProvider {
    public:
        // For the heap allocation we don't need an actual allocator object, so we just
        // return null.
        HeapListHashSetAllocator* get() const { return 0; }

        // No allocator object is needed.
        void createAllocatorIfNeeded() { }

        // There is no allocator object in the HeapListHashSet (unlike in
        // the regular ListHashSet) so there is nothing to swap.
        void swap(AllocatorProvider& other) { }
    };

    void deallocate(void* dummy) { }

    // This is not a static method even though it could be, because it
    // needs to match the one that the (off-heap) ListHashSetAllocator
    // has. The 'this' pointer will always be null.
    void* allocateNode()
    {
        COMPILE_ASSERT(!WTF::IsWeak<ValueArg>::value, WeakPointersInAListHashSetWillJustResultInNullEntriesInTheSetThatsNotWhatYouWantConsiderUsingLinkedHashSetInstead);
        return malloc<void*, Node>(sizeof(Node));
    }

    static void traceValue(Visitor* visitor, Node* node)
    {
        traceListHashSetValue(visitor, node->m_value);
    }
};
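
// A hedged usage sketch (Foo is a hypothetical GarbageCollected class):
// HeapListHashSet, defined below, plugs this allocator in, so both the
// list nodes and the hash table backing live on the Blink heap:
//
//   HeapListHashSet<Member<Foo> > set;
//   set.add(foo); // The node is allocated via allocateNode() above.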

// FIXME: These should just be template aliases:
//
// template<typename T, size_t inlineCapacity = 0>
// using HeapVector = Vector<T, inlineCapacity, HeapAllocator>;
//
// as soon as all the compilers we care about support that.
// MSVC supports template aliases only as of MSVC 2013.
template<
    typename KeyArg,
    typename MappedArg,
    typename HashArg = typename DefaultHash<KeyArg>::Hash,
    typename KeyTraitsArg = HashTraits<KeyArg>,
    typename MappedTraitsArg = HashTraits<MappedArg> >
class HeapHashMap : public HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg, HeapAllocator> { };
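
// A hedged usage sketch (Foo, Bar and Owner are hypothetical heap-allocated
// classes; the map itself only needs tracing, not manual cleanup):
//
//   class Owner : public GarbageCollected<Owner> {
//   public:
//       void trace(Visitor* visitor) { visitor->trace(m_map); }
//   private:
//       HeapHashMap<Member<Foo>, Member<Bar> > m_map;
//   };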

template<
    typename ValueArg,
    typename HashArg = typename DefaultHash<ValueArg>::Hash,
    typename TraitsArg = HashTraits<ValueArg> >
class HeapHashSet : public HashSet<ValueArg, HashArg, TraitsArg, HeapAllocator> { };

template<
    typename ValueArg,
    typename HashArg = typename DefaultHash<ValueArg>::Hash,
    typename TraitsArg = HashTraits<ValueArg> >
class HeapLinkedHashSet : public LinkedHashSet<ValueArg, HashArg, TraitsArg, HeapAllocator> { };

template<
    typename ValueArg,
    size_t inlineCapacity = 0, // The inlineCapacity is just a dummy to match ListHashSet (off-heap).
    typename HashArg = typename DefaultHash<ValueArg>::Hash>
class HeapListHashSet : public ListHashSet<ValueArg, inlineCapacity, HashArg, HeapListHashSetAllocator<ValueArg, inlineCapacity> > { };

template<
    typename Value,
    typename HashFunctions = typename DefaultHash<Value>::Hash,
    typename Traits = HashTraits<Value> >
class HeapHashCountedSet : public HashCountedSet<Value, HashFunctions, Traits, HeapAllocator> { };

template<typename T, size_t inlineCapacity = 0>
class HeapVector : public Vector<T, inlineCapacity, HeapAllocator> {
public:
    HeapVector() { }

    explicit HeapVector(size_t size) : Vector<T, inlineCapacity, HeapAllocator>(size)
    {
    }

    HeapVector(size_t size, const T& val) : Vector<T, inlineCapacity, HeapAllocator>(size, val)
    {
    }

    template<size_t otherCapacity>
    HeapVector(const HeapVector<T, otherCapacity>& other)
        : Vector<T, inlineCapacity, HeapAllocator>(other)
    {
    }

    template<typename U>
    void append(const U& other)
    {
        Vector<T, inlineCapacity, HeapAllocator>::append(other);
    }

    template<typename U, size_t otherCapacity>
    void appendVector(const HeapVector<U, otherCapacity>& other)
    {
        const Vector<U, otherCapacity, HeapAllocator>& otherVector = other;
        Vector<T, inlineCapacity, HeapAllocator>::appendVector(otherVector);
    }
};
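
// A hedged usage sketch (Foo is a hypothetical GarbageCollected class):
//
//   HeapVector<Member<Foo> > vector;
//   vector.append(foo);                 // foo converts to Member<Foo>.
//   HeapVector<Member<Foo>, 4> buffer;  // Inline capacity of four entries.
//   vector.appendVector(buffer);        // Capacities may differ.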

template<typename T, size_t inlineCapacity = 0>
class HeapDeque : public Deque<T, inlineCapacity, HeapAllocator> {
public:
    HeapDeque() { }

    explicit HeapDeque(size_t size) : Deque<T, inlineCapacity, HeapAllocator>(size)
    {
    }

    HeapDeque(size_t size, const T& val) : Deque<T, inlineCapacity, HeapAllocator>(size, val)
    {
    }

    // FIXME: Doesn't work if there is an inline buffer, due to crbug.com/360572
    HeapDeque<T, 0>& operator=(const HeapDeque& other)
    {
        HeapDeque<T> copy(other);
        swap(copy);
        return *this;
    }

    // FIXME: Doesn't work if there is an inline buffer, due to crbug.com/360572
    inline void swap(HeapDeque& other)
    {
        Deque<T, inlineCapacity, HeapAllocator>::swap(other);
    }

    template<size_t otherCapacity>
    HeapDeque(const HeapDeque<T, otherCapacity>& other)
        : Deque<T, inlineCapacity, HeapAllocator>(other)
    {
    }

    template<typename U>
    void append(const U& other)
    {
        Deque<T, inlineCapacity, HeapAllocator>::append(other);
    }
};

template<typename T>
struct ThreadingTrait<Member<T> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T>
struct ThreadingTrait<WeakMember<T> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename Key, typename Value, typename T, typename U, typename V>
struct ThreadingTrait<HashMap<Key, Value, T, U, V, HeapAllocator> > {
    static const ThreadAffinity Affinity =
        (ThreadingTrait<Key>::Affinity == MainThreadOnly)
        && (ThreadingTrait<Value>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
};
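
// For example (a hedged sketch; Foo and Bar are hypothetical heap classes):
//
//   ThreadingTrait<HeapHashMap<Member<Foo>, Member<Bar> > >::Affinity
//
// is MainThreadOnly only when both Foo's and Bar's affinities are
// MainThreadOnly; otherwise the map may be used from any thread.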

template<typename First, typename Second>
struct ThreadingTrait<WTF::KeyValuePair<First, Second> > {
    static const ThreadAffinity Affinity =
        (ThreadingTrait<First>::Affinity == MainThreadOnly)
        && (ThreadingTrait<Second>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
};

template<typename T, typename U, typename V>
struct ThreadingTrait<HashSet<T, U, V, HeapAllocator> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T, size_t inlineCapacity>
struct ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T, typename Traits>
struct ThreadingTrait<HeapVectorBacking<T, Traits> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T, size_t inlineCapacity>
struct ThreadingTrait<Deque<T, inlineCapacity, HeapAllocator> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename T, typename U, typename V>
struct ThreadingTrait<HashCountedSet<T, U, V, HeapAllocator> > {
    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
};

template<typename Table>
struct ThreadingTrait<HeapHashTableBacking<Table> > {
    typedef typename Table::KeyType Key;
    typedef typename Table::ValueType Value;
    static const ThreadAffinity Affinity =
        (ThreadingTrait<Key>::Affinity == MainThreadOnly)
        && (ThreadingTrait<Value>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
};

template<typename T, typename U, typename V, typename W, typename X>
struct ThreadingTrait<HeapHashMap<T, U, V, W, X> > : public ThreadingTrait<HashMap<T, U, V, W, X, HeapAllocator> > { };

template<typename T, typename U, typename V>
struct ThreadingTrait<HeapHashSet<T, U, V> > : public ThreadingTrait<HashSet<T, U, V, HeapAllocator> > { };

template<typename T, size_t inlineCapacity>
struct ThreadingTrait<HeapVector<T, inlineCapacity> > : public ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator> > { };

template<typename T, size_t inlineCapacity>
struct ThreadingTrait<HeapDeque<T, inlineCapacity> > : public ThreadingTrait<Deque<T, inlineCapacity, HeapAllocator> > { };

template<typename T, typename U, typename V>
struct ThreadingTrait<HeapHashCountedSet<T, U, V> > : public ThreadingTrait<HashCountedSet<T, U, V, HeapAllocator> > { };

// The standard implementation of GCInfoTrait<T>::get() just returns a static
// from the class T, but we can't do that for HashMap, HashSet, Vector, etc.
// because they are in WTF and know nothing of GCInfos. Instead we have
// specializations of GCInfoTrait for these collection classes here.
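
// Each initializer list below fills in a GCInfo in field order: the trace
// callback, the finalization callback (0 if none), whether finalization is
// needed, and whether the type has a vtable, plus the type name when
// GC_TRACING is enabled. (The field names here are inferred from the usage
// below; the GCInfo definition itself lives elsewhere.)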

template<typename Key, typename Value, typename T, typename U, typename V>
struct GCInfoTrait<HashMap<Key, Value, T, U, V, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef HashMap<Key, Value, T, U, V, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // HashMap needs no finalizer.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_TRACING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, typename U, typename V>
struct GCInfoTrait<HashSet<T, U, V, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef HashSet<T, U, V, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // HashSet needs no finalizer.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_TRACING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, typename U, typename V>
struct GCInfoTrait<LinkedHashSet<T, U, V, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef LinkedHashSet<T, U, V, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            LinkedHashSet<T, U, V, HeapAllocator>::finalize,
            true, // Needs finalization. The anchor needs to unlink itself from the chain.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_TRACING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename ValueArg, size_t inlineCapacity, typename U>
struct GCInfoTrait<ListHashSet<ValueArg, inlineCapacity, U, HeapListHashSetAllocator<ValueArg, inlineCapacity> > > {
    static const GCInfo* get()
    {
        typedef WTF::ListHashSet<ValueArg, inlineCapacity, U, HeapListHashSetAllocator<ValueArg, inlineCapacity> > TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // ListHashSet needs no finalization though its backing might.
            false, // no vtable.
#if ENABLE(GC_TRACING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, typename Allocator>
struct GCInfoTrait<WTF::ListHashSetNode<T, Allocator> > {
    static const GCInfo* get()
    {
        typedef WTF::ListHashSetNode<T, Allocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            TargetType::finalize,
            WTF::HashTraits<T>::needsDestruction, // The node needs destruction if its data does.
            false, // no vtable.
#if ENABLE(GC_TRACING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T>
struct GCInfoTrait<Vector<T, 0, HeapAllocator> > {
    static const GCInfo* get()
    {
#if ENABLE(GC_TRACING)
        typedef Vector<T, 0, HeapAllocator> TargetType;
#endif
        static const GCInfo info = {
            TraceTrait<Vector<T, 0, HeapAllocator> >::trace,
            0,
            false, // Vector needs no finalizer if it has no inline capacity.
            WTF::IsPolymorphic<Vector<T, 0, HeapAllocator> >::value,
#if ENABLE(GC_TRACING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, size_t inlineCapacity>
struct FinalizerTrait<Vector<T, inlineCapacity, HeapAllocator> > : public FinalizerTraitImpl<Vector<T, inlineCapacity, HeapAllocator>, true> { };

template<typename T, size_t inlineCapacity>
struct GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef Vector<T, inlineCapacity, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            FinalizerTrait<TargetType>::finalize,
            // Finalizer is needed to destruct things stored in the inline capacity.
            inlineCapacity && VectorTraits<T>::needsDestruction,
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_TRACING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T>
struct GCInfoTrait<Deque<T, 0, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef Deque<T, 0, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // Deque needs no finalizer if it has no inline capacity.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_TRACING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, typename U, typename V>
struct GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef HashCountedSet<T, U, V, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            0,
            false, // HashCountedSet is just a HashTable, and needs no finalizer.
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_TRACING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, size_t inlineCapacity>
struct FinalizerTrait<Deque<T, inlineCapacity, HeapAllocator> > : public FinalizerTraitImpl<Deque<T, inlineCapacity, HeapAllocator>, true> { };

template<typename T, size_t inlineCapacity>
struct GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator> > {
    static const GCInfo* get()
    {
        typedef Deque<T, inlineCapacity, HeapAllocator> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            FinalizerTrait<TargetType>::finalize,
            // Finalizer is needed to destruct things stored in the inline capacity.
            inlineCapacity && VectorTraits<T>::needsDestruction,
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_TRACING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename T, typename Traits>
struct GCInfoTrait<HeapVectorBacking<T, Traits> > {
    static const GCInfo* get()
    {
        typedef HeapVectorBacking<T, Traits> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            FinalizerTrait<TargetType>::finalize,
            Traits::needsDestruction,
            false, // We don't support embedded objects in HeapVectors with vtables.
#if ENABLE(GC_TRACING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

template<typename Table>
struct GCInfoTrait<HeapHashTableBacking<Table> > {
    static const GCInfo* get()
    {
        typedef HeapHashTableBacking<Table> TargetType;
        static const GCInfo info = {
            TraceTrait<TargetType>::trace,
            HeapHashTableBacking<Table>::finalize,
            Table::ValueTraits::needsDestruction,
            WTF::IsPolymorphic<TargetType>::value,
#if ENABLE(GC_TRACING)
            TypenameStringTrait<TargetType>::get()
#endif
        };
        return &info;
    }
};

// This is for tracing inside collections that have special support for weak
// pointers. This is normally handled through the HashTraits, but it is not
// feasible to add methods for handling tracing to the hash traits of WTF
// classes like KeyValuePair, which is used to implement HashMap. This trait
// lets us add custom handling for such classes.
template<WTF::WeakHandlingFlag weakHandlingFlag, ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct TraceInCollectionTrait;

// Catch-all for types that can be traced but have no special handling for
// weakness in collections. This means that if this type contains WeakMember
// fields, they will simply be zeroed, but the entry will not be removed from
// the collection. This always happens for things in vectors, which don't
// currently support special handling of weak elements.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, strongify, T, Traits> {
    static void trace(Visitor* visitor, T& t)
    {
        TraceTrait<T>::trace(visitor, &t);
    }
};

// Catch-all for things that have HashTrait support for tracing with weakness.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct TraceInCollectionTrait<WTF::WeakHandlingInCollections, strongify, T, Traits> {
    static void trace(Visitor* visitor, T& t)
    {
        Traits::traceInCollection(visitor, t, strongify);
    }
};

// Vector backing that needs marking. We don't support weak members in vectors.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, strongify, HeapVectorBacking<T, Traits>, void> {
    static void trace(Visitor* visitor, void* self)
    {
        // The allocator can oversize the allocation a little, according to
        // the allocation granularity. The extra size is included in the
        // payloadSize call below, since there is nowhere to store the
        // originally requested size. This assert ensures that visiting the
        // last bit of memory can't cause trouble.
        COMPILE_ASSERT(!WTF::ShouldBeTraced<Traits>::value || sizeof(T) > allocationGranularity || Traits::canInitializeWithMemset, HeapOverallocationCanCauseSpuriousVisits);

        T* array = reinterpret_cast<T*>(self);
        WebCore::FinalizedHeapObjectHeader* header = WebCore::FinalizedHeapObjectHeader::fromPayload(self);
        // Use the payload size as recorded by the heap to determine how many
        // elements to mark.
        size_t length = header->payloadSize() / sizeof(T);
        for (size_t i = 0; i < length; i++)
            CollectionBackingTraceTrait<WTF::ShouldBeTraced<Traits>::value, Traits::weakHandlingFlag, WeakPointersActStrong, T, Traits>::trace(visitor, array[i]);
    }
};
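
// A worked example of the length computation above (a hedged sketch): for a
// backing of 8-byte elements whose recorded payload is 40 bytes, the loop
// traces 40 / 8 = 5 slots, even if only four were requested and the fifth is
// over-allocation. The COMPILE_ASSERT guarantees that such a tail slot is
// either impossible (sizeof(T) > allocationGranularity) or safe to visit as
// a memset-initialized value.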

// Almost all hash table backings are visited with this specialization.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename Table>
struct TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, strongify, HeapHashTableBacking<Table>, void> {
    typedef typename Table::ValueType Value;
    typedef typename Table::ValueTraits Traits;
    static void trace(Visitor* visitor, void* self)
    {
        Value* array = reinterpret_cast<Value*>(self);
        WebCore::FinalizedHeapObjectHeader* header = WebCore::FinalizedHeapObjectHeader::fromPayload(self);
        size_t length = header->payloadSize() / sizeof(Value);
        for (size_t i = 0; i < length; i++) {
            if (!WTF::HashTableHelper<Value, typename Table::ExtractorType, typename Table::KeyTraitsType>::isEmptyOrDeletedBucket(array[i]))
                CollectionBackingTraceTrait<WTF::ShouldBeTraced<Traits>::value, Traits::weakHandlingFlag, strongify, Value, Traits>::trace(visitor, array[i]);
        }
    }
};

// This specialization of TraceInCollectionTrait is for the backing of
// HeapListHashSet. This is for the case that we find a reference to the
// backing from the stack. That probably means we have a GC while we are in a
// ListHashSet method since normal API use does not put pointers to the backing
// on the stack.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename NodeContents, size_t inlineCapacity, typename T, typename U, typename V, typename W, typename X, typename Y>
struct TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, strongify, HeapHashTableBacking<WTF::HashTable<WTF::ListHashSetNode<NodeContents, HeapListHashSetAllocator<T, inlineCapacity> >*, U, V, W, X, Y, HeapAllocator> >, void> {
    typedef WTF::ListHashSetNode<NodeContents, HeapListHashSetAllocator<T, inlineCapacity> > Node;
    typedef WTF::HashTable<Node*, U, V, W, X, Y, HeapAllocator> Table;
    static void trace(Visitor* visitor, void* self)
    {
        Node** array = reinterpret_cast<Node**>(self);
        WebCore::FinalizedHeapObjectHeader* header = WebCore::FinalizedHeapObjectHeader::fromPayload(self);
        size_t length = header->payloadSize() / sizeof(Node*);
        for (size_t i = 0; i < length; i++) {
            if (!WTF::HashTableHelper<Node*, typename Table::ExtractorType, typename Table::KeyTraitsType>::isEmptyOrDeletedBucket(array[i])) {
                traceListHashSetValue(visitor, array[i]->m_value);
                // Just mark the node without tracing because we already traced
                // the contents, and there is no need to trace the next and
                // prev fields since iterating over the hash table backing will
                // find the whole chain.
                visitor->markNoTracing(array[i]);
            }
        }
    }
};

// Key value pairs, as used in HashMap. To disambiguate template choice we have
// to have two versions, first the one with no special weak handling, then the
// one with weak handling.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, strongify, WTF::KeyValuePair<Key, Value>, Traits> {
    static void trace(Visitor* visitor, WTF::KeyValuePair<Key, Value>& self)
    {
        ASSERT(WTF::ShouldBeTraced<Traits>::value);
        CollectionBackingTraceTrait<WTF::ShouldBeTraced<typename Traits::KeyTraits>::value, WTF::NoWeakHandlingInCollections, strongify, Key, typename Traits::KeyTraits>::trace(visitor, self.key);
        CollectionBackingTraceTrait<WTF::ShouldBeTraced<typename Traits::ValueTraits>::value, WTF::NoWeakHandlingInCollections, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.value);
    }
};

template<ShouldWeakPointersBeMarkedStrongly strongify, typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<WTF::WeakHandlingInCollections, strongify, WTF::KeyValuePair<Key, Value>, Traits> {
    static void trace(Visitor* visitor, WTF::KeyValuePair<Key, Value>& self)
    {
        ASSERT(WTF::ShouldBeTraced<Traits>::value || strongify == WeakPointersActStrong);
        CollectionBackingTraceTrait<WTF::ShouldBeTraced<typename Traits::KeyTraits>::value, Traits::KeyTraits::weakHandlingFlag, strongify, Key, typename Traits::KeyTraits>::trace(visitor, self.key);
        CollectionBackingTraceTrait<WTF::ShouldBeTraced<typename Traits::ValueTraits>::value, Traits::ValueTraits::weakHandlingFlag, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.value);
    }
};

// Nodes used by LinkedHashSet. Again we need two versions to disambiguate the
// template.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, typename Traits>
struct TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, strongify, WTF::LinkedHashSetNode<Value>, Traits> {
    static void trace(Visitor* visitor, WTF::LinkedHashSetNode<Value>& self)
    {
        ASSERT(WTF::ShouldBeTraced<Traits>::value);
        TraceTrait<Value>::trace(visitor, &self.m_value);
    }
};

template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, typename Traits>
struct TraceInCollectionTrait<WTF::WeakHandlingInCollections, strongify, WTF::LinkedHashSetNode<Value>, Traits> {
    static void trace(Visitor* visitor, WTF::LinkedHashSetNode<Value>& self)
    {
        ASSERT(WTF::ShouldBeTraced<Traits>::value || strongify == WeakPointersActStrong);
        TraceInCollectionTrait<WTF::WeakHandlingInCollections, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.m_value);
    }
};

// ListHashSetNode pointers (a ListHashSet is implemented as a hash table of these pointers).
template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, size_t inlineCapacity, typename Traits>
struct TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, strongify, WTF::ListHashSetNode<Value, HeapListHashSetAllocator<Value, inlineCapacity> >*, Traits> {
    typedef WTF::ListHashSetNode<Value, HeapListHashSetAllocator<Value, inlineCapacity> > Node;
    static void trace(Visitor* visitor, Node* node)
    {
        traceListHashSetValue(visitor, node->m_value);
        // Just mark the node without tracing because we already traced the
        // contents, and there is no need to trace the next and prev fields
        // since iterating over the hash table backing will find the whole
        // chain.
        visitor->markNoTracing(node);
    }
};

// FIXME: oilpan: Perhaps we don't need this any more.
// Catch-all for things that don't need marking and have no weak pointers. We
// do nothing, even if WeakPointersActStrong.
template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename U>
struct CollectionBackingTraceTrait<false, WTF::NoWeakHandlingInCollections, strongify, T, U> {
    static void trace(Visitor*, T&) { }
};

// Catch-all for things that don't need marking. They have weak pointers, but
// we are not marking weak pointers in this object in this GC.
template<typename T, typename U>
struct CollectionBackingTraceTrait<false, WTF::WeakHandlingInCollections, WeakPointersActWeak, T, U> {
    static void trace(Visitor*, T&) { }
};

// Things that need marking because they contain weak pointers that we are
// treating as strong for this GC, because there is an outstanding iterator
// that would be disturbed if we started removing entries from the collection.
template<typename T, typename Traits>
struct CollectionBackingTraceTrait<false, WTF::WeakHandlingInCollections, WeakPointersActStrong, T, Traits> {
    static void trace(Visitor* visitor, T& t) { TraceInCollectionTrait<WTF::WeakHandlingInCollections, WeakPointersActStrong, T, Traits>::trace(visitor, t); }
    static void trace(Visitor* visitor, void* t) { TraceInCollectionTrait<WTF::WeakHandlingInCollections, WeakPointersActStrong, T, Traits>::trace(visitor, t); }
};

// Things that need marking because they contain strong pointers.
template<WTF::WeakHandlingFlag weakHandlingFlag, ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
struct CollectionBackingTraceTrait<true, weakHandlingFlag, strongify, T, Traits> {
    static void trace(Visitor* visitor, T& t) { TraceInCollectionTrait<weakHandlingFlag, strongify, T, Traits>::trace(visitor, t); }
    template<typename U>
    static void trace(Visitor* visitor, U* t) { TraceInCollectionTrait<weakHandlingFlag, strongify, T, Traits>::trace(visitor, T(t)); }
};

template<typename T> struct WeakHandlingHashTraits : WTF::SimpleClassHashTraits<T> {
    // We want to treat the object as a weak object in the sense that it can
    // disappear from hash sets and hash maps.
    static const WTF::WeakHandlingFlag weakHandlingFlag = WTF::WeakHandlingInCollections;
    // Normally, whether an object needs tracing is inferred automatically
    // from the presence of a trace method, but we don't necessarily have a
    // trace method here, and we may not need one, because T may only ever be
    // allocated inside collections, never as an independent object.
    // Explicitly marking this as needing tracing means it will be traced in
    // collections using the traceInCollection method, which it must have.
    template<typename U = void> struct NeedsTracingLazily {
        static const bool value = true;
    };
    // This method is called at the end of GC to test which elements should be
    // removed from the collection. It returns true if the object contains
    // non-live pointers. If this method incorrectly returns false, then we can
    // have dangerous dangling pointers!
    template<typename Visitor> static bool shouldRemoveFromCollection(Visitor* visitor, T& t)
    {
        return t.shouldRemoveFromCollection(visitor);
    }
    // The traceInCollection method traces differently depending on whether we
    // are strongifying the trace operation. We strongify the trace operation
    // when there are active iterators on the object. In this case all
    // WeakMembers are marked like strong members so that elements do not
    // suddenly disappear during iteration.
    static void traceInCollection(WebCore::Visitor* visitor, T& t, WebCore::ShouldWeakPointersBeMarkedStrongly strongify)
    {
        t.traceInCollection(visitor, strongify);
    }
};
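
// A hedged sketch of the interface an element type must provide to use
// WeakHandlingHashTraits (Foo is a hypothetical heap class; the bodies are
// illustrative only):
//
//   class Entry {
//   public:
//       // Called at the end of GC; returning true drops this entry.
//       bool shouldRemoveFromCollection(Visitor* visitor)
//       {
//           return !visitor->isAlive(m_weak.get());
//       }
//       // Expected to trace m_weak as if it were strong when strongify is
//       // WeakPointersActStrong.
//       void traceInCollection(Visitor*, ShouldWeakPointersBeMarkedStrongly);
//   private:
//       WeakMember<Foo> m_weak;
//   };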

template<typename T, typename Traits>
struct TraceTrait<HeapVectorBacking<T, Traits> > {
    typedef HeapVectorBacking<T, Traits> Backing;
    static void trace(Visitor* visitor, void* self)
    {
        COMPILE_ASSERT(!WTF::IsWeak<T>::value, WeDontSupportWeaknessInHeapVectorsOrDeques);
        if (WTF::ShouldBeTraced<Traits>::value)
            TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, WeakPointersActWeak, HeapVectorBacking<T, Traits>, void>::trace(visitor, self);
    }
    static void mark(Visitor* visitor, const Backing* backing)
    {
        visitor->mark(backing, &trace);
    }
    static void checkGCInfo(Visitor* visitor, const Backing* backing)
    {
#ifndef NDEBUG
        visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get());
#endif
    }
};

// The trace trait for the heap hashtable backing is used when we find a
// direct pointer to the backing from the conservative stack scanner. This
// normally indicates that there is an ongoing iteration over the table, and so
// we disable weak processing of table entries. When the backing is found
// through the owning hash table we mark differently, in order to do weak
// processing.
template<typename Table>
struct TraceTrait<HeapHashTableBacking<Table> > {
    typedef HeapHashTableBacking<Table> Backing;
    typedef typename Table::ValueTraits Traits;
    static void trace(Visitor* visitor, void* self)
    {
        if (WTF::ShouldBeTraced<Traits>::value || Traits::weakHandlingFlag == WTF::WeakHandlingInCollections)
            TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, WeakPointersActStrong, Backing, void>::trace(visitor, self);
    }
    static void mark(Visitor* visitor, const Backing* backing)
    {
        if (WTF::ShouldBeTraced<Traits>::value || Traits::weakHandlingFlag == WTF::WeakHandlingInCollections)
            visitor->mark(backing, &trace);
        else
            visitor->markNoTracing(backing); // If we know the trace function will do nothing there is no need to call it.
    }
    static void checkGCInfo(Visitor* visitor, const Backing* backing)
    {
#ifndef NDEBUG
        visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get());
#endif
    }
};

template<typename Table>
void HeapHashTableBacking<Table>::finalize(void* pointer)
{
    typedef typename Table::ValueType Value;
    ASSERT(Table::ValueTraits::needsDestruction);
    FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(pointer);
    // Use the payload size as recorded by the heap to determine how many
    // elements to finalize.
    size_t length = header->payloadSize() / sizeof(Value);
    Value* table = reinterpret_cast<Value*>(pointer);
    for (size_t i = 0; i < length; i++) {
        if (!Table::isEmptyOrDeletedBucket(table[i]))
            table[i].~Value();
    }
}

template<typename T, typename U, typename V, typename W, typename X>
struct GCInfoTrait<HeapHashMap<T, U, V, W, X> > : public GCInfoTrait<HashMap<T, U, V, W, X, HeapAllocator> > { };
template<typename T, typename U, typename V>
struct GCInfoTrait<HeapHashSet<T, U, V> > : public GCInfoTrait<HashSet<T, U, V, HeapAllocator> > { };
template<typename T, typename U, typename V>
struct GCInfoTrait<HeapLinkedHashSet<T, U, V> > : public GCInfoTrait<LinkedHashSet<T, U, V, HeapAllocator> > { };
template<typename T, size_t inlineCapacity, typename U>
struct GCInfoTrait<HeapListHashSet<T, inlineCapacity, U> > : public GCInfoTrait<ListHashSet<T, inlineCapacity, U, HeapListHashSetAllocator<T, inlineCapacity> > > { };
template<typename T, size_t inlineCapacity>
struct GCInfoTrait<HeapVector<T, inlineCapacity> > : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > { };
template<typename T, size_t inlineCapacity>
struct GCInfoTrait<HeapDeque<T, inlineCapacity> > : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator> > { };
template<typename T, typename U, typename V>
struct GCInfoTrait<HeapHashCountedSet<T, U, V> > : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator> > { };

template<typename T>
struct IfWeakMember;

template<typename T>
struct IfWeakMember {
    template<typename U>
    static bool isDead(Visitor*, const U&) { return false; }
};

template<typename T>
struct IfWeakMember<WeakMember<T> > {
    static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); }
};
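
// A hedged usage sketch: IfWeakMember lets generic code ask about liveness
// without knowing whether a field is weak (Foo is hypothetical):
//
//   WeakMember<Foo> weak;
//   Member<Foo> strong;
//   IfWeakMember<WeakMember<Foo> >::isDead(visitor, weak);  // Real liveness check.
//   IfWeakMember<Member<Foo> >::isDead(visitor, strong);    // Statically false.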

#if COMPILER(CLANG)
// Clang does not export the symbols that we have explicitly asked it to
// export, so we force it to export all the methods from ThreadHeap here.
template<> void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo*);
template<> void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo*);
extern template class PLATFORM_EXPORT ThreadHeap<FinalizedHeapObjectHeader>;
extern template class PLATFORM_EXPORT ThreadHeap<HeapObjectHeader>;
#endif

} // namespace WebCore

#endif // Heap_h