1/*
2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 *     * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 *     * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
13 * distribution.
14 *     * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#ifndef Heap_h
32#define Heap_h
33
34#include "platform/PlatformExport.h"
35#include "platform/heap/AddressSanitizer.h"
36#include "platform/heap/ThreadState.h"
37#include "platform/heap/Visitor.h"
38#include "public/platform/WebThread.h"
39#include "wtf/Assertions.h"
40#include "wtf/HashCountedSet.h"
41#include "wtf/LinkedHashSet.h"
42#include "wtf/ListHashSet.h"
43#include "wtf/OwnPtr.h"
44#include "wtf/PassRefPtr.h"
45#include "wtf/ThreadSafeRefCounted.h"
46
47#include <stdint.h>
48
49namespace blink {
50
51const size_t blinkPageSizeLog2 = 17;
52const size_t blinkPageSize = 1 << blinkPageSizeLog2;
53const size_t blinkPageOffsetMask = blinkPageSize - 1;
54const size_t blinkPageBaseMask = ~blinkPageOffsetMask;
55
56// We allocate pages at random addresses but in groups of
57// blinkPagesPerRegion at a given random address. We group pages to
58// not spread out too much over the address space which would blow
59// away the page tables and lead to bad performance.
60const size_t blinkPagesPerRegion = 10;
61
62// Double precision floats are more efficient when 8 byte aligned, so we 8 byte
63// align all allocations even on 32 bit.
64const size_t allocationGranularity = 8;
65const size_t allocationMask = allocationGranularity - 1;
66const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity);
67const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask);
68const size_t maxHeapObjectSizeLog2 = 27;
69const size_t maxHeapObjectSize = 1 << maxHeapObjectSizeLog2;
70
71const size_t markBitMask = 1;
72const size_t freeListMask = 2;
73// The dead bit is used for objects that have gone through a GC marking, but did
74// not get swept before a new GC started. In that case we set the dead bit on
75// objects that were not marked in the previous GC to ensure we are not tracing
76// them via a conservatively found pointer. Tracing dead objects could lead to
77// tracing of already finalized objects in another thread's heap which is a
78// use-after-free situation.
79const size_t deadBitMask = 4;
80// On free-list entries we reuse the dead bit to distinguish a normal free-list
81// entry from one that has been promptly freed.
82const size_t promptlyFreedMask = freeListMask | deadBitMask;
83#if ENABLE(GC_PROFILE_HEAP)
84const size_t heapObjectGenerations = 8;
85const size_t maxHeapObjectAge = heapObjectGenerations - 1;
86const size_t heapObjectAgeMask = ~(maxHeapObjectSize - 1);
87const size_t sizeMask = ~heapObjectAgeMask & ~static_cast<size_t>(7);
88#else
89const size_t sizeMask = ~static_cast<size_t>(7);
90#endif
91const uint8_t freelistZapValue = 42;
92const uint8_t finalizedZapValue = 24;
93// The orphaned zap value must be zero in the lowest bits to allow for using
94// the mark bit when tracing.
95const uint8_t orphanedZapValue = 240;
96
97const int numberOfMarkingThreads = 2;
98
99const int numberOfPagesToConsiderForCoalescing = 100;
100
101enum CallbackInvocationMode {
102    GlobalMarking,
103    ThreadLocalMarking,
104};
105
106class CallbackStack;
107class HeapStats;
108class PageMemory;
109template<ThreadAffinity affinity> class ThreadLocalPersistents;
110template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent;
111template<typename T> class CrossThreadPersistent;
112
113#if ENABLE(GC_PROFILE_HEAP)
114class TracedValue;
115#endif
116
117PLATFORM_EXPORT size_t osPageSize();
118
119// Blink heap pages are set up with a guard page before and after the
120// payload.
121inline size_t blinkPagePayloadSize()
122{
123    return blinkPageSize - 2 * osPageSize();
124}
125
126// Blink heap pages are aligned to the Blink heap page size.
127// Therefore, the start of a Blink page can be obtained by
128// rounding down to the Blink page size.
129inline Address roundToBlinkPageStart(Address address)
130{
131    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
132}
133
134inline Address roundToBlinkPageEnd(Address address)
135{
136    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize;
137}
138
139// Compute the amount of padding we have to add to a header to make
140// the size of the header plus the padding a multiple of 8 bytes.
141template<typename Header>
142inline size_t headerPadding()
143{
144    return (allocationGranularity - (sizeof(Header) % allocationGranularity)) % allocationGranularity;
145}
146
147// Masks an address down to the enclosing blink page base address.
148inline Address blinkPageAddress(Address address)
149{
150    return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask);
151}
152
153#if ENABLE(ASSERT)
154
155// Sanity check for a page header address: the address of the page
156// header should be OS page size away from being Blink page size
157// aligned.
158inline bool isPageHeaderAddress(Address address)
159{
160    return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - osPageSize());
161}
162#endif
163
164// Mask an address down to the enclosing oilpan heap base page.
165// All oilpan heap pages are aligned at blinkPageBase plus an OS page size.
166// FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps.
167// This is only exported to enable tests in HeapTest.cpp.
168PLATFORM_EXPORT inline BaseHeapPage* pageHeaderFromObject(const void* object)
169{
170    Address address = reinterpret_cast<Address>(const_cast<void*>(object));
171    return reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + osPageSize());
172}
173
174// Large allocations are allocated as separate objects and linked in a
175// list.
176//
177// In order to use the same memory allocation routines for everything
178// allocated in the heap, large objects are considered heap pages
179// containing only one object.
180//
181// The layout of a large heap object is as follows:
182//
183// | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload |
184template<typename Header>
185class LargeHeapObject : public BaseHeapPage {
186public:
187    LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) : BaseHeapPage(storage, gcInfo, state)
188    {
189        COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned);
190    }
191
192    virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE;
193    virtual bool isLargeObject() OVERRIDE { return true; }
194
195#if ENABLE(GC_PROFILE_MARKING)
196    virtual const GCInfo* findGCInfo(Address address)
197    {
198        if (!objectContains(address))
199            return 0;
200        return gcInfo();
201    }
202#endif
203
204#if ENABLE(GC_PROFILE_HEAP)
205    void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
206#endif
207
208    void link(LargeHeapObject<Header>** previousNext)
209    {
210        m_next = *previousNext;
211        *previousNext = this;
212    }
213
214    void unlink(LargeHeapObject<Header>** previousNext)
215    {
216        *previousNext = m_next;
217    }
218
219    // The LargeHeapObject pseudo-page contains one actual object. Determine
220    // whether the pointer is within that object.
221    bool objectContains(Address object)
222    {
223        return (payload() <= object) && (object < address() + size());
224    }
225
226    // Returns true for any address that is on one of the pages that this
227    // large object uses. That ensures that we can use a negative result to
228    // populate the negative page cache.
229    virtual bool contains(Address object) OVERRIDE
230    {
231        return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size());
232    }
233
234    LargeHeapObject<Header>* next()
235    {
236        return m_next;
237    }
238
239    size_t size()
240    {
241        return heapObjectHeader()->size() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
242    }
243
244    Address payload() { return heapObjectHeader()->payload(); }
245    size_t payloadSize() { return heapObjectHeader()->payloadSize(); }
246
247    Header* heapObjectHeader()
248    {
249        Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
250        return reinterpret_cast<Header*>(headerAddress);
251    }
252
253    bool isMarked();
254    void unmark();
255    void getStats(HeapStats&);
256    void mark(Visitor*);
257    void finalize();
258    void setDeadMark();
259    virtual void markOrphaned()
260    {
261        // Zap the payload with a recognizable value to detect any incorrect
262        // cross thread pointer usage.
263        memset(payload(), orphanedZapValue, payloadSize());
264        BaseHeapPage::markOrphaned();
265    }
266
267private:
268    friend class ThreadHeap<Header>;
269
270    LargeHeapObject<Header>* m_next;
271};
272
273// The BasicObjectHeader is the minimal object header. It is used when
274// encountering heap space of size allocationGranularity to mark it as
275// as freelist entry.
276class PLATFORM_EXPORT BasicObjectHeader {
277public:
278    NO_SANITIZE_ADDRESS
279    explicit BasicObjectHeader(size_t encodedSize)
280        : m_size(encodedSize) { }
281
282    static size_t freeListEncodedSize(size_t size) { return size | freeListMask; }
283
284    NO_SANITIZE_ADDRESS
285    bool isFree() { return m_size & freeListMask; }
286
287    NO_SANITIZE_ADDRESS
288    bool isPromptlyFreed() { return (m_size & promptlyFreedMask) == promptlyFreedMask; }
289
290    NO_SANITIZE_ADDRESS
291    void markPromptlyFreed() { m_size |= promptlyFreedMask; }
292
293    NO_SANITIZE_ADDRESS
294    size_t size() const { return m_size & sizeMask; }
295
296#if ENABLE(GC_PROFILE_HEAP)
297    NO_SANITIZE_ADDRESS
298    size_t encodedSize() const { return m_size; }
299
300    NO_SANITIZE_ADDRESS
301    size_t age() const { return m_size >> maxHeapObjectSizeLog2; }
302
303    NO_SANITIZE_ADDRESS
304    void incAge()
305    {
306        size_t current = age();
307        if (current < maxHeapObjectAge)
308            m_size = ((current + 1) << maxHeapObjectSizeLog2) | (m_size & ~heapObjectAgeMask);
309    }
310#endif
311
312protected:
313    volatile unsigned m_size;
314};
315
316// Our heap object layout is layered with the HeapObjectHeader closest
317// to the payload, this can be wrapped in a FinalizedObjectHeader if the
318// object is on the GeneralHeap and not on a specific TypedHeap.
319// Finally if the object is a large object (> blinkPageSize/2) then it is
320// wrapped with a LargeObjectHeader.
321//
322// Object memory layout:
323// [ LargeObjectHeader | ] [ FinalizedObjectHeader | ] HeapObjectHeader | payload
324// The [ ] notation denotes that the LargeObjectHeader and the FinalizedObjectHeader
325// are independently optional.
326class PLATFORM_EXPORT HeapObjectHeader : public BasicObjectHeader {
327public:
328    NO_SANITIZE_ADDRESS
329    explicit HeapObjectHeader(size_t encodedSize)
330        : BasicObjectHeader(encodedSize)
331#if ENABLE(ASSERT)
332        , m_magic(magic)
333#endif
334    { }
335
336    NO_SANITIZE_ADDRESS
337    HeapObjectHeader(size_t encodedSize, const GCInfo*)
338        : BasicObjectHeader(encodedSize)
339#if ENABLE(ASSERT)
340        , m_magic(magic)
341#endif
342    { }
343
344    inline void checkHeader() const;
345    inline bool isMarked() const;
346
347    inline void mark();
348    inline void unmark();
349
350    inline const GCInfo* gcInfo() { return 0; }
351
352    inline Address payload();
353    inline size_t payloadSize();
354    inline Address payloadEnd();
355
356    inline void setDeadMark();
357    inline void clearDeadMark();
358    inline bool hasDeadMark() const;
359
360    // Zap magic number with a new magic number that means there was once an
361    // object allocated here, but it was freed because nobody marked it during
362    // GC.
363    void zapMagic();
364
365    static void finalize(const GCInfo*, Address, size_t);
366    static HeapObjectHeader* fromPayload(const void*);
367
368    static const intptr_t magic = 0xc0de247;
369    static const intptr_t zappedMagic = 0xC0DEdead;
370    // The zap value for vtables should be < 4K to ensure it cannot be
371    // used for dispatch.
372    static const intptr_t zappedVTable = 0xd0d;
373
374private:
375#if ENABLE(ASSERT)
376    intptr_t m_magic;
377#endif
378};
379
380const size_t objectHeaderSize = sizeof(HeapObjectHeader);
381
382// Each object on the GeneralHeap needs to carry a pointer to its
383// own GCInfo structure for tracing and potential finalization.
384class PLATFORM_EXPORT FinalizedHeapObjectHeader : public HeapObjectHeader {
385public:
386    NO_SANITIZE_ADDRESS
387    FinalizedHeapObjectHeader(size_t encodedSize, const GCInfo* gcInfo)
388        : HeapObjectHeader(encodedSize)
389        , m_gcInfo(gcInfo)
390    {
391    }
392
393    inline Address payload();
394    inline size_t payloadSize();
395
396    NO_SANITIZE_ADDRESS
397    const GCInfo* gcInfo() { return m_gcInfo; }
398
399    NO_SANITIZE_ADDRESS
400    TraceCallback traceCallback() { return m_gcInfo->m_trace; }
401
402    void finalize();
403
404    NO_SANITIZE_ADDRESS
405    inline bool hasFinalizer() { return m_gcInfo->hasFinalizer(); }
406
407    static FinalizedHeapObjectHeader* fromPayload(const void*);
408
409    NO_SANITIZE_ADDRESS
410    bool hasVTable() { return m_gcInfo->hasVTable(); }
411
412private:
413    const GCInfo* m_gcInfo;
414};
415
416const size_t finalizedHeaderSize = sizeof(FinalizedHeapObjectHeader);
417
418class FreeListEntry : public HeapObjectHeader {
419public:
420    NO_SANITIZE_ADDRESS
421    explicit FreeListEntry(size_t size)
422        : HeapObjectHeader(freeListEncodedSize(size))
423        , m_next(0)
424    {
425#if ENABLE(ASSERT) && !defined(ADDRESS_SANITIZER)
426        // Zap free area with asterisks, aka 0x2a2a2a2a.
427        // For ASan don't zap since we keep accounting in the freelist entry.
428        for (size_t i = sizeof(*this); i < size; i++)
429            reinterpret_cast<Address>(this)[i] = freelistZapValue;
430        ASSERT(size >= objectHeaderSize);
431        zapMagic();
432#endif
433    }
434
435    Address address() { return reinterpret_cast<Address>(this); }
436
437    NO_SANITIZE_ADDRESS
438    void unlink(FreeListEntry** prevNext)
439    {
440        *prevNext = m_next;
441        m_next = 0;
442    }
443
444    NO_SANITIZE_ADDRESS
445    void link(FreeListEntry** prevNext)
446    {
447        m_next = *prevNext;
448        *prevNext = this;
449    }
450
451    NO_SANITIZE_ADDRESS
452    FreeListEntry* next() const { return m_next; }
453
454    NO_SANITIZE_ADDRESS
455    void append(FreeListEntry* next)
456    {
457        ASSERT(!m_next);
458        m_next = next;
459    }
460
461#if defined(ADDRESS_SANITIZER)
462    NO_SANITIZE_ADDRESS
463    bool shouldAddToFreeList()
464    {
465        // Init if not already magic.
466        if ((m_asanMagic & ~asanDeferMemoryReuseMask) != asanMagic) {
467            m_asanMagic = asanMagic | asanDeferMemoryReuseCount;
468            return false;
469        }
470        // Decrement if count part of asanMagic > 0.
471        if (m_asanMagic & asanDeferMemoryReuseMask)
472            m_asanMagic--;
473        return !(m_asanMagic & asanDeferMemoryReuseMask);
474    }
475#endif
476
477private:
478    FreeListEntry* m_next;
479#if defined(ADDRESS_SANITIZER)
480    unsigned m_asanMagic;
481#endif
482};
483
484// Representation of Blink heap pages.
485//
486// Pages are specialized on the type of header on the object they
487// contain. If a heap page only contains a certain type of object all
488// of the objects will have the same GCInfo pointer and therefore that
489// pointer can be stored in the HeapPage instead of in the header of
490// each object. In that case objects have only a HeapObjectHeader and
491// not a FinalizedHeapObjectHeader saving a word per object.
492template<typename Header>
493class HeapPage : public BaseHeapPage {
494public:
495    HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*);
496
497    void link(HeapPage**);
498    static void unlink(ThreadHeap<Header>*, HeapPage*, HeapPage**);
499
500    bool isEmpty();
501
502    // Returns true for the whole blinkPageSize page that the page is on, even
503    // for the header, and the unmapped guard page at the start. That ensures
504    // the result can be used to populate the negative page cache.
505    virtual bool contains(Address addr) OVERRIDE
506    {
507        Address blinkPageStart = roundToBlinkPageStart(address());
508        ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size.
509        return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize;
510    }
511
512    HeapPage* next() { return m_next; }
513
514    Address payload()
515    {
516        return address() + sizeof(*this) + headerPadding<Header>();
517    }
518
519    static size_t payloadSize()
520    {
521        return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask;
522    }
523
524    Address end() { return payload() + payloadSize(); }
525
526    void getStats(HeapStats&);
527    void clearLiveAndMarkDead();
528    void sweep(HeapStats*, ThreadHeap<Header>*);
529    void clearObjectStartBitMap();
530    void finalize(Header*);
531    virtual void checkAndMarkPointer(Visitor*, Address) OVERRIDE;
532#if ENABLE(GC_PROFILE_MARKING)
533    const GCInfo* findGCInfo(Address) OVERRIDE;
534#endif
535#if ENABLE(GC_PROFILE_HEAP)
536    virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
537#endif
538
539#if defined(ADDRESS_SANITIZER)
540    void poisonUnmarkedObjects();
541#endif
542    NO_SANITIZE_ADDRESS
543    virtual void markOrphaned()
544    {
545        // Zap the payload with a recognizable value to detect any incorrect
546        // cross thread pointer usage.
547#if defined(ADDRESS_SANITIZER)
548        // Don't use memset when running with ASan since this needs to zap
549        // poisoned memory as well and the NO_SANITIZE_ADDRESS annotation
550        // only works for code in this method and not for calls to memset.
551        for (Address current = payload(); current < payload() + payloadSize(); ++current)
552            *current = orphanedZapValue;
553#else
554        memset(payload(), orphanedZapValue, payloadSize());
555#endif
556        BaseHeapPage::markOrphaned();
557    }
558
559protected:
560    Header* findHeaderFromAddress(Address);
561    void populateObjectStartBitMap();
562    bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
563    TraceCallback traceCallback(Header*);
564    bool hasVTable(Header*);
565
566    intptr_t padding() const { return m_padding; }
567
568    HeapPage<Header>* m_next;
569    intptr_t m_padding; // Preserve 8-byte alignment on 32-bit systems.
570    bool m_objectStartBitMapComputed;
571    uint8_t m_objectStartBitMap[reservedForObjectBitMap];
572
573    friend class ThreadHeap<Header>;
574};
575
576class AddressEntry {
577public:
578    AddressEntry() : m_address(0) { }
579
580    explicit AddressEntry(Address address) : m_address(address) { }
581
582    Address address() const { return m_address; }
583
584private:
585    Address m_address;
586};
587
588class PositiveEntry : public AddressEntry {
589public:
590    PositiveEntry()
591        : AddressEntry()
592        , m_containingPage(0)
593    {
594    }
595
596    PositiveEntry(Address address, BaseHeapPage* containingPage)
597        : AddressEntry(address)
598        , m_containingPage(containingPage)
599    {
600    }
601
602    BaseHeapPage* result() const { return m_containingPage; }
603
604    typedef BaseHeapPage* LookupResult;
605
606private:
607    BaseHeapPage* m_containingPage;
608};
609
610class NegativeEntry : public AddressEntry {
611public:
612    NegativeEntry() : AddressEntry() { }
613
614    NegativeEntry(Address address, bool) : AddressEntry(address) { }
615
616    bool result() const { return true; }
617
618    typedef bool LookupResult;
619};
620
621// A HeapExtentCache provides a fast way of taking an arbitrary
622// pointer-sized word, and determining whether it can be interpreted
623// as a pointer to an area that is managed by the garbage collected
624// Blink heap. There is a cache of 'pages' that have previously been
625// determined to be wholly inside the heap. The size of these pages must be
626// smaller than the allocation alignment of the heap pages. We determine
627// on-heap-ness by rounding down the pointer to the nearest page and looking up
628// the page in the cache. If there is a miss in the cache we can ask the heap
629// to determine the status of the pointer by iterating over all of the heap.
630// The result is then cached in the two-way associative page cache.
631//
632// A HeapContainsCache is a positive cache. Therefore, it must be flushed when
633// memory is removed from the Blink heap. The HeapDoesNotContainCache is a
634// negative cache, so it must be flushed when memory is added to the heap.
635template<typename Entry>
636class HeapExtentCache {
637public:
638    HeapExtentCache()
639        : m_entries(adoptArrayPtr(new Entry[HeapExtentCache::numberOfEntries]))
640        , m_hasEntries(false)
641    {
642    }
643
644    void flush();
645    bool contains(Address);
646    bool isEmpty() { return !m_hasEntries; }
647
648    // Perform a lookup in the cache.
649    //
650    // If lookup returns null/false the argument address was not found in
651    // the cache and it is unknown if the address is in the Blink
652    // heap.
653    //
654    // If lookup returns true/a page, the argument address was found in the
655    // cache. For the HeapContainsCache this means the address is in the heap.
656    // For the HeapDoesNotContainCache this means the address is not in the
657    // heap.
658    PLATFORM_EXPORT typename Entry::LookupResult lookup(Address);
659
660    // Add an entry to the cache.
661    PLATFORM_EXPORT void addEntry(Address, typename Entry::LookupResult);
662
663private:
664    static const int numberOfEntriesLog2 = 12;
665    static const int numberOfEntries = 1 << numberOfEntriesLog2;
666
667    static size_t hash(Address);
668
669    WTF::OwnPtr<Entry[]> m_entries;
670    bool m_hasEntries;
671
672    friend class ThreadState;
673};
674
675// Normally these would be typedefs instead of subclasses, but that makes them
676// very hard to forward declare.
677class HeapContainsCache : public HeapExtentCache<PositiveEntry> {
678public:
679    BaseHeapPage* lookup(Address);
680    void addEntry(Address, BaseHeapPage*);
681};
682
683class HeapDoesNotContainCache : public HeapExtentCache<NegativeEntry> { };
684
685// FIXME: This is currently used by the WebAudio code.
686// We should attempt to restructure the WebAudio code so that the main thread
687// alone determines life-time and receives messages about life-time from the
688// audio thread.
689template<typename T>
690class ThreadSafeRefCountedGarbageCollected : public GarbageCollectedFinalized<T>, public WTF::ThreadSafeRefCountedBase {
691    WTF_MAKE_NONCOPYABLE(ThreadSafeRefCountedGarbageCollected);
692
693public:
694    ThreadSafeRefCountedGarbageCollected()
695    {
696        makeKeepAlive();
697    }
698
699    // Override ref to deal with a case where a reference count goes up
700    // from 0 to 1. This can happen in the following scenario:
701    // (1) The reference count becomes 0, but on-stack pointers keep references to the object.
702    // (2) The on-stack pointer is assigned to a RefPtr. The reference count becomes 1.
703    // In this case, we have to resurrect m_keepAlive.
704    void ref()
705    {
706        MutexLocker lock(m_mutex);
707        if (UNLIKELY(!refCount())) {
708            makeKeepAlive();
709        }
710        WTF::ThreadSafeRefCountedBase::ref();
711    }
712
713    // Override deref to deal with our own deallocation based on ref counting.
714    void deref()
715    {
716        MutexLocker lock(m_mutex);
717        if (derefBase()) {
718            ASSERT(m_keepAlive);
719            m_keepAlive.clear();
720        }
721    }
722
723    using GarbageCollectedFinalized<T>::operator new;
724    using GarbageCollectedFinalized<T>::operator delete;
725
726protected:
727    ~ThreadSafeRefCountedGarbageCollected() { }
728
729private:
730    void makeKeepAlive()
731    {
732        ASSERT(!m_keepAlive);
733        m_keepAlive = adoptPtr(new CrossThreadPersistent<T>(static_cast<T*>(this)));
734    }
735
736    OwnPtr<CrossThreadPersistent<T> > m_keepAlive;
737    mutable Mutex m_mutex;
738};
739
740template<typename DataType>
741class PagePool {
742protected:
743    PagePool();
744
745    class PoolEntry {
746    public:
747        PoolEntry(DataType* data, PoolEntry* next)
748            : data(data)
749            , next(next)
750        { }
751
752        DataType* data;
753        PoolEntry* next;
754    };
755
756    PoolEntry* m_pool[NumberOfHeaps];
757};
758
759// Once pages have been used for one type of thread heap they will never be
760// reused for another type of thread heap. Instead of unmapping, we add the
761// pages to a pool of pages to be reused later by a thread heap of the same
762// type. This is done as a security feature to avoid type confusion. The
763// heaps are type segregated by having separate thread heaps for different
764// types of objects. Holding on to pages ensures that the same virtual address
765// space cannot be used for objects of another type than the type contained
766// in this page to begin with.
767class FreePagePool : public PagePool<PageMemory> {
768public:
769    ~FreePagePool();
770    void addFreePage(int, PageMemory*);
771    PageMemory* takeFreePage(int);
772
773private:
774    Mutex m_mutex[NumberOfHeaps];
775};
776
777class OrphanedPagePool : public PagePool<BaseHeapPage> {
778public:
779    ~OrphanedPagePool();
780    void addOrphanedPage(int, BaseHeapPage*);
781    void decommitOrphanedPages();
782#if ENABLE(ASSERT)
783    bool contains(void*);
784#endif
785private:
786    void clearMemory(PageMemory*);
787};
788
789// Non-template super class used to pass a heap around to other classes.
790class BaseHeap {
791public:
792    virtual ~BaseHeap() { }
793    virtual void cleanupPages() = 0;
794
795    // Find the page in this thread heap containing the given
796    // address. Returns 0 if the address is not contained in any
797    // page in this thread heap.
798    virtual BaseHeapPage* heapPageFromAddress(Address) = 0;
799
800#if ENABLE(GC_PROFILE_MARKING)
801    virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) = 0;
802#endif
803
804#if ENABLE(GC_PROFILE_HEAP)
805    virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0;
806#endif
807
808    // Sweep this part of the Blink heap. This finalizes dead objects
809    // and builds freelists for all the unused memory.
810    virtual void sweep(HeapStats*) = 0;
811    virtual void postSweepProcessing() = 0;
812
813    virtual void clearFreeLists() = 0;
814    virtual void clearLiveAndMarkDead() = 0;
815
816    virtual void makeConsistentForSweeping() = 0;
817
818#if ENABLE(ASSERT)
819    virtual bool isConsistentForSweeping() = 0;
820
821    virtual void getScannedStats(HeapStats&) = 0;
822#endif
823
824    virtual void prepareHeapForTermination() = 0;
825
826    virtual int normalPageCount() = 0;
827
828    virtual BaseHeap* split(int normalPages) = 0;
829    virtual void merge(BaseHeap* other) = 0;
830
831    // Returns a bucket number for inserting a FreeListEntry of a
832    // given size. All FreeListEntries in the given bucket, n, have
833    // size >= 2^n.
834    static int bucketIndexForSize(size_t);
835};
836
837// Thread heaps represent a part of the per-thread Blink heap.
838//
839// Each Blink thread has a number of thread heaps: one general heap
840// that contains any type of object and a number of heaps specialized
841// for specific object types (such as Node).
842//
843// Each thread heap contains the functionality to allocate new objects
844// (potentially adding new pages to the heap), to find and mark
845// objects during conservative stack scanning and to sweep the set of
846// pages after a GC.
847template<typename Header>
848class ThreadHeap : public BaseHeap {
849public:
850    ThreadHeap(ThreadState*, int);
851    virtual ~ThreadHeap();
852    virtual void cleanupPages();
853
854    virtual BaseHeapPage* heapPageFromAddress(Address);
855#if ENABLE(GC_PROFILE_MARKING)
856    virtual const GCInfo* findGCInfoOfLargeHeapObject(Address);
857#endif
858#if ENABLE(GC_PROFILE_HEAP)
859    virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
860#endif
861
862    virtual void sweep(HeapStats*);
863    virtual void postSweepProcessing();
864
865    virtual void clearFreeLists();
866    virtual void clearLiveAndMarkDead();
867
868    virtual void makeConsistentForSweeping();
869
870#if ENABLE(ASSERT)
871    virtual bool isConsistentForSweeping();
872
873    virtual void getScannedStats(HeapStats&);
874#endif
875
876    ThreadState* threadState() { return m_threadState; }
877    HeapStats& stats() { return m_threadState->stats(); }
878    void flushHeapContainsCache()
879    {
880        m_threadState->heapContainsCache()->flush();
881    }
882
883    inline Address allocate(size_t, const GCInfo*);
884    void addToFreeList(Address, size_t);
885    inline static size_t roundedAllocationSize(size_t size)
886    {
887        return allocationSizeFromSize(size) - sizeof(Header);
888    }
889
890    virtual void prepareHeapForTermination();
891
892    virtual int normalPageCount() { return m_numberOfNormalPages; }
893
894    virtual BaseHeap* split(int numberOfNormalPages);
895    virtual void merge(BaseHeap* splitOffBase);
896
897    void removePageFromHeap(HeapPage<Header>*);
898
899    PLATFORM_EXPORT void promptlyFreeObject(Header*);
900
901private:
902    void addPageToHeap(const GCInfo*);
903    PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*);
904    static size_t allocationSizeFromSize(size_t);
905    PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
906    Address currentAllocationPoint() const { return m_currentAllocationPoint; }
907    size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
908    bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
909    void setAllocationPoint(Address point, size_t size)
910    {
911        ASSERT(!point || heapPageFromAddress(point));
912        ASSERT(size <= HeapPage<Header>::payloadSize());
913        m_currentAllocationPoint = point;
914        m_remainingAllocationSize = size;
915    }
916    void ensureCurrentAllocation(size_t, const GCInfo*);
917    bool allocateFromFreeList(size_t);
918
919    void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**);
920    void allocatePage(const GCInfo*);
921
922#if ENABLE(ASSERT)
923    bool pagesToBeSweptContains(Address);
924    bool pagesAllocatedDuringSweepingContains(Address);
925#endif
926
927    void sweepNormalPages(HeapStats*);
928    void sweepLargePages(HeapStats*);
929    bool coalesce(size_t);
930
931    Address m_currentAllocationPoint;
932    size_t m_remainingAllocationSize;
933
934    HeapPage<Header>* m_firstPage;
935    LargeHeapObject<Header>* m_firstLargeHeapObject;
936
937    HeapPage<Header>* m_firstPageAllocatedDuringSweeping;
938    HeapPage<Header>* m_lastPageAllocatedDuringSweeping;
939
940    // Merge point for parallel sweep.
941    HeapPage<Header>* m_mergePoint;
942
943    int m_biggestFreeListIndex;
944
945    ThreadState* m_threadState;
946
947    // All FreeListEntries in the nth list have size >= 2^n.
948    FreeListEntry* m_freeLists[blinkPageSizeLog2];
949    FreeListEntry* m_lastFreeListEntries[blinkPageSizeLog2];
950
951    // Index into the page pools. This is used to ensure that the pages of the
952    // same type go into the correct page pool and thus avoid type confusion.
953    int m_index;
954
955    int m_numberOfNormalPages;
956
957    // The promptly freed count contains the number of promptly freed objects
958    // since the last sweep or since it was manually reset to delay coalescing.
959    size_t m_promptlyFreedCount;
960};
961
962class PLATFORM_EXPORT Heap {
963public:
964    static void init();
965    static void shutdown();
966    static void doShutdown();
967
968    static BaseHeapPage* contains(Address);
969    static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
970    static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }
971#if ENABLE(ASSERT)
972    static bool containedInHeapOrOrphanedPage(void*);
973#endif
974
975    // Push a trace callback on the marking stack.
976    static void pushTraceCallback(CallbackStack*, void* containerObject, TraceCallback);
977
978    // Push a trace callback on the post-marking callback stack. These callbacks
979    // are called after normal marking (including ephemeron iteration).
980    static void pushPostMarkingCallback(void*, TraceCallback);
981
982    // Add a weak pointer callback to the weak callback work list. General
983    // object pointer callbacks are added to a thread local weak callback work
984    // list and the callback is called on the thread that owns the object, with
985    // the closure pointer as an argument. Most of the time, the closure and
986    // the containerObject can be the same thing, but the containerObject is
987    // constrained to be on the heap, since the heap is used to identify the
988    // correct thread.
989    static void pushWeakObjectPointerCallback(void* closure, void* containerObject, WeakPointerCallback);
990
991    // Similar to the more general pushWeakObjectPointerCallback, but cell
992    // pointer callbacks are added to a static callback work list and the weak
993    // callback is performed on the thread performing garbage collection. This
994    // is OK because cells are just cleared and no deallocation can happen.
995    static void pushWeakCellPointerCallback(void** cell, WeakPointerCallback);
996
997    // Pop the top of a marking stack and call the callback with the visitor
998    // and the object. Returns false when there is nothing more to do.
999    template<CallbackInvocationMode Mode> static bool popAndInvokeTraceCallback(CallbackStack*, Visitor*);
1000
1001    // Remove an item from the post-marking callback stack and call
1002    // the callback with the visitor and the object pointer. Returns
1003    // false when there is nothing more to do.
1004    static bool popAndInvokePostMarkingCallback(Visitor*);
1005
1006    // Remove an item from the weak callback work list and call the callback
1007    // with the visitor and the closure pointer. Returns false when there is
1008    // nothing more to do.
1009    static bool popAndInvokeWeakPointerCallback(Visitor*);
1010
1011    // Register an ephemeron table for fixed-point iteration.
1012    static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback);
1013#if ENABLE(ASSERT)
1014    static bool weakTableRegistered(const void*);
1015#endif
1016
1017    template<typename T, typename HeapTraits> static Address allocate(size_t);
1018    // FIXME: remove this once c++11 is allowed everywhere:
1019    template<typename T> static Address allocate(size_t);
1020
1021    template<typename T> static Address reallocate(void* previous, size_t);
1022
1023    static void collectGarbage(ThreadState::StackState, ThreadState::CauseOfGC = ThreadState::NormalGC);
1024    static void collectGarbageForTerminatingThread(ThreadState*);
1025    static void collectAllGarbage();
1026    static void processMarkingStackEntries(int* numberOfMarkingThreads);
1027    static void processMarkingStackOnMultipleThreads();
1028    static void processMarkingStackInParallel();
1029    template<CallbackInvocationMode Mode> static void processMarkingStack();
1030    static void postMarkingProcessing();
1031    static void globalWeakProcessing();
1032    static void setForcePreciseGCForTesting();
1033
1034    static void prepareForGC();
1035
1036    // Conservatively checks whether an address is a pointer in any of the thread
1037    // heaps. If so marks the object pointed to as live.
1038    static Address checkAndMarkPointer(Visitor*, Address);
1039
1040#if ENABLE(GC_PROFILE_MARKING)
1041    // Dump the path to specified object on the next GC. This method is to be invoked from GDB.
1042    static void dumpPathToObjectOnNextGC(void* p);
1043
1044    // Forcibly find GCInfo of the object at Address.
1045    // This is slow and should only be used for debug purposes.
1046    // It involves finding the heap page and scanning the heap page for an object header.
1047    static const GCInfo* findGCInfo(Address);
1048
1049    static String createBacktraceString();
1050#endif
1051
1052    // Collect heap stats for all threads attached to the Blink
1053    // garbage collector. Should only be called during garbage
1054    // collection where threads are known to be at safe points.
1055    static void getStats(HeapStats*);
1056
1057    static void getHeapSpaceSize(uint64_t*, uint64_t*);
1058
1059    static void makeConsistentForSweeping();
1060
1061#if ENABLE(ASSERT)
1062    static bool isConsistentForSweeping();
1063#endif
1064
1065    static void flushHeapDoesNotContainCache();
1066    static bool heapDoesNotContainCacheIsEmpty() { return s_heapDoesNotContainCache->isEmpty(); }
1067
1068    // Return true if the last GC found a pointer into a heap page
1069    // during conservative scanning.
1070    static bool lastGCWasConservative() { return s_lastGCWasConservative; }
1071
1072    static FreePagePool* freePagePool() { return s_freePagePool; }
1073    static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; }
1074
1075private:
1076    static Visitor* s_markingVisitor;
1077    static Vector<OwnPtr<blink::WebThread> >* s_markingThreads;
1078    static CallbackStack* s_markingStack;
1079    static CallbackStack* s_postMarkingCallbackStack;
1080    static CallbackStack* s_weakCallbackStack;
1081    static CallbackStack* s_ephemeronStack;
1082    static HeapDoesNotContainCache* s_heapDoesNotContainCache;
1083    static bool s_shutdownCalled;
1084    static bool s_lastGCWasConservative;
1085    static FreePagePool* s_freePagePool;
1086    static OrphanedPagePool* s_orphanedPagePool;
1087    friend class ThreadState;
1088};
1089
1090// The NoAllocationScope class is used in debug mode to catch unwanted
1091// allocations. E.g. allocations during GC.
1092template<ThreadAffinity Affinity>
1093class NoAllocationScope {
1094public:
1095    NoAllocationScope() : m_active(true) { enter(); }
1096
1097    explicit NoAllocationScope(bool active) : m_active(active) { enter(); }
1098
1099    NoAllocationScope(const NoAllocationScope& other) : m_active(other.m_active) { enter(); }
1100
1101    NoAllocationScope& operator=(const NoAllocationScope& other)
1102    {
1103        release();
1104        m_active = other.m_active;
1105        enter();
1106        return *this;
1107    }
1108
1109    ~NoAllocationScope() { release(); }
1110
1111    void release()
1112    {
1113        if (m_active) {
1114            ThreadStateFor<Affinity>::state()->leaveNoAllocationScope();
1115            m_active = false;
1116        }
1117    }
1118
1119private:
1120    void enter() const
1121    {
1122        if (m_active)
1123            ThreadStateFor<Affinity>::state()->enterNoAllocationScope();
1124    }
1125
1126    bool m_active;
1127};
1128
1129// Base class for objects allocated in the Blink garbage-collected
1130// heap.
1131//
1132// Defines a 'new' operator that allocates the memory in the
1133// heap. 'delete' should not be called on objects that inherit from
1134// GarbageCollected.
1135//
1136// Instances of GarbageCollected will *NOT* get finalized. Their
1137// destructor will not be called. Therefore, only classes that have
1138// trivial destructors with no semantic meaning (including all their
1139// subclasses) should inherit from GarbageCollected. If there are
1140// non-trival destructors in a given class or any of its subclasses,
1141// GarbageCollectedFinalized should be used which guarantees that the
1142// destructor is called on an instance when the garbage collector
1143// determines that it is no longer reachable.
1144template<typename T>
1145class GarbageCollected {
1146    WTF_MAKE_NONCOPYABLE(GarbageCollected);
1147
1148    // For now direct allocation of arrays on the heap is not allowed.
1149    void* operator new[](size_t size);
1150#if OS(WIN) && COMPILER(MSVC)
1151    // Due to some quirkiness in the MSVC compiler we have to provide
1152    // the delete[] operator in the GarbageCollected subclasses as it
1153    // is called when a class is exported in a DLL.
1154protected:
1155    void operator delete[](void* p)
1156    {
1157        ASSERT_NOT_REACHED();
1158    }
1159#else
1160    void operator delete[](void* p);
1161#endif
1162public:
1163    typedef T GarbageCollectedBase;
1164
1165    void* operator new(size_t size)
1166    {
1167        return Heap::allocate<T>(size);
1168    }
1169
1170    void operator delete(void* p)
1171    {
1172        ASSERT_NOT_REACHED();
1173    }
1174
1175protected:
1176    GarbageCollected()
1177    {
1178    }
1179};
1180
1181// Base class for objects allocated in the Blink garbage-collected
1182// heap.
1183//
1184// Defines a 'new' operator that allocates the memory in the
1185// heap. 'delete' should not be called on objects that inherit from
1186// GarbageCollected.
1187//
1188// Instances of GarbageCollectedFinalized will have their destructor
1189// called when the garbage collector determines that the object is no
1190// longer reachable.
1191template<typename T>
1192class GarbageCollectedFinalized : public GarbageCollected<T> {
1193    WTF_MAKE_NONCOPYABLE(GarbageCollectedFinalized);
1194
1195protected:
1196    // finalizeGarbageCollectedObject is called when the object is
1197    // freed from the heap. By default finalization means calling the
1198    // destructor on the object. finalizeGarbageCollectedObject can be
1199    // overridden to support calling the destructor of a
1200    // subclass. This is useful for objects without vtables that
1201    // require explicit dispatching. The name is intentionally a bit
1202    // long to make name conflicts less likely.
1203    void finalizeGarbageCollectedObject()
1204    {
1205        static_cast<T*>(this)->~T();
1206    }
1207
1208    GarbageCollectedFinalized() { }
1209    ~GarbageCollectedFinalized() { }
1210
1211    template<typename U> friend struct HasFinalizer;
1212    template<typename U, bool> friend struct FinalizerTraitImpl;
1213};
1214
1215// Base class for objects that are in the Blink garbage-collected heap
1216// and are still reference counted.
1217//
1218// This class should be used sparingly and only to gradually move
1219// objects from being reference counted to being managed by the blink
1220// garbage collector.
1221//
1222// While the current reference counting keeps one of these objects
1223// alive it will have a Persistent handle to itself allocated so we
1224// will not reclaim the memory. When the reference count reaches 0 the
1225// persistent handle will be deleted. When the garbage collector
1226// determines that there are no other references to the object it will
1227// be reclaimed and the destructor of the reclaimed object will be
1228// called at that time.
1229template<typename T>
1230class RefCountedGarbageCollected : public GarbageCollectedFinalized<T> {
1231    WTF_MAKE_NONCOPYABLE(RefCountedGarbageCollected);
1232
1233public:
1234    RefCountedGarbageCollected()
1235        : m_refCount(1)
1236    {
1237        makeKeepAlive();
1238    }
1239
1240    // Implement method to increase reference count for use with
1241    // RefPtrs.
1242    //
1243    // In contrast to the normal WTF::RefCounted, the reference count
1244    // can reach 0 and increase again. This happens in the following
1245    // scenario:
1246    //
1247    // (1) The reference count becomes 0, but members, persistents, or
1248    //     on-stack pointers keep references to the object.
1249    //
1250    // (2) The pointer is assigned to a RefPtr again and the reference
1251    //     count becomes 1.
1252    //
1253    // In this case, we have to resurrect m_keepAlive.
1254    void ref()
1255    {
1256        if (UNLIKELY(!m_refCount)) {
1257            ASSERT(ThreadStateFor<ThreadingTrait<T>::Affinity>::state()->contains(reinterpret_cast<Address>(this)));
1258            makeKeepAlive();
1259        }
1260        ++m_refCount;
1261    }
1262
1263    // Implement method to decrease reference count for use with
1264    // RefPtrs.
1265    //
1266    // In contrast to the normal WTF::RefCounted implementation, the
1267    // object itself is not deleted when the reference count reaches
1268    // 0. Instead, the keep-alive persistent handle is deallocated so
1269    // that the object can be reclaimed when the garbage collector
1270    // determines that there are no other references to the object.
1271    void deref()
1272    {
1273        ASSERT(m_refCount > 0);
1274        if (!--m_refCount) {
1275            delete m_keepAlive;
1276            m_keepAlive = 0;
1277        }
1278    }
1279
1280    bool hasOneRef()
1281    {
1282        return m_refCount == 1;
1283    }
1284
1285protected:
1286    ~RefCountedGarbageCollected() { }
1287
1288private:
1289    void makeKeepAlive()
1290    {
1291        ASSERT(!m_keepAlive);
1292        m_keepAlive = new Persistent<T>(static_cast<T*>(this));
1293    }
1294
1295    int m_refCount;
1296    Persistent<T>* m_keepAlive;
1297};
1298
1299template<typename T>
1300T* adoptRefCountedGarbageCollected(T* ptr)
1301{
1302    ASSERT(ptr->hasOneRef());
1303    ptr->deref();
1304    WTF::adopted(ptr);
1305    return ptr;
1306}
1307
1308// Classes that contain heap references but aren't themselves heap
1309// allocated, have some extra macros available which allows their use
1310// to be restricted to cases where the garbage collector is able
1311// to discover their heap references.
1312//
1313// STACK_ALLOCATED(): Use if the object is only stack allocated. Heap objects
1314// should be in Members but you do not need the trace method as they are on
1315// the stack. (Down the line these might turn in to raw pointers, but for
1316// now Members indicates that we have thought about them and explicitly
1317// taken care of them.)
1318//
1319// DISALLOW_ALLOCATION(): Cannot be allocated with new operators but can
1320// be a part object. If it has Members you need a trace method and the
1321// containing object needs to call that trace method.
1322//
1323// ALLOW_ONLY_INLINE_ALLOCATION(): Allows only placement new operator.
1324// This disallows general allocation of this object but allows to put
1325// the object as a value object in collections. If these have Members you
1326// need to have a trace method. That trace method will be called
1327// automatically by the Heap collections.
1328//
1329#if COMPILER_SUPPORTS(CXX_DELETED_FUNCTIONS)
1330#define DISALLOW_ALLOCATION()                                   \
1331    private:                                                    \
1332        void* operator new(size_t) = delete;                    \
1333        void* operator new(size_t, NotNullTag, void*) = delete; \
1334        void* operator new(size_t, void*) = delete;
1335
1336#define ALLOW_ONLY_INLINE_ALLOCATION()                                              \
1337    public:                                                                         \
1338        void* operator new(size_t, NotNullTag, void* location) { return location; } \
1339        void* operator new(size_t, void* location) { return location; }             \
1340    private:                                                                        \
1341        void* operator new(size_t) = delete;
1342
1343#define STATIC_ONLY(Type) \
1344    private:              \
1345        Type() = delete;
1346
1347#else
1348
1349#define DISALLOW_ALLOCATION()                          \
1350    private:                                           \
1351        void* operator new(size_t);                    \
1352        void* operator new(size_t, NotNullTag, void*); \
1353        void* operator new(size_t, void*)
1354
1355#define ALLOW_ONLY_INLINE_ALLOCATION()                                              \
1356    public:                                                                         \
1357        void* operator new(size_t, NotNullTag, void* location) { return location; } \
1358        void* operator new(size_t, void* location) { return location; }             \
1359    private:                                                                        \
1360        void* operator new(size_t);
1361
1362#define STATIC_ONLY(Type)  \
1363    private:               \
1364        Type();
1365
1366#endif
1367
1368
1369// These macros insert annotations that the Blink GC plugin for clang uses for
1370// verification. STACK_ALLOCATED is used to declare that objects of this type
1371// are always stack allocated. GC_PLUGIN_IGNORE is used to make the plugin
1372// ignore a particular class or field when checking for proper usage. When using
1373// GC_PLUGIN_IGNORE a bug-number should be provided as an argument where the
1374// bug describes what needs to happen to remove the GC_PLUGIN_IGNORE again.
1375#if COMPILER(CLANG)
1376#define STACK_ALLOCATED()                                       \
1377    private:                                                    \
1378        __attribute__((annotate("blink_stack_allocated")))      \
1379        void* operator new(size_t) = delete;                    \
1380        void* operator new(size_t, NotNullTag, void*) = delete; \
1381        void* operator new(size_t, void*) = delete;
1382
1383#define GC_PLUGIN_IGNORE(bug)                           \
1384    __attribute__((annotate("blink_gc_plugin_ignore")))
1385#else
1386#define STACK_ALLOCATED() DISALLOW_ALLOCATION()
1387#define GC_PLUGIN_IGNORE(bug)
1388#endif
1389
1390NO_SANITIZE_ADDRESS
1391void HeapObjectHeader::checkHeader() const
1392{
1393#if ENABLE(ASSERT)
1394    BaseHeapPage* page = pageHeaderFromObject(this);
1395    ASSERT(page->orphaned() || m_magic == magic);
1396#endif
1397}
1398
1399Address HeapObjectHeader::payload()
1400{
1401    return reinterpret_cast<Address>(this) + objectHeaderSize;
1402}
1403
1404size_t HeapObjectHeader::payloadSize()
1405{
1406    return size() - objectHeaderSize;
1407}
1408
1409Address HeapObjectHeader::payloadEnd()
1410{
1411    return reinterpret_cast<Address>(this) + size();
1412}
1413
1414NO_SANITIZE_ADDRESS
1415void HeapObjectHeader::mark()
1416{
1417    checkHeader();
1418    // The use of atomic ops guarantees that the reads and writes are
1419    // atomic and that no memory operation reorderings take place.
1420    // Multiple threads can still read the old value and all store the
1421    // new value. However, the new value will be the same for all of
1422    // the threads and the end result is therefore consistent.
1423    unsigned size = asanUnsafeAcquireLoad(&m_size);
1424    asanUnsafeReleaseStore(&m_size, size | markBitMask);
1425}
1426
1427Address FinalizedHeapObjectHeader::payload()
1428{
1429    return reinterpret_cast<Address>(this) + finalizedHeaderSize;
1430}
1431
1432size_t FinalizedHeapObjectHeader::payloadSize()
1433{
1434    return size() - finalizedHeaderSize;
1435}
1436
1437template<typename Header>
1438size_t ThreadHeap<Header>::allocationSizeFromSize(size_t size)
1439{
1440    // Check the size before computing the actual allocation size. The
1441    // allocation size calculation can overflow for large sizes and
1442    // the check therefore has to happen before any calculation on the
1443    // size.
1444    RELEASE_ASSERT(size < maxHeapObjectSize);
1445
1446    // Add space for header.
1447    size_t allocationSize = size + sizeof(Header);
1448    // Align size with allocation granularity.
1449    allocationSize = (allocationSize + allocationMask) & ~allocationMask;
1450    return allocationSize;
1451}
1452
1453template<typename Header>
1454Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo)
1455{
1456    size_t allocationSize = allocationSizeFromSize(size);
1457    bool isLargeObject = allocationSize > blinkPageSize / 2;
1458    if (isLargeObject)
1459        return allocateLargeObject(allocationSize, gcInfo);
1460    if (m_remainingAllocationSize < allocationSize)
1461        return outOfLineAllocate(size, gcInfo);
1462    Address headerAddress = m_currentAllocationPoint;
1463    m_currentAllocationPoint += allocationSize;
1464    m_remainingAllocationSize -= allocationSize;
1465    Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo);
1466    size_t payloadSize = allocationSize - sizeof(Header);
1467    stats().increaseObjectSpace(payloadSize);
1468    Address result = headerAddress + sizeof(*header);
1469    ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
1470    // Unpoison the memory used for the object (payload).
1471    ASAN_UNPOISON_MEMORY_REGION(result, payloadSize);
1472#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
1473    memset(result, 0, payloadSize);
1474#endif
1475    ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1));
1476    return result;
1477}
1478
1479template<typename T, typename HeapTraits>
1480Address Heap::allocate(size_t size)
1481{
1482    ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
1483    ASSERT(state->isAllocationAllowed());
1484    const GCInfo* gcInfo = GCInfoTrait<T>::get();
1485    int heapIndex = HeapTraits::index(gcInfo->hasFinalizer());
1486    BaseHeap* heap = state->heap(heapIndex);
1487    return static_cast<typename HeapTraits::HeapType*>(heap)->allocate(size, gcInfo);
1488}
1489
1490template<typename T>
1491Address Heap::allocate(size_t size)
1492{
1493    return allocate<T, HeapTypeTrait<T> >(size);
1494}
1495
1496template<typename T>
1497Address Heap::reallocate(void* previous, size_t size)
1498{
1499    if (!size) {
1500        // If the new size is 0 this is equivalent to either
1501        // free(previous) or malloc(0). In both cases we do
1502        // nothing and return 0.
1503        return 0;
1504    }
1505    ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
1506    ASSERT(state->isAllocationAllowed());
1507    const GCInfo* gcInfo = GCInfoTrait<T>::get();
1508    int heapIndex = HeapTypeTrait<T>::index(gcInfo->hasFinalizer());
1509    // FIXME: Currently only supports raw allocation on the
1510    // GeneralHeap. Hence we assume the header is a
1511    // FinalizedHeapObjectHeader.
1512    ASSERT(heapIndex == GeneralHeap || heapIndex == GeneralHeapNonFinalized);
1513    BaseHeap* heap = state->heap(heapIndex);
1514    Address address = static_cast<typename HeapTypeTrait<T>::HeapType*>(heap)->allocate(size, gcInfo);
1515    if (!previous) {
1516        // This is equivalent to malloc(size).
1517        return address;
1518    }
1519    FinalizedHeapObjectHeader* previousHeader = FinalizedHeapObjectHeader::fromPayload(previous);
1520    ASSERT(!previousHeader->hasFinalizer());
1521    ASSERT(previousHeader->gcInfo() == gcInfo);
1522    size_t copySize = previousHeader->payloadSize();
1523    if (copySize > size)
1524        copySize = size;
1525    memcpy(address, previous, copySize);
1526    return address;
1527}
1528
1529class HeapAllocatorQuantizer {
1530public:
1531    template<typename T>
1532    static size_t quantizedSize(size_t count)
1533    {
1534        RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T));
1535        return HeapIndexTrait<CollectionBackingHeap>::HeapType::roundedAllocationSize(count * sizeof(T));
1536    }
1537    static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize;
1538};
1539
1540// This is a static-only class used as a trait on collections to make them heap allocated.
1541// However see also HeapListHashSetAllocator.
1542class HeapAllocator {
1543public:
1544    typedef HeapAllocatorQuantizer Quantizer;
1545    typedef blink::Visitor Visitor;
1546    static const bool isGarbageCollected = true;
1547
1548    template <typename Return, typename Metadata>
1549    static Return backingMalloc(size_t size)
1550    {
1551        return reinterpret_cast<Return>(Heap::allocate<Metadata, HeapIndexTrait<CollectionBackingHeap> >(size));
1552    }
1553    template <typename Return, typename Metadata>
1554    static Return zeroedBackingMalloc(size_t size)
1555    {
1556        return backingMalloc<Return, Metadata>(size);
1557    }
1558    template <typename Return, typename Metadata>
1559    static Return malloc(size_t size)
1560    {
1561        return reinterpret_cast<Return>(Heap::allocate<Metadata>(size));
1562    }
1563    PLATFORM_EXPORT static void backingFree(void* address);
1564
1565    static void free(void* address) { }
1566    template<typename T>
1567    static void* newArray(size_t bytes)
1568    {
1569        ASSERT_NOT_REACHED();
1570        return 0;
1571    }
1572
1573    static void deleteArray(void* ptr)
1574    {
1575        ASSERT_NOT_REACHED();
1576    }
1577
1578    static bool isAllocationAllowed()
1579    {
1580        return ThreadState::current()->isAllocationAllowed();
1581    }
1582
1583    static void markUsingGCInfo(Visitor* visitor, const void* buffer)
1584    {
1585        visitor->mark(buffer, FinalizedHeapObjectHeader::fromPayload(buffer)->traceCallback());
1586    }
1587
1588    static void markNoTracing(Visitor* visitor, const void* t) { visitor->markNoTracing(t); }
1589
1590    template<typename T, typename Traits>
1591    static void trace(Visitor* visitor, T& t)
1592    {
1593        CollectionBackingTraceTrait<WTF::ShouldBeTraced<Traits>::value, Traits::weakHandlingFlag, WTF::WeakPointersActWeak, T, Traits>::trace(visitor, t);
1594    }
1595
1596    static void registerDelayedMarkNoTracing(Visitor* visitor, const void* object)
1597    {
1598        visitor->registerDelayedMarkNoTracing(object);
1599    }
1600
1601    static void registerWeakMembers(Visitor* visitor, const void* closure, const void* object, WeakPointerCallback callback)
1602    {
1603        visitor->registerWeakMembers(closure, object, callback);
1604    }
1605
1606    static void registerWeakTable(Visitor* visitor, const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
1607    {
1608        visitor->registerWeakTable(closure, iterationCallback, iterationDoneCallback);
1609    }
1610
1611#if ENABLE(ASSERT)
1612    static bool weakTableRegistered(Visitor* visitor, const void* closure)
1613    {
1614        return visitor->weakTableRegistered(closure);
1615    }
1616#endif
1617
1618    template<typename T>
1619    struct ResultType {
1620        typedef T* Type;
1621    };
1622
1623    // The WTF classes use Allocator::VectorBackingHelper in order to find a
1624    // class to template their backing allocation operation on. For off-heap
1625    // allocations the VectorBackingHelper is a dummy class, since the class is
1626    // not used during allocation of backing. For on-heap allocations this
1627    // typedef ensures that the allocation is done with the correct templated
1628    // instantiation of the allocation function. This ensures the correct GC
1629    // map is written when backing allocations take place.
1630    template<typename T, typename Traits>
1631    struct VectorBackingHelper {
1632        typedef HeapVectorBacking<T, Traits> Type;
1633    };
1634
1635    // Like the VectorBackingHelper, but this type is used for HashSet and
1636    // HashMap, both of which are implemented using HashTable.
1637    template<typename Table>
1638    struct HashTableBackingHelper {
1639        typedef HeapHashTableBacking<Table> Type;
1640    };
1641
1642    template<typename T>
1643    struct OtherType {
1644        typedef T* Type;
1645    };
1646
1647    template<typename T>
1648    static T& getOther(T* other)
1649    {
1650        return *other;
1651    }
1652
1653    static void enterNoAllocationScope()
1654    {
1655#if ENABLE(ASSERT)
1656        ThreadStateFor<AnyThread>::state()->enterNoAllocationScope();
1657#endif
1658    }
1659
1660    static void leaveNoAllocationScope()
1661    {
1662#if ENABLE(ASSERT)
1663        ThreadStateFor<AnyThread>::state()->leaveNoAllocationScope();
1664#endif
1665    }
1666
1667private:
1668    template<typename T, size_t u, typename V> friend class WTF::Vector;
1669    template<typename T, typename U, typename V, typename W> friend class WTF::HashSet;
1670    template<typename T, typename U, typename V, typename W, typename X, typename Y> friend class WTF::HashMap;
1671};
1672
1673template<typename Value>
1674static void traceListHashSetValue(Visitor* visitor, Value& value)
1675{
1676    // We use the default hash traits for the value in the node, because
1677    // ListHashSet does not let you specify any specific ones.
1678    // We don't allow ListHashSet of WeakMember, so we set that one false
1679    // (there's an assert elsewhere), but we have to specify some value for the
1680    // strongify template argument, so we specify WTF::WeakPointersActWeak,
1681    // arbitrarily.
1682    CollectionBackingTraceTrait<WTF::ShouldBeTraced<WTF::HashTraits<Value> >::value, WTF::NoWeakHandlingInCollections, WTF::WeakPointersActWeak, Value, WTF::HashTraits<Value> >::trace(visitor, value);
1683}
1684
1685// The inline capacity is just a dummy template argument to match the off-heap
1686// allocator.
1687// This inherits from the static-only HeapAllocator trait class, but we do
1688// declare pointers to instances. These pointers are always null, and no
1689// objects are instantiated.
1690template<typename ValueArg, size_t inlineCapacity>
1691struct HeapListHashSetAllocator : public HeapAllocator {
1692    typedef HeapAllocator TableAllocator;
1693    typedef WTF::ListHashSetNode<ValueArg, HeapListHashSetAllocator> Node;
1694
1695public:
1696    class AllocatorProvider {
1697    public:
1698        // For the heap allocation we don't need an actual allocator object, so we just
1699        // return null.
1700        HeapListHashSetAllocator* get() const { return 0; }
1701
1702        // No allocator object is needed.
1703        void createAllocatorIfNeeded() { }
1704
1705        // There is no allocator object in the HeapListHashSet (unlike in
1706        // the regular ListHashSet) so there is nothing to swap.
1707        void swap(AllocatorProvider& other) { }
1708    };
1709
1710    void deallocate(void* dummy) { }
1711
1712    // This is not a static method even though it could be, because it
1713    // needs to match the one that the (off-heap) ListHashSetAllocator
1714    // has. The 'this' pointer will always be null.
1715    void* allocateNode()
1716    {
1717        COMPILE_ASSERT(!WTF::IsWeak<ValueArg>::value, WeakPointersInAListHashSetWillJustResultInNullEntriesInTheSetThatsNotWhatYouWantConsiderUsingLinkedHashSetInstead);
1718        return malloc<void*, Node>(sizeof(Node));
1719    }
1720
1721    static void traceValue(Visitor* visitor, Node* node)
1722    {
1723        traceListHashSetValue(visitor, node->m_value);
1724    }
1725};
1726
1727// FIXME: These should just be template aliases:
1728//
1729// template<typename T, size_t inlineCapacity = 0>
1730// using HeapVector = Vector<T, inlineCapacity, HeapAllocator>;
1731//
1732// as soon as all the compilers we care about support that.
1733// MSVC supports it only in MSVC 2013.
1734template<
1735    typename KeyArg,
1736    typename MappedArg,
1737    typename HashArg = typename DefaultHash<KeyArg>::Hash,
1738    typename KeyTraitsArg = HashTraits<KeyArg>,
1739    typename MappedTraitsArg = HashTraits<MappedArg> >
1740class HeapHashMap : public HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg, HeapAllocator> { };
1741
1742template<
1743    typename ValueArg,
1744    typename HashArg = typename DefaultHash<ValueArg>::Hash,
1745    typename TraitsArg = HashTraits<ValueArg> >
1746class HeapHashSet : public HashSet<ValueArg, HashArg, TraitsArg, HeapAllocator> { };
1747
1748template<
1749    typename ValueArg,
1750    typename HashArg = typename DefaultHash<ValueArg>::Hash,
1751    typename TraitsArg = HashTraits<ValueArg> >
1752class HeapLinkedHashSet : public LinkedHashSet<ValueArg, HashArg, TraitsArg, HeapAllocator> { };
1753
1754template<
1755    typename ValueArg,
1756    size_t inlineCapacity = 0, // The inlineCapacity is just a dummy to match ListHashSet (off-heap).
1757    typename HashArg = typename DefaultHash<ValueArg>::Hash>
1758class HeapListHashSet : public ListHashSet<ValueArg, inlineCapacity, HashArg, HeapListHashSetAllocator<ValueArg, inlineCapacity> > { };
1759
1760template<
1761    typename Value,
1762    typename HashFunctions = typename DefaultHash<Value>::Hash,
1763    typename Traits = HashTraits<Value> >
1764class HeapHashCountedSet : public HashCountedSet<Value, HashFunctions, Traits, HeapAllocator> { };
1765
1766template<typename T, size_t inlineCapacity = 0>
1767class HeapVector : public Vector<T, inlineCapacity, HeapAllocator> {
1768public:
1769    HeapVector() { }
1770
1771    explicit HeapVector(size_t size) : Vector<T, inlineCapacity, HeapAllocator>(size)
1772    {
1773    }
1774
1775    HeapVector(size_t size, const T& val) : Vector<T, inlineCapacity, HeapAllocator>(size, val)
1776    {
1777    }
1778
1779    template<size_t otherCapacity>
1780    HeapVector(const HeapVector<T, otherCapacity>& other)
1781        : Vector<T, inlineCapacity, HeapAllocator>(other)
1782    {
1783    }
1784
1785    template<typename U>
1786    void append(const U& other)
1787    {
1788        Vector<T, inlineCapacity, HeapAllocator>::append(other);
1789    }
1790
1791    template<typename U, size_t otherCapacity>
1792    void appendVector(const HeapVector<U, otherCapacity>& other)
1793    {
1794        const Vector<U, otherCapacity, HeapAllocator>& otherVector = other;
1795        Vector<T, inlineCapacity, HeapAllocator>::appendVector(otherVector);
1796    }
1797};
1798
1799template<typename T, size_t inlineCapacity = 0>
1800class HeapDeque : public Deque<T, inlineCapacity, HeapAllocator> {
1801public:
1802    HeapDeque() { }
1803
1804    explicit HeapDeque(size_t size) : Deque<T, inlineCapacity, HeapAllocator>(size)
1805    {
1806    }
1807
1808    HeapDeque(size_t size, const T& val) : Deque<T, inlineCapacity, HeapAllocator>(size, val)
1809    {
1810    }
1811
1812    // FIXME: Doesn't work if there is an inline buffer, due to crbug.com/360572
1813    HeapDeque<T, 0>& operator=(const HeapDeque& other)
1814    {
1815        HeapDeque<T> copy(other);
1816        swap(copy);
1817        return *this;
1818    }
1819
1820    // FIXME: Doesn't work if there is an inline buffer, due to crbug.com/360572
1821    inline void swap(HeapDeque& other)
1822    {
1823        Deque<T, inlineCapacity, HeapAllocator>::swap(other);
1824    }
1825
1826    template<size_t otherCapacity>
1827    HeapDeque(const HeapDeque<T, otherCapacity>& other)
1828        : Deque<T, inlineCapacity, HeapAllocator>(other)
1829    {
1830    }
1831
1832    template<typename U>
1833    void append(const U& other)
1834    {
1835        Deque<T, inlineCapacity, HeapAllocator>::append(other);
1836    }
1837};
1838
1839template<typename T, size_t i>
1840inline void swap(HeapVector<T, i>& a, HeapVector<T, i>& b) { a.swap(b); }
1841template<typename T, size_t i>
1842inline void swap(HeapDeque<T, i>& a, HeapDeque<T, i>& b) { a.swap(b); }
1843template<typename T, typename U, typename V>
1844inline void swap(HeapHashSet<T, U, V>& a, HeapHashSet<T, U, V>& b) { a.swap(b); }
1845template<typename T, typename U, typename V, typename W, typename X>
1846inline void swap(HeapHashMap<T, U, V, W, X>& a, HeapHashMap<T, U, V, W, X>& b) { a.swap(b); }
1847template<typename T, size_t i, typename U>
1848inline void swap(HeapListHashSet<T, i, U>& a, HeapListHashSet<T, i, U>& b) { a.swap(b); }
1849template<typename T, typename U, typename V>
1850inline void swap(HeapLinkedHashSet<T, U, V>& a, HeapLinkedHashSet<T, U, V>& b) { a.swap(b); }
1851template<typename T, typename U, typename V>
1852inline void swap(HeapHashCountedSet<T, U, V>& a, HeapHashCountedSet<T, U, V>& b) { a.swap(b); }
1853
1854template<typename T>
1855struct ThreadingTrait<Member<T> > {
1856    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
1857};
1858
1859template<typename T>
1860struct ThreadingTrait<WeakMember<T> > {
1861    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
1862};
1863
1864template<typename Key, typename Value, typename T, typename U, typename V>
1865struct ThreadingTrait<HashMap<Key, Value, T, U, V, HeapAllocator> > {
1866    static const ThreadAffinity Affinity =
1867        (ThreadingTrait<Key>::Affinity == MainThreadOnly)
1868        && (ThreadingTrait<Value>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
1869};
1870
1871template<typename First, typename Second>
1872struct ThreadingTrait<WTF::KeyValuePair<First, Second> > {
1873    static const ThreadAffinity Affinity =
1874        (ThreadingTrait<First>::Affinity == MainThreadOnly)
1875        && (ThreadingTrait<Second>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
1876};
1877
1878template<typename T, typename U, typename V>
1879struct ThreadingTrait<HashSet<T, U, V, HeapAllocator> > {
1880    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
1881};
1882
1883
1884template<typename T, size_t inlineCapacity>
1885struct ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator> > {
1886    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
1887};
1888
1889template<typename T, typename Traits>
1890struct ThreadingTrait<HeapVectorBacking<T, Traits> > {
1891    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
1892};
1893
1894template<typename T, size_t inlineCapacity>
1895struct ThreadingTrait<Deque<T, inlineCapacity, HeapAllocator> > {
1896    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
1897};
1898
1899template<typename T, typename U, typename V>
1900struct ThreadingTrait<HashCountedSet<T, U, V, HeapAllocator> > {
1901    static const ThreadAffinity Affinity = ThreadingTrait<T>::Affinity;
1902};
1903
1904template<typename Table>
1905struct ThreadingTrait<HeapHashTableBacking<Table> > {
1906    typedef typename Table::KeyType Key;
1907    typedef typename Table::ValueType Value;
1908    static const ThreadAffinity Affinity =
1909        (ThreadingTrait<Key>::Affinity == MainThreadOnly)
1910        && (ThreadingTrait<Value>::Affinity == MainThreadOnly) ? MainThreadOnly : AnyThread;
1911};
1912
1913template<typename T, typename U, typename V, typename W, typename X>
1914struct ThreadingTrait<HeapHashMap<T, U, V, W, X> > : public ThreadingTrait<HashMap<T, U, V, W, X, HeapAllocator> > { };
1915
1916template<typename T, typename U, typename V>
1917struct ThreadingTrait<HeapHashSet<T, U, V> > : public ThreadingTrait<HashSet<T, U, V, HeapAllocator> > { };
1918
1919template<typename T, size_t inlineCapacity>
1920struct ThreadingTrait<HeapVector<T, inlineCapacity> > : public ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator> > { };
1921
1922template<typename T, size_t inlineCapacity>
1923struct ThreadingTrait<HeapDeque<T, inlineCapacity> > : public ThreadingTrait<Deque<T, inlineCapacity, HeapAllocator> > { };
1924
1925template<typename T, typename U, typename V>
1926struct ThreadingTrait<HeapHashCountedSet<T, U, V> > : public ThreadingTrait<HashCountedSet<T, U, V, HeapAllocator> > { };
1927
1928// The standard implementation of GCInfoTrait<T>::get() just returns a static
1929// from the class T, but we can't do that for HashMap, HashSet, Vector, etc.
1930// because they are in WTF and know nothing of GCInfos. Instead we have a
1931// specialization of GCInfoTrait for these four classes here.
1932
1933template<typename Key, typename Value, typename T, typename U, typename V>
1934struct GCInfoTrait<HashMap<Key, Value, T, U, V, HeapAllocator> > {
1935    static const GCInfo* get()
1936    {
1937        typedef HashMap<Key, Value, T, U, V, HeapAllocator> TargetType;
1938        static const GCInfo info = {
1939            TraceTrait<TargetType>::trace,
1940            0,
1941            false, // HashMap needs no finalizer.
1942            WTF::IsPolymorphic<TargetType>::value,
1943#if ENABLE(GC_PROFILING)
1944            TypenameStringTrait<TargetType>::get()
1945#endif
1946        };
1947        return &info;
1948    }
1949};
1950
1951template<typename T, typename U, typename V>
1952struct GCInfoTrait<HashSet<T, U, V, HeapAllocator> > {
1953    static const GCInfo* get()
1954    {
1955        typedef HashSet<T, U, V, HeapAllocator> TargetType;
1956        static const GCInfo info = {
1957            TraceTrait<TargetType>::trace,
1958            0,
1959            false, // HashSet needs no finalizer.
1960            WTF::IsPolymorphic<TargetType>::value,
1961#if ENABLE(GC_PROFILING)
1962            TypenameStringTrait<TargetType>::get()
1963#endif
1964        };
1965        return &info;
1966    }
1967};
1968
1969template<typename T, typename U, typename V>
1970struct GCInfoTrait<LinkedHashSet<T, U, V, HeapAllocator> > {
1971    static const GCInfo* get()
1972    {
1973        typedef LinkedHashSet<T, U, V, HeapAllocator> TargetType;
1974        static const GCInfo info = {
1975            TraceTrait<TargetType>::trace,
1976            LinkedHashSet<T, U, V, HeapAllocator>::finalize,
1977            true, // Needs finalization. The anchor needs to unlink itself from the chain.
1978            WTF::IsPolymorphic<TargetType>::value,
1979#if ENABLE(GC_PROFILING)
1980            TypenameStringTrait<TargetType>::get()
1981#endif
1982        };
1983        return &info;
1984    }
1985};
1986
1987template<typename ValueArg, size_t inlineCapacity, typename U>
1988struct GCInfoTrait<ListHashSet<ValueArg, inlineCapacity, U, HeapListHashSetAllocator<ValueArg, inlineCapacity> > > {
1989    static const GCInfo* get()
1990    {
1991        typedef WTF::ListHashSet<ValueArg, inlineCapacity, U, HeapListHashSetAllocator<ValueArg, inlineCapacity> > TargetType;
1992        static const GCInfo info = {
1993            TraceTrait<TargetType>::trace,
1994            0,
1995            false, // ListHashSet needs no finalization though its backing might.
1996            false, // no vtable.
1997#if ENABLE(GC_PROFILING)
1998            TypenameStringTrait<TargetType>::get()
1999#endif
2000        };
2001        return &info;
2002    }
2003};
2004
2005template<typename T, typename Allocator>
2006struct GCInfoTrait<WTF::ListHashSetNode<T, Allocator> > {
2007    static const GCInfo* get()
2008    {
2009        typedef WTF::ListHashSetNode<T, Allocator> TargetType;
2010        static const GCInfo info = {
2011            TraceTrait<TargetType>::trace,
2012            TargetType::finalize,
2013            WTF::HashTraits<T>::needsDestruction, // The node needs destruction if its data does.
2014            false, // no vtable.
2015#if ENABLE(GC_PROFILING)
2016            TypenameStringTrait<TargetType>::get()
2017#endif
2018        };
2019        return &info;
2020    }
2021};
2022
2023template<typename T>
2024struct GCInfoTrait<Vector<T, 0, HeapAllocator> > {
2025    static const GCInfo* get()
2026    {
2027#if ENABLE(GC_PROFILING)
2028        typedef Vector<T, 0, HeapAllocator> TargetType;
2029#endif
2030        static const GCInfo info = {
2031            TraceTrait<Vector<T, 0, HeapAllocator> >::trace,
2032            0,
2033            false, // Vector needs no finalizer if it has no inline capacity.
2034            WTF::IsPolymorphic<Vector<T, 0, HeapAllocator> >::value,
2035#if ENABLE(GC_PROFILING)
2036            TypenameStringTrait<TargetType>::get()
2037#endif
2038        };
2039        return &info;
2040    }
2041};
2042
2043template<typename T, size_t inlineCapacity>
2044struct FinalizerTrait<Vector<T, inlineCapacity, HeapAllocator> > : public FinalizerTraitImpl<Vector<T, inlineCapacity, HeapAllocator>, true> { };
2045
2046template<typename T, size_t inlineCapacity>
2047struct GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > {
2048    static const GCInfo* get()
2049    {
2050        typedef Vector<T, inlineCapacity, HeapAllocator> TargetType;
2051        static const GCInfo info = {
2052            TraceTrait<TargetType>::trace,
2053            FinalizerTrait<TargetType>::finalize,
2054            // Finalizer is needed to destruct things stored in the inline capacity.
2055            inlineCapacity && VectorTraits<T>::needsDestruction,
2056            WTF::IsPolymorphic<TargetType>::value,
2057#if ENABLE(GC_PROFILING)
2058            TypenameStringTrait<TargetType>::get()
2059#endif
2060        };
2061        return &info;
2062    }
2063};
2064
2065template<typename T>
2066struct GCInfoTrait<Deque<T, 0, HeapAllocator> > {
2067    static const GCInfo* get()
2068    {
2069        typedef Deque<T, 0, HeapAllocator> TargetType;
2070        static const GCInfo info = {
2071            TraceTrait<TargetType>::trace,
2072            0,
2073            false, // Deque needs no finalizer if it has no inline capacity.
2074            WTF::IsPolymorphic<TargetType>::value,
2075#if ENABLE(GC_PROFILING)
2076            TypenameStringTrait<TargetType>::get()
2077#endif
2078        };
2079        return &info;
2080    }
2081    static const GCInfo info;
2082};
2083
2084template<typename T, typename U, typename V>
2085struct GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator> > {
2086    static const GCInfo* get()
2087    {
2088        typedef HashCountedSet<T, U, V, HeapAllocator> TargetType;
2089        static const GCInfo info = {
2090            TraceTrait<TargetType>::trace,
2091            0,
2092            false, // HashCountedSet is just a HashTable, and needs no finalizer.
2093            WTF::IsPolymorphic<TargetType>::value,
2094#if ENABLE(GC_PROFILING)
2095            TypenameStringTrait<TargetType>::get()
2096#endif
2097        };
2098        return &info;
2099    }
2100    static const GCInfo info;
2101};
2102
2103template<typename T, size_t inlineCapacity>
2104struct FinalizerTrait<Deque<T, inlineCapacity, HeapAllocator> > : public FinalizerTraitImpl<Deque<T, inlineCapacity, HeapAllocator>, true> { };
2105
2106template<typename T, size_t inlineCapacity>
2107struct GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator> > {
2108    static const GCInfo* get()
2109    {
2110        typedef Deque<T, inlineCapacity, HeapAllocator> TargetType;
2111        static const GCInfo info = {
2112            TraceTrait<TargetType>::trace,
2113            FinalizerTrait<TargetType>::finalize,
2114            // Finalizer is needed to destruct things stored in the inline capacity.
2115            inlineCapacity && VectorTraits<T>::needsDestruction,
2116            WTF::IsPolymorphic<TargetType>::value,
2117#if ENABLE(GC_PROFILING)
2118            TypenameStringTrait<TargetType>::get()
2119#endif
2120        };
2121        return &info;
2122    }
2123    static const GCInfo info;
2124};
2125
2126template<typename T, typename Traits>
2127struct GCInfoTrait<HeapVectorBacking<T, Traits> > {
2128    static const GCInfo* get()
2129    {
2130        typedef HeapVectorBacking<T, Traits> TargetType;
2131        static const GCInfo info = {
2132            TraceTrait<TargetType>::trace,
2133            FinalizerTrait<TargetType>::finalize,
2134            Traits::needsDestruction,
2135            false, // We don't support embedded objects in HeapVectors with vtables.
2136#if ENABLE(GC_PROFILING)
2137            TypenameStringTrait<TargetType>::get()
2138#endif
2139        };
2140        return &info;
2141    }
2142};
2143
2144template<typename Table>
2145struct GCInfoTrait<HeapHashTableBacking<Table> > {
2146    static const GCInfo* get()
2147    {
2148        typedef HeapHashTableBacking<Table> TargetType;
2149        static const GCInfo info = {
2150            TraceTrait<TargetType>::trace,
2151            HeapHashTableBacking<Table>::finalize,
2152            Table::ValueTraits::needsDestruction,
2153            WTF::IsPolymorphic<TargetType>::value,
2154#if ENABLE(GC_PROFILING)
2155            TypenameStringTrait<TargetType>::get()
2156#endif
2157        };
2158        return &info;
2159    }
2160};
2161
2162} // namespace blink
2163
2164namespace WTF {
2165
2166// Catch-all for types that have a way to trace that don't have special
2167// handling for weakness in collections. This means that if this type
2168// contains WeakMember fields, they will simply be zeroed, but the entry
2169// will not be removed from the collection. This always happens for
2170// things in vectors, which don't currently support special handling of
2171// weak elements.
2172template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
2173struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, T, Traits> {
2174    static bool trace(blink::Visitor* visitor, T& t)
2175    {
2176        blink::TraceTrait<T>::trace(visitor, &t);
2177        return false;
2178    }
2179};
2180
2181// Catch-all for things that have HashTrait support for tracing with weakness.
2182template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
2183struct TraceInCollectionTrait<WeakHandlingInCollections, strongify, T, Traits> {
2184    static bool trace(blink::Visitor* visitor, T& t)
2185    {
2186        return Traits::traceInCollection(visitor, t, strongify);
2187    }
2188};
2189
2190// Vector backing that needs marking. We don't support weak members in vectors.
2191template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
2192struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, blink::HeapVectorBacking<T, Traits>, void> {
2193    static bool trace(blink::Visitor* visitor, void* self)
2194    {
2195        // The allocator can oversize the allocation a little, according to
2196        // the allocation granularity. The extra size is included in the
2197        // payloadSize call below, since there is nowhere to store the
2198        // originally allocated memory. This assert ensures that visiting the
2199        // last bit of memory can't cause trouble.
2200        COMPILE_ASSERT(!ShouldBeTraced<Traits>::value || sizeof(T) > blink::allocationGranularity || Traits::canInitializeWithMemset, HeapOverallocationCanCauseSpuriousVisits);
2201
2202        T* array = reinterpret_cast<T*>(self);
2203        blink::FinalizedHeapObjectHeader* header = blink::FinalizedHeapObjectHeader::fromPayload(self);
2204        // Use the payload size as recorded by the heap to determine how many
2205        // elements to mark.
2206        size_t length = header->payloadSize() / sizeof(T);
2207        for (size_t i = 0; i < length; i++)
2208            blink::CollectionBackingTraceTrait<ShouldBeTraced<Traits>::value, Traits::weakHandlingFlag, WeakPointersActStrong, T, Traits>::trace(visitor, array[i]);
2209        return false;
2210    }
2211};
2212
2213// Almost all hash table backings are visited with this specialization.
2214template<ShouldWeakPointersBeMarkedStrongly strongify, typename Table>
2215struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, blink::HeapHashTableBacking<Table>, void> {
2216    typedef typename Table::ValueType Value;
2217    typedef typename Table::ValueTraits Traits;
2218    static bool trace(blink::Visitor* visitor, void* self)
2219    {
2220        Value* array = reinterpret_cast<Value*>(self);
2221        blink::FinalizedHeapObjectHeader* header = blink::FinalizedHeapObjectHeader::fromPayload(self);
2222        size_t length = header->payloadSize() / sizeof(Value);
2223        for (size_t i = 0; i < length; i++) {
2224            if (!HashTableHelper<Value, typename Table::ExtractorType, typename Table::KeyTraitsType>::isEmptyOrDeletedBucket(array[i]))
2225                blink::CollectionBackingTraceTrait<ShouldBeTraced<Traits>::value, Traits::weakHandlingFlag, strongify, Value, Traits>::trace(visitor, array[i]);
2226        }
2227        return false;
2228    }
2229};
2230
2231// This specialization of TraceInCollectionTrait is for the backing of
2232// HeapListHashSet. This is for the case that we find a reference to the
2233// backing from the stack. That probably means we have a GC while we are in a
2234// ListHashSet method since normal API use does not put pointers to the backing
2235// on the stack.
2236template<ShouldWeakPointersBeMarkedStrongly strongify, typename NodeContents, size_t inlineCapacity, typename T, typename U, typename V, typename W, typename X, typename Y>
2237struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, blink::HeapHashTableBacking<HashTable<ListHashSetNode<NodeContents, blink::HeapListHashSetAllocator<T, inlineCapacity> >*, U, V, W, X, Y, blink::HeapAllocator> >, void> {
2238    typedef ListHashSetNode<NodeContents, blink::HeapListHashSetAllocator<T, inlineCapacity> > Node;
2239    typedef HashTable<Node*, U, V, W, X, Y, blink::HeapAllocator> Table;
2240    static bool trace(blink::Visitor* visitor, void* self)
2241    {
2242        Node** array = reinterpret_cast<Node**>(self);
2243        blink::FinalizedHeapObjectHeader* header = blink::FinalizedHeapObjectHeader::fromPayload(self);
2244        size_t length = header->payloadSize() / sizeof(Node*);
2245        for (size_t i = 0; i < length; i++) {
2246            if (!HashTableHelper<Node*, typename Table::ExtractorType, typename Table::KeyTraitsType>::isEmptyOrDeletedBucket(array[i])) {
2247                traceListHashSetValue(visitor, array[i]->m_value);
2248                // Just mark the node without tracing because we already traced
2249                // the contents, and there is no need to trace the next and
2250                // prev fields since iterating over the hash table backing will
2251                // find the whole chain.
2252                visitor->markNoTracing(array[i]);
2253            }
2254        }
2255        return false;
2256    }
2257};
2258
2259// Key value pairs, as used in HashMap. To disambiguate template choice we have
2260// to have two versions, first the one with no special weak handling, then the
2261// one with weak handling.
2262template<ShouldWeakPointersBeMarkedStrongly strongify, typename Key, typename Value, typename Traits>
2263struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, KeyValuePair<Key, Value>, Traits>  {
2264    static bool trace(blink::Visitor* visitor, KeyValuePair<Key, Value>& self)
2265    {
2266        ASSERT(ShouldBeTraced<Traits>::value);
2267        blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::KeyTraits>::value, NoWeakHandlingInCollections, strongify, Key, typename Traits::KeyTraits>::trace(visitor, self.key);
2268        blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::ValueTraits>::value, NoWeakHandlingInCollections, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.value);
2269        return false;
2270    }
2271};
2272
2273template<ShouldWeakPointersBeMarkedStrongly strongify, typename Key, typename Value, typename Traits>
2274struct TraceInCollectionTrait<WeakHandlingInCollections, strongify, KeyValuePair<Key, Value>, Traits> {
2275    static bool trace(blink::Visitor* visitor, KeyValuePair<Key, Value>& self)
2276    {
2277        // This is the core of the ephemeron-like functionality. If there is
2278        // weakness on the key side then we first check whether there are
2279        // dead weak pointers on that side, and if there are we don't mark the
2280        // value side (yet). Conversely if there is weakness on the value side
2281        // we check that first and don't mark the key side yet if we find dead
2282        // weak pointers.
2283        // Corner case: If there is weakness on both the key and value side,
2284        // and there are also strong pointers on the both sides then we could
2285        // unexpectedly leak. The scenario is that the weak pointer on the key
2286        // side is alive, which causes the strong pointer on the key side to be
2287        // marked. If that then results in the object pointed to by the weak
2288        // pointer on the value side being marked live, then the whole
2289        // key-value entry is leaked. To avoid unexpected leaking, we disallow
2290        // this case, but if you run into this assert, please reach out to Blink
2291        // reviewers, and we may relax it.
2292        const bool keyIsWeak = Traits::KeyTraits::weakHandlingFlag == WeakHandlingInCollections;
2293        const bool valueIsWeak = Traits::ValueTraits::weakHandlingFlag == WeakHandlingInCollections;
2294        const bool keyHasStrongRefs = ShouldBeTraced<typename Traits::KeyTraits>::value;
2295        const bool valueHasStrongRefs = ShouldBeTraced<typename Traits::ValueTraits>::value;
2296        COMPILE_ASSERT(!keyIsWeak || !valueIsWeak || !keyHasStrongRefs || !valueHasStrongRefs, ThisConfigurationWasDisallowedToAvoidUnexpectedLeaks);
2297        if ((valueIsWeak && !keyIsWeak) || (valueIsWeak && keyIsWeak && !valueHasStrongRefs)) {
2298            // Check value first.
2299            bool deadWeakObjectsFoundOnValueSide = blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::ValueTraits>::value, Traits::ValueTraits::weakHandlingFlag, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.value);
2300            if (deadWeakObjectsFoundOnValueSide)
2301                return true;
2302            return blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::KeyTraits>::value, Traits::KeyTraits::weakHandlingFlag, strongify, Key, typename Traits::KeyTraits>::trace(visitor, self.key);
2303        }
2304        // Check key first.
2305        bool deadWeakObjectsFoundOnKeySide = blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::KeyTraits>::value, Traits::KeyTraits::weakHandlingFlag, strongify, Key, typename Traits::KeyTraits>::trace(visitor, self.key);
2306        if (deadWeakObjectsFoundOnKeySide)
2307            return true;
2308        return blink::CollectionBackingTraceTrait<ShouldBeTraced<typename Traits::ValueTraits>::value, Traits::ValueTraits::weakHandlingFlag, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.value);
2309    }
2310};
2311
2312// Nodes used by LinkedHashSet. Again we need two versions to disambiguate the
2313// template.
2314template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, typename Allocator, typename Traits>
2315struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, LinkedHashSetNode<Value, Allocator>, Traits> {
2316    static bool trace(blink::Visitor* visitor, LinkedHashSetNode<Value, Allocator>& self)
2317    {
2318        ASSERT(ShouldBeTraced<Traits>::value);
2319        blink::TraceTrait<Value>::trace(visitor, &self.m_value);
2320        return false;
2321    }
2322};
2323
2324template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, typename Allocator, typename Traits>
2325struct TraceInCollectionTrait<WeakHandlingInCollections, strongify, LinkedHashSetNode<Value, Allocator>, Traits> {
2326    static bool trace(blink::Visitor* visitor, LinkedHashSetNode<Value, Allocator>& self)
2327    {
2328        return TraceInCollectionTrait<WeakHandlingInCollections, strongify, Value, typename Traits::ValueTraits>::trace(visitor, self.m_value);
2329    }
2330};
2331
2332// ListHashSetNode pointers (a ListHashSet is implemented as a hash table of these pointers).
2333template<ShouldWeakPointersBeMarkedStrongly strongify, typename Value, size_t inlineCapacity, typename Traits>
2334struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, ListHashSetNode<Value, blink::HeapListHashSetAllocator<Value, inlineCapacity> >*, Traits> {
2335    typedef ListHashSetNode<Value, blink::HeapListHashSetAllocator<Value, inlineCapacity> > Node;
2336    static bool trace(blink::Visitor* visitor, Node* node)
2337    {
2338        traceListHashSetValue(visitor, node->m_value);
2339        // Just mark the node without tracing because we already traced the
2340        // contents, and there is no need to trace the next and prev fields
2341        // since iterating over the hash table backing will find the whole
2342        // chain.
2343        visitor->markNoTracing(node);
2344        return false;
2345    }
2346};
2347
2348} // namespace WTF
2349
2350namespace blink {
2351
2352// CollectionBackingTraceTrait. Do nothing for things in collections that don't
2353// need tracing, or call TraceInCollectionTrait for those that do.
2354
2355// Specialization for things that don't need marking and have no weak pointers. We
2356// do nothing, even if WTF::WeakPointersActStrong.
2357template<WTF::ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
2358struct CollectionBackingTraceTrait<false, WTF::NoWeakHandlingInCollections, strongify, T, Traits> {
2359    static bool trace(Visitor*, T&) { return false; }
2360};
2361
2362// Specialization for things that either need marking or have weak pointers or
2363// both.
2364template<bool needsTracing, WTF::WeakHandlingFlag weakHandlingFlag, WTF::ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Traits>
2365struct CollectionBackingTraceTrait {
2366    static bool trace(Visitor* visitor, T&t)
2367    {
2368        Visitor::verifyGarbageCollectedIfMember(reinterpret_cast<T*>(0));
2369        return WTF::TraceInCollectionTrait<weakHandlingFlag, strongify, T, Traits>::trace(visitor, t);
2370    }
2371};
2372
2373template<typename T> struct WeakHandlingHashTraits : WTF::SimpleClassHashTraits<T> {
2374    // We want to treat the object as a weak object in the sense that it can
2375    // disappear from hash sets and hash maps.
2376    static const WTF::WeakHandlingFlag weakHandlingFlag = WTF::WeakHandlingInCollections;
2377    // Normally whether or not an object needs tracing is inferred
2378    // automatically from the presence of the trace method, but we don't
2379    // necessarily have a trace method, and we may not need one because T
2380    // can perhaps only be allocated inside collections, never as indpendent
2381    // objects. Explicitly mark this as needing tracing and it will be traced
2382    // in collections using the traceInCollection method, which it must have.
2383    template<typename U = void> struct NeedsTracingLazily {
2384        static const bool value = true;
2385    };
2386    // The traceInCollection method traces differently depending on whether we
2387    // are strongifying the trace operation. We strongify the trace operation
2388    // when there are active iterators on the object. In this case all
2389    // WeakMembers are marked like strong members so that elements do not
2390    // suddenly disappear during iteration. Returns true if weak pointers to
2391    // dead objects were found: In this case any strong pointers were not yet
2392    // traced and the entry should be removed from the collection.
2393    static bool traceInCollection(Visitor* visitor, T& t, WTF::ShouldWeakPointersBeMarkedStrongly strongify)
2394    {
2395        return t.traceInCollection(visitor, strongify);
2396    }
2397};
2398
2399template<typename T, typename Traits>
2400struct TraceTrait<HeapVectorBacking<T, Traits> > {
2401    typedef HeapVectorBacking<T, Traits> Backing;
2402    static void trace(Visitor* visitor, void* self)
2403    {
2404        COMPILE_ASSERT(!WTF::IsWeak<T>::value, WeDontSupportWeaknessInHeapVectorsOrDeques);
2405        if (WTF::ShouldBeTraced<Traits>::value)
2406            WTF::TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, WTF::WeakPointersActWeak, HeapVectorBacking<T, Traits>, void>::trace(visitor, self);
2407    }
2408    static void mark(Visitor* visitor, const Backing* backing)
2409    {
2410        visitor->mark(backing, &trace);
2411    }
2412    static void checkGCInfo(Visitor* visitor, const Backing* backing)
2413    {
2414#if ENABLE(ASSERT)
2415        visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get());
2416#endif
2417    }
2418};
2419
2420// The trace trait for the heap hashtable backing is used when we find a
2421// direct pointer to the backing from the conservative stack scanner. This
2422// normally indicates that there is an ongoing iteration over the table, and so
2423// we disable weak processing of table entries. When the backing is found
2424// through the owning hash table we mark differently, in order to do weak
2425// processing.
2426template<typename Table>
2427struct TraceTrait<HeapHashTableBacking<Table> > {
2428    typedef HeapHashTableBacking<Table> Backing;
2429    typedef typename Table::ValueTraits Traits;
2430    static void trace(Visitor* visitor, void* self)
2431    {
2432        if (WTF::ShouldBeTraced<Traits>::value || Traits::weakHandlingFlag == WTF::WeakHandlingInCollections)
2433            WTF::TraceInCollectionTrait<WTF::NoWeakHandlingInCollections, WTF::WeakPointersActStrong, Backing, void>::trace(visitor, self);
2434    }
2435    static void mark(Visitor* visitor, const Backing* backing)
2436    {
2437        if (WTF::ShouldBeTraced<Traits>::value || Traits::weakHandlingFlag == WTF::WeakHandlingInCollections)
2438            visitor->mark(backing, &trace);
2439        else
2440            visitor->markNoTracing(backing); // If we know the trace function will do nothing there is no need to call it.
2441    }
2442    static void checkGCInfo(Visitor* visitor, const Backing* backing)
2443    {
2444#if ENABLE(ASSERT)
2445        visitor->checkGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing>::get());
2446#endif
2447    }
2448};
2449
2450template<typename Table>
2451void HeapHashTableBacking<Table>::finalize(void* pointer)
2452{
2453    typedef typename Table::ValueType Value;
2454    ASSERT(Table::ValueTraits::needsDestruction);
2455    FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(pointer);
2456    // Use the payload size as recorded by the heap to determine how many
2457    // elements to finalize.
2458    size_t length = header->payloadSize() / sizeof(Value);
2459    Value* table = reinterpret_cast<Value*>(pointer);
2460    for (unsigned i = 0; i < length; i++) {
2461        if (!Table::isEmptyOrDeletedBucket(table[i]))
2462            table[i].~Value();
2463    }
2464}
2465
2466template<typename T, typename U, typename V, typename W, typename X>
2467struct GCInfoTrait<HeapHashMap<T, U, V, W, X> > : public GCInfoTrait<HashMap<T, U, V, W, X, HeapAllocator> > { };
2468template<typename T, typename U, typename V>
2469struct GCInfoTrait<HeapHashSet<T, U, V> > : public GCInfoTrait<HashSet<T, U, V, HeapAllocator> > { };
2470template<typename T, typename U, typename V>
2471struct GCInfoTrait<HeapLinkedHashSet<T, U, V> > : public GCInfoTrait<LinkedHashSet<T, U, V, HeapAllocator> > { };
2472template<typename T, size_t inlineCapacity, typename U>
2473struct GCInfoTrait<HeapListHashSet<T, inlineCapacity, U> > : public GCInfoTrait<ListHashSet<T, inlineCapacity, U, HeapListHashSetAllocator<T, inlineCapacity> > > { };
2474template<typename T, size_t inlineCapacity>
2475struct GCInfoTrait<HeapVector<T, inlineCapacity> > : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > { };
2476template<typename T, size_t inlineCapacity>
2477struct GCInfoTrait<HeapDeque<T, inlineCapacity> > : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator> > { };
2478template<typename T, typename U, typename V>
2479struct GCInfoTrait<HeapHashCountedSet<T, U, V> > : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator> > { };
2480
2481template<typename T>
2482struct IfWeakMember;
2483
2484template<typename T>
2485struct IfWeakMember {
2486    template<typename U>
2487    static bool isDead(Visitor*, const U&) { return false; }
2488};
2489
2490template<typename T>
2491struct IfWeakMember<WeakMember<T> > {
2492    static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); }
2493};
2494
2495}
2496
2497#endif // Heap_h
2498