/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ThreadState_h
#define ThreadState_h

#include "platform/PlatformExport.h"
#include "platform/heap/AddressSanitizer.h"
#include "public/platform/WebThread.h"
#include "wtf/HashSet.h"
#include "wtf/OwnPtr.h"
#include "wtf/PassOwnPtr.h"
#include "wtf/ThreadSpecific.h"
#include "wtf/Threading.h"
#include "wtf/ThreadingPrimitives.h"
#include "wtf/Vector.h"

#if ENABLE(GC_PROFILE_HEAP)
#include "wtf/HashMap.h"
#endif

namespace blink {

class BaseHeap;
class BaseHeapPage;
class FinalizedHeapObjectHeader;
struct GCInfo;
class HeapContainsCache;
class HeapObjectHeader;
class PageMemory;
class PersistentNode;
class WrapperPersistentRegion;
class Visitor;
class SafePointBarrier;
class SafePointAwareMutexLocker;
template<typename Header> class ThreadHeap;
class CallbackStack;

typedef uint8_t* Address;

typedef void (*FinalizationCallback)(void*);
typedef void (*VisitorCallback)(Visitor*, void* self);
typedef VisitorCallback TraceCallback;
typedef VisitorCallback WeakPointerCallback;
typedef VisitorCallback EphemeronCallback;
// ThreadAffinity indicates which threads objects can be used on. We
// distinguish between objects that can be used on the main thread
// only and objects that can be used on any thread.
//
// For objects that can only be used on the main thread we avoid going
// through thread-local storage to get to the thread state.
//
// FIXME: We should evaluate the performance gain. ThreadAffinity
// complicates the implementation and we should get rid of it if
// going through thread-local storage is always fast enough.
enum ThreadAffinity {
    AnyThread,
    MainThreadOnly,
};

class Node;
class CSSValue;

template<typename T, bool derivesNode = WTF::IsSubclass<typename WTF::RemoveConst<T>::Type, Node>::value> struct DefaultThreadingTrait;

template<typename T>
struct DefaultThreadingTrait<T, false> {
    static const ThreadAffinity Affinity = AnyThread;
};

template<typename T>
struct DefaultThreadingTrait<T, true> {
    static const ThreadAffinity Affinity = MainThreadOnly;
};

template<typename T>
struct ThreadingTrait {
    static const ThreadAffinity Affinity = DefaultThreadingTrait<T>::Affinity;
};

// Marks the specified class as being used from multiple threads. When
// a class is used from multiple threads we go through thread local
// storage to get the heap in which to allocate an object of that type
// and when allocating a Persistent handle for an object with that
// type. Notice that marking the base class does not automatically
// mark its descendants and they have to be explicitly marked.
#define USED_FROM_MULTIPLE_THREADS(Class)                 \
    class Class;                                          \
    template<> struct ThreadingTrait<Class> {             \
        static const ThreadAffinity Affinity = AnyThread; \
    }

#define USED_FROM_MULTIPLE_THREADS_NAMESPACE(Namespace, Class)          \
    namespace Namespace {                                               \
        class Class;                                                    \
    }                                                                   \
    namespace blink {                                                   \
        template<> struct ThreadingTrait<Namespace::Class> {            \
            static const ThreadAffinity Affinity = AnyThread;           \
        };                                                              \
    }
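
// Example (illustrative): a class instantiated on several worker threads
// would be marked as follows; "FontCache" is a hypothetical class name.
//
//   USED_FROM_MULTIPLE_THREADS(FontCache);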

template<typename U> struct ThreadingTrait<const U> : public ThreadingTrait<U> { };

// List of typed heaps. The list is used to generate the implementation
// of typed heap related methods.
//
// To create a new typed heap add an H(<ClassName>) to the
// FOR_EACH_TYPED_HEAP macro below.
#define FOR_EACH_TYPED_HEAP(H)  \
    H(Node)
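
// Example (illustrative): adding a typed heap for CSSValue (forward-declared
// above) would extend the list like this:
//
//   #define FOR_EACH_TYPED_HEAP(H)  \
//       H(Node)                     \
//       H(CSSValue)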

#define TypedHeapEnumName(Type) Type##Heap,
#define TypedHeapEnumNameNonFinalized(Type) Type##HeapNonFinalized,

enum TypedHeaps {
    GeneralHeap = 0,
    CollectionBackingHeap,
    FOR_EACH_TYPED_HEAP(TypedHeapEnumName)
    GeneralHeapNonFinalized,
    CollectionBackingHeapNonFinalized,
    FOR_EACH_TYPED_HEAP(TypedHeapEnumNameNonFinalized)
    // Values used for iteration of heap segments.
    NumberOfHeaps,
    FirstFinalizedHeap = GeneralHeap,
    FirstNonFinalizedHeap = GeneralHeapNonFinalized,
    NumberOfFinalizedHeaps = GeneralHeapNonFinalized,
    NumberOfNonFinalizedHeaps = NumberOfHeaps - NumberOfFinalizedHeaps,
    NonFinalizedHeapOffset = FirstNonFinalizedHeap
};
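
// With the current FOR_EACH_TYPED_HEAP list this enum works out to
// (illustrative): GeneralHeap = 0, CollectionBackingHeap = 1, NodeHeap = 2,
// GeneralHeapNonFinalized = 3, CollectionBackingHeapNonFinalized = 4,
// NodeHeapNonFinalized = 5, NumberOfHeaps = 6, NumberOfFinalizedHeaps = 3,
// NumberOfNonFinalizedHeaps = 3 and NonFinalizedHeapOffset = 3.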

// Base implementation for HeapIndexTrait found below.
template<int heapIndex>
struct HeapIndexTraitBase {
    typedef FinalizedHeapObjectHeader HeaderType;
    typedef ThreadHeap<HeaderType> HeapType;
    static const int finalizedIndex = heapIndex;
    static const int nonFinalizedIndex = heapIndex + static_cast<int>(NonFinalizedHeapOffset);
    static int index(bool isFinalized)
    {
        return isFinalized ? finalizedIndex : nonFinalizedIndex;
    }
};

// HeapIndexTrait defines properties for each heap in the TypedHeaps enum.
template<int index>
struct HeapIndexTrait;

template<>
struct HeapIndexTrait<GeneralHeap> : public HeapIndexTraitBase<GeneralHeap> { };
template<>
struct HeapIndexTrait<GeneralHeapNonFinalized> : public HeapIndexTrait<GeneralHeap> { };

template<>
struct HeapIndexTrait<CollectionBackingHeap> : public HeapIndexTraitBase<CollectionBackingHeap> { };
template<>
struct HeapIndexTrait<CollectionBackingHeapNonFinalized> : public HeapIndexTrait<CollectionBackingHeap> { };

#define DEFINE_TYPED_HEAP_INDEX_TRAIT(Type)                                     \
    template<>                                                                  \
    struct HeapIndexTrait<Type##Heap> : public HeapIndexTraitBase<Type##Heap> { \
        typedef HeapObjectHeader HeaderType;                                    \
        typedef ThreadHeap<HeaderType> HeapType;                                \
    };                                                                          \
    template<>                                                                  \
    struct HeapIndexTrait<Type##HeapNonFinalized> : public HeapIndexTrait<Type##Heap> { };
FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_INDEX_TRAIT)
#undef DEFINE_TYPED_HEAP_INDEX_TRAIT

// HeapTypeTrait defines which heap to use for particular types.
// By default objects are allocated in the GeneralHeap.
template<typename T>
struct HeapTypeTrait : public HeapIndexTrait<GeneralHeap> { };

// We don't have any type-based mappings to the CollectionBackingHeap.

// Each typed-heap maps the respective type to its heap.
#define DEFINE_TYPED_HEAP_TRAIT(Type)                                   \
    class Type;                                                         \
    template<>                                                          \
    struct HeapTypeTrait<class Type> : public HeapIndexTrait<Type##Heap> { };
FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT)
#undef DEFINE_TYPED_HEAP_TRAIT
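
// Example (illustrative): allocation code can resolve the heap slot for a
// type T via HeapTypeTrait<T>::index(isFinalized). For Node this yields
// NodeHeap or NodeHeapNonFinalized; for any type without an explicit mapping
// it yields GeneralHeap or GeneralHeapNonFinalized.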

// A HeapStats structure keeps track of the amount of memory allocated
// for a Blink heap and how much of that memory is used for actual
// Blink objects. These stats are used in the heuristics to determine
// when to perform garbage collections.
class HeapStats {
public:
    HeapStats() : m_totalObjectSpace(0), m_totalAllocatedSpace(0) { }

    size_t totalObjectSpace() const { return m_totalObjectSpace; }
    size_t totalAllocatedSpace() const { return m_totalAllocatedSpace; }

    void add(HeapStats* other)
    {
        m_totalObjectSpace += other->m_totalObjectSpace;
        m_totalAllocatedSpace += other->m_totalAllocatedSpace;
    }

    inline void increaseObjectSpace(size_t newObjectSpace)
    {
        m_totalObjectSpace += newObjectSpace;
    }

    inline void decreaseObjectSpace(size_t deadObjectSpace)
    {
        m_totalObjectSpace -= deadObjectSpace;
    }

    inline void increaseAllocatedSpace(size_t newAllocatedSpace)
    {
        m_totalAllocatedSpace += newAllocatedSpace;
    }

    inline void decreaseAllocatedSpace(size_t deadAllocatedSpace)
    {
        m_totalAllocatedSpace -= deadAllocatedSpace;
    }

    void clear()
    {
        m_totalObjectSpace = 0;
        m_totalAllocatedSpace = 0;
    }

    bool operator==(const HeapStats& other) const
    {
        return m_totalAllocatedSpace == other.m_totalAllocatedSpace
            && m_totalObjectSpace == other.m_totalObjectSpace;
    }

private:
    size_t m_totalObjectSpace; // Space used by objects that may be live, not including object headers.
    size_t m_totalAllocatedSpace; // Space allocated from the OS.

    friend class HeapTester;
};

class PLATFORM_EXPORT ThreadState {
    WTF_MAKE_NONCOPYABLE(ThreadState);
public:
    // When garbage collecting we need to know whether or not there
    // can be pointers to Blink GC managed objects on the stack for
    // each thread. When threads reach a safe point they record
    // whether or not they have pointers on the stack.
    enum StackState {
        NoHeapPointersOnStack,
        HeapPointersOnStack
    };

    // When profiling we would like to identify forced GC requests.
    enum CauseOfGC {
        NormalGC,
        ForcedGC
    };

    class NoSweepScope {
    public:
        explicit NoSweepScope(ThreadState* state) : m_state(state)
        {
            ASSERT(!m_state->m_sweepInProgress);
            m_state->m_sweepInProgress = true;
        }
        ~NoSweepScope()
        {
            ASSERT(m_state->m_sweepInProgress);
            m_state->m_sweepInProgress = false;
        }
    private:
        ThreadState* m_state;
    };
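
    // A minimal usage sketch (illustrative): marks a region as sweeping in
    // progress, so isSweepInProgress() returns true inside it:
    //
    //   NoSweepScope scope(ThreadState::current());
    //   ... code that runs with sweeping marked as in progress ...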

    // The set of ThreadStates for all threads attached to the Blink
    // garbage collector.
    typedef HashSet<ThreadState*> AttachedThreadStateSet;
    static AttachedThreadStateSet& attachedThreads();

    // Initialize threading infrastructure. Should be called from the main
    // thread.
    static void init();
    static void shutdown();
    static void shutdownHeapIfNecessary();
    bool isTerminating() { return m_isTerminating; }

    static void attachMainThread();
    static void detachMainThread();

    // Trace all persistent roots, called when marking the managed heap objects.
    static void visitPersistentRoots(Visitor*);

    // Trace all objects found on the stack, used when doing conservative GCs.
    static void visitStackRoots(Visitor*);

    // Associate a ThreadState object with the current thread. After this
    // call the thread can start using the garbage collected heap
    // infrastructure. It also has to periodically check for safepoints.
    static void attach();

    // Disassociate the attached ThreadState from the current thread. The
    // thread can no longer use the garbage collected heap after this call.
    static void detach();

    static ThreadState* current() { return **s_threadSpecific; }
    static ThreadState* mainThreadState()
    {
        return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage);
    }

    bool isMainThread() const { return this == mainThreadState(); }
    inline bool checkThread() const
    {
        ASSERT(m_thread == currentThread());
        return true;
    }

    // shouldGC and shouldForceConservativeGC implement the heuristics
    // that are used to determine when to collect garbage. If
    // shouldForceConservativeGC returns true, we force the garbage
    // collection immediately. Otherwise, if shouldGC returns true, we
    // record that we should garbage collect the next time we return
    // to the event loop. If both return false, we don't need to
    // collect garbage at this point.
    bool shouldGC();
    bool shouldForceConservativeGC();
    bool increasedEnoughToGC(size_t, size_t);
    bool increasedEnoughToForceConservativeGC(size_t, size_t);
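
    // A sketch of how a caller might apply these heuristics (illustrative;
    // "state" is a ThreadState* and Heap::collectGarbage lives elsewhere in
    // the heap code):
    //
    //   if (state->shouldForceConservativeGC())
    //       Heap::collectGarbage(ThreadState::HeapPointersOnStack);
    //   else if (state->shouldGC())
    //       state->setGCRequested(); // GC happens at the next event loop return.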

    // If gcRequested returns true when a thread returns to its event
    // loop the thread will initiate a garbage collection.
    bool gcRequested();
    void setGCRequested();
    void clearGCRequested();

    // Was the last GC forced for testing? This is set when garbage collection
    // is forced for testing and there are pointers on the stack. It remains
    // set until a garbage collection is triggered with no pointers on the stack.
    // This is used for layout tests that trigger GCs and check if objects are
    // dead at a given point in time. That only reliably works when we get
    // precise GCs with no conservative stack scanning.
    void setForcePreciseGCForTesting(bool);
    bool forcePreciseGCForTesting();

    bool sweepRequested();
    void setSweepRequested();
    void clearSweepRequested();
    void performPendingSweep();

    // Support for disallowing allocation. Mainly used for sanity
    // check asserts.
    bool isAllocationAllowed() const { return !isAtSafePoint() && !m_noAllocationCount; }
    void enterNoAllocationScope() { m_noAllocationCount++; }
    void leaveNoAllocationScope() { m_noAllocationCount--; }

    // Before performing GC the thread-specific heap state should be
    // made consistent for sweeping.
    void makeConsistentForSweeping();
#if ENABLE(ASSERT)
    bool isConsistentForSweeping();
#endif

    // Is the thread corresponding to this thread state currently
    // performing GC?
    bool isInGC() const { return m_inGC; }

    // Is any of the threads registered with the Blink garbage collection
    // infrastructure currently performing GC?
    static bool isAnyThreadInGC() { return s_inGC; }

    void enterGC()
    {
        ASSERT(!m_inGC);
        ASSERT(!s_inGC);
        m_inGC = true;
        s_inGC = true;
    }

    void leaveGC()
    {
        m_inGC = false;
        s_inGC = false;
    }

    // Is the thread corresponding to this thread state currently
    // sweeping?
    bool isSweepInProgress() const { return m_sweepInProgress; }

    void prepareForGC();

    // Safepoint related functionality.
    //
    // When a thread attempts to perform GC it needs to stop all other threads
    // that use the heap or at least guarantee that they will not touch any
    // heap allocated object until GC is complete.
    //
    // We say that a thread is at a safepoint if this thread is guaranteed to
    // not touch any heap allocated object or any heap related functionality until
    // it leaves the safepoint.
    //
    // Notice that a thread does not have to be paused if it is at a safepoint:
    // it can continue to run and perform tasks that do not require interaction
    // with the heap. It will be paused if it attempts to leave the safepoint and
    // there is a GC in progress.
    //
    // Each thread that has ThreadState attached must:
    //   - periodically check if GC is requested from another thread by calling a safePoint() method;
    //   - use SafePointScope around long running loops that have no safePoint() invocation inside,
    //     such loops must not touch any heap object;
    //   - register an Interruptor that can interrupt long running loops that have no calls to safePoint and
    //     are not wrapped in a SafePointScope (e.g. Interruptor for JavaScript code)
    //

    // Request all other threads to stop. Must only be called if the current thread is at a safepoint.
    static bool stopThreads();
    static void resumeThreads();

    // Check if GC is requested by another thread and pause this thread if this is the case.
    // Can only be called when the current thread is in a consistent state.
    void safePoint(StackState);

    // Mark current thread as running inside safepoint.
    void enterSafePointWithoutPointers() { enterSafePoint(NoHeapPointersOnStack, 0); }
    void enterSafePointWithPointers(void* scopeMarker) { enterSafePoint(HeapPointersOnStack, scopeMarker); }
    void leaveSafePoint(SafePointAwareMutexLocker* = 0);
    bool isAtSafePoint() const { return m_atSafePoint; }

    class SafePointScope {
    public:
        enum ScopeNesting {
            NoNesting,
            AllowNesting
        };

        explicit SafePointScope(StackState stackState, ScopeNesting nesting = NoNesting)
            : m_state(ThreadState::current())
        {
            if (m_state->isAtSafePoint()) {
                RELEASE_ASSERT(nesting == AllowNesting);
                // We can ignore stackState because there should be no manipulation
                // of heap object pointers after the outermost safepoint was entered.
                m_state = 0;
            } else {
                m_state->enterSafePoint(stackState, this);
            }
        }

        ~SafePointScope()
        {
            if (m_state)
                m_state->leaveSafePoint();
        }

    private:
        ThreadState* m_state;
    };
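
    // Usage sketch (illustrative; waitForWork() is a hypothetical function):
    // wrap a long running loop that does not touch heap objects so that other
    // threads can GC while it runs:
    //
    //   {
    //       ThreadState::SafePointScope scope(ThreadState::NoHeapPointersOnStack);
    //       while (waitForWork()) { /* must not touch heap allocated objects */ }
    //   }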

    // If an attached thread enters a long running loop that can call back
    // into Blink, and leaving and reentering a safepoint at every transition
    // between this loop and Blink is deemed too expensive, then instead of
    // marking this loop as a GC safepoint the thread can provide an
    // Interruptor object which allows the GC to temporarily interrupt and
    // pause this long running loop at an arbitrary moment, creating a
    // safepoint for a GC.
    class PLATFORM_EXPORT Interruptor {
    public:
        virtual ~Interruptor() { }

        // Request the interruptor to interrupt the thread and
        // call onInterrupted on that thread once interruption
        // succeeds.
        virtual void requestInterrupt() = 0;

        // Clear previous interrupt request.
        virtual void clearInterrupt() = 0;

    protected:
        // This method is called on the interrupted thread to
        // create a safepoint for a GC.
        void onInterrupted();
    };

    void addInterruptor(Interruptor*);
    void removeInterruptor(Interruptor*);
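
    // A minimal Interruptor sketch (illustrative; the task posting and
    // cancellation are hypothetical placeholders for whatever mechanism the
    // interrupted loop supports):
    //
    //   class LoopInterruptor : public ThreadState::Interruptor {
    //   public:
    //       virtual void requestInterrupt() { /* post a task that calls onInterrupted() */ }
    //       virtual void clearInterrupt() { /* cancel the posted task */ }
    //   };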

    // CleanupTasks are executed when ThreadState performs
    // cleanup before detaching.
    class CleanupTask {
    public:
        virtual ~CleanupTask() { }

        // Executed before the final GC.
        virtual void preCleanup() { }

        // Executed after the final GC. Thread heap is empty at this point.
        virtual void postCleanup() { }
    };

    void addCleanupTask(PassOwnPtr<CleanupTask> cleanupTask)
    {
        m_cleanupTasks.append(cleanupTask);
    }
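
    // Registration sketch (illustrative; MyCleanup is a hypothetical
    // CleanupTask subclass):
    //
    //   ThreadState::current()->addCleanupTask(adoptPtr(new MyCleanup));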

    // Should only be called under protection of threadAttachMutex().
    const Vector<Interruptor*>& interruptors() const { return m_interruptors; }

    void recordStackEnd(intptr_t* endOfStack)
    {
        m_endOfStack = endOfStack;
    }

    // Get one of the heap structures for this thread.
    //
    // The heap is split into multiple heap parts based on object
    // types. To get the index for a given type, use
    // HeapTypeTrait<Type>::index.
    BaseHeap* heap(int index) const { return m_heaps[index]; }

    // Infrastructure to determine if an address is within one of the
    // address ranges for the Blink heap. If the address is in the Blink
    // heap the containing heap page is returned.
    HeapContainsCache* heapContainsCache() { return m_heapContainsCache.get(); }
    BaseHeapPage* contains(Address address) { return heapPageFromAddress(address); }
    BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
    BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }

    WrapperPersistentRegion* wrapperRoots() const
    {
        ASSERT(m_liveWrapperPersistents);
        return m_liveWrapperPersistents;
    }
    WrapperPersistentRegion* takeWrapperPersistentRegion();
    void freeWrapperPersistentRegion(WrapperPersistentRegion*);

    // List of persistent roots allocated on the given thread.
    PersistentNode* roots() const { return m_persistents.get(); }

    // List of global persistent roots not owned by any particular thread.
    // globalRootsMutex must be acquired before any modifications.
    static PersistentNode* globalRoots();
    static Mutex& globalRootsMutex();

    // Visit local thread stack and trace all pointers conservatively.
    void visitStack(Visitor*);

    // Visit the asan fake stack frame corresponding to a slot on the
    // real machine stack if there is one.
    void visitAsanFakeStackForPointer(Visitor*, Address);

    // Visit all persistents allocated on this thread.
    void visitPersistents(Visitor*);

    // Checks a given address and, if it is a pointer into the Oilpan heap,
    // marks the object to which it points.
    bool checkAndMarkPointer(Visitor*, Address);

#if ENABLE(GC_PROFILE_MARKING)
    const GCInfo* findGCInfo(Address);
    static const GCInfo* findGCInfoFromAllThreads(Address);
#endif

#if ENABLE(GC_PROFILE_HEAP)
    struct SnapshotInfo {
        ThreadState* state;

        size_t freeSize;
        size_t pageCount;

        // Map from base classes to snapshot class-ids (used as indices below).
        HashMap<const GCInfo*, size_t> classTags;

        // Map from class-id (index) to count/size.
        Vector<int> liveCount;
        Vector<int> deadCount;
        Vector<size_t> liveSize;
        Vector<size_t> deadSize;

        // Map from class-id (index) to a vector of generation counts.
        // For i < 7, the count is the number of objects that died after surviving |i| GCs.
        // For i == 7, the count is the number of objects that survived at least 7 GCs.
        Vector<Vector<int, 8> > generations;

        explicit SnapshotInfo(ThreadState* state) : state(state), freeSize(0), pageCount(0) { }

        size_t getClassTag(const GCInfo*);
    };

    void snapshot();
#endif

    void pushWeakObjectPointerCallback(void*, WeakPointerCallback);
    bool popAndInvokeWeakPointerCallback(Visitor*);

    void getStats(HeapStats&);
    HeapStats& stats() { return m_stats; }
    HeapStats& statsAfterLastGC() { return m_statsAfterLastGC; }

    void setupHeapsForTermination();

    void registerSweepingTask();
    void unregisterSweepingTask();

    Mutex& sweepMutex() { return m_sweepMutex; }

private:
    ThreadState();
    ~ThreadState();

    friend class SafePointBarrier;
    friend class SafePointAwareMutexLocker;

    void enterSafePoint(StackState, void*);
    NO_SANITIZE_ADDRESS void copyStackUntilSafePointScope();
    void clearSafePointScopeMarker()
    {
        m_safePointStackCopy.clear();
        m_safePointScopeMarker = 0;
    }

    void performPendingGC(StackState);

    // Finds the Blink HeapPage in this thread-specific heap
    // corresponding to a given address. Returns 0 if the address is
    // not contained in any of the pages. This does not consider
    // large objects.
    BaseHeapPage* heapPageFromAddress(Address);

    // When a ThreadState is detaching from a non-main thread its
    // heap is expected to be empty (because it is going away).
    // Perform registered cleanup tasks and a garbage collection
    // to sweep away any objects that are left on this heap.
    // We assert that nothing remains after this cleanup.
    // If the assertion does not hold we crash, as we are potentially
    // in a dangling pointer situation.
    void cleanup();
    void cleanupPages();

    void setLowCollectionRate(bool value) { m_lowCollectionRate = value; }

    void waitUntilSweepersDone();

    static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific;
    static SafePointBarrier* s_safePointBarrier;

    // This variable is flipped to true after all threads are stopped
    // and the outermost GC has started.
    static bool s_inGC;

    // We can't create a static member of type ThreadState here
    // because it would introduce a global constructor and destructor.
    // We would like to manage the lifetime of the ThreadState attached
    // to the main thread explicitly instead and still use a normal
    // constructor and destructor for the ThreadState class.
    // For this we reserve static storage for the main ThreadState
    // and lazily construct the ThreadState in it using placement new.
    static uint8_t s_mainThreadStateStorage[];

    ThreadIdentifier m_thread;
    WrapperPersistentRegion* m_liveWrapperPersistents;
    WrapperPersistentRegion* m_pooledWrapperPersistents;
    size_t m_pooledWrapperPersistentRegionCount;
    OwnPtr<PersistentNode> m_persistents;
    StackState m_stackState;
    intptr_t* m_startOfStack;
    intptr_t* m_endOfStack;
    void* m_safePointScopeMarker;
    Vector<Address> m_safePointStackCopy;
    bool m_atSafePoint;
    Vector<Interruptor*> m_interruptors;
    bool m_gcRequested;
    bool m_forcePreciseGCForTesting;
    volatile int m_sweepRequested;
    bool m_sweepInProgress;
    size_t m_noAllocationCount;
    bool m_inGC;
    BaseHeap* m_heaps[NumberOfHeaps];
    OwnPtr<HeapContainsCache> m_heapContainsCache;
    HeapStats m_stats;
    HeapStats m_statsAfterLastGC;

    Vector<OwnPtr<CleanupTask> > m_cleanupTasks;
    bool m_isTerminating;

    bool m_lowCollectionRate;

    OwnPtr<blink::WebThread> m_sweeperThread;
    int m_numberOfSweeperTasks;
    Mutex m_sweepMutex;
    ThreadCondition m_sweepThreadCondition;

    CallbackStack* m_weakCallbackStack;

#if defined(ADDRESS_SANITIZER)
    void* m_asanFakeStack;
#endif
};

template<ThreadAffinity affinity> class ThreadStateFor;

template<> class ThreadStateFor<MainThreadOnly> {
public:
    static ThreadState* state()
    {
        // This specialization must only be used from the main thread.
        ASSERT(ThreadState::current()->isMainThread());
        return ThreadState::mainThreadState();
    }
};

template<> class ThreadStateFor<AnyThread> {
public:
    static ThreadState* state() { return ThreadState::current(); }
};
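
// Typical resolution (illustrative): callers pick the right specialization
// through ThreadingTrait, so main-thread-only types skip thread-local storage:
//
//   ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();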

// The SafePointAwareMutexLocker is used to enter a safepoint while waiting for
// a mutex lock. It also ensures that the lock is not held while waiting for a GC
// to complete in the leaveSafePoint method, by releasing the lock if the
// leaveSafePoint method cannot complete without blocking; see
// SafePointBarrier::checkAndPark.
class SafePointAwareMutexLocker {
    WTF_MAKE_NONCOPYABLE(SafePointAwareMutexLocker);
public:
    explicit SafePointAwareMutexLocker(MutexBase& mutex, ThreadState::StackState stackState = ThreadState::HeapPointersOnStack)
        : m_mutex(mutex)
        , m_locked(false)
    {
        ThreadState* state = ThreadState::current();
        do {
            bool leaveSafePoint = false;
            // We cannot enter a safepoint if we are currently sweeping. In that
            // case we just try to acquire the lock without being at a safepoint.
            // If another thread tries to do a GC at that time it might time out
            // due to this thread not being at a safepoint and waiting on the lock.
            if (!state->isSweepInProgress() && !state->isAtSafePoint()) {
                state->enterSafePoint(stackState, this);
                leaveSafePoint = true;
            }
            m_mutex.lock();
            m_locked = true;
            if (leaveSafePoint) {
                // When leaving the safepoint we might end up releasing the mutex
                // if another thread is requesting a GC, see
                // SafePointBarrier::checkAndPark. This is the case where we
                // loop around to reacquire the lock.
                state->leaveSafePoint(this);
            }
        } while (!m_locked);
    }

    ~SafePointAwareMutexLocker()
    {
        ASSERT(m_locked);
        m_mutex.unlock();
    }

private:
    friend class SafePointBarrier;

    void reset()
    {
        ASSERT(m_locked);
        m_mutex.unlock();
        m_locked = false;
    }

    MutexBase& m_mutex;
    bool m_locked;
};
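
// Usage sketch (illustrative; globalMutex() stands in for any MutexBase):
//
//   {
//       SafePointAwareMutexLocker locker(globalMutex());
//       ... critical section; other threads may GC while we wait ...
//   }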

// Common header for heap pages. Needs to be defined before class Visitor.
class BaseHeapPage {
public:
    BaseHeapPage(PageMemory*, const GCInfo*, ThreadState*);
    virtual ~BaseHeapPage() { }

    // Check if the given address points to an object in this
    // heap page. If so, find the start of that object and mark it
    // using the given Visitor. Otherwise do nothing. The pointer must
    // be within the same aligned blinkPageSize as the this-pointer.
    //
    // This is used during conservative stack scanning to
    // conservatively mark all objects that could be referenced from
    // the stack.
    virtual void checkAndMarkPointer(Visitor*, Address) = 0;
    virtual bool contains(Address) = 0;

#if ENABLE(GC_PROFILE_MARKING)
    virtual const GCInfo* findGCInfo(Address) = 0;
#endif

    Address address() { return reinterpret_cast<Address>(this); }
    PageMemory* storage() const { return m_storage; }
    ThreadState* threadState() const { return m_threadState; }
    const GCInfo* gcInfo() { return m_gcInfo; }
    virtual bool isLargeObject() { return false; }
    virtual void markOrphaned()
    {
        m_threadState = 0;
        m_gcInfo = 0;
        m_terminating = false;
        m_tracedAfterOrphaned = false;
    }
    bool orphaned() { return !m_threadState; }
    bool terminating() { return m_terminating; }
    void setTerminating() { m_terminating = true; }
    bool tracedAfterOrphaned() { return m_tracedAfterOrphaned; }
    void setTracedAfterOrphaned() { m_tracedAfterOrphaned = true; }
    size_t promptlyFreedSize() { return m_promptlyFreedSize; }
    void resetPromptlyFreedSize() { m_promptlyFreedSize = 0; }
    void addToPromptlyFreedSize(size_t size) { m_promptlyFreedSize += size; }

private:
    PageMemory* m_storage;
    const GCInfo* m_gcInfo;
    ThreadState* m_threadState;
    // Pointer sized integer to ensure proper alignment of the
    // HeapPage header. We use some of the bits to determine
    // whether the page is part of a terminating thread or
    // if the page is traced after being terminated (orphaned).
    uintptr_t m_terminating : 1;
    uintptr_t m_tracedAfterOrphaned : 1;
    uintptr_t m_promptlyFreedSize : 17; // == blinkPageSizeLog2
};

} // namespace blink

#endif // ThreadState_h