1/*
2 * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights reserved.
3 * Copyright (C) 2008 David Levin <levin@chromium.org>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
12 *
13 * You should have received a copy of the GNU Library General Public License
14 * along with this library; see the file COPYING.LIB.  If not, write to
15 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
16 * Boston, MA 02110-1301, USA.
17 *
18 */
19
20#ifndef WTF_HashTable_h
21#define WTF_HashTable_h
22
23#include "wtf/Alignment.h"
24#include "wtf/Assertions.h"
25#include "wtf/DefaultAllocator.h"
26#include "wtf/HashTraits.h"
27#include "wtf/WTF.h"
28
29#define DUMP_HASHTABLE_STATS 0
30#define DUMP_HASHTABLE_STATS_PER_TABLE 0
31
32#if DUMP_HASHTABLE_STATS_PER_TABLE
33#include "wtf/DataLog.h"
34#endif
35
36#if DUMP_HASHTABLE_STATS
37#if DUMP_HASHTABLE_STATS_PER_TABLE
38#define UPDATE_PROBE_COUNTS()                            \
39    ++probeCount;                                        \
40    HashTableStats::recordCollisionAtCount(probeCount);  \
41    ++perTableProbeCount;                                \
42    m_stats->recordCollisionAtCount(perTableProbeCount)
43#define UPDATE_ACCESS_COUNTS()                           \
44    atomicIncrement(&HashTableStats::numAccesses);       \
45    int probeCount = 0;                                  \
46    ++m_stats->numAccesses;                              \
47    int perTableProbeCount = 0
48#else
49#define UPDATE_PROBE_COUNTS()                            \
50    ++probeCount;                                        \
51    HashTableStats::recordCollisionAtCount(probeCount)
52#define UPDATE_ACCESS_COUNTS()                           \
53    atomicIncrement(&HashTableStats::numAccesses);       \
54    int probeCount = 0
55#endif
56#else
57#if DUMP_HASHTABLE_STATS_PER_TABLE
58#define UPDATE_PROBE_COUNTS()                            \
59    ++perTableProbeCount;                                \
60    m_stats->recordCollisionAtCount(perTableProbeCount)
61#define UPDATE_ACCESS_COUNTS()                           \
62    ++m_stats->numAccesses;                              \
63    int perTableProbeCount = 0
64#else
65#define UPDATE_PROBE_COUNTS() do { } while (0)
66#define UPDATE_ACCESS_COUNTS() do { } while (0)
67#endif
68#endif
69
70namespace WTF {
71
72#if DUMP_HASHTABLE_STATS
73
74    struct HashTableStats {
75        // The following variables are all atomically incremented when modified.
76        static int numAccesses;
77        static int numRehashes;
78        static int numRemoves;
79        static int numReinserts;
80
        // The following variables are only modified in recordCollisionAtCount, which runs while holding a mutex.
82        static int maxCollisions;
83        static int numCollisions;
84        static int collisionGraph[4096];
85
86        static void recordCollisionAtCount(int count);
87        static void dumpStats();
88    };
89
90#endif
91
92    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
93    class HashTable;
94    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
95    class HashTableIterator;
96    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
97    class HashTableConstIterator;
98    template<typename Value, typename HashFunctions, typename HashTraits, typename Allocator>
99    class LinkedHashSet;
100    template<WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z>
101    struct WeakProcessingHashTableHelper;
102
103    typedef enum { HashItemKnownGood } HashItemKnownGoodTag;
104
105    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
106    class HashTableConstIterator {
107    private:
108        typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
109        typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
110        typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
111        typedef Value ValueType;
112        typedef typename Traits::IteratorConstGetType GetType;
113        typedef const ValueType* PointerType;
114
115        friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
116        friend class HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
117
118        void skipEmptyBuckets()
119        {
120            while (m_position != m_endPosition && HashTableType::isEmptyOrDeletedBucket(*m_position))
121                ++m_position;
122        }
123
124        HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container)
125            : m_position(position)
126            , m_endPosition(endPosition)
127#if ENABLE(ASSERT)
128            , m_container(container)
129            , m_containerModifications(container->modifications())
130#endif
131        {
132            skipEmptyBuckets();
133        }
134
135        HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container, HashItemKnownGoodTag)
136            : m_position(position)
137            , m_endPosition(endPosition)
138#if ENABLE(ASSERT)
139            , m_container(container)
140            , m_containerModifications(container->modifications())
141#endif
142        {
143            ASSERT(m_containerModifications == m_container->modifications());
144        }
145
146        void checkModifications() const
147        {
148            // HashTable and collections that build on it do not support
149            // modifications while there is an iterator in use. The exception
150            // is ListHashSet, which has its own iterators that tolerate
151            // modification of the underlying set.
152            ASSERT(m_containerModifications == m_container->modifications());
153        }
154
155    public:
156        HashTableConstIterator()
157        {
158        }
159
160        GetType get() const
161        {
162            checkModifications();
163            return m_position;
164        }
165        typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
166        GetType operator->() const { return get(); }
167
168        const_iterator& operator++()
169        {
170            ASSERT(m_position != m_endPosition);
171            checkModifications();
172            ++m_position;
173            skipEmptyBuckets();
174            return *this;
175        }
176
177        // postfix ++ intentionally omitted
178
179        // Comparison.
180        bool operator==(const const_iterator& other) const
181        {
182            return m_position == other.m_position;
183        }
184        bool operator!=(const const_iterator& other) const
185        {
186            return m_position != other.m_position;
187        }
188        bool operator==(const iterator& other) const
189        {
190            return *this == static_cast<const_iterator>(other);
191        }
192        bool operator!=(const iterator& other) const
193        {
194            return *this != static_cast<const_iterator>(other);
195        }
196
197    private:
198        PointerType m_position;
199        PointerType m_endPosition;
200#if ENABLE(ASSERT)
201        const HashTableType* m_container;
202        int64_t m_containerModifications;
203#endif
204    };
205
206    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
207    class HashTableIterator {
208    private:
209        typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
210        typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
211        typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
212        typedef Value ValueType;
213        typedef typename Traits::IteratorGetType GetType;
214        typedef ValueType* PointerType;
215
216        friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
217
218        HashTableIterator(PointerType pos, PointerType end, const HashTableType* container) : m_iterator(pos, end, container) { }
219        HashTableIterator(PointerType pos, PointerType end, const HashTableType* container, HashItemKnownGoodTag tag) : m_iterator(pos, end, container, tag) { }
220
221    public:
222        HashTableIterator() { }
223
224        // default copy, assignment and destructor are OK
225
226        GetType get() const { return const_cast<GetType>(m_iterator.get()); }
227        typename Traits::IteratorReferenceType operator*() const { return Traits::getToReferenceConversion(get()); }
228        GetType operator->() const { return get(); }
229
230        iterator& operator++() { ++m_iterator; return *this; }
231
232        // postfix ++ intentionally omitted
233
234        // Comparison.
235        bool operator==(const iterator& other) const { return m_iterator == other.m_iterator; }
236        bool operator!=(const iterator& other) const { return m_iterator != other.m_iterator; }
237        bool operator==(const const_iterator& other) const { return m_iterator == other; }
238        bool operator!=(const const_iterator& other) const { return m_iterator != other; }
239
240        operator const_iterator() const { return m_iterator; }
241
242    private:
243        const_iterator m_iterator;
244    };
245
246    using std::swap;
247
248    // Work around MSVC's standard library, whose swap for pairs does not swap by component.
249    template<typename T> inline void hashTableSwap(T& a, T& b)
250    {
251        swap(a, b);
252    }
253
254    template<typename T, typename U> inline void hashTableSwap(KeyValuePair<T, U>& a, KeyValuePair<T, U>& b)
255    {
256        swap(a.key, b.key);
257        swap(a.value, b.value);
258    }
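
    // So swapping two KeyValuePair buckets exchanges .key with .key and
    // .value with .value, which is what Mover below relies on when it moves
    // entries between backing stores without allocating.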
259
260    template<typename T, typename Allocator, bool useSwap> struct Mover;
261    template<typename T, typename Allocator> struct Mover<T, Allocator, true> {
262        static void move(T& from, T& to)
263        {
264            // A swap operation should not normally allocate, but it may do so
265            // if it is falling back on some sort of triple assignment in the
266            // style of t = a; a = b; b = t because there is no overloaded swap
            // operation. We can't allow allocation, not only because it is
            // slower than a true swap operation, but also because allocation
            // implies allowing GC: we cannot allow a GC after swapping only
            // the key.
270            // The value is only traced if the key is present and therefore the
271            // GC will not see the value in the old backing if the key has been
272            // moved to the new backing. Therefore, we cannot allow GC until
273            // after both key and value have been moved.
274            Allocator::enterNoAllocationScope();
275            hashTableSwap(from, to);
276            Allocator::leaveNoAllocationScope();
277        }
278    };
279    template<typename T, typename Allocator> struct Mover<T, Allocator, false> {
280        static void move(T& from, T& to) { to = from; }
281    };
282
283    template<typename HashFunctions> class IdentityHashTranslator {
284    public:
285        template<typename T> static unsigned hash(const T& key) { return HashFunctions::hash(key); }
286        template<typename T, typename U> static bool equal(const T& a, const U& b) { return HashFunctions::equal(a, b); }
287        template<typename T, typename U, typename V> static void translate(T& location, const U&, const V& value) { location = value; }
288    };
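
    // A HashTranslator bundles the hash() and equal() functions used to probe
    // the table with an alternate key type, plus translate(), which writes the
    // stored value when an add() through the translator inserts a new entry.
    // An illustrative sketch for a String-keyed table probed with raw
    // characters (the hashRawChars() helper is hypothetical, not part of this
    // header):
    //
    //     struct RawCharsTranslator {
    //         static unsigned hash(const char* chars) { return hashRawChars(chars); }
    //         static bool equal(const String& key, const char* chars) { return key == chars; }
    //         template<typename Extra>
    //         static void translate(String& location, const char* chars, const Extra&)
    //         {
    //             location = String(chars); // convert to the stored key type only on insertion
    //         }
    //     };
    //
    // Such a translator is passed as find<RawCharsTranslator>(chars) or
    // add<RawCharsTranslator>(chars, extra) on the table and on the
    // collections built on top of it.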
289
290    template<typename HashTableType, typename ValueType> struct HashTableAddResult {
291        HashTableAddResult(const HashTableType* container, ValueType* storedValue, bool isNewEntry)
292            : storedValue(storedValue)
293            , isNewEntry(isNewEntry)
294#if ENABLE(SECURITY_ASSERT)
295            , m_container(container)
296            , m_containerModifications(container->modifications())
297#endif
298        {
299            ASSERT_UNUSED(container, container);
300        }
301
302        ~HashTableAddResult()
303        {
            // If a rehash happened before storedValue was accessed, using it
            // is a use-after-free. Any modification may cause a rehash, so we
            // check for modifications here.
            // A rehash after storedValue has been accessed is harmless, but
            // will still assert if the AddResult destructor runs after a
            // modification. You may need to limit the scope of the AddResult.
310            ASSERT_WITH_SECURITY_IMPLICATION(m_containerModifications == m_container->modifications());
311        }
312
313        ValueType* storedValue;
314        bool isNewEntry;
315
316#if ENABLE(SECURITY_ASSERT)
317    private:
318        const HashTableType* m_container;
319        const int64_t m_containerModifications;
320#endif
321    };
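
    // Sketch of the intended scoping, using HashMap (one of the collections
    // built on this table); storedValue must be consumed before the next
    // operation that could rehash (doSomethingWith() is a hypothetical
    // consumer):
    //
    //     HashMap<int, String> map;
    //     {
    //         HashMap<int, String>::AddResult result = map.add(1, "one");
    //         if (result.isNewEntry)
    //             doSomethingWith(*result.storedValue);
    //     } // The AddResult goes out of scope here ...
    //     map.add(2, "two"); // ... so a rehash caused by this add is fine.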
322
323    template<typename Value, typename Extractor, typename KeyTraits>
324    struct HashTableHelper {
325        static bool isEmptyBucket(const Value& value) { return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); }
326        static bool isDeletedBucket(const Value& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); }
327        static bool isEmptyOrDeletedBucket(const Value& value) { return isEmptyBucket(value) || isDeletedBucket(value); }
328    };
329
330    template<typename HashTranslator, typename KeyTraits, bool safeToCompareToEmptyOrDeleted>
331    struct HashTableKeyChecker {
332        // There's no simple generic way to make this check if safeToCompareToEmptyOrDeleted is false,
333        // so the check always passes.
334        template <typename T>
335        static bool checkKey(const T&) { return true; }
336    };
337
338    template<typename HashTranslator, typename KeyTraits>
339    struct HashTableKeyChecker<HashTranslator, KeyTraits, true> {
340        template <typename T>
341        static bool checkKey(const T& key)
342        {
            // FIXME: Also check equality to the deleted value.
344            return !HashTranslator::equal(KeyTraits::emptyValue(), key);
345        }
346    };
347
348    // Don't declare a destructor for HeapAllocated hash tables.
349    template<typename Derived, bool isGarbageCollected>
350    class HashTableDestructorBase;
351
352    template<typename Derived>
353    class HashTableDestructorBase<Derived, true> { };
354
355    template<typename Derived>
356    class HashTableDestructorBase<Derived, false> {
357    public:
358        ~HashTableDestructorBase() { static_cast<Derived*>(this)->finalize(); }
359    };
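
    // The two specializations above use the curiously recurring template
    // pattern: the non-garbage-collected variant casts itself to Derived so
    // its destructor can call the table's finalize(), while the
    // garbage-collected variant intentionally declares no destructor at all.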
360
361    // Note: empty or deleted key values are not allowed, using them may lead to undefined behavior.
362    // For pointer keys this means that null pointers are not allowed unless you supply custom key traits.
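    // (The reserved values come from the key traits; default integer traits,
    // for instance, typically reserve 0 as the empty value and a sentinel such
    // as -1 as the deleted value, so a table that must store those keys needs
    // traits that reserve something else.)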
363    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
364    class HashTable : public HashTableDestructorBase<HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>, Allocator::isGarbageCollected> {
365    public:
366        typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
367        typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
368        typedef Traits ValueTraits;
369        typedef Key KeyType;
370        typedef typename KeyTraits::PeekInType KeyPeekInType;
371        typedef typename KeyTraits::PassInType KeyPassInType;
372        typedef Value ValueType;
373        typedef Extractor ExtractorType;
374        typedef KeyTraits KeyTraitsType;
375        typedef typename Traits::PassInType ValuePassInType;
376        typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType;
377        typedef HashTableAddResult<HashTable, ValueType> AddResult;
378
379#if DUMP_HASHTABLE_STATS_PER_TABLE
380        struct Stats {
381            Stats()
382                : numAccesses(0)
383                , numRehashes(0)
384                , numRemoves(0)
385                , numReinserts(0)
386                , maxCollisions(0)
387                , numCollisions(0)
388                , collisionGraph()
389            {
390            }
391
392            int numAccesses;
393            int numRehashes;
394            int numRemoves;
395            int numReinserts;
396
397            int maxCollisions;
398            int numCollisions;
399            int collisionGraph[4096];
400
401            void recordCollisionAtCount(int count)
402            {
403                if (count > maxCollisions)
404                    maxCollisions = count;
405                numCollisions++;
406                collisionGraph[count]++;
407            }
408
409            void dumpStats()
410            {
411                dataLogF("\nWTF::HashTable::Stats dump\n\n");
412                dataLogF("%d accesses\n", numAccesses);
413                dataLogF("%d total collisions, average %.2f probes per access\n", numCollisions, 1.0 * (numAccesses + numCollisions) / numAccesses);
414                dataLogF("longest collision chain: %d\n", maxCollisions);
415                for (int i = 1; i <= maxCollisions; i++) {
416                    dataLogF("  %d lookups with exactly %d collisions (%.2f%% , %.2f%% with this many or more)\n", collisionGraph[i], i, 100.0 * (collisionGraph[i] - collisionGraph[i+1]) / numAccesses, 100.0 * collisionGraph[i] / numAccesses);
417                }
418                dataLogF("%d rehashes\n", numRehashes);
419                dataLogF("%d reinserts\n", numReinserts);
420            }
421        };
422#endif
423
424        HashTable();
425        void finalize()
426        {
427            ASSERT(!Allocator::isGarbageCollected);
428            if (LIKELY(!m_table))
429                return;
430            deleteAllBucketsAndDeallocate(m_table, m_tableSize);
431            m_table = 0;
432        }
433
434        HashTable(const HashTable&);
435        void swap(HashTable&);
436        HashTable& operator=(const HashTable&);
437
438        // When the hash table is empty, just return the same iterator for end as for begin.
439        // This is more efficient because we don't have to skip all the empty and deleted
440        // buckets, and iterating an empty table is a common case that's worth optimizing.
441        iterator begin() { return isEmpty() ? end() : makeIterator(m_table); }
442        iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); }
443        const_iterator begin() const { return isEmpty() ? end() : makeConstIterator(m_table); }
444        const_iterator end() const { return makeKnownGoodConstIterator(m_table + m_tableSize); }
445
446        unsigned size() const { return m_keyCount; }
447        unsigned capacity() const { return m_tableSize; }
448        bool isEmpty() const { return !m_keyCount; }
449
450        AddResult add(ValuePassInType value)
451        {
452            return add<IdentityTranslatorType>(Extractor::extract(value), value);
453        }
454
455        // A special version of add() that finds the object by hashing and comparing
456        // with some other type, to avoid the cost of type conversion if the object is already
457        // in the table.
458        template<typename HashTranslator, typename T, typename Extra> AddResult add(const T& key, const Extra&);
459        template<typename HashTranslator, typename T, typename Extra> AddResult addPassingHashCode(const T& key, const Extra&);
460
461        iterator find(KeyPeekInType key) { return find<IdentityTranslatorType>(key); }
462        const_iterator find(KeyPeekInType key) const { return find<IdentityTranslatorType>(key); }
463        bool contains(KeyPeekInType key) const { return contains<IdentityTranslatorType>(key); }
464
465        template<typename HashTranslator, typename T> iterator find(const T&);
466        template<typename HashTranslator, typename T> const_iterator find(const T&) const;
467        template<typename HashTranslator, typename T> bool contains(const T&) const;
468
469        void remove(KeyPeekInType);
470        void remove(iterator);
471        void remove(const_iterator);
472        void clear();
473
474        static bool isEmptyBucket(const ValueType& value) { return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); }
475        static bool isDeletedBucket(const ValueType& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); }
        static bool isEmptyOrDeletedBucket(const ValueType& value) { return HashTableHelper<ValueType, Extractor, KeyTraits>::isEmptyOrDeletedBucket(value); }
477
478        ValueType* lookup(KeyPeekInType key) { return lookup<IdentityTranslatorType, KeyPeekInType>(key); }
479        template<typename HashTranslator, typename T> ValueType* lookup(T);
480        template<typename HashTranslator, typename T> const ValueType* lookup(T) const;
481
482        void trace(typename Allocator::Visitor*);
483
484#if ENABLE(ASSERT)
485        int64_t modifications() const { return m_modifications; }
486        void registerModification() { m_modifications++; }
487        // HashTable and collections that build on it do not support
488        // modifications while there is an iterator in use. The exception is
489        // ListHashSet, which has its own iterators that tolerate modification
490        // of the underlying set.
491        void checkModifications(int64_t mods) const { ASSERT(mods == m_modifications); }
492#else
493        int64_t modifications() const { return 0; }
494        void registerModification() { }
495        void checkModifications(int64_t mods) const { }
496#endif
497
498    private:
499        static ValueType* allocateTable(unsigned size);
500        static void deleteAllBucketsAndDeallocate(ValueType* table, unsigned size);
501
502        typedef std::pair<ValueType*, bool> LookupType;
503        typedef std::pair<LookupType, unsigned> FullLookupType;
504
        LookupType lookupForWriting(const Key& key) { return lookupForWriting<IdentityTranslatorType>(key); }
506        template<typename HashTranslator, typename T> FullLookupType fullLookupForWriting(const T&);
507        template<typename HashTranslator, typename T> LookupType lookupForWriting(const T&);
508
509        void remove(ValueType*);
510
511        bool shouldExpand() const { return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize; }
512        bool mustRehashInPlace() const { return m_keyCount * m_minLoad < m_tableSize * 2; }
513        bool shouldShrink() const
514        {
            // The isAllocationAllowed check comes last because it is
            // expensive.
517            return m_keyCount * m_minLoad < m_tableSize
518                && m_tableSize > KeyTraits::minimumTableSize
519                && Allocator::isAllocationAllowed();
520        }
521        ValueType* expand(ValueType* entry = 0);
522        void shrink() { rehash(m_tableSize / 2, 0); }
523
524        ValueType* rehash(unsigned newTableSize, ValueType* entry);
525        ValueType* reinsert(ValueType&);
526
527        static void initializeBucket(ValueType& bucket);
528        static void deleteBucket(ValueType& bucket) { bucket.~ValueType(); Traits::constructDeletedValue(bucket, Allocator::isGarbageCollected); }
529
530        FullLookupType makeLookupResult(ValueType* position, bool found, unsigned hash)
531            { return FullLookupType(LookupType(position, found), hash); }
532
533        iterator makeIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this); }
534        const_iterator makeConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this); }
535        iterator makeKnownGoodIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
536        const_iterator makeKnownGoodConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
537
538        static const unsigned m_maxLoad = 2;
539        static const unsigned m_minLoad = 6;
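        // Put differently (see shouldExpand, mustRehashInPlace and
        // shouldShrink above): the table expands once live plus deleted
        // entries fill half of it, shrinks once live entries drop below a
        // sixth of it, and rehashes at its current size when live entries
        // alone are under a third of it, since then the pressure comes from
        // deleted slots rather than from growth. For a 64-bucket table:
        // expand at 32 occupied slots, shrink below 11 live keys, and rehash
        // in place when at most 21 of the occupied slots are live.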
540
541        unsigned tableSizeMask() const
542        {
543            size_t mask = m_tableSize - 1;
544            ASSERT((mask & m_tableSize) == 0);
545            return mask;
546        }
547
548        void setEnqueued() { m_queueFlag = true; }
549        void clearEnqueued() { m_queueFlag = false; }
550        bool enqueued() { return m_queueFlag; }
551
552        ValueType* m_table;
553        unsigned m_tableSize;
554        unsigned m_keyCount;
555        unsigned m_deletedCount:31;
556        bool m_queueFlag:1;
557#if ENABLE(ASSERT)
558        unsigned m_modifications;
559#endif
560
561#if DUMP_HASHTABLE_STATS_PER_TABLE
562    public:
563        mutable OwnPtr<Stats> m_stats;
564#endif
565
566        template<WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z> friend struct WeakProcessingHashTableHelper;
567        template<typename T, typename U, typename V, typename W> friend class LinkedHashSet;
568    };
569
    // Set to one all the bits at and below the most significant set bit: 00110101010 -> 00111111111.
571    template<unsigned size> struct OneifyLowBits;
572    template<>
573    struct OneifyLowBits<0> {
574        static const unsigned value = 0;
575    };
576    template<unsigned number>
577    struct OneifyLowBits {
578        static const unsigned value = number | OneifyLowBits<(number >> 1)>::value;
579    };
    // Compute a power-of-two upper bound for 'number': twice the smallest power of two that is greater than or equal to 'number'.
581    template<unsigned number>
582    struct UpperPowerOfTwoBound {
583        static const unsigned value = (OneifyLowBits<number - 1>::value + 1) * 2;
584    };
585
    // A size that is itself a power of two would land exactly on the maxLoad
    // limit (its UpperPowerOfTwoBound is only 2 * size), so such sizes use
    // 4 times their value as the capacity instead.
588    template<unsigned size, bool isPowerOfTwo> struct HashTableCapacityForSizeSplitter;
589    template<unsigned size>
590    struct HashTableCapacityForSizeSplitter<size, true> {
591        static const unsigned value = size * 4;
592    };
593    template<unsigned size>
594    struct HashTableCapacityForSizeSplitter<size, false> {
595        static const unsigned value = UpperPowerOfTwoBound<size>::value;
596    };
597
    // HashTableCapacityForSize computes the power-of-two capacity needed to
    // hold 'size' entries. This is done at compile time to initialize the
    // HashTraits.
600    template<unsigned size>
601    struct HashTableCapacityForSize {
602        static const unsigned value = HashTableCapacityForSizeSplitter<size, !(size & (size - 1))>::value;
603        COMPILE_ASSERT(size > 0, HashTableNonZeroMinimumCapacity);
604        COMPILE_ASSERT(!static_cast<int>(value >> 31), HashTableNoCapacityOverflow);
605        COMPILE_ASSERT(value > (2 * size), HashTableCapacityHoldsContentSize);
606    };
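
    // Worked examples of the computation above:
    //   HashTableCapacityForSize<4>::value  == 16  (4 is a power of two, so 4 * 4)
    //   HashTableCapacityForSize<10>::value == 32  (OneifyLowBits<9> == 15, so
    //                                               UpperPowerOfTwoBound<10> == (15 + 1) * 2)
    // Both satisfy the capacity > 2 * size assertion, which is what a maxLoad
    // of 2 requires in order to hold 'size' entries without immediately
    // expanding.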
607
608    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
609    inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable()
610        : m_table(0)
611        , m_tableSize(0)
612        , m_keyCount(0)
613        , m_deletedCount(0)
614        , m_queueFlag(false)
615#if ENABLE(ASSERT)
616        , m_modifications(0)
617#endif
618#if DUMP_HASHTABLE_STATS_PER_TABLE
619        , m_stats(adoptPtr(new Stats))
620#endif
621    {
622    }
623
624    inline unsigned doubleHash(unsigned key)
625    {
626        key = ~key + (key >> 23);
627        key ^= (key << 12);
628        key ^= (key >> 7);
629        key ^= (key << 2);
630        key ^= (key >> 20);
631        return key;
632    }
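
    // Probing sketch: the lookup and insertion loops below use doubleHash() as
    // the secondary hash for open addressing with double hashing. The step is
    // computed as k = 1 | doubleHash(h), which forces it to be odd; because
    // the table size is always a power of two, an odd step is coprime with it,
    // so the probe sequence i = (i + k) & sizeMask visits every bucket before
    // repeating.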
633
634    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
635    template<typename HashTranslator, typename T>
636    inline Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key)
637    {
638        return const_cast<Value*>(const_cast<const HashTable*>(this)->lookup<HashTranslator, T>(key));
639    }
640
641    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
642    template<typename HashTranslator, typename T>
643    inline const Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key) const
644    {
645        ASSERT((HashTableKeyChecker<HashTranslator, KeyTraits, HashFunctions::safeToCompareToEmptyOrDeleted>::checkKey(key)));
646        const ValueType* table = m_table;
647        if (!table)
648            return 0;
649
650        size_t k = 0;
651        size_t sizeMask = tableSizeMask();
652        unsigned h = HashTranslator::hash(key);
653        size_t i = h & sizeMask;
654
655        UPDATE_ACCESS_COUNTS();
656
657        while (1) {
658            const ValueType* entry = table + i;
659
660            if (HashFunctions::safeToCompareToEmptyOrDeleted) {
661                if (HashTranslator::equal(Extractor::extract(*entry), key))
662                    return entry;
663
664                if (isEmptyBucket(*entry))
665                    return 0;
666            } else {
667                if (isEmptyBucket(*entry))
668                    return 0;
669
670                if (!isDeletedBucket(*entry) && HashTranslator::equal(Extractor::extract(*entry), key))
671                    return entry;
672            }
673            UPDATE_PROBE_COUNTS();
674            if (!k)
675                k = 1 | doubleHash(h);
676            i = (i + k) & sizeMask;
677        }
678    }
679
680    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
681    template<typename HashTranslator, typename T>
682    inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::LookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookupForWriting(const T& key)
683    {
684        ASSERT(m_table);
685        registerModification();
686
687        ValueType* table = m_table;
688        size_t k = 0;
689        size_t sizeMask = tableSizeMask();
690        unsigned h = HashTranslator::hash(key);
691        size_t i = h & sizeMask;
692
693        UPDATE_ACCESS_COUNTS();
694
695        ValueType* deletedEntry = 0;
696
697        while (1) {
698            ValueType* entry = table + i;
699
700            if (isEmptyBucket(*entry))
701                return LookupType(deletedEntry ? deletedEntry : entry, false);
702
703            if (HashFunctions::safeToCompareToEmptyOrDeleted) {
704                if (HashTranslator::equal(Extractor::extract(*entry), key))
705                    return LookupType(entry, true);
706
707                if (isDeletedBucket(*entry))
708                    deletedEntry = entry;
709            } else {
710                if (isDeletedBucket(*entry))
711                    deletedEntry = entry;
712                else if (HashTranslator::equal(Extractor::extract(*entry), key))
713                    return LookupType(entry, true);
714            }
715            UPDATE_PROBE_COUNTS();
716            if (!k)
717                k = 1 | doubleHash(h);
718            i = (i + k) & sizeMask;
719        }
720    }
721
722    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
723    template<typename HashTranslator, typename T>
724    inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::FullLookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::fullLookupForWriting(const T& key)
725    {
726        ASSERT(m_table);
727        registerModification();
728
729        ValueType* table = m_table;
730        size_t k = 0;
731        size_t sizeMask = tableSizeMask();
732        unsigned h = HashTranslator::hash(key);
733        size_t i = h & sizeMask;
734
735        UPDATE_ACCESS_COUNTS();
736
737        ValueType* deletedEntry = 0;
738
739        while (1) {
740            ValueType* entry = table + i;
741
742            if (isEmptyBucket(*entry))
743                return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);
744
745            if (HashFunctions::safeToCompareToEmptyOrDeleted) {
746                if (HashTranslator::equal(Extractor::extract(*entry), key))
747                    return makeLookupResult(entry, true, h);
748
749                if (isDeletedBucket(*entry))
750                    deletedEntry = entry;
751            } else {
752                if (isDeletedBucket(*entry))
753                    deletedEntry = entry;
754                else if (HashTranslator::equal(Extractor::extract(*entry), key))
755                    return makeLookupResult(entry, true, h);
756            }
757            UPDATE_PROBE_COUNTS();
758            if (!k)
759                k = 1 | doubleHash(h);
760            i = (i + k) & sizeMask;
761        }
762    }
763
764    template<bool emptyValueIsZero> struct HashTableBucketInitializer;
765
766    template<> struct HashTableBucketInitializer<false> {
767        template<typename Traits, typename Value> static void initialize(Value& bucket)
768        {
769            new (NotNull, &bucket) Value(Traits::emptyValue());
770        }
771    };
772
773    template<> struct HashTableBucketInitializer<true> {
774        template<typename Traits, typename Value> static void initialize(Value& bucket)
775        {
776            // This initializes the bucket without copying the empty value.
777            // That makes it possible to use this with types that don't support copying.
778            // The memset to 0 looks like a slow operation but is optimized by the compilers.
779            memset(&bucket, 0, sizeof(bucket));
780        }
781    };
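
    // For example, when the key traits report emptyValueIsZero (as the usual
    // integer and pointer traits do), allocateTable() below can hand out a
    // zero-filled allocation and skip per-bucket construction entirely; only
    // traits whose empty value is not all-zero pay for a placement new per
    // bucket.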
782
783    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
784    inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::initializeBucket(ValueType& bucket)
785    {
        // For hash maps the key and value cannot be initialized simultaneously,
        // and it would be wrong to have a GC when only one was initialized and
        // the other still contained garbage (e.g. from a previous use of the
        // same slot). Therefore we forbid allocation (and thus GC) while the
        // slot is initialized to an empty value.
791        Allocator::enterNoAllocationScope();
792        HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize<Traits>(bucket);
793        Allocator::leaveNoAllocationScope();
794    }
795
796    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
797    template<typename HashTranslator, typename T, typename Extra>
798    typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::add(const T& key, const Extra& extra)
799    {
800        ASSERT(Allocator::isAllocationAllowed());
801        if (!m_table)
802            expand();
803
804        ASSERT(m_table);
805
806        ValueType* table = m_table;
807        size_t k = 0;
808        size_t sizeMask = tableSizeMask();
809        unsigned h = HashTranslator::hash(key);
810        size_t i = h & sizeMask;
811
812        UPDATE_ACCESS_COUNTS();
813
814        ValueType* deletedEntry = 0;
815        ValueType* entry;
816        while (1) {
817            entry = table + i;
818
819            if (isEmptyBucket(*entry))
820                break;
821
822            if (HashFunctions::safeToCompareToEmptyOrDeleted) {
823                if (HashTranslator::equal(Extractor::extract(*entry), key))
824                    return AddResult(this, entry, false);
825
826                if (isDeletedBucket(*entry))
827                    deletedEntry = entry;
828            } else {
829                if (isDeletedBucket(*entry))
830                    deletedEntry = entry;
831                else if (HashTranslator::equal(Extractor::extract(*entry), key))
832                    return AddResult(this, entry, false);
833            }
834            UPDATE_PROBE_COUNTS();
835            if (!k)
836                k = 1 | doubleHash(h);
837            i = (i + k) & sizeMask;
838        }
839
840        registerModification();
841
842        if (deletedEntry) {
843            // Overwrite any data left over from last use, using placement new
844            // or memset.
845            initializeBucket(*deletedEntry);
846            entry = deletedEntry;
847            --m_deletedCount;
848        }
849
850        HashTranslator::translate(*entry, key, extra);
851        ASSERT(!isEmptyOrDeletedBucket(*entry));
852
853        ++m_keyCount;
854
855        if (shouldExpand())
856            entry = expand(entry);
857
858        return AddResult(this, entry, true);
859    }
860
861    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
862    template<typename HashTranslator, typename T, typename Extra>
863    typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::addPassingHashCode(const T& key, const Extra& extra)
864    {
865        ASSERT(Allocator::isAllocationAllowed());
866        if (!m_table)
867            expand();
868
869        FullLookupType lookupResult = fullLookupForWriting<HashTranslator>(key);
870
871        ValueType* entry = lookupResult.first.first;
872        bool found = lookupResult.first.second;
873        unsigned h = lookupResult.second;
874
875        if (found)
876            return AddResult(this, entry, false);
877
878        registerModification();
879
880        if (isDeletedBucket(*entry)) {
881            initializeBucket(*entry);
882            --m_deletedCount;
883        }
884
885        HashTranslator::translate(*entry, key, extra, h);
886        ASSERT(!isEmptyOrDeletedBucket(*entry));
887
888        ++m_keyCount;
889        if (shouldExpand())
890            entry = expand(entry);
891
892        return AddResult(this, entry, true);
893    }
894
895    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
896    Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::reinsert(ValueType& entry)
897    {
898        ASSERT(m_table);
899        registerModification();
900        ASSERT(!lookupForWriting(Extractor::extract(entry)).second);
901        ASSERT(!isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first)));
902#if DUMP_HASHTABLE_STATS
903        atomicIncrement(&HashTableStats::numReinserts);
904#endif
905#if DUMP_HASHTABLE_STATS_PER_TABLE
906        ++m_stats->numReinserts;
907#endif
908        Value* newEntry = lookupForWriting(Extractor::extract(entry)).first;
909        Mover<ValueType, Allocator, Traits::needsDestruction>::move(entry, *newEntry);
910
911        return newEntry;
912    }
913
914    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
915    template <typename HashTranslator, typename T>
916    inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key)
917    {
918        ValueType* entry = lookup<HashTranslator>(key);
919        if (!entry)
920            return end();
921
922        return makeKnownGoodIterator(entry);
923    }
924
925    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
926    template <typename HashTranslator, typename T>
927    inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::const_iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key) const
928    {
929        ValueType* entry = const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
930        if (!entry)
931            return end();
932
933        return makeKnownGoodConstIterator(entry);
934    }
935
936    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
937    template <typename HashTranslator, typename T>
938    bool HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::contains(const T& key) const
939    {
940        return const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
941    }
942
943    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
944    void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(ValueType* pos)
945    {
946        registerModification();
947#if DUMP_HASHTABLE_STATS
948        atomicIncrement(&HashTableStats::numRemoves);
949#endif
950#if DUMP_HASHTABLE_STATS_PER_TABLE
951        ++m_stats->numRemoves;
952#endif
953
954        deleteBucket(*pos);
955        ++m_deletedCount;
956        --m_keyCount;
957
958        if (shouldShrink())
959            shrink();
960    }
961
962    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
963    inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(iterator it)
964    {
965        if (it == end())
966            return;
967
968        remove(const_cast<ValueType*>(it.m_iterator.m_position));
969    }
970
971    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
972    inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(const_iterator it)
973    {
974        if (it == end())
975            return;
976
977        remove(const_cast<ValueType*>(it.m_position));
978    }
979
980    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
981    inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(KeyPeekInType key)
982    {
983        remove(find(key));
984    }
985
986    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
987    Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::allocateTable(unsigned size)
988    {
989        typedef typename Allocator::template HashTableBackingHelper<HashTable>::Type HashTableBacking;
990
991        size_t allocSize = size * sizeof(ValueType);
992        ValueType* result;
993        // Assert that we will not use memset on things with a vtable entry.
994        // The compiler will also check this on some platforms. We would
995        // like to check this on the whole value (key-value pair), but
996        // IsPolymorphic will return false for a pair of two types, even if
997        // one of the components is polymorphic.
998        COMPILE_ASSERT(!Traits::emptyValueIsZero || !IsPolymorphic<KeyType>::value, EmptyValueCannotBeZeroForThingsWithAVtable);
999        if (Traits::emptyValueIsZero) {
1000            result = Allocator::template zeroedBackingMalloc<ValueType*, HashTableBacking>(allocSize);
1001        } else {
1002            result = Allocator::template backingMalloc<ValueType*, HashTableBacking>(allocSize);
1003            for (unsigned i = 0; i < size; i++)
1004                initializeBucket(result[i]);
1005        }
1006        return result;
1007    }
1008
1009    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1010    void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::deleteAllBucketsAndDeallocate(ValueType* table, unsigned size)
1011    {
1012        if (Traits::needsDestruction) {
1013            for (unsigned i = 0; i < size; ++i) {
1014                // This code is called when the hash table is cleared or
1015                // resized. We have allocated a new backing store and we need
1016                // to run the destructors on the old backing store, as it is
1017                // being freed. If we are GCing we need to both call the
1018                // destructor and mark the bucket as deleted, otherwise the
1019                // destructor gets called again when the GC finds the backing
1020                // store. With the default allocator it's enough to call the
1021                // destructor, since we will free the memory explicitly and
1022                // we won't see the memory with the bucket again.
1023                if (!isEmptyOrDeletedBucket(table[i])) {
1024                    if (Allocator::isGarbageCollected)
1025                        deleteBucket(table[i]);
1026                    else
1027                        table[i].~ValueType();
1028                }
1029            }
1030        }
1031        Allocator::backingFree(table);
1032    }
1033
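    // Growth policy in brief (a sketch derived from expand() below and the
    // load constants in the class): the table starts at
    // KeyTraits::minimumTableSize on the first add, doubles whenever
    // shouldExpand() fires, and rehashes at its current size when
    // mustRehashInPlace() reports that live entries alone are well under the
    // load limit, i.e. the table is mostly reclaiming deleted slots rather
    // than growing.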
1034    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1035    Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::expand(Value* entry)
1036    {
1037        unsigned newSize;
1038        if (!m_tableSize) {
1039            newSize = KeyTraits::minimumTableSize;
1040        } else if (mustRehashInPlace()) {
1041            newSize = m_tableSize;
1042        } else {
1043            newSize = m_tableSize * 2;
1044            RELEASE_ASSERT(newSize > m_tableSize);
1045        }
1046
1047        return rehash(newSize, entry);
1048    }
1049
1050    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1051    Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::rehash(unsigned newTableSize, Value* entry)
1052    {
1053        unsigned oldTableSize = m_tableSize;
1054        ValueType* oldTable = m_table;
1055
1056#if DUMP_HASHTABLE_STATS
1057        if (oldTableSize != 0)
1058            atomicIncrement(&HashTableStats::numRehashes);
1059#endif
1060
1061#if DUMP_HASHTABLE_STATS_PER_TABLE
1062        if (oldTableSize != 0)
1063            ++m_stats->numRehashes;
1064#endif
1065
1066        m_table = allocateTable(newTableSize);
1067        m_tableSize = newTableSize;
1068
1069        Value* newEntry = 0;
1070        for (unsigned i = 0; i != oldTableSize; ++i) {
1071            if (isEmptyOrDeletedBucket(oldTable[i])) {
1072                ASSERT(&oldTable[i] != entry);
1073                continue;
1074            }
1075
1076            Value* reinsertedEntry = reinsert(oldTable[i]);
1077            if (&oldTable[i] == entry) {
1078                ASSERT(!newEntry);
1079                newEntry = reinsertedEntry;
1080            }
1081        }
1082
1083        m_deletedCount = 0;
1084
1085        deleteAllBucketsAndDeallocate(oldTable, oldTableSize);
1086
1087        return newEntry;
1088    }
1089
1090    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1091    void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::clear()
1092    {
1093        registerModification();
1094        if (!m_table)
1095            return;
1096
1097        deleteAllBucketsAndDeallocate(m_table, m_tableSize);
1098        m_table = 0;
1099        m_tableSize = 0;
1100        m_keyCount = 0;
1101    }
1102
1103    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1104    HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable(const HashTable& other)
1105        : m_table(0)
1106        , m_tableSize(0)
1107        , m_keyCount(0)
1108        , m_deletedCount(0)
1109        , m_queueFlag(false)
1110#if ENABLE(ASSERT)
1111        , m_modifications(0)
1112#endif
1113#if DUMP_HASHTABLE_STATS_PER_TABLE
1114        , m_stats(adoptPtr(new Stats(*other.m_stats)))
1115#endif
1116    {
1117        // Copy the hash table the dumb way, by adding each element to the new table.
1118        // It might be more efficient to copy the table slots, but it's not clear that efficiency is needed.
1119        const_iterator end = other.end();
1120        for (const_iterator it = other.begin(); it != end; ++it)
1121            add(*it);
1122    }
1123
1124    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1125    void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::swap(HashTable& other)
1126    {
1127        std::swap(m_table, other.m_table);
1128        std::swap(m_tableSize, other.m_tableSize);
1129        std::swap(m_keyCount, other.m_keyCount);
1130        // std::swap does not work for bit fields.
1131        unsigned deleted = m_deletedCount;
1132        m_deletedCount = other.m_deletedCount;
1133        other.m_deletedCount = deleted;
1134        ASSERT(!m_queueFlag);
1135        ASSERT(!other.m_queueFlag);
1136
1137#if ENABLE(ASSERT)
1138        std::swap(m_modifications, other.m_modifications);
1139#endif
1140
1141#if DUMP_HASHTABLE_STATS_PER_TABLE
1142        m_stats.swap(other.m_stats);
1143#endif
1144    }
1145
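    // The assignment operator below is implemented with copy-and-swap: the
    // copy constructor builds the new contents into a temporary, and swap()
    // then hands the temporary's backing store to *this.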
1146    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1147    HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>& HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::operator=(const HashTable& other)
1148    {
1149        HashTable tmp(other);
1150        swap(tmp);
1151        return *this;
1152    }
1153
1154    template<WeakHandlingFlag weakHandlingFlag, typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1155    struct WeakProcessingHashTableHelper;
1156
1157    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1158    struct WeakProcessingHashTableHelper<NoWeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
1159        static void process(typename Allocator::Visitor* visitor, void* closure) { }
1160        static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure) { }
1161        static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure) { }
1162    };
1163
1164    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1165    struct WeakProcessingHashTableHelper<WeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
1166        // Used for purely weak and for weak-and-strong tables (ephemerons).
1167        static void process(typename Allocator::Visitor* visitor, void* closure)
1168        {
1169            typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
1170            HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1171            if (table->m_table) {
1172                // This is run as part of weak processing after full
1173                // marking. The backing store is therefore marked if
1174                // we get here.
1175                ASSERT(visitor->isAlive(table->m_table));
1176                // Now perform weak processing (this is a no-op if the backing
1177                // was accessible through an iterator and was already marked
1178                // strongly).
1179                typedef typename HashTableType::ValueType ValueType;
1180                for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
1181                    if (!HashTableType::isEmptyOrDeletedBucket(*element)) {
1182                        // At this stage calling trace can make no difference
1183                        // (everything is already traced), but we use the
1184                        // return value to remove things from the collection.
1185                        if (TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element)) {
1186                            table->registerModification();
1187                            HashTableType::deleteBucket(*element); // Also calls the destructor.
1188                            table->m_deletedCount++;
1189                            table->m_keyCount--;
1190                            // We don't rehash the backing until the next add
1191                            // or delete, because that would cause allocation
1192                            // during GC.
1193                        }
1194                    }
1195                }
1196            }
1197        }
1198
1199        // Called repeatedly for tables that have both weak and strong pointers.
1200        static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure)
1201        {
1202            typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
1203            HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1204            if (table->m_table) {
1205                // Check the hash table for elements that we now know will not
1206                // be removed by weak processing. Those elements need to have
1207                // their strong pointers traced.
1208                typedef typename HashTableType::ValueType ValueType;
1209                for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
1210                    if (!HashTableType::isEmptyOrDeletedBucket(*element))
1211                        TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element);
1212                }
1213            }
1214        }
1215
1216        // Called when the ephemeron iteration is done and before running the per thread
1217        // weak processing. It is guaranteed to be called before any thread is resumed.
        static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure)
        {
            typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
            HashTableType* table = reinterpret_cast<HashTableType*>(closure);
            ASSERT(Allocator::weakTableRegistered(visitor, table));
            table->clearEnqueued();
        }
    };

    template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
    void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::trace(typename Allocator::Visitor* visitor)
    {
        // If someone else already marked the backing and queued up the trace
        // and/or weak callback, then we are done. This optimization does not
        // happen for ListHashSet since its iterator does not point at the
        // backing.
        if (!m_table || visitor->isAlive(m_table))
            return;
        // Normally, we mark the backing store without tracing its contents.
        // This means it is marked live, but the pointers inside it are not
        // marked; instead we mark those pointers below. However, for backing
        // stores that contain weak pointers the handling is rather different.
        // We don't mark the backing store here, so the marking GC will leave
        // the backing unmarked. If the backing is found in any other way than
        // through its HashTable (i.e., from an iterator), then the mark bit
        // will be set and the pointers will be marked strongly, avoiding
        // problems with iterating over things that disappear due to weak
        // processing while we are iterating over them. We register the
        // backing store pointer for delayed marking, which will take place
        // after we know whether the backing is reachable from elsewhere. We
        // also register a weak-processing callback which will perform weak
        // processing if needed.
        if (Traits::weakHandlingFlag == NoWeakHandlingInCollections) {
            Allocator::markNoTracing(visitor, m_table);
        } else {
            Allocator::registerDelayedMarkNoTracing(visitor, m_table);
            Allocator::registerWeakMembers(visitor, this, m_table, WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::process);
        }
        if (ShouldBeTraced<Traits>::value) {
            if (Traits::weakHandlingFlag == WeakHandlingInCollections) {
                // If we have both strong and weak pointers in the collection
                // then we queue up the collection for fixed point iteration a
                // la Ephemerons:
                // http://dl.acm.org/citation.cfm?doid=263698.263733 - see also
                // http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak
                ASSERT(!enqueued() || Allocator::weakTableRegistered(visitor, this));
                if (!enqueued()) {
                    Allocator::registerWeakTable(visitor, this,
                        WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIteration,
                        WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIterationDone);
                    setEnqueued();
                }
                // We don't need to trace the elements here, since registering
                // as a weak table above will cause them to be traced (perhaps
                // several times). It's better to wait until everything else is
                // traced before tracing the elements for the first time; this
                // may reduce (by one) the number of iterations needed to get
                // to a fixed point.
                return;
            }
            for (ValueType* element = m_table + m_tableSize - 1; element >= m_table; element--) {
                if (!isEmptyOrDeletedBucket(*element))
                    Allocator::template trace<ValueType, Traits>(visitor, *element);
            }
        }
    }
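
    // Illustrative sketch only: the ephemeron path above applies to
    // collections that mix weak and strong references, for example, assuming
    // Oilpan's HeapHashMap, WeakMember and Member types (declared outside
    // WTF):
    //
    //     HeapHashMap<WeakMember<Key>, Member<Value> > cache;
    //
    // A Value should stay alive only while its Key is otherwise reachable,
    // so the table is re-scanned via ephemeronIteration until a fixed point
    // is reached, and dead entries are then removed by process().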

    // iterator adapters

    template<typename HashTableType, typename Traits> struct HashTableConstIteratorAdapter {
        HashTableConstIteratorAdapter() {}
        HashTableConstIteratorAdapter(const typename HashTableType::const_iterator& impl) : m_impl(impl) {}
        typedef typename Traits::IteratorConstGetType GetType;
        typedef typename HashTableType::ValueTraits::IteratorConstGetType SourceGetType;

        GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
        typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
        GetType operator->() const { return get(); }

        HashTableConstIteratorAdapter& operator++() { ++m_impl; return *this; }
        // postfix ++ intentionally omitted

        typename HashTableType::const_iterator m_impl;
    };

    template<typename HashTableType, typename Traits> struct HashTableIteratorAdapter {
        typedef typename Traits::IteratorGetType GetType;
        typedef typename HashTableType::ValueTraits::IteratorGetType SourceGetType;

        HashTableIteratorAdapter() {}
        HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_impl(impl) {}

        GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
        typename Traits::IteratorReferenceType operator*() const { return Traits::getToReferenceConversion(get()); }
        GetType operator->() const { return get(); }

        HashTableIteratorAdapter& operator++() { ++m_impl; return *this; }
        // postfix ++ intentionally omitted

        operator HashTableConstIteratorAdapter<HashTableType, Traits>()
        {
            typename HashTableType::const_iterator i = m_impl;
            return i;
        }

        typename HashTableType::iterator m_impl;
    };
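
    // Usage sketch (illustrative only): HashMap in wtf/HashMap.h builds its
    // iterators on these adapters, so the conversion operator above is what
    // lets a mutable iterator be assigned to a const_iterator:
    //
    //     HashMap<int, int> map;
    //     map.add(1, 2);
    //     HashMap<int, int>::iterator it = map.begin();
    //     HashMap<int, int>::const_iterator constIt = it;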

    template<typename T, typename U>
    inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
    {
        return a.m_impl == b.m_impl;
    }

    template<typename T, typename U>
    inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
    {
        return a.m_impl != b.m_impl;
    }

    template<typename T, typename U>
    inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
    {
        return a.m_impl == b.m_impl;
    }

    template<typename T, typename U>
    inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
    {
        return a.m_impl != b.m_impl;
    }

    // The 4 mixed combinations of {==, !=} comparing const and non-const adapters.
    template<typename T, typename U>
    inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
    {
        return a.m_impl == b.m_impl;
    }

    template<typename T, typename U>
    inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
    {
        return a.m_impl != b.m_impl;
    }

    template<typename T, typename U>
    inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
    {
        return a.m_impl == b.m_impl;
    }

    template<typename T, typename U>
    inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
    {
        return a.m_impl != b.m_impl;
    }

    template<typename Collection1, typename Collection2>
    inline void removeAll(Collection1& collection, const Collection2& toBeRemoved)
    {
        if (collection.isEmpty() || toBeRemoved.isEmpty())
            return;
        typedef typename Collection2::const_iterator CollectionIterator;
        CollectionIterator end(toBeRemoved.end());
        for (CollectionIterator it(toBeRemoved.begin()); it != end; ++it)
            collection.remove(*it);
    }
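
    // Example (illustrative only; HashSet is declared in wtf/HashSet.h, which
    // is not included by this header):
    //
    //     HashSet<int> live;
    //     live.add(1);
    //     live.add(2);
    //     live.add(3);
    //     HashSet<int> doomed;
    //     doomed.add(2);
    //     removeAll(live, doomed); // live now contains 1 and 3.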

} // namespace WTF

#include "wtf/HashIterators.h"

#endif // WTF_HashTable_h
