// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STUB_CACHE_H_
#define V8_STUB_CACHE_H_

#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


// The stub cache is used for megamorphic property accesses.
// It maps (map, name, type) to property access handlers. The cache does not
// need explicit invalidation when a prototype chain is modified, since the
// handlers verify the chain.

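// To make that contract concrete, a rough sketch of the lookup flow as seen
// from the runtime (illustrative only; the fast path is emitted as
// architecture-specific assembly by GenerateProbe() below, and the miss
// sentinel is elided here):
//
//   StubCache* cache = isolate->stub_cache();
//   Code* handler = cache->Get(name, map, flags);  // probes both tables
//   if (/* probe missed */) {
//     // ... obtain a handler from the runtime, then publish it:
//     cache->Set(name, map, handler);  // may demote an entry to secondary
//   }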

class SCTableReference {
 public:
  Address address() const { return address_; }

 private:
  explicit SCTableReference(Address address) : address_(address) {}

  Address address_;

  friend class StubCache;
};


class StubCache {
 public:
  struct Entry {
    Name* key;
    Code* value;
    Map* map;
  };

  void Initialize();
  // Access cache for entry hash(name, map).
  Code* Set(Name* name, Map* map, Code* code);
  Code* Get(Name* name, Map* map, Code::Flags flags);
  // Clear the lookup table (during mark-compact collection).
  void Clear();
  // Collect all maps that match the name and flags.
  void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
                           Code::Flags flags, Handle<Context> native_context,
                           Zone* zone);
  // Generate code for probing the stub cache table.
  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
  // registers. Set to no_reg if not needed.
  // If leave_frame is true, then exit a frame before the tail call.
  void GenerateProbe(MacroAssembler* masm, Code::Flags flags, bool leave_frame,
                     Register receiver, Register name, Register scratch,
                     Register extra, Register extra2 = no_reg,
                     Register extra3 = no_reg);
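
  // Typical call site in an IC stub generator (a hedged sketch; the concrete
  // flags value and the register assignments are architecture-specific and
  // purely hypothetical here):
  //
  //   isolate->stub_cache()->GenerateProbe(
  //       masm, flags, false /* leave_frame */,
  //       receiver_reg, name_reg, scratch_reg, extra_reg);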

  enum Table { kPrimary, kSecondary };

  SCTableReference key_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->key));
  }

  SCTableReference map_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->map));
  }

  SCTableReference value_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->value));
  }

  StubCache::Entry* first_entry(StubCache::Table table) {
    switch (table) {
      case StubCache::kPrimary:
        return StubCache::primary_;
      case StubCache::kSecondary:
        return StubCache::secondary_;
    }
    UNREACHABLE();
    return NULL;
  }

  Isolate* isolate() { return isolate_; }

  // Setting the entry size such that the index is shifted by Name::kHashShift
  // is convenient; shifting down the length field (to extract the hash code)
  // automatically discards the hash bit field.
  static const int kCacheIndexShift = Name::kHashShift;
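
  // Concretely (assuming Name::kHashShift == 2, a representative value): the
  // offsets computed by the hash functions below are always multiples of
  // 1 << kCacheIndexShift == 4, so the two low hash-flag bits of a name's
  // hash field can never influence which table entry is selected, and no
  // explicit masking of those bits is required.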

 private:
  explicit StubCache(Isolate* isolate);

  // The stub cache has a primary and secondary level.  The two levels have
  // different hashing algorithms in order to avoid simultaneous collisions
  // in both caches.  Unlike a probing strategy (quadratic or otherwise), the
  // update strategy is fairly clear and simple:  Any existing entry in the
  // primary cache is moved to the secondary cache, and secondary cache
  // entries are overwritten.
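  //
  // A minimal sketch of what Set() does under this strategy (illustrative
  // pseudocode; the real implementation lives in stub-cache.cc and
  // recomputes the flags and seed from the displaced entry):
  //
  //   int primary_offset = PrimaryOffset(name, flags, map);
  //   Entry* primary = entry(primary_, primary_offset);
  //   if (/* primary slot holds a live entry */) {
  //     int secondary_offset =
  //         SecondaryOffset(primary->key, old_flags, primary_offset);
  //     *entry(secondary_, secondary_offset) = *primary;  // demote; this may
  //                                                       // clobber an older
  //                                                       // secondary entry
  //   }
  //   primary->key = name;
  //   primary->value = code;
  //   primary->map = map;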

  // Hash algorithm for the primary table.  This algorithm is replicated in
  // assembler for every architecture.  Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
    STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
    // Compute the hash of the name (use entire hash field).
    DCHECK(name->HasHashCode());
    uint32_t field = name->hash_field();
    // Using only the low bits in 64-bit mode is unlikely to increase the
    // risk of collision even if the heap is spread over an area larger than
    // 4 GB (and not at all if it isn't).
    uint32_t map_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
    // We always set the in_loop bit to zero when generating the lookup code,
    // so do it here too so that the hash codes match.
    uint32_t iflags =
        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
    // Base the offset on a simple combination of name, flags, and map.
    uint32_t key = (map_low32bits + field) ^ iflags;
    return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
  }
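
  // Worked example of PrimaryOffset() with hypothetical values (and assuming
  // Name::kHashShift == 2, so the mask is (kPrimaryTableSize - 1) << 2 ==
  // 0x1ffc):
  //
  //   field         = 0x12345670
  //   map_low32bits = 0x0badf00d
  //   iflags        = 0x00000180
  //   key           = (0x0badf00d + 0x12345670) ^ 0x00000180 == 0x1de247fd
  //   offset        = 0x1de247fd & 0x1ffc == 0x07fc   // a multiple of 4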

  // Hash algorithm for the secondary table.  This algorithm is replicated in
  // assembler for every architecture.  Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
    // Use the seed from the primary cache in the secondary cache.
    uint32_t name_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
    // We always set the in_loop bit to zero when generating the lookup code,
    // so do it here too so that the hash codes match.
    uint32_t iflags =
        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
    uint32_t key = (seed - name_low32bits) + iflags;
    return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
  }
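
  // The two offsets chain together during a probe: the primary offset doubles
  // as the seed for the secondary probe (a sketch mirroring the lookup logic
  // in stub-cache.cc):
  //
  //   int primary_offset = PrimaryOffset(name, flags, map);
  //   if (/* entry(primary_, primary_offset) does not match */) {
  //     int secondary_offset = SecondaryOffset(name, flags, primary_offset);
  //     // ... probe entry(secondary_, secondary_offset) ...
  //   }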

  // Compute the entry for a given offset in exactly the same way as
  // we do in generated code.  We generate a hash code that already
  // ends in Name::kHashShift zero bits.  Then we multiply it so that it is a
  // multiple of sizeof(Entry).  This makes it easier to avoid making
  // mistakes in the hashed offset computations.
  static Entry* entry(Entry* table, int offset) {
    const int multiplier = sizeof(*table) >> Name::kHashShift;
    return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
                                    offset * multiplier);
  }
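
  // Worked example (assuming a 64-bit build, so sizeof(Entry) == 24, and
  // Name::kHashShift == 2): multiplier == 24 >> 2 == 6, so an offset of 8
  // (table index 2) becomes a byte offset of 8 * 6 == 48 == 2 * sizeof(Entry),
  // i.e. exactly the address of the third entry.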

  static const int kPrimaryTableBits = 11;
  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
  static const int kSecondaryTableBits = 9;
  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);

 private:
  Entry primary_[kPrimaryTableSize];
  Entry secondary_[kSecondaryTableSize];
  Isolate* isolate_;

  friend class Isolate;
  friend class SCTableReference;

  DISALLOW_COPY_AND_ASSIGN(StubCache);
};
}  // namespace internal
}  // namespace v8

#endif  // V8_STUB_CACHE_H_