// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_STORE_BUFFER_H_
#define V8_STORE_BUFFER_H_

#include "allocation.h"
#include "checks.h"
#include "globals.h"
#include "platform.h"
#include "v8globals.h"

namespace v8 {
namespace internal {

class StoreBuffer;

typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

typedef void (StoreBuffer::*RegionCallback)(
    Address start, Address end, ObjectSlotCallback slot_callback);
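
// A slot callback receives the address of a slot holding an old-to-new
// pointer, plus the object it currently points to.  A minimal sketch of
// such a callback (the function name is illustrative, not part of V8):
//
//   void ExampleSlotCallback(HeapObject** slot, HeapObject* target) {
//     // On entry *slot == target.  The callback may update *slot, e.g.
//     // after the scavenger has moved the target object.
//   }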

// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
class StoreBuffer {
 public:
  explicit StoreBuffer(Heap* heap);

  static void StoreBufferOverflow(Isolate* isolate);

  inline Address TopAddress();

  void SetUp();
  void TearDown();

  // This is used by the mutator to enter addresses into the store buffer.
  inline void Mark(Address addr);
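
  // A sketch of how the write barrier might feed Mark: when the mutator
  // stores a new-space pointer into an old-space object, the slot address
  // is recorded.  (RecordWriteSketch is an illustrative name, not V8 API.)
  //
  //   void RecordWriteSketch(Heap* heap, Address slot) {
  //     Object* value = *reinterpret_cast<Object**>(slot);
  //     if (heap->InNewSpace(value)) {
  //       heap->store_buffer()->Mark(slot);
  //     }
  //   }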
62
63  // This is used by the heap traversal to enter the addresses into the store
64  // buffer that should still be in the store buffer after GC.  It enters
65  // addresses directly into the old buffer because the GC starts by wiping the
66  // old buffer and thereafter only visits each cell once so there is no need
67  // to attempt to remove any dupes.  During the first part of a GC we
68  // are using the store buffer to access the old spaces and at the same time
69  // we are rebuilding the store buffer using this function.  There is, however
70  // no issue of overwriting the buffer we are iterating over, because this
71  // stage of the scavenge can only reduce the number of addresses in the store
72  // buffer (some objects are promoted so pointers to them do not need to be in
73  // the store buffer).  The later parts of the GC scan the pages that are
74  // exempt from the store buffer and process the promotion queue.  These steps
75  // can overflow this buffer.  We check for this and on overflow we call the
76  // callback set up with the StoreBufferRebuildScope object.
77  inline void EnterDirectlyIntoStoreBuffer(Address addr);
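
  // A sketch of a rebuilding slot callback for use during a scavenge (the
  // name and the heap access are illustrative assumptions):
  //
  //   void ScavengeSlotSketch(HeapObject** slot, HeapObject* target) {
  //     if (heap->InNewSpace(target)) {
  //       // Target stayed in new space: the old-to-new slot must remain
  //       // recorded for the next scavenge.
  //       heap->store_buffer()->EnterDirectlyIntoStoreBuffer(
  //           reinterpret_cast<Address>(slot));
  //     }
  //     // Promoted targets need no entry: the pointer is now old-to-old.
  //   }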
78
79  // Iterates over all pointers that go from old space to new space.  It will
80  // delete the store buffer as it starts so the callback should reenter
81  // surviving old-to-new pointers into the store buffer to rebuild it.
82  void IteratePointersToNewSpace(ObjectSlotCallback callback);
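
  // During a scavenge this is typically driven with a rebuilding callback
  // like the sketch above, e.g.:
  //
  //   heap_->store_buffer()->IteratePointersToNewSpace(&ScavengeSlotSketch);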

  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
  static const int kStoreBufferSize = kStoreBufferOverflowBit;
  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
  static const int kHashSetLengthLog2 = 12;
  static const int kHashSetLength = 1 << kHashSetLengthLog2;
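
  // Worked out for a 64-bit build (kPointerSizeLog2 == 3): the overflow bit
  // is 1 << 17, so the new buffer is 128 KB and holds 128 KB / 8 == 16384
  // slots; the old buffer holds 16 * 16384 == 262144 slots (2 MB).  On a
  // 32-bit build the buffer is 64 KB but still 16384 slots, since the slot
  // size halves as well.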

  void Compact();

  void GCPrologue();
  void GCEpilogue();

  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
  void SetTop(Object*** top) {
    ASSERT(top >= Start());
    ASSERT(top <= Limit());
    old_top_ = reinterpret_cast<Address*>(top);
  }

  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }

  // Goes through the store buffer removing pointers to things that have
  // been promoted.  Rebuilds the store buffer completely if it overflowed.
  void SortUniq();
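
  // Conceptually SortUniq is close to the following over the old buffer (a
  // sketch only; the real implementation also maintains the filtering hash
  // sets and rebuilds everything after an overflow):
  //
  //   std::sort(Start(), Top());
  //   SetTop(std::unique(Start(), Top()));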

  void EnsureSpace(intptr_t space_needed);
  void Verify();

  bool PrepareForIteration();

#ifdef DEBUG
  void Clean();
  // Slow, for asserts only.
  bool CellIsInStoreBuffer(Address cell);
#endif

  void Filter(int flag);

 private:
  Heap* heap_;

  // The store buffer is divided up into a new buffer that is constantly being
  // filled by mutator activity and an old buffer that is filled with the data
  // from the new buffer after compression.
  Address* start_;
  Address* limit_;

  Address* old_start_;
  Address* old_limit_;
  Address* old_top_;
  Address* old_reserved_limit_;
  VirtualMemory* old_virtual_memory_;

  bool old_buffer_is_sorted_;
  bool old_buffer_is_filtered_;
  bool during_gc_;
  // The garbage collector iterates over many pointers to new space that are
  // not handled by the store buffer.  This flag indicates whether the
  // pointers found by the callbacks should be added to the store buffer or
  // not.
  bool store_buffer_rebuilding_enabled_;
  StoreBufferCallback callback_;
  bool may_move_store_buffer_entries_;

  VirtualMemory* virtual_memory_;

  // Two hash sets used for filtering.
  // If an address is in a hash set then it is guaranteed to be in the
  // old part of the store buffer.
  uintptr_t* hash_set_1_;
  uintptr_t* hash_set_2_;
  bool hash_sets_are_empty_;
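
  // Duplicate filtering during compaction, as a conceptual sketch (the real
  // hash functions live in the .cc file; the ones below are illustrative
  // assumptions):
  //
  //   uintptr_t int_addr = reinterpret_cast<uintptr_t>(addr);
  //   int hash1 = (int_addr >> kPointerSizeLog2) & (kHashSetLength - 1);
  //   int hash2 = (int_addr >> (kPointerSizeLog2 + kHashSetLengthLog2)) &
  //               (kHashSetLength - 1);
  //   if (hash_set_1_[hash1] == int_addr || hash_set_2_[hash2] == int_addr) {
  //     return;  // Already present in the old buffer; drop the duplicate.
  //   }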

  void ClearFilteringHashSets();

  void CheckForFullBuffer();
  void Uniq();
  void ExemptPopularPages(int prime_sample_step, int threshold);

  void FindPointersToNewSpaceInRegion(Address start,
                                      Address end,
                                      ObjectSlotCallback slot_callback);

  // For each region of pointers on a page in use from an old space, call the
  // region_callback.  If either the region_callback or the slot_callback can
  // cause an allocation in old space and thus move the allocation watermark,
  // then can_preallocate_during_iteration should be set to true.
  void IteratePointersOnPage(
      PagedSpace* space,
      Page* page,
      RegionCallback region_callback,
      ObjectSlotCallback slot_callback);

  void FindPointersToNewSpaceInMaps(
      Address start,
      Address end,
      ObjectSlotCallback slot_callback);

  void FindPointersToNewSpaceInMapsRegion(
      Address start,
      Address end,
      ObjectSlotCallback slot_callback);

  void FindPointersToNewSpaceOnPage(
      PagedSpace* space,
      Page* page,
      RegionCallback region_callback,
      ObjectSlotCallback slot_callback);

  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);

#ifdef DEBUG
  void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
  void VerifyPointers(LargeObjectSpace* space);
#endif

  friend class StoreBufferRebuildScope;
  friend class DontMoveStoreBufferEntriesScope;
};


class StoreBufferRebuildScope {
 public:
  explicit StoreBufferRebuildScope(Heap* heap,
                                   StoreBuffer* store_buffer,
                                   StoreBufferCallback callback)
      : heap_(heap),
        store_buffer_(store_buffer),
        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
        stored_callback_(store_buffer->callback_) {
    store_buffer_->store_buffer_rebuilding_enabled_ = true;
    store_buffer_->callback_ = callback;
    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
  }

  ~StoreBufferRebuildScope() {
    store_buffer_->callback_ = stored_callback_;
    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
    store_buffer_->CheckForFullBuffer();
  }

 private:
  Heap* heap_;
  StoreBuffer* store_buffer_;
  bool stored_state_;
  StoreBufferCallback stored_callback_;
};
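
// Example use when scanning pages during GC, as a sketch (the callbacks are
// illustrative; see StoreBufferCallback and ObjectSlotCallback for the
// expected signatures):
//
//   {
//     StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
//                                   &GCCallbackSketch);
//     heap_->store_buffer()->IteratePointersToNewSpace(&ScavengeSlotSketch);
//   }  // Destructor restores the old callback and checks for overflow.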


class DontMoveStoreBufferEntriesScope {
 public:
  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer),
        stored_state_(store_buffer->may_move_store_buffer_entries_) {
    store_buffer_->may_move_store_buffer_entries_ = false;
  }

  ~DontMoveStoreBufferEntriesScope() {
    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
  }

 private:
  StoreBuffer* store_buffer_;
  bool stored_state_;
};
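
// Example: code that iterates over the old buffer in place can use this
// scope to signal that Compact() and SortUniq() must not shuffle entries
// underneath it.  A sketch:
//
//   {
//     DontMoveStoreBufferEntriesScope scope(store_buffer_);
//     for (Object*** current = store_buffer_->Start();
//          current < store_buffer_->Top();
//          current++) {
//       // Inspect *current without the entries being moved.
//     }
//   }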

} }  // namespace v8::internal

#endif  // V8_STORE_BUFFER_H_