// spaces-inl.h revision c7cc028aaeedbbfa11c11d0b7b243b3d9e837ed9
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

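// Clear every mark-bit cell on the chunk and reset its live-byte count.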
void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator


PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) { }


bool PageIterator::has_next() {
  return next_page_ != &space_->anchor_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
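
// A minimal usage sketch for PageIterator (illustrative only; assumes a
// PagedSpace* space is in scope):
//
//   PageIterator it(space);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... visit p ...
//   }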


// -----------------------------------------------------------------------------
// NewSpacePageIterator


NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) { }

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() {
  return prev_page_ != last_page_;
}


NewSpacePage* NewSpacePageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
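
// NewSpacePageIterator is used the same way as PageIterator above: construct
// it from a NewSpace (to-space pages), a SemiSpace, or an explicit
// [start, limit) address range, then loop with has_next()/next().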


// -----------------------------------------------------------------------------
// HeapObjectIterator
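// Return the next non-filler object on the current page, or NULL when the
// end of the page is reached.  When the iterator hits the space's allocation
// top it jumps straight to the limit, so the not-yet-allocated linear area
// is never interpreted as objects.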
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      ASSERT_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}


// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// PagedSpace
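// Turn a freshly committed MemoryChunk into a Page: the page's usable area is
// handed to the owning space's free list and counted as capacity, and the
// incremental marker is told about the new old-space page.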
Page* Page::Initialize(Heap* heap,
                       MemoryChunk* chunk,
                       Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
  ASSERT(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}


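// Pages flagged SCAN_ON_SCAVENGE must be visited during a scavenge; the heap
// keeps a count of such pages, so toggling the flag also adjusts that count.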
void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}


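// Map an arbitrary interior pointer back to its MemoryChunk.  For normal
// pages this is a simple alignment mask; a pointer into large object space
// has no owner at the masked address, so the large pages are searched
// instead.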
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(HEAP->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}


PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) { }


Page* Page::next_page() {
  ASSERT(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}


Page* Page::prev_page() {
  ASSERT(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}


void Page::set_next_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit) return NULL;

  allocation_info_.top = new_top;
  return HeapObject::FromAddress(current_top);
}


// Raw allocation: try the linear allocation area first, then the free list,
// then the slow path.  If all three fail, ask the caller to retry the
// allocation after a GC in this space.
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  object = free_list_.Allocate(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  return Failure::RetryAfterGC(identity());
}
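
// A minimal caller sketch (illustrative only; the identifier names below are
// assumptions, not code from this file):
//
//   MaybeObject* maybe = space->AllocateRaw(size_in_bytes);
//   Object* result;
//   if (!maybe->ToObject(&result)) {
//     // Allocation failed; the returned Failure says which space needs a GC.
//     return maybe;
//   }
//   HeapObject* object = HeapObject::cast(result);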


// -----------------------------------------------------------------------------
// NewSpace


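// Bump-pointer allocation in the to-space: if the linear area between top and
// limit is too small for the request, fall back to the slow path.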
MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top;
  if (allocation_info_.limit - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  Object* obj = HeapObject::FromAddress(allocation_info_.top);
  allocation_info_.top += size_in_bytes;
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  return obj;
}


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}


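// Shrink a sequential string that sits at the current allocation boundary by
// simply lowering the allocation top.  If the string has already been marked
// black, the freed tail must be subtracted from the page's live-byte count.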
template <typename StringType>
void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
  ASSERT(length <= string->length());
  ASSERT(string->IsSeqString());
  ASSERT(string->address() + StringType::SizeFor(string->length()) ==
         allocation_info_.top);
  Address old_top = allocation_info_.top;
  allocation_info_.top =
      string->address() + StringType::SizeFor(length);
  string->set_length(length);
  if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
    int delta = static_cast<int>(old_top - allocation_info_.top);
    MemoryChunk::IncrementLiveBytesFromMutator(string->address(), -delta);
  }
}


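// A heap object is a free-list node iff its map is one of the filler maps
// (free space, one-pointer filler, two-pointer filler).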
bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map()
      || map == heap->raw_unchecked_one_pointer_filler_map()
      || map == heap->raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_