spaces.h revision 888f6729be6a6f6fbe246cb5a9f122e2dbe455b7
1// Copyright 2006-2008 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#ifndef V8_SPACES_H_
29#define V8_SPACES_H_
30
31#include "list-inl.h"
32#include "log.h"
33
34namespace v8 {
35namespace internal {
36
37// -----------------------------------------------------------------------------
38// Heap structures:
39//
40// A JS heap consists of a young generation, an old generation, and a large
41// object space. The young generation is divided into two semispaces. A
42// scavenger implements Cheney's copying algorithm. The old generation is
43// separated into a map space and an old object space. The map space contains
44// all (and only) map objects; the rest of the old objects go into the old space.
45// The old generation is collected by a mark-sweep-compact collector.
46//
47// The semispaces of the young generation are contiguous.  The old and map
48// spaces each consist of a list of pages. A page has a page header, a remembered
49// set area, and an object area. A page size is deliberately chosen as 8K
50// bytes. The first word of a page is an opaque page header that has the
51// address of the next page and its ownership information. The second word may
52// have the allocation top address of this page. The next 248 bytes are
53// remembered sets. Heap objects are aligned to the pointer size (4 bytes). A
54// remembered set bit corresponds to a pointer in the object area.
55//
56// There is a separate large object space for objects larger than
57// Page::kMaxHeapObjectSize, so that they do not have to move during
58// collection.  The large object space is paged and uses the same remembered
59// set implementation.  Pages in large object space may be larger than 8K.
60//
61// NOTE: The mark-compact collector rebuilds the remembered set after a
62// collection. It reuses the first few words of the remembered set for
63// relocation bookkeeping information.
64
65
66// Some assertion macros used in the debugging mode.
67
68#define ASSERT_PAGE_ALIGNED(address)                                           \
69  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
70
71#define ASSERT_OBJECT_ALIGNED(address)                                         \
72  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
73
74#define ASSERT_MAP_ALIGNED(address)                                            \
75  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
76
77#define ASSERT_OBJECT_SIZE(size)                                               \
78  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
79
80#define ASSERT_PAGE_OFFSET(offset)                                             \
81  ASSERT((Page::kObjectStartOffset <= offset)                                  \
82      && (offset <= Page::kPageSize))
83
84#define ASSERT_MAP_PAGE_INDEX(index)                                           \
85  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
86
87
88class PagedSpace;
89class MemoryAllocator;
90class AllocationInfo;
91
92// -----------------------------------------------------------------------------
93// A page normally has 8K bytes. Large object pages may be larger.  A page
94// address is always aligned to the 8K page size.  A page is divided into
95// three areas: the first two words are used for bookkeeping, the next 248
96// bytes are used as remembered set, and the rest of the page is the object
97// area.
98//
99// Pointers are aligned to the pointer size (4 bytes), so only 1 bit is needed
100// for a pointer in the remembered set. Given an address, its remembered set
101// bit position (offset from the start of the page) is calculated by dividing
102// its page offset by 32. Therefore, the object area in a page starts at the
103// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
104// the first two words (64 bits) in a page can be used for other purposes.
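//
// For example (a hedged illustration using the 32-bit constants above): a
// pointer stored at page offset 4096 corresponds to remembered set bit
// number 4096 / kPointerSize = 1024, which lives in byte 1024 / 8 = 128
// (i.e., 4096 / 32) of the remembered set area.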
105//
106// On the 64-bit platform, we add an offset to the start of the remembered set,
107// and pointers are aligned to 8-byte pointer size. This means that we need
108// only 128 bytes for the RSet, and only get two bytes free in the RSet's RSet.
109// For this reason we add an offset to get room for the Page data at the start.
110//
111// The mark-compact collector transforms a map pointer into a page index and a
112// page offset. The exact encoding is described in the comments for
113// class MapWord in objects.h.
114//
115// The only way to get a page pointer is by calling factory methods:
116//   Page* p = Page::FromAddress(addr); or
117//   Page* p = Page::FromAllocationTop(top);
118class Page {
119 public:
120  // Returns the page containing a given address. The address ranges
121  // from [page_addr .. page_addr + kPageSize[
122  //
123  // Note that this function only works for addresses in normal paged
124  // spaces and addresses in the first 8K of large object pages (i.e.,
125  // the start of large objects but not necessarily derived pointers
126  // within them).
127  INLINE(static Page* FromAddress(Address a)) {
128    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
129  }
130
131  // Returns the page containing an allocation top. Because an allocation
132  // top address can be the upper bound of the page, we need to subtract
133  // kPointerSize from it first. The address ranges from
134  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
135  INLINE(static Page* FromAllocationTop(Address top)) {
136    Page* p = FromAddress(top - kPointerSize);
137    ASSERT_PAGE_OFFSET(p->Offset(top));
138    return p;
139  }
140
141  // Returns the start address of this page.
142  Address address() { return reinterpret_cast<Address>(this); }
143
144  // Checks whether this is a valid page address.
145  bool is_valid() { return address() != NULL; }
146
147  // Returns the next page of this page.
148  inline Page* next_page();
149
150  // Return the end of allocation in this page. Undefined for unused pages.
151  inline Address AllocationTop();
152
153  // Returns the start address of the object area in this page.
154  Address ObjectAreaStart() { return address() + kObjectStartOffset; }
155
156  // Returns the end address (exclusive) of the object area in this page.
157  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
158
159  // Returns the start address of the remembered set area.
160  Address RSetStart() { return address() + kRSetStartOffset; }
161
162  // Returns the end address of the remembered set area (exclusive).
163  Address RSetEnd() { return address() + kRSetEndOffset; }
164
165  // Checks whether an address is page aligned.
166  static bool IsAlignedToPageSize(Address a) {
167    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
168  }
169
170  // True if this page is a large object page.
171  bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }
172
173  // Returns the offset of a given address to this page.
174  INLINE(int Offset(Address a)) {
175    int offset = static_cast<int>(a - address());
176    ASSERT_PAGE_OFFSET(offset);
177    return offset;
178  }
179
180  // Returns the address for a given offset into this page.
181  Address OffsetToAddress(int offset) {
182    ASSERT_PAGE_OFFSET(offset);
183    return address() + offset;
184  }
185
186  // ---------------------------------------------------------------------
187  // Remembered set support
188
189  // Clears remembered set in this page.
190  inline void ClearRSet();
191
192  // Return the address of the remembered set word corresponding to an
193  // object address/offset pair, and the bit encoded as a single-bit
194  // mask in the output parameter 'bitmask'.
195  INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
196                                               uint32_t* bitmask));
197
198  // Sets the corresponding remembered set bit for a given address.
199  INLINE(static void SetRSet(Address address, int offset));
200
201  // Clears the corresponding remembered set bit for a given address.
202  static inline void UnsetRSet(Address address, int offset);
203
204  // Checks whether the remembered set bit for a given address is set.
205  static inline bool IsRSetSet(Address address, int offset);
206
207#ifdef DEBUG
208  // Use a state to mark whether remembered set space can be used for other
209  // purposes.
210  enum RSetState { IN_USE,  NOT_IN_USE };
211  static bool is_rset_in_use() { return rset_state_ == IN_USE; }
212  static void set_rset_state(RSetState state) { rset_state_ = state; }
213#endif
214
215  // Page size in bytes.  This must be a multiple of the OS page size.
216  static const int kPageSize = 1 << kPageSizeBits;
217
218  // Page size mask.
219  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
220
221  // The offset of the remembered set in a page, in addition to the bytes left
222  // empty because they would only cover the remembered set area itself.
223#ifdef V8_TARGET_ARCH_X64
224  static const int kRSetOffset = 4 * kPointerSize;  // Room for four pointers.
225#else
226  static const int kRSetOffset = 0;
227#endif
228  // The end offset of the remembered set in a page
229  // (heap objects are aligned to the pointer size).
230  static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
231
232  // The start offset of the object area in a page.
233  // This needs to be at least (bits per uint32_t) * kBitsPerPointer,
234  // to align start of rset to a uint32_t address.
235  static const int kObjectStartOffset = 256;
236
237  // The start offset of the used part of the remembered set in a page.
238  static const int kRSetStartOffset = kRSetOffset +
239      kObjectStartOffset / kBitsPerPointer;
240
241  // Object area size in bytes.
242  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
243
244  // Maximum object size that fits in a page.
245  static const int kMaxHeapObjectSize = kObjectAreaSize;
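
  // A hedged worked example of the constants above on a 32-bit target
  // (assuming kPageSizeBits == 13, kPointerSize == 4, kBitsPerPointer == 32,
  // as defined in globals.h; treat these numbers as a sketch):
  //   kPageSize         = 1 << 13        = 8192
  //   kRSetOffset       = 0
  //   kRSetEndOffset    = 0 + 8192 / 32  = 256
  //   kObjectStartOffset                 = 256  (== kRSetEndOffset)
  //   kRSetStartOffset  = 0 + 256 / 32   = 8
  //   kObjectAreaSize   = 8192 - 256     = 7936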
246
247  //---------------------------------------------------------------------------
248  // Page header description.
249  //
250  // If a page is not in the large object space, the first word,
251  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
252  // and the chunk number (0 ~ 8K-1).  Only MemoryAllocator should use
253  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
254  // or [next_page_start, next_page_end[. It cannot point to a valid address
255  // in the current page.  If a page is in the large object space, the first
256  // word *may* (if the page start and large object chunk start are the
257  // same) contain the address of the next large object chunk.
258  intptr_t opaque_header;
259
260  // If the page is not in the large object space, the low-order bit of the
261  // second word is set. If the page is in the large object space, the
262  // second word *may* (if the page start and large object chunk start are
263  // the same) contain the large object chunk size.  In either case, the
264  // low-order bit for large object pages will be cleared.
265  int is_normal_page;
266
267  // The following fields may overlap with remembered set, they can only
268  // be used in the mark-compact collector when remembered set is not
269  // used.
270
271  // The index of the page in its owner space.
272  int mc_page_index;
273
274  // The allocation pointer after relocating objects to this page.
275  Address mc_relocation_top;
276
277  // The forwarding address of the first live object in this page.
278  Address mc_first_forwarded;
279
280#ifdef DEBUG
281 private:
282  static RSetState rset_state_;  // state of the remembered set
283#endif
284};
285
286
287// ----------------------------------------------------------------------------
288// Space is the abstract superclass for all allocation spaces.
289class Space : public Malloced {
290 public:
291  Space(AllocationSpace id, Executability executable)
292      : id_(id), executable_(executable) {}
293
294  virtual ~Space() {}
295
296  // Does the space need executable memory?
297  Executability executable() { return executable_; }
298
299  // Identity used in error reporting.
300  AllocationSpace identity() { return id_; }
301
302  virtual int Size() = 0;
303
304#ifdef DEBUG
305  virtual void Print() = 0;
306#endif
307
308  // After calling this we can allocate a certain number of bytes using only
309  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
310  // without using freelists or causing a GC.  This is used by partial
311  // snapshots.  It returns true if space was reserved or false if a GC is
312  // needed.  For paged spaces the space requested must include the space wasted
313  // at the end of each page when allocating linearly.
314  virtual bool ReserveSpace(int bytes) = 0;
315
316 private:
317  AllocationSpace id_;
318  Executability executable_;
319};
320
321
322// ----------------------------------------------------------------------------
323// All heap objects containing executable code (code objects) must be allocated
324// from a 2 GB range of memory, so that they can call each other using 32-bit
325// displacements.  This happens automatically on 32-bit platforms, where 32-bit
326// displacements cover the entire 4GB virtual address space.  On 64-bit
327// platforms, we support this using the CodeRange object, which reserves and
328// manages a range of virtual memory.
329class CodeRange : public AllStatic {
330 public:
331  // Reserves a range of virtual memory, but does not commit any of it.
332  // Can only be called once, at heap initialization time.
333  // Returns false on failure.
334  static bool Setup(const size_t requested_size);
335
336  // Frees the range of virtual memory, and frees the data structures used to
337  // manage it.
338  static void TearDown();
339
340  static bool exists() { return code_range_ != NULL; }
341  static bool contains(Address address) {
342    if (code_range_ == NULL) return false;
343    Address start = static_cast<Address>(code_range_->address());
344    return start <= address && address < start + code_range_->size();
345  }
346
347  // Allocates a chunk of memory from the large-object portion of
348  // the code range.  On platforms with no separate code range, should
349  // not be called.
350  static void* AllocateRawMemory(const size_t requested, size_t* allocated);
351  static void FreeRawMemory(void* buf, size_t length);
352
353 private:
354  // The reserved range of virtual memory that all code objects are put in.
355  static VirtualMemory* code_range_;
356  // Plain old data class, just a struct plus a constructor.
357  class FreeBlock {
358   public:
359    FreeBlock(Address start_arg, size_t size_arg)
360        : start(start_arg), size(size_arg) {}
361    FreeBlock(void* start_arg, size_t size_arg)
362        : start(static_cast<Address>(start_arg)), size(size_arg) {}
363
364    Address start;
365    size_t size;
366  };
367
368  // Freed blocks of memory are added to the free list.  When the allocation
369  // list is exhausted, the free list is sorted and merged to make the new
370  // allocation list.
371  static List<FreeBlock> free_list_;
372  // Memory is allocated from the free blocks on the allocation list.
373  // The block at current_allocation_block_index_ is the current block.
374  static List<FreeBlock> allocation_list_;
375  static int current_allocation_block_index_;
376
377  // Finds a block on the allocation list that contains at least the
378  // requested amount of memory.  If none is found, sorts and merges
379  // the existing free memory blocks, and searches again.
380  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
381  static void GetNextAllocationBlock(size_t requested);
382  // Compares the start addresses of two free blocks.
383  static int CompareFreeBlockAddress(const FreeBlock* left,
384                                     const FreeBlock* right);
385};
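
// A hedged usage sketch of CodeRange (only meaningful on targets where a
// separate code range is set up, e.g. x64); the 512 MB figure is an arbitrary
// illustration, not a value taken from this header:
//   if (CodeRange::Setup(512 * 1024 * 1024)) {
//     size_t allocated;
//     void* mem = CodeRange::AllocateRawMemory(Page::kPageSize, &allocated);
//     // ... place code objects in [mem, mem + allocated) ...
//     CodeRange::FreeRawMemory(mem, allocated);
//   }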
386
387
388// ----------------------------------------------------------------------------
389// A space acquires chunks of memory from the operating system. The memory
390// allocator manages chunks for the paged heap spaces (old space and map
391// space).  A paged chunk consists of pages. Pages in a chunk have contiguous
392// addresses and are linked as a list.
393//
394// The allocator keeps an initial chunk which is used for the new space.  The
395// leftover regions of the initial chunk are used for the initial chunks of
396// old space and map space if they are big enough to hold at least one page.
397// The allocator assumes that there is one old space and one map space; each
398// expands by allocating kPagesPerChunk pages at a time, except possibly the
399// last expansion before running out of space.  The first chunk may also
400// contain fewer than kPagesPerChunk pages.
401//
402// The memory allocator also allocates chunks for the large object space, but
403// they are managed by the space itself.  The new space does not expand.
404
405class MemoryAllocator : public AllStatic {
406 public:
407  // Initializes its internal bookkeeping structures.
408  // Max capacity of the total space.
409  static bool Setup(int max_capacity);
410
411  // Deletes valid chunks.
412  static void TearDown();
413
414  // Reserves an initial address range of virtual memory to be split between
415  // the two new space semispaces, the old space, and the map space.  The
416  // memory is not yet committed or assigned to spaces and split into pages.
417  // The initial chunk is unmapped when the memory allocator is torn down.
418  // This function should only be called when there is not already a reserved
419  // initial chunk (initial_chunk_ should be NULL).  It returns the start
420  // address of the initial chunk if successful, with the side effect of
421  // setting the initial chunk, or NULL if unsuccessful, in which case the
422  // initial chunk is left NULL.
423  static void* ReserveInitialChunk(const size_t requested);
424
425  // Commits pages from an as-yet-unmanaged block of virtual memory into a
426  // paged space.  The block should be part of the initial chunk reserved via
427  // a call to ReserveInitialChunk.  The number of pages is always returned in
428  // the output parameter num_pages.  This function assumes that the start
429  // address is non-null and that it is big enough to hold at least one
430  // page-aligned page.  The call always succeeds, and num_pages is always
431  // greater than zero.
432  static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
433                           int* num_pages);
434
435  // Commit a contiguous block of memory from the initial chunk.  Assumes that
436  // the address is not NULL, the size is greater than zero, and that the
437  // block is contained in the initial chunk.  Returns true if it succeeded
438  // and false otherwise.
439  static bool CommitBlock(Address start, size_t size, Executability executable);
440
441
442  // Uncommit a contiguous block of memory [start..(start+size)[.
443  // start is not NULL, the size is greater than zero, and the
444  // block is contained in the initial chunk.  Returns true if it succeeded
445  // and false otherwise.
446  static bool UncommitBlock(Address start, size_t size);
447
448  // Attempts to allocate the requested (non-zero) number of pages from the
449  // OS.  Fewer pages might be allocated than requested. If it fails to
450  // allocate memory from the OS or cannot allocate a single page, this
451  // function returns an invalid page pointer (NULL). The caller must check
452  // whether the returned page is valid (by calling Page::is_valid()).  It is
453  // guaranteed that allocated pages have contiguous addresses.  The actual
454  // number of allocated pages is returned in the output parameter
455  // allocated_pages.  If the PagedSpace owner is executable and there is
456  // a code range, the pages are allocated from the code range.
457  static Page* AllocatePages(int requested_pages, int* allocated_pages,
458                             PagedSpace* owner);
459
460  // Frees pages from a given page and after. If 'p' is the first page
461  // of a chunk, pages from 'p' are freed and this function returns an
462  // invalid page pointer. Otherwise, the function searches a page
463  // after 'p' that is the first page of a chunk. Pages after the
464  // found page are freed and the function returns 'p'.
465  static Page* FreePages(Page* p);
466
467  // Allocates and frees raw memory of certain size.
468  // These are just thin wrappers around OS::Allocate and OS::Free,
469  // but keep track of allocated bytes as part of heap.
470  // If the flag is EXECUTABLE and a code range exists, the requested
471  // memory is allocated from the code range.  If a code range exists
472  // and the freed memory is in it, the code range manages the freed memory.
473  static void* AllocateRawMemory(const size_t requested,
474                                 size_t* allocated,
475                                 Executability executable);
476  static void FreeRawMemory(void* buf, size_t length);
477
478  // Returns the maximum available bytes of heaps.
479  static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
480
481  // Returns allocated spaces in bytes.
482  static int Size() { return size_; }
483
484  // Returns maximum available bytes that the old space can have.
485  static int MaxAvailable() {
486    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
487  }
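
  // A hedged worked example with 32-bit constants: if Available() returned
  // 8 MB (8388608 bytes), that is 8388608 / 8192 = 1024 whole pages, so
  // MaxAvailable() would be 1024 * 7936 = 8126464 bytes of object area.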
488
489  // Links two pages.
490  static inline void SetNextPage(Page* prev, Page* next);
491
492  // Returns the next page of a given page.
493  static inline Page* GetNextPage(Page* p);
494
495  // Checks whether a page belongs to a space.
496  static inline bool IsPageInSpace(Page* p, PagedSpace* space);
497
498  // Returns the space that owns the given page.
499  static inline PagedSpace* PageOwner(Page* page);
500
501  // Finds the first/last page in the same chunk as a given page.
502  static Page* FindFirstPageInSameChunk(Page* p);
503  static Page* FindLastPageInSameChunk(Page* p);
504
505#ifdef ENABLE_HEAP_PROTECTION
506  // Protect/unprotect a block of memory by marking it read-only/writable.
507  static inline void Protect(Address start, size_t size);
508  static inline void Unprotect(Address start, size_t size,
509                               Executability executable);
510
511  // Protect/unprotect a chunk given a page in the chunk.
512  static inline void ProtectChunkFromPage(Page* page);
513  static inline void UnprotectChunkFromPage(Page* page);
514#endif
515
516#ifdef DEBUG
517  // Reports statistic info of the space.
518  static void ReportStatistics();
519#endif
520
521  // Due to an encoding limitation, we can have at most 8K chunks.
522  static const int kMaxNofChunks = 1 << kPageSizeBits;
523  // If a chunk has at least 16 pages, the maximum heap size is about
524  // 8K * 8K * 16 = 1G bytes.
525#ifdef V8_TARGET_ARCH_X64
526  static const int kPagesPerChunk = 32;
527#else
528  static const int kPagesPerChunk = 16;
529#endif
530  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
531
532 private:
533  // Maximum space size in bytes.
534  static int capacity_;
535
536  // Allocated space size in bytes.
537  static int size_;
538
539  // The initial chunk of virtual memory.
540  static VirtualMemory* initial_chunk_;
541
542  // Allocated chunk info: chunk start address, chunk size, and owning space.
543  class ChunkInfo BASE_EMBEDDED {
544   public:
545    ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
546    void init(Address a, size_t s, PagedSpace* o) {
547      address_ = a;
548      size_ = s;
549      owner_ = o;
550    }
551    Address address() { return address_; }
552    size_t size() { return size_; }
553    PagedSpace* owner() { return owner_; }
554
555   private:
556    Address address_;
557    size_t size_;
558    PagedSpace* owner_;
559  };
560
561  // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
562  static List<ChunkInfo> chunks_;
563  static List<int> free_chunk_ids_;
564  static int max_nof_chunks_;
565  static int top_;
566
567  // Push/pop a free chunk id onto/from the stack.
568  static void Push(int free_chunk_id);
569  static int Pop();
570  static bool OutOfChunkIds() { return top_ == 0; }
571
572  // Frees a chunk.
573  static void DeleteChunk(int chunk_id);
574
575  // Basic check whether a chunk id is in the valid range.
576  static inline bool IsValidChunkId(int chunk_id);
577
578  // Checks whether a chunk id identifies an allocated chunk.
579  static inline bool IsValidChunk(int chunk_id);
580
581  // Returns the chunk id that a page belongs to.
582  static inline int GetChunkId(Page* p);
583
584  // True if the address lies in the initial chunk.
585  static inline bool InInitialChunk(Address address);
586
587  // Initializes pages in a chunk. Returns the first page address.
588  // This function and GetChunkId() are provided for the mark-compact
589  // collector to rebuild page headers in the from space, which is
590  // used as a marking stack and its page headers are destroyed.
591  static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
592                                      PagedSpace* owner);
593};
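
// A hedged sketch of the startup flow described above; the sizes and the
// old_space pointer are hypothetical, not values defined in this header:
//   MemoryAllocator::Setup(max_heap_size);
//   void* chunk = MemoryAllocator::ReserveInitialChunk(initial_chunk_size);
//   int num_pages;
//   Page* first_page = MemoryAllocator::CommitPages(
//       static_cast<Address>(chunk) + new_space_size,  // leftover region
//       old_space_size, old_space, &num_pages);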
594
595
596// -----------------------------------------------------------------------------
597// Interface for heap object iterator to be implemented by all object space
598// object iterators.
599//
600// NOTE: The space-specific object iterators also implement their own next()
601//       method, which is used to avoid virtual function calls when
602//       iterating a specific space.
603
604class ObjectIterator : public Malloced {
605 public:
606  virtual ~ObjectIterator() { }
607
608  virtual HeapObject* next_object() = 0;
609};
610
611
612// -----------------------------------------------------------------------------
613// Heap object iterator in new/old/map spaces.
614//
615// A HeapObjectIterator iterates objects from a given address to the
616// top of a space. The given address must be below the current
617// allocation pointer (space top). There are some caveats.
618//
619// (1) If the space top changes upward during iteration (because of
620//     allocating new objects), the iterator does not iterate objects
621//     above the original space top. The caller must create a new
622//     iterator starting from the old top in order to visit these new
623//     objects.
624//
625// (2) If new objects are allocated below the original allocation top
626//     (e.g., free-list allocation in paged spaces), the new objects
627//     may or may not be iterated depending on their position with
628//     respect to the current point of iteration.
629//
630// (3) The space top should not change downward during iteration,
631//     otherwise the iterator will return not-necessarily-valid
632//     objects.
633
634class HeapObjectIterator: public ObjectIterator {
635 public:
636  // Creates a new object iterator in a given space. If a start
637  // address is not given, the iterator starts from the space bottom.
638  // If the size function is not given, the iterator calls the default
639  // Object::Size().
640  explicit HeapObjectIterator(PagedSpace* space);
641  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
642  HeapObjectIterator(PagedSpace* space, Address start);
643  HeapObjectIterator(PagedSpace* space,
644                     Address start,
645                     HeapObjectCallback size_func);
646
647  inline HeapObject* next() {
648    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
649  }
650
651  // implementation of ObjectIterator.
652  virtual HeapObject* next_object() { return next(); }
653
654 private:
655  Address cur_addr_;  // current iteration point
656  Address end_addr_;  // end iteration point
657  Address cur_limit_;  // current page limit
658  HeapObjectCallback size_func_;  // size function
659  Page* end_page_;  // caches the page of the end address
660
661  HeapObject* FromCurrentPage() {
662    ASSERT(cur_addr_ < cur_limit_);
663
664    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
665    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
666    ASSERT_OBJECT_SIZE(obj_size);
667
668    cur_addr_ += obj_size;
669    ASSERT(cur_addr_ <= cur_limit_);
670
671    return obj;
672  }
673
674  // Slow path of next, goes into the next page.
675  HeapObject* FromNextPage();
676
677  // Initializes fields.
678  void Initialize(Address start, Address end, HeapObjectCallback size_func);
679
680#ifdef DEBUG
681  // Verifies whether fields have valid values.
682  void Verify();
683#endif
684};
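
// A hedged usage sketch for some PagedSpace* space, assuming next() returns
// NULL once the end of the space is reached (which is what FromNextPage()
// does when it runs out of pages):
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... visit obj ...
//   }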
685
686
687// -----------------------------------------------------------------------------
688// A PageIterator iterates the pages in a paged space.
689//
690// The PageIterator class provides three modes for iterating pages in a space:
691//   PAGES_IN_USE iterates pages containing allocated objects.
692//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
693//                    mark-compact collection.
694//   ALL_PAGES iterates all pages in the space.
695//
696// There are some caveats.
697//
698// (1) If the space expands during iteration, new pages will not be
699//     returned by the iterator in any mode.
700//
701// (2) If new objects are allocated during iteration, they will appear
702//     in pages returned by the iterator.  Allocation may cause the
703//     allocation pointer or MC allocation pointer in the last page to
704//     change between constructing the iterator and iterating the last
705//     page.
706//
707// (3) The space should not shrink during iteration, otherwise the
708//     iterator will return deallocated pages.
709
710class PageIterator BASE_EMBEDDED {
711 public:
712  enum Mode {
713    PAGES_IN_USE,
714    PAGES_USED_BY_MC,
715    ALL_PAGES
716  };
717
718  PageIterator(PagedSpace* space, Mode mode);
719
720  inline bool has_next();
721  inline Page* next();
722
723 private:
724  PagedSpace* space_;
725  Page* prev_page_;  // Previous page returned.
726  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
727};
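
// A hedged usage sketch for some PagedSpace* space, iterating only the pages
// currently in use:
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* page = it.next();
//     // ... visit page ...
//   }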
728
729
730// -----------------------------------------------------------------------------
731// A space has a list of pages. The next page can be accessed via
732// Page::next_page() call. The next page of the last page is an
733// invalid page pointer. A space can expand and shrink dynamically.
734
735// An abstraction of allocation and relocation pointers in a page-structured
736// space.
737class AllocationInfo {
738 public:
739  Address top;  // current allocation top
740  Address limit;  // current allocation limit
741
742#ifdef DEBUG
743  bool VerifyPagedAllocation() {
744    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
745        && (top <= limit);
746  }
747#endif
748};
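
// A hedged sketch of the bump-pointer fast path that linear allocation is
// expected to perform against an AllocationInfo* info (the real fast path is
// AllocateLinearly, declared later in this header):
//   Address new_top = info->top + size_in_bytes;
//   if (new_top > info->limit) return NULL;  // caller falls back to the slow path
//   HeapObject* obj = HeapObject::FromAddress(info->top);
//   info->top = new_top;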
749
750
751// An abstraction of the accounting statistics of a page-structured space.
752// The 'capacity' of a space is the number of object-area bytes (ie, not
753// including page bookkeeping structures) currently in the space. The 'size'
754// of a space is the number of allocated bytes, the 'waste' in the space is
755// the number of bytes that are not allocated and not available to
756// allocation without reorganizing the space via a GC (eg, small blocks due
757// to internal fragmentation, top of page areas in map space), and the bytes
758// 'available' is the number of unallocated bytes that are not waste.  The
759// capacity is the sum of size, waste, and available.
760//
761// The stats are only set by functions that ensure they stay balanced. These
762// functions increase or decrease one of the non-capacity stats in
763// conjunction with capacity, or else they always balance increases and
764// decreases to the non-capacity stats.
765class AllocationStats BASE_EMBEDDED {
766 public:
767  AllocationStats() { Clear(); }
768
769  // Zero out all the allocation statistics (ie, no capacity).
770  void Clear() {
771    capacity_ = 0;
772    available_ = 0;
773    size_ = 0;
774    waste_ = 0;
775  }
776
777  // Reset the allocation statistics (ie, available = capacity with no
778  // wasted or allocated bytes).
779  void Reset() {
780    available_ = capacity_;
781    size_ = 0;
782    waste_ = 0;
783  }
784
785  // Accessors for the allocation statistics.
786  int Capacity() { return capacity_; }
787  int Available() { return available_; }
788  int Size() { return size_; }
789  int Waste() { return waste_; }
790
791  // Grow the space by adding available bytes.
792  void ExpandSpace(int size_in_bytes) {
793    capacity_ += size_in_bytes;
794    available_ += size_in_bytes;
795  }
796
797  // Shrink the space by removing available bytes.
798  void ShrinkSpace(int size_in_bytes) {
799    capacity_ -= size_in_bytes;
800    available_ -= size_in_bytes;
801  }
802
803  // Allocate from available bytes (available -> size).
804  void AllocateBytes(int size_in_bytes) {
805    available_ -= size_in_bytes;
806    size_ += size_in_bytes;
807  }
808
809  // Free allocated bytes, making them available (size -> available).
810  void DeallocateBytes(int size_in_bytes) {
811    size_ -= size_in_bytes;
812    available_ += size_in_bytes;
813  }
814
815  // Waste free bytes (available -> waste).
816  void WasteBytes(int size_in_bytes) {
817    available_ -= size_in_bytes;
818    waste_ += size_in_bytes;
819  }
820
821  // Consider the wasted bytes to be allocated, as they contain filler
822  // objects (waste -> size).
823  void FillWastedBytes(int size_in_bytes) {
824    waste_ -= size_in_bytes;
825    size_ += size_in_bytes;
826  }
827
828 private:
829  int capacity_;
830  int available_;
831  int size_;
832  int waste_;
833};
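
// A hedged worked example of how the balanced transitions above preserve
// Capacity() == Size() + Waste() + Available(), using the 32-bit object area
// size of 7936 bytes from Page above:
//   AllocationStats stats;
//   stats.ExpandSpace(7936);     // capacity = 7936, available = 7936
//   stats.AllocateBytes(1000);   // size = 1000,  available = 6936
//   stats.WasteBytes(36);        // waste = 36,   available = 6900
//   // 7936 == 1000 + 36 + 6900, so the invariant still holds.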
834
835
836class PagedSpace : public Space {
837 public:
838  // Creates a space with a maximum capacity, and an id.
839  PagedSpace(int max_capacity, AllocationSpace id, Executability executable);
840
841  virtual ~PagedSpace() {}
842
843  // Set up the space using the given address range of virtual memory (from
844  // the memory allocator's initial chunk) if possible.  If the block of
845  // addresses is not big enough to contain a single page-aligned page, a
846  // fresh chunk will be allocated.
847  bool Setup(Address start, size_t size);
848
849  // Returns true if the space has been successfully set up and not
850  // subsequently torn down.
851  bool HasBeenSetup();
852
853  // Cleans up the space, frees all pages in this space except those belonging
854  // to the initial chunk, uncommits addresses in the initial chunk.
855  void TearDown();
856
857  // Checks whether an object/address is in this space.
858  inline bool Contains(Address a);
859  bool Contains(HeapObject* o) { return Contains(o->address()); }
860
861  // Given an address occupied by a live object, return that object if it is
862  // in this space, or Failure::Exception() if it is not. The implementation
863  // iterates over objects in the page containing the address, the cost is
864  // linear in the number of objects in the page. It may be slow.
865  Object* FindObject(Address addr);
866
867  // Checks whether page is currently in use by this space.
868  bool IsUsed(Page* page);
869
870  // Clears remembered sets of pages in this space.
871  void ClearRSet();
872
873  // Prepares for a mark-compact GC.
874  virtual void PrepareForMarkCompact(bool will_compact) = 0;
875
876  virtual Address PageAllocationTop(Page* page) = 0;
877
878  // Current capacity without growing (Size() + Available() + Waste()).
879  int Capacity() { return accounting_stats_.Capacity(); }
880
881  // Total amount of memory committed for this space.  For paged
882  // spaces this equals the capacity.
883  int CommittedMemory() { return Capacity(); }
884
885  // Available bytes without growing.
886  int Available() { return accounting_stats_.Available(); }
887
888  // Allocated bytes in this space.
889  virtual int Size() { return accounting_stats_.Size(); }
890
891  // Wasted bytes due to fragmentation and not recoverable until the
892  // next GC of this space.
893  int Waste() { return accounting_stats_.Waste(); }
894
895  // Returns the address of the first object in this space.
896  Address bottom() { return first_page_->ObjectAreaStart(); }
897
898  // Returns the allocation pointer in this space.
899  Address top() { return allocation_info_.top; }
900
901  // Allocate the requested number of bytes in the space if possible, return a
902  // failure object if not.
903  inline Object* AllocateRaw(int size_in_bytes);
904
905  // Allocate the requested number of bytes for relocation during mark-compact
906  // collection.
907  inline Object* MCAllocateRaw(int size_in_bytes);
908
909  virtual bool ReserveSpace(int bytes);
910
911  // Used by ReserveSpace.
912  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
913
914  // ---------------------------------------------------------------------------
915  // Mark-compact collection support functions
916
917  // Set the relocation point to the beginning of the space.
918  void MCResetRelocationInfo();
919
920  // Writes relocation info to the top page.
921  void MCWriteRelocationInfoToPage() {
922    TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
923  }
924
925  // Computes the offset of a given address in this space to the beginning
926  // of the space.
927  int MCSpaceOffsetForAddress(Address addr);
928
929  // Updates the allocation pointer to the relocation top after a mark-compact
930  // collection.
931  virtual void MCCommitRelocationInfo() = 0;
932
933  // Releases half of unused pages.
934  void Shrink();
935
936  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
937  bool EnsureCapacity(int capacity);
938
939#ifdef ENABLE_HEAP_PROTECTION
940  // Protect/unprotect the space by marking it read-only/writable.
941  void Protect();
942  void Unprotect();
943#endif
944
945#ifdef DEBUG
946  // Print meta info and objects in this space.
947  virtual void Print();
948
949  // Verify integrity of this space.
950  virtual void Verify(ObjectVisitor* visitor);
951
952  // Overridden by subclasses to verify space-specific object
953  // properties (e.g., only maps or free-list nodes are in map space).
954  virtual void VerifyObject(HeapObject* obj) {}
955
956  // Report code object related statistics
957  void CollectCodeStatistics();
958  static void ReportCodeStatistics();
959  static void ResetCodeStatistics();
960#endif
961
962 protected:
963  // Maximum capacity of this space.
964  int max_capacity_;
965
966  // Accounting information for this space.
967  AllocationStats accounting_stats_;
968
969  // The first page in this space.
970  Page* first_page_;
971
972  // The last page in this space.  Initially set in Setup, updated in
973  // Expand and Shrink.
974  Page* last_page_;
975
976  // Normal allocation information.
977  AllocationInfo allocation_info_;
978
979  // Relocation information during mark-compact collections.
980  AllocationInfo mc_forwarding_info_;
981
982  // Bytes of each page that cannot be allocated.  Possibly non-zero
983  // for pages in spaces with only fixed-size objects.  Always zero
984  // for pages in spaces with variable sized objects (those pages are
985  // padded with free-list nodes).
986  int page_extra_;
987
988  // Sets allocation pointer to a page bottom.
989  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
990
991  // Returns the top page specified by an allocation info structure.
992  static Page* TopPageOf(AllocationInfo alloc_info) {
993    return Page::FromAllocationTop(alloc_info.limit);
994  }
995
996  int CountPagesToTop() {
997    Page* p = Page::FromAllocationTop(allocation_info_.top);
998    PageIterator it(this, PageIterator::ALL_PAGES);
999    int counter = 1;
1000    while (it.has_next()) {
1001      if (it.next() == p) return counter;
1002      counter++;
1003    }
1004    UNREACHABLE();
1005    return -1;
1006  }
1007
1008  // Expands the space by allocating a fixed number of pages. Returns false if
1009  // it cannot allocate the requested number of pages from the OS. Newly
1010  // allocated pages are appended to the last_page.
1011  bool Expand(Page* last_page);
1012
1013  // Generic fast case allocation function that tries linear allocation in
1014  // the top page of 'alloc_info'.  Returns NULL on failure.
1015  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
1016                                      int size_in_bytes);
1017
1018  // During normal allocation or deserialization, roll to the next page in
1019  // the space (there is assumed to be one) and allocate there.  This
1020  // function is space-dependent.
1021  virtual HeapObject* AllocateInNextPage(Page* current_page,
1022                                         int size_in_bytes) = 0;
1023
1024  // Slow path of AllocateRaw.  This function is space-dependent.
1025  virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
1026
1027  // Slow path of MCAllocateRaw.
1028  HeapObject* SlowMCAllocateRaw(int size_in_bytes);
1029
1030#ifdef DEBUG
1031  // Returns the number of total pages in this space.
1032  int CountTotalPages();
1033
1034  void DoPrintRSet(const char* space_name);
1035#endif
1036 private:
1037  // Returns the page of the allocation pointer.
1038  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
1039
1040  // Returns a pointer to the page of the relocation pointer.
1041  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
1042
1043  friend class PageIterator;
1044};
1045
1046
1047#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1048class NumberAndSizeInfo BASE_EMBEDDED {
1049 public:
1050  NumberAndSizeInfo() : number_(0), bytes_(0) {}
1051
1052  int number() const { return number_; }
1053  void increment_number(int num) { number_ += num; }
1054
1055  int bytes() const { return bytes_; }
1056  void increment_bytes(int size) { bytes_ += size; }
1057
1058  void clear() {
1059    number_ = 0;
1060    bytes_ = 0;
1061  }
1062
1063 private:
1064  int number_;
1065  int bytes_;
1066};
1067
1068
1069// HistogramInfo class for recording a single "bar" of a histogram.  This
1070// class is used for collecting statistics to print to stdout (when compiled
1071// with DEBUG) or to the log file (when compiled with
1072// ENABLE_LOGGING_AND_PROFILING).
1073class HistogramInfo: public NumberAndSizeInfo {
1074 public:
1075  HistogramInfo() : NumberAndSizeInfo() {}
1076
1077  const char* name() { return name_; }
1078  void set_name(const char* name) { name_ = name; }
1079
1080 private:
1081  const char* name_;
1082};
1083#endif
1084
1085
1086// -----------------------------------------------------------------------------
1087// SemiSpace in young generation
1088//
1089// A semispace is a contiguous chunk of memory. The mark-compact collector
1090// uses the memory in the from space as a marking stack when tracing live
1091// objects.
1092
1093class SemiSpace : public Space {
1094 public:
1095  // Constructor.
1096  SemiSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {
1097    start_ = NULL;
1098    age_mark_ = NULL;
1099  }
1100
1101  // Sets up the semispace using the given chunk.
1102  bool Setup(Address start, int initial_capacity, int maximum_capacity);
1103
1104  // Tear down the space.  Heap memory was not allocated by the space, so it
1105  // is not deallocated here.
1106  void TearDown();
1107
1108  // True if the space has been set up but not torn down.
1109  bool HasBeenSetup() { return start_ != NULL; }
1110
1111  // Grow the size of the semispace by committing extra virtual memory.
1112  // Assumes that the caller has checked that the semispace has not reached
1113  // its maximum capacity (and thus there is space available in the reserved
1114  // address range to grow).
1115  bool Grow();
1116
1117  // Grow the semispace to the new capacity.  The new capacity
1118  // requested must be larger than the current capacity.
1119  bool GrowTo(int new_capacity);
1120
1121  // Shrinks the semispace to the new capacity.  The new capacity
1122  // requested must be more than the amount of used memory in the
1123  // semispace and less than the current capacity.
1124  bool ShrinkTo(int new_capacity);
1125
1126  // Returns the start address of the space.
1127  Address low() { return start_; }
1128  // Returns one past the end address of the space.
1129  Address high() { return low() + capacity_; }
1130
1131  // Age mark accessors.
1132  Address age_mark() { return age_mark_; }
1133  void set_age_mark(Address mark) { age_mark_ = mark; }
1134
1135  // True if the address is in the address range of this semispace (not
1136  // necessarily below the allocation pointer).
1137  bool Contains(Address a) {
1138    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1139           == reinterpret_cast<uintptr_t>(start_);
1140  }
1141
1142  // True if the object is a heap object in the address range of this
1143  // semispace (not necessarily below the allocation pointer).
1144  bool Contains(Object* o) {
1145    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1146  }
1147
1148  // The offset of an address from the beginning of the space.
1149  int SpaceOffsetForAddress(Address addr) {
1150    return static_cast<int>(addr - low());
1151  }
1152
1153  // If we don't have these here then SemiSpace will be abstract.  However
1154  // they should never be called.
1155  virtual int Size() {
1156    UNREACHABLE();
1157    return 0;
1158  }
1159
1160  virtual bool ReserveSpace(int bytes) {
1161    UNREACHABLE();
1162    return false;
1163  }
1164
1165  bool is_committed() { return committed_; }
1166  bool Commit();
1167  bool Uncommit();
1168
1169#ifdef DEBUG
1170  virtual void Print();
1171  virtual void Verify();
1172#endif
1173
1174  // Returns the current capacity of the semi space.
1175  int Capacity() { return capacity_; }
1176
1177  // Returns the maximum capacity of the semi space.
1178  int MaximumCapacity() { return maximum_capacity_; }
1179
1180  // Returns the initial capacity of the semi space.
1181  int InitialCapacity() { return initial_capacity_; }
1182
1183 private:
1184  // The current and maximum capacity of the space.
1185  int capacity_;
1186  int maximum_capacity_;
1187  int initial_capacity_;
1188
1189  // The start address of the space.
1190  Address start_;
1191  // Used to govern object promotion during mark-compact collection.
1192  Address age_mark_;
1193
1194  // Masks and comparison values to test for containment in this semispace.
1195  uintptr_t address_mask_;
1196  uintptr_t object_mask_;
1197  uintptr_t object_expected_;
1198
1199  bool committed_;
1200
1201 public:
1202  TRACK_MEMORY("SemiSpace")
1203};
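
// A hedged illustration of the containment test above, assuming Setup()
// computes address_mask_ as ~(maximum_capacity - 1) (the computation itself
// is not shown in this header). With start_ == 0x20000000 and an 8 MB
// maximum capacity, address_mask_ == 0xFF800000, so for a == 0x20001234:
//   (0x20001234 & 0xFF800000) == 0x20000000 == start_   // Contains(a) is true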
1204
1205
1206// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1207// semispace of the heap's new space.  It iterates over the objects in the
1208// semispace from a given start address (defaulting to the bottom of the
1209// semispace) to the top of the semispace.  New objects allocated after the
1210// iterator is created are not iterated.
1211class SemiSpaceIterator : public ObjectIterator {
1212 public:
1213  // Create an iterator over the objects in the given space.  If no start
1214  // address is given, the iterator starts from the bottom of the space.  If
1215  // no size function is given, the iterator calls Object::Size().
1216  explicit SemiSpaceIterator(NewSpace* space);
1217  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1218  SemiSpaceIterator(NewSpace* space, Address start);
1219
1220  HeapObject* next() {
1221    if (current_ == limit_) return NULL;
1222
1223    HeapObject* object = HeapObject::FromAddress(current_);
1224    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1225
1226    current_ += size;
1227    return object;
1228  }
1229
1230  // Implementation of the ObjectIterator functions.
1231  virtual HeapObject* next_object() { return next(); }
1232
1233 private:
1234  void Initialize(NewSpace* space, Address start, Address end,
1235                  HeapObjectCallback size_func);
1236
1237  // The semispace.
1238  SemiSpace* space_;
1239  // The current iteration point.
1240  Address current_;
1241  // The end of iteration.
1242  Address limit_;
1243  // The callback function.
1244  HeapObjectCallback size_func_;
1245};
1246
1247
1248// -----------------------------------------------------------------------------
1249// The young generation space.
1250//
1251// The new space consists of a contiguous pair of semispaces.  It simply
1252// forwards most functions to the appropriate semispace.
1253
1254class NewSpace : public Space {
1255 public:
1256  // Constructor.
1257  NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
1258
1259  // Sets up the new space using the given chunk.
1260  bool Setup(Address start, int size);
1261
1262  // Tears down the space.  Heap memory was not allocated by the space, so it
1263  // is not deallocated here.
1264  void TearDown();
1265
1266  // True if the space has been set up but not torn down.
1267  bool HasBeenSetup() {
1268    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
1269  }
1270
1271  // Flip the pair of spaces.
1272  void Flip();
1273
1274  // Grow the capacity of the semispaces.  Assumes that they are not at
1275  // their maximum capacity.
1276  void Grow();
1277
1278  // Shrink the capacity of the semispaces.
1279  void Shrink();
1280
1281  // True if the address or object lies in the address range of either
1282  // semispace (not necessarily below the allocation pointer).
1283  bool Contains(Address a) {
1284    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1285        == reinterpret_cast<uintptr_t>(start_);
1286  }
1287  bool Contains(Object* o) {
1288    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1289  }
1290
1291  // Return the allocated bytes in the active semispace.
1292  virtual int Size() { return static_cast<int>(top() - bottom()); }
1293
1294  // Return the current capacity of a semispace.
1295  int Capacity() {
1296    ASSERT(to_space_.Capacity() == from_space_.Capacity());
1297    return to_space_.Capacity();
1298  }
1299
1300  // Return the total amount of memory committed for new space.
1301  int CommittedMemory() {
1302    if (from_space_.is_committed()) return 2 * Capacity();
1303    return Capacity();
1304  }
1305
1306  // Return the available bytes without growing in the active semispace.
1307  int Available() { return Capacity() - Size(); }
1308
1309  // Return the maximum capacity of a semispace.
1310  int MaximumCapacity() {
1311    ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
1312    return to_space_.MaximumCapacity();
1313  }
1314
1315  // Returns the initial capacity of a semispace.
1316  int InitialCapacity() {
1317    ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
1318    return to_space_.InitialCapacity();
1319  }
1320
1321  // Return the address of the allocation pointer in the active semispace.
1322  Address top() { return allocation_info_.top; }
1323  // Return the address of the first object in the active semispace.
1324  Address bottom() { return to_space_.low(); }
1325
1326  // Get the age mark of the inactive semispace.
1327  Address age_mark() { return from_space_.age_mark(); }
1328  // Set the age mark in the active semispace.
1329  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
1330
1331  // The start address of the space and a bit mask. ANDing an address in the
1332  // new space with the mask will result in the start address.
1333  Address start() { return start_; }
1334  uintptr_t mask() { return address_mask_; }
1335
1336  // The allocation top and limit addresses.
1337  Address* allocation_top_address() { return &allocation_info_.top; }
1338  Address* allocation_limit_address() { return &allocation_info_.limit; }
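
  // A hedged sketch of how the exposed top/limit addresses support inline
  // allocation (e.g. from generated code); the exact emitted sequence is
  // outside this header:
  //   Address top = *allocation_top_address();
  //   Address new_top = top + size_in_bytes;
  //   if (new_top <= *allocation_limit_address()) {
  //     *allocation_top_address() = new_top;  // the new object lives at 'top'
  //   } else {
  //     // fall back to AllocateRaw / runtime allocation
  //   }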
1339
1340  Object* AllocateRaw(int size_in_bytes) {
1341    return AllocateRawInternal(size_in_bytes, &allocation_info_);
1342  }
1343
1344  // Allocate the requested number of bytes for relocation during mark-compact
1345  // collection.
1346  Object* MCAllocateRaw(int size_in_bytes) {
1347    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
1348  }
1349
1350  // Reset the allocation pointer to the beginning of the active semispace.
1351  void ResetAllocationInfo();
1352  // Reset the relocation pointer to the bottom of the inactive semispace in
1353  // preparation for mark-compact collection.
1354  void MCResetRelocationInfo();
1355  // Update the allocation pointer in the active semispace after a
1356  // mark-compact collection.
1357  void MCCommitRelocationInfo();
1358
1359  // Get the extent of the inactive semispace (for use as a marking stack).
1360  Address FromSpaceLow() { return from_space_.low(); }
1361  Address FromSpaceHigh() { return from_space_.high(); }
1362
1363  // Get the extent of the active semispace (to sweep newly copied objects
1364  // during a scavenge collection).
1365  Address ToSpaceLow() { return to_space_.low(); }
1366  Address ToSpaceHigh() { return to_space_.high(); }
1367
1368  // Offsets from the beginning of the semispaces.
1369  int ToSpaceOffsetForAddress(Address a) {
1370    return to_space_.SpaceOffsetForAddress(a);
1371  }
1372  int FromSpaceOffsetForAddress(Address a) {
1373    return from_space_.SpaceOffsetForAddress(a);
1374  }
1375
1376  // True if the object is a heap object in the address range of the
1377  // respective semispace (not necessarily below the allocation pointer of the
1378  // semispace).
1379  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
1380  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
1381
1382  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
1383  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
1384
1385  virtual bool ReserveSpace(int bytes);
1386
1387#ifdef ENABLE_HEAP_PROTECTION
1388  // Protect/unprotect the space by marking it read-only/writable.
1389  virtual void Protect();
1390  virtual void Unprotect();
1391#endif
1392
1393#ifdef DEBUG
1394  // Verify the active semispace.
1395  virtual void Verify();
1396  // Print the active semispace.
1397  virtual void Print() { to_space_.Print(); }
1398#endif
1399
1400#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1401  // Iterates the active semispace to collect statistics.
1402  void CollectStatistics();
1403  // Reports previously collected statistics of the active semispace.
1404  void ReportStatistics();
1405  // Clears previously collected statistics.
1406  void ClearHistograms();
1407
1408  // Record the allocation or promotion of a heap object.  Note that we don't
1409  // record every single allocation, but only those that happen in the
1410  // to space during a scavenge GC.
1411  void RecordAllocation(HeapObject* obj);
1412  void RecordPromotion(HeapObject* obj);
1413#endif
1414
1415  // Returns whether the operation succeeded.
1416  bool CommitFromSpaceIfNeeded() {
1417    if (from_space_.is_committed()) return true;
1418    return from_space_.Commit();
1419  }
1420
1421  bool UncommitFromSpace() {
1422    if (!from_space_.is_committed()) return true;
1423    return from_space_.Uncommit();
1424  }
1425
1426 private:
1427  // The semispaces.
1428  SemiSpace to_space_;
1429  SemiSpace from_space_;
1430
1431  // Start address and bit mask for containment testing.
1432  Address start_;
1433  uintptr_t address_mask_;
1434  uintptr_t object_mask_;
1435  uintptr_t object_expected_;
1436
1437  // Allocation pointer and limit for normal allocation and allocation during
1438  // mark-compact collection.
1439  AllocationInfo allocation_info_;
1440  AllocationInfo mc_forwarding_info_;
1441
1442#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1443  HistogramInfo* allocated_histogram_;
1444  HistogramInfo* promoted_histogram_;
1445#endif
1446
1447  // Implementation of AllocateRaw and MCAllocateRaw.
1448  inline Object* AllocateRawInternal(int size_in_bytes,
1449                                     AllocationInfo* alloc_info);
1450
1451  friend class SemiSpaceIterator;
1452
1453 public:
1454  TRACK_MEMORY("NewSpace")
1455};
1456
1457
1458// -----------------------------------------------------------------------------
1459// Free lists for old object spaces
1460//
1461// Free-list nodes are free blocks in the heap.  They look like heap objects
1462// (free-list node pointers have the heap object tag, and they have a map like
1463// a heap object).  They have a size and a next pointer.  The next pointer is
1464// the raw address of the next free list node (or NULL).
1465class FreeListNode: public HeapObject {
1466 public:
1467  // Obtain a free-list node from a raw address.  This is not a cast because
1468  // it neither checks nor requires that the first word at the address is a map
1469  // pointer.
1470  static FreeListNode* FromAddress(Address address) {
1471    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1472  }
1473
1474  static inline bool IsFreeListNode(HeapObject* object);
1475
1476  // Set the size in bytes, which can be read with HeapObject::Size().  This
1477  // function also writes a map to the first word of the block so that it
1478  // looks like a heap object to the garbage collector and heap iteration
1479  // functions.
1480  void set_size(int size_in_bytes);
1481
1482  // Accessors for the next field.
1483  inline Address next();
1484  inline void set_next(Address next);
1485
1486 private:
1487  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
1488
1489  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1490};
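
// A sketch (illustrative only) of how a free block is typically turned into a
// free-list node with the accessors above; block_start, size_in_bytes, and
// old_head are stand-ins, and the real bookkeeping lives in the free-list
// classes below:
//
//   FreeListNode* node = FreeListNode::FromAddress(block_start);
//   node->set_size(size_in_bytes);  // Writes a map so the GC can skip the block.
//   node->set_next(old_head);       // Raw address of the previous list head.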
1491
1492
1493// The free list for the old space.
1494class OldSpaceFreeList BASE_EMBEDDED {
1495 public:
1496  explicit OldSpaceFreeList(AllocationSpace owner);
1497
1498  // Clear the free list.
1499  void Reset();
1500
1501  // Return the number of bytes available on the free list.
1502  int available() { return available_; }
1503
1504  // Place a node on the free list.  The block of size 'size_in_bytes'
1505  // starting at 'start' is placed on the free list.  The return value is the
1506  // number of bytes that have been lost due to internal fragmentation by
1507  // freeing the block.  Bookkeeping information will be written to the block,
1508  // i.e., its contents will be destroyed.  The start address should be word
1509  // aligned, and the size should be a non-zero multiple of the word size.
1510  int Free(Address start, int size_in_bytes);
1511
1512  // Allocate a block of size 'size_in_bytes' from the free list.  The block
1513  // is uninitialized.  A failure is returned if no block is available.  The
1514  // number of bytes lost to fragmentation is returned in the output parameter
1515  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
1516  Object* Allocate(int size_in_bytes, int* wasted_bytes);
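
  // Illustrative use only, with made-up sizes: a sweeper returns a block to
  // the free list and a later request takes one back out.  Blocks smaller
  // than kMinBlockSize (below) always end up counted as waste.
  //
  //   int lost = free_list->Free(block_start, 64);  // 64-byte block freed.
  //   int wasted_bytes;
  //   Object* result = free_list->Allocate(40, &wasted_bytes);
  //   if (result->IsFailure()) {
  //     // No suitable block was available, e.g. the space must grow instead.
  //   }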
1517
1518 private:
1519  // The size range of blocks, in bytes. (Smaller allocations are allowed, but
1520  // will always result in waste.)
1521  static const int kMinBlockSize = 2 * kPointerSize;
1522  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1523
1524  // The identity of the owning space, for building allocation Failure
1525  // objects.
1526  AllocationSpace owner_;
1527
1528  // Total available bytes in all blocks on this free list.
1529  int available_;
1530
1531  // Blocks are put on exact free lists in an array, indexed by size in words.
1532  // The available sizes are kept in a list ordered by increasing size. Entries
1533  // corresponding to sizes < kMinBlockSize always have an empty free list
1534  // (but index kHead is used for the head of the size list).
1535  struct SizeNode {
1536    // Address of the head FreeListNode of the implied block size or NULL.
1537    Address head_node_;
1538    // Size (words) of the next larger available size if head_node_ != NULL.
1539    int next_size_;
1540  };
1541  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
1542  SizeNode free_[kFreeListsLength];
1543
1544  // Sentinel elements for the size list. Real elements are in (kHead, kEnd).
1545  static const int kHead = kMinBlockSize / kPointerSize - 1;
1546  static const int kEnd = kMaxInt;
1547
1548  // We keep a "finger" in the size list to speed up a common pattern:
1549  // repeated requests for the same or increasing sizes.
1550  int finger_;
1551
1552  // Starting from *prev, find and return the smallest size >= index (words),
1553  // or kEnd. Update *prev to be the largest size < index, or kHead.
1554  int FindSize(int index, int* prev) {
1555    int cur = free_[*prev].next_size_;
1556    while (cur < index) {
1557      *prev = cur;
1558      cur = free_[cur].next_size_;
1559    }
1560    return cur;
1561  }
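
  // Worked example (hypothetical list state): if the size list currently
  // holds the sizes {4, 7, 12} (in words) and *prev starts at kHead, then
  // FindSize(8, &prev) walks 4 -> 7 -> 12, returns 12, and leaves *prev == 7.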
1562
1563  // Remove an existing element from the size list.
1564  void RemoveSize(int index) {
1565    int prev = kHead;
1566    int cur = FindSize(index, &prev);
1567    ASSERT(cur == index);
1568    free_[prev].next_size_ = free_[cur].next_size_;
1569    finger_ = prev;
1570  }
1571
1572  // Insert a new element into the size list.
1573  void InsertSize(int index) {
1574    int prev = kHead;
1575    int cur = FindSize(index, &prev);
1576    ASSERT(cur != index);
1577    free_[prev].next_size_ = index;
1578    free_[index].next_size_ = cur;
1579  }
1580
1581  // The size list is not updated during a sequence of calls to Free, but is
1582  // rebuilt before the next allocation.
1583  void RebuildSizeList();
1584  bool needs_rebuild_;
1585
1586#ifdef DEBUG
1587  // Does this free list contain a free block located at the address of 'node'?
1588  bool Contains(FreeListNode* node);
1589#endif
1590
1591  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
1592};
1593
1594
1595// The free list for the map space.
1596class FixedSizeFreeList BASE_EMBEDDED {
1597 public:
1598  FixedSizeFreeList(AllocationSpace owner, int object_size);
1599
1600  // Clear the free list.
1601  void Reset();
1602
1603  // Return the number of bytes available on the free list.
1604  int available() { return available_; }
1605
1606  // Place a node on the free list.  The block starting at 'start' (assumed to
1607  // have size object_size_) is placed on the free list.  Bookkeeping
1608  // information will be written to the block, i.e., its contents will be
1609  // destroyed.  The start address should be word aligned.
1610  void Free(Address start);
1611
1612  // Allocate a fixed-size block from the free list.  The block is uninitialized.
1613  // A failure is returned if no block is available.
1614  Object* Allocate();
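
  // Illustrative use only: every block on this list has size object_size_, so
  // there is no waste to account for.  A sketch, assuming the caller handles
  // failure by growing the space:
  //
  //   free_list->Free(block_start);           // Block of exactly object_size_.
  //   Object* result = free_list->Allocate();
  //   if (result->IsFailure()) {
  //     // No free block was available.
  //   }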
1615
1616 private:
1617  // Available bytes on the free list.
1618  int available_;
1619
1620  // The head of the free list.
1621  Address head_;
1622
1623  // The identity of the owning space, for building allocation Failure
1624  // objects.
1625  AllocationSpace owner_;
1626
1627  // The size of the objects in this space.
1628  int object_size_;
1629
1630  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
1631};
1632
1633
1634// -----------------------------------------------------------------------------
1635// Old object space (excluding map objects)
1636
1637class OldSpace : public PagedSpace {
1638 public:
1639  // Creates an old space object with a given maximum capacity.
1640  // The constructor does not allocate pages from the OS.
1641  explicit OldSpace(int max_capacity,
1642                    AllocationSpace id,
1643                    Executability executable)
1644      : PagedSpace(max_capacity, id, executable), free_list_(id) {
1645    page_extra_ = 0;
1646  }
1647
1648  // The bytes available on the free list (i.e., not above the linear allocation
1649  // pointer).
1650  int AvailableFree() { return free_list_.available(); }
1651
1652  // The top of allocation in a page in this space. Undefined if page is unused.
1653  virtual Address PageAllocationTop(Page* page) {
1654    return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
1655  }
1656
1657  // Give a block of memory to the space's free list.  It might be added to
1658  // the free list or accounted as waste.
1659  void Free(Address start, int size_in_bytes) {
1660    int wasted_bytes = free_list_.Free(start, size_in_bytes);
1661    accounting_stats_.DeallocateBytes(size_in_bytes);
1662    accounting_stats_.WasteBytes(wasted_bytes);
1663  }
1664
1665  // Prepare for full garbage collection.  Resets the relocation pointer and
1666  // clears the free list.
1667  virtual void PrepareForMarkCompact(bool will_compact);
1668
1669  // Updates the allocation pointer to the relocation top after a mark-compact
1670  // collection.
1671  virtual void MCCommitRelocationInfo();
1672
1673  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1674
1675#ifdef DEBUG
1676  // Reports statistics for the space
1677  void ReportStatistics();
1678  // Dump the remembered sets in the space to stdout.
1679  void PrintRSet();
1680#endif
1681
1682 protected:
1683  // Virtual function in the superclass.  Slow path of AllocateRaw.
1684  HeapObject* SlowAllocateRaw(int size_in_bytes);
1685
1686  // Virtual function in the superclass.  Allocate linearly at the start of
1687  // the page after current_page (there is assumed to be one).
1688  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1689
1690 private:
1691  // The space's free list.
1692  OldSpaceFreeList free_list_;
1693
1694 public:
1695  TRACK_MEMORY("OldSpace")
1696};
1697
1698
1699// -----------------------------------------------------------------------------
1700// Old space for objects of a fixed size
1701
1702class FixedSpace : public PagedSpace {
1703 public:
1704  FixedSpace(int max_capacity,
1705             AllocationSpace id,
1706             int object_size_in_bytes,
1707             const char* name)
1708      : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
1709        object_size_in_bytes_(object_size_in_bytes),
1710        name_(name),
1711        free_list_(id, object_size_in_bytes) {
1712    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1713  }
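
  // page_extra_, computed above, is the unusable tail of each page: the object
  // area is in general not an exact multiple of the object size.  Worked
  // example with made-up numbers: if the object area were 8000 bytes and each
  // object 88 bytes, 90 objects (7920 bytes) fit on a page and
  // page_extra_ = 8000 % 88 = 80 bytes at the end of the page go unused.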
1714
1715  // The top of allocation in a page in this space. Undefined if page is unused.
1716  virtual Address PageAllocationTop(Page* page) {
1717    return page == TopPageOf(allocation_info_) ? top()
1718        : page->ObjectAreaEnd() - page_extra_;
1719  }
1720
1721  int object_size_in_bytes() { return object_size_in_bytes_; }
1722
1723  // Give a fixed sized block of memory to the space's free list.
1724  void Free(Address start) {
1725    free_list_.Free(start);
1726    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
1727  }
1728
1729  // Prepares for a mark-compact GC.
1730  virtual void PrepareForMarkCompact(bool will_compact);
1731
1732  // Updates the allocation pointer to the relocation top after a mark-compact
1733  // collection.
1734  virtual void MCCommitRelocationInfo();
1735
1736  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1737
1738#ifdef DEBUG
1739  // Reports statistics for the space
1740  void ReportStatistics();
1741
1742  // Dump the remembered sets in the space to stdout.
1743  void PrintRSet();
1744#endif
1745
1746 protected:
1747  // Virtual function in the superclass.  Slow path of AllocateRaw.
1748  HeapObject* SlowAllocateRaw(int size_in_bytes);
1749
1750  // Virtual function in the superclass.  Allocate linearly at the start of
1751  // the page after current_page (there is assumed to be one).
1752  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1753
1754  void ResetFreeList() {
1755    free_list_.Reset();
1756  }
1757
1758 private:
1759  // The size of objects in this space.
1760  int object_size_in_bytes_;
1761
1762  // The name of this space.
1763  const char* name_;
1764
1765  // The space's free list.
1766  FixedSizeFreeList free_list_;
1767};
1768
1769
1770// -----------------------------------------------------------------------------
1771// Old space for all map objects
1772
1773class MapSpace : public FixedSpace {
1774 public:
1775  // Creates a map space object with a maximum capacity.
1776  MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id)
1777      : FixedSpace(max_capacity, id, Map::kSize, "map"),
1778        max_map_space_pages_(max_map_space_pages) {
1779    ASSERT(max_map_space_pages < kMaxMapPageIndex);
1780  }
1781
1782  // Prepares for a mark-compact GC.
1783  virtual void PrepareForMarkCompact(bool will_compact);
1784
1785  // Given an index, returns the page address.
1786  Address PageAddress(int page_index) { return page_addresses_[page_index]; }
1787
1788  static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
1789
1790  // Are map pointers encodable into the map word?
1791  bool MapPointersEncodable() {
1792    if (!FLAG_use_big_map_space) {
1793      ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
1794      return true;
1795    }
1796    return CountPagesToTop() <= max_map_space_pages_;
1797  }
1798
1799  // Should be called after a forced sweep to find out whether the map space
1800  // needs compaction.
1801  bool NeedsCompaction(int live_maps) {
1802    return !MapPointersEncodable() && live_maps <= CompactionThreshold();
1803  }
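
  // Worked example with made-up numbers: if kMapsPerPage were 56 and
  // max_map_space_pages_ were 8, CompactionThreshold() (defined below) would
  // be 56 * (8 - 1) = 392, so compaction is attempted only when map pointers
  // are no longer encodable and at most 392 maps are live.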
1804
1805  Address TopAfterCompaction(int live_maps) {
1806    ASSERT(NeedsCompaction(live_maps));
1807
1808    int pages_left = live_maps / kMapsPerPage;
1809    PageIterator it(this, PageIterator::ALL_PAGES);
1810    while (pages_left-- > 0) {
1811      ASSERT(it.has_next());
1812      it.next()->ClearRSet();
1813    }
1814    ASSERT(it.has_next());
1815    Page* top_page = it.next();
1816    top_page->ClearRSet();
1817    ASSERT(top_page->is_valid());
1818
1819    int offset = live_maps % kMapsPerPage * Map::kSize;
1820    Address top = top_page->ObjectAreaStart() + offset;
1821    ASSERT(top < top_page->ObjectAreaEnd());
1822    ASSERT(Contains(top));
1823
1824    return top;
1825  }
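
  // Worked example with made-up numbers: if kMapsPerPage were 56 and
  // live_maps == 130, the loop above clears the remembered sets of two full
  // pages (112 maps), and the returned top lies (130 % 56) * Map::kSize =
  // 18 * Map::kSize bytes past the object area start of the third page.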
1826
1827  void FinishCompaction(Address new_top, int live_maps) {
1828    Page* top_page = Page::FromAddress(new_top);
1829    ASSERT(top_page->is_valid());
1830
1831    SetAllocationInfo(&allocation_info_, top_page);
1832    allocation_info_.top = new_top;
1833
1834    int new_size = live_maps * Map::kSize;
1835    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
1836    accounting_stats_.AllocateBytes(new_size);
1837
1838#ifdef DEBUG
1839    if (FLAG_enable_slow_asserts) {
1840      int actual_size = 0;
1841      for (Page* p = first_page_; p != top_page; p = p->next_page())
1842        actual_size += kMapsPerPage * Map::kSize;
1843      actual_size += (new_top - top_page->ObjectAreaStart());
1844      ASSERT(accounting_stats_.Size() == actual_size);
1845    }
1846#endif
1847
1848    Shrink();
1849    ResetFreeList();
1850  }
1851
1852 protected:
1853#ifdef DEBUG
1854  virtual void VerifyObject(HeapObject* obj);
1855#endif
1856
1857 private:
1858  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
1859
1860  // Do map space compaction if there is a page gap.
1861  int CompactionThreshold() {
1862    return kMapsPerPage * (max_map_space_pages_ - 1);
1863  }
1864
1865  const int max_map_space_pages_;
1866
1867  // An array of page start addresses in the map space.
1868  Address page_addresses_[kMaxMapPageIndex];
1869
1870 public:
1871  TRACK_MEMORY("MapSpace")
1872};
1873
1874
1875// -----------------------------------------------------------------------------
1876// Old space for all global object property cell objects
1877
1878class CellSpace : public FixedSpace {
1879 public:
1880  // Creates a property cell space object with a maximum capacity.
1881  CellSpace(int max_capacity, AllocationSpace id)
1882      : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
1883
1884 protected:
1885#ifdef DEBUG
1886  virtual void VerifyObject(HeapObject* obj);
1887#endif
1888
1889 public:
1890  TRACK_MEMORY("CellSpace")
1891};
1892
1893
1894// -----------------------------------------------------------------------------
1895// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
1896  // the large object space. A large object is allocated from the OS heap with
1897  // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
1898  // A large object always begins at Page::kObjectStartOffset within its page.
1899// Large objects do not move during garbage collections.
1900
1901// A LargeObjectChunk holds exactly one large object page with exactly one
1902// large object.
1903class LargeObjectChunk {
1904 public:
1905  // Allocates a new LargeObjectChunk that contains a large object page
1906  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
1907  // object and possibly extra remembered set words) bytes after the object
1908  // area start of that page. The allocated chunk size is set in the output
1909  // parameter chunk_size.
1910  static LargeObjectChunk* New(int size_in_bytes,
1911                               size_t* chunk_size,
1912                               Executability executable);
1913
1914  // Interpret a raw address as a large object chunk.
1915  static LargeObjectChunk* FromAddress(Address address) {
1916    return reinterpret_cast<LargeObjectChunk*>(address);
1917  }
1918
1919  // Returns the address of this chunk.
1920  Address address() { return reinterpret_cast<Address>(this); }
1921
1922  // Accessors for the fields of the chunk.
1923  LargeObjectChunk* next() { return next_; }
1924  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
1925
1926  size_t size() { return size_; }
1927  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
1928
1929  // Returns the object in this chunk.
1930  inline HeapObject* GetObject();
1931
1932  // Given a requested size (including any extra remembered set words),
1933  // returns the physical size of a chunk to be allocated.
1934  static int ChunkSizeFor(int size_in_bytes);
1935
1936  // Given a chunk size, returns the object size it can accommodate (not
1937  // including any extra remembered set words).  Used by
1938  // LargeObjectSpace::Available.  Note that this can overestimate the size
1939  // of the object that will fit in a chunk: if the object requires extra
1940  // remembered set words (e.g., for large fixed arrays), the actual object
1941  // size for the chunk will be smaller than reported by this function.
1942  static int ObjectSizeFor(int chunk_size) {
1943    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
1944    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
1945  }
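
  // For example, a chunk of 1 MB + Page::kPageSize + Page::kObjectStartOffset
  // bytes can hold a 1 MB object (ignoring any extra remembered set words);
  // ChunkSizeFor above maps a requested object size back to a chunk size.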
1946
1947 private:
1948  // A pointer to the next large object chunk in the space or NULL.
1949  LargeObjectChunk* next_;
1950
1951  // The size of this chunk.
1952  size_t size_;
1953
1954 public:
1955  TRACK_MEMORY("LargeObjectChunk")
1956};
1957
1958
1959class LargeObjectSpace : public Space {
1960 public:
1961  explicit LargeObjectSpace(AllocationSpace id);
1962  virtual ~LargeObjectSpace() {}
1963
1964  // Initializes internal data structures.
1965  bool Setup();
1966
1967  // Releases internal resources, frees objects in this space.
1968  void TearDown();
1969
1970  // Allocates a (non-FixedArray, non-Code) large object.
1971  Object* AllocateRaw(int size_in_bytes);
1972  // Allocates a large Code object.
1973  Object* AllocateRawCode(int size_in_bytes);
1974  // Allocates a large FixedArray.
1975  Object* AllocateRawFixedArray(int size_in_bytes);
1976
1977  // Available bytes for objects in this space, not including any extra
1978  // remembered set words.
1979  int Available() {
1980    return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
1981  }
1982
1983  virtual int Size() {
1984    return size_;
1985  }
1986
1987  int PageCount() {
1988    return page_count_;
1989  }
1990
1991  // Finds an object for a given address; returns Failure::Exception() if it
1992  // is not found. The function iterates through all objects in this space
1993  // and may be slow.
1994  Object* FindObject(Address a);
1995
1996  // Clears remembered sets.
1997  void ClearRSet();
1998
1999  // Iterates objects whose remembered set bits are set.
2000  void IterateRSet(ObjectSlotCallback func);
2001
2002  // Frees unmarked objects.
2003  void FreeUnmarkedObjects();
2004
2005  // Checks whether a heap object is in this space; O(1).
2006  bool Contains(HeapObject* obj);
2007
2008  // Checks whether the space is empty.
2009  bool IsEmpty() { return first_chunk_ == NULL; }
2010
2011  // See the comments for ReserveSpace in the Space class.  This has to be
2012  // called after ReserveSpace has been called on the paged spaces, since they
2013  // may use some memory, leaving less for large objects.
2014  virtual bool ReserveSpace(int bytes);
2015
2016#ifdef ENABLE_HEAP_PROTECTION
2017  // Protect/unprotect the space by marking it read-only/writable.
2018  void Protect();
2019  void Unprotect();
2020#endif
2021
2022#ifdef DEBUG
2023  virtual void Verify();
2024  virtual void Print();
2025  void ReportStatistics();
2026  void CollectCodeStatistics();
2027  // Dump the remembered sets in the space to stdout.
2028  void PrintRSet();
2029#endif
2030  // Checks whether an address is in the object area in this space.  It
2031  // iterates all objects in the space. May be slow.
2032  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2033
2034 private:
2035  // The head of the linked list of large object chunks.
2036  LargeObjectChunk* first_chunk_;
2037  int size_;  // allocated bytes
2038  int page_count_;  // number of chunks
2039
2040
2041  // Shared implementation of AllocateRaw, AllocateRawCode and
2042  // AllocateRawFixedArray.
2043  Object* AllocateRawInternal(int requested_size,
2044                              int object_size,
2045                              Executability executable);
2046
2047  // Returns the number of extra remembered set bytes (rounded up to the
2048  // nearest full word) required to cover extra_object_bytes bytes of pointers.
2049  static inline int ExtraRSetBytesFor(int extra_object_bytes);
2050
2051  friend class LargeObjectIterator;
2052
2053 public:
2054  TRACK_MEMORY("LargeObjectSpace")
2055};
2056
2057
2058class LargeObjectIterator: public ObjectIterator {
2059 public:
2060  explicit LargeObjectIterator(LargeObjectSpace* space);
2061  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2062
2063  HeapObject* next();
2064
2065  // Implementation of ObjectIterator.
2066  virtual HeapObject* next_object() { return next(); }
2067
2068 private:
2069  LargeObjectChunk* current_;
2070  HeapObjectCallback size_func_;
2071};
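

// Illustrative use of the iterator above (a sketch, not part of the
// interface).  It assumes, as for the other object iterators, that next()
// returns NULL once the chunk list is exhausted, and uses the usual
// Heap::lo_space() accessor:
//
//   LargeObjectIterator it(Heap::lo_space());
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // Visit obj; large objects never move, so the address stays valid.
//   }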
2072
2073
2074} }  // namespace v8::internal
2075
2076#endif  // V8_SPACES_H_
2077