spaces.h revision 69a99ed0b2b2ef69d393c371b03db3a98aaf880e
1// Copyright 2011 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#ifndef V8_SPACES_H_
29#define V8_SPACES_H_
30
31#include "allocation.h"
32#include "list.h"
33#include "log.h"
34
35namespace v8 {
36namespace internal {
37
38class Isolate;
39
40// -----------------------------------------------------------------------------
41// Heap structures:
42//
43// A JS heap consists of a young generation, an old generation, and a large
44// object space. The young generation is divided into two semispaces. A
45// scavenger implements Cheney's copying algorithm. The old generation is
46// separated into a map space and an old object space. The map space contains
47// all (and only) map objects, the rest of old objects go into the old space.
48// The old generation is collected by a mark-sweep-compact collector.
49//
50// The semispaces of the young generation are contiguous.  The old and map
51// spaces consist of a list of pages. A page has a page header and an object
52// area. The page size is deliberately chosen to be 8K bytes.
53// The first word of a page is an opaque page header that holds the
54// address of the next page and its ownership information. The second word may
55// hold the allocation top address of this page. Heap objects are aligned to
56// the pointer size.
57//
58// There is a separate large object space for objects larger than
59// Page::kMaxHeapObjectSize, so that they do not have to move during
60// collection. The large object space is paged. Pages in large object space
61// may be larger than 8K.
62//
63// A card marking write barrier is used to keep track of intergenerational
64// references. Old space pages are divided into regions of Page::kRegionSize
65// size. Each region has a corresponding dirty bit in the page header which is
66// set if the region might contain pointers to new space. For details about
67// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
68// method body.
69//
70// During scavenges and mark-sweep collections we iterate over intergenerational
71// pointers without decoding heap object maps, so if a page belongs to the old
72// pointer space or the large object space it is essential to guarantee that
73// the page does not contain any garbage pointers to new space: every pointer-
74// aligned word which satisfies the Heap::InNewSpace() predicate must be a
75// pointer to a live heap object in new space. Thus objects in the old pointer
76// and large object spaces should have a special layout (e.g. no bare integer
77// fields). This requirement does not apply to the map space, which is iterated
78// in a special fashion. However, we still require the pointer fields of dead
79// maps to be cleaned.
80//
81// To enable lazy cleaning of old space pages we use the notion of an
82// allocation watermark. Every pointer below the watermark is considered to be
83// well formed. The page allocation watermark is not necessarily equal to the
84// page allocation top, but all live objects on a page must reside below the
85// allocation watermark. During a scavenge the allocation watermark might be
86// bumped and invalid pointers might appear below it. To avoid following them
87// we store a valid watermark in a special field in the page header and set
88// the page's WATERMARK_INVALIDATED flag. For details see comments in the
89// Page::SetAllocationWatermark() method body.
90//
91
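// An illustrative sketch of the write barrier implied by the card marking
// scheme above (the 'heap', 'slot', and 'value' names are illustrative):
// when a pointer to a new space object is stored into an old space object,
// the region covering the written slot is marked dirty, so the next scavenge
// only has to scan dirty regions for intergenerational pointers:
//
//   Page* page = Page::FromAddress(slot);
//   if (heap->InNewSpace(value)) page->MarkRegionDirty(slot);
//
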
92// Some assertion macros used in debug mode.
93
94#define ASSERT_PAGE_ALIGNED(address)                                           \
95  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
96
97#define ASSERT_OBJECT_ALIGNED(address)                                         \
98  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
99
100#define ASSERT_MAP_ALIGNED(address)                                            \
101  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
102
103#define ASSERT_OBJECT_SIZE(size)                                               \
104  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
105
106#define ASSERT_PAGE_OFFSET(offset)                                             \
107  ASSERT((Page::kObjectStartOffset <= offset)                                  \
108      && (offset <= Page::kPageSize))
109
110#define ASSERT_MAP_PAGE_INDEX(index)                                           \
111  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
112
113
114class PagedSpace;
115class MemoryAllocator;
116class AllocationInfo;
117
118// -----------------------------------------------------------------------------
119// A page normally has 8K bytes. Large object pages may be larger.  A page
120// address is always aligned to the 8K page size.
121//
122// Each page starts with a header of Page::kPageHeaderSize size which contains
123// bookkeeping data.
124//
125// The mark-compact collector transforms a map pointer into a page index and a
126// page offset. The exact encoding is described in the comments for
127// class MapWord in objects.h.
128//
129// The only way to get a page pointer is by calling factory methods:
130//   Page* p = Page::FromAddress(addr); or
131//   Page* p = Page::FromAllocationTop(top);
132class Page {
133 public:
134  // Returns the page containing a given address. The address ranges
135  // from [page_addr .. page_addr + kPageSize[
136  //
137  // Note that this function only works for addresses in normal paged
138  // spaces and addresses in the first 8K of large object pages (i.e.,
139  // the start of large objects but not necessarily derived pointers
140  // within them).
141  INLINE(static Page* FromAddress(Address a)) {
142    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
143  }
144
145  // Returns the page containing an allocation top. Because an allocation
146  // top address can be the upper bound of the page, we need to subtract
147  // kPointerSize from it first. The address ranges from
148  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
149  INLINE(static Page* FromAllocationTop(Address top)) {
150    Page* p = FromAddress(top - kPointerSize);
151    ASSERT_PAGE_OFFSET(p->Offset(top));
152    return p;
153  }
154
155  // Returns the start address of this page.
156  Address address() { return reinterpret_cast<Address>(this); }
157
158  // Checks whether this is a valid page address.
159  bool is_valid() { return address() != NULL; }
160
161  // Returns the next page of this page.
162  inline Page* next_page();
163
164  // Return the end of allocation in this page. Undefined for unused pages.
165  inline Address AllocationTop();
166
167  // Return the allocation watermark for the page.
168  // For old space pages it is guaranteed that the area under the watermark
169  // does not contain any garbage pointers to new space.
170  inline Address AllocationWatermark();
171
172  // Return the allocation watermark offset from the beginning of the page.
173  inline uint32_t AllocationWatermarkOffset();
174
175  inline void SetAllocationWatermark(Address allocation_watermark);
176
177  inline void SetCachedAllocationWatermark(Address allocation_watermark);
178  inline Address CachedAllocationWatermark();
179
180  // Returns the start address of the object area in this page.
181  Address ObjectAreaStart() { return address() + kObjectStartOffset; }
182
183  // Returns the end address (exclusive) of the object area in this page.
184  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
185
186  // Checks whether an address is page aligned.
187  static bool IsAlignedToPageSize(Address a) {
188    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
189  }
190
191  // True if this page was in use before current compaction started.
192  // Result is valid only for pages owned by paged spaces and
193  // only after PagedSpace::PrepareForMarkCompact was called.
194  inline bool WasInUseBeforeMC();
195
196  inline void SetWasInUseBeforeMC(bool was_in_use);
197
198  // True if this page is a large object page.
199  inline bool IsLargeObjectPage();
200
201  inline void SetIsLargeObjectPage(bool is_large_object_page);
202
203  inline Executability PageExecutability();
204
205  inline void SetPageExecutability(Executability executable);
206
207  // Returns the offset of a given address to this page.
208  INLINE(int Offset(Address a)) {
209    int offset = static_cast<int>(a - address());
210    ASSERT_PAGE_OFFSET(offset);
211    return offset;
212  }
213
214  // Returns the address for a given offset into this page.
215  Address OffsetToAddress(int offset) {
216    ASSERT_PAGE_OFFSET(offset);
217    return address() + offset;
218  }
219
220  // ---------------------------------------------------------------------
221  // Card marking support
222
223  static const uint32_t kAllRegionsCleanMarks = 0x0;
224  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
225
226  inline uint32_t GetRegionMarks();
227  inline void SetRegionMarks(uint32_t dirty);
228
229  inline uint32_t GetRegionMaskForAddress(Address addr);
230  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
231  inline int GetRegionNumberForAddress(Address addr);
232
233  inline void MarkRegionDirty(Address addr);
234  inline bool IsRegionDirty(Address addr);
235
236  inline void ClearRegionMarks(Address start,
237                               Address end,
238                               bool reaches_limit);
239
240  // Page size in bytes.  This must be a multiple of the OS page size.
241  static const int kPageSize = 1 << kPageSizeBits;
242
243  // Page size mask.
244  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
245
246  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
247    kIntSize + kPointerSize + kPointerSize;
248
249  // The start offset of the object area in a page. Aligned to both maps and
250  // code alignment to be suitable for both.
251  static const int kObjectStartOffset =
252      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
253
254  // Object area size in bytes.
255  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
256
257  // Maximum object size that fits in a page.
258  static const int kMaxHeapObjectSize = kObjectAreaSize;
259
260  static const int kDirtyFlagOffset = 2 * kPointerSize;
261  static const int kRegionSizeLog2 = 8;
262  static const int kRegionSize = 1 << kRegionSizeLog2;
263  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
264
265  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
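
  // Illustrative arithmetic, assuming the usual 8K pages (kPageSizeBits == 13):
  // kPageSize == 8192 and kRegionSize == 256, so a page is covered by
  // 8192 / 256 == 32 regions, one per bit of the 32-bit dirty_regions_ word
  // declared below, which is exactly what the STATIC_CHECK above enforces.
  // A plausible sketch of the region index computation ('addr' is illustrative;
  // the authoritative encoding lives in Page::GetRegionNumberForAddress()):
  //
  //   int region = (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;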
266
267  enum PageFlag {
268    IS_NORMAL_PAGE = 0,
269    WAS_IN_USE_BEFORE_MC,
270
271    // The page allocation watermark was bumped by preallocation during scavenge.
272    // The correct watermark can be retrieved via CachedAllocationWatermark().
273    WATERMARK_INVALIDATED,
274    IS_EXECUTABLE,
275    NUM_PAGE_FLAGS  // Must be last
276  };
277  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
278
279  // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
280  // scavenge we simply invalidate the watermark on each old space page after
281  // processing it. We then flip the meaning of the WATERMARK_INVALIDATED
282  // flag at the beginning of the next scavenge, so each page becomes marked
283  // as having a valid watermark.
284  //
285  // The following invariant must hold for pages in old pointer and map spaces:
286  //     If page is in use then page is marked as having invalid watermark at
287  //     the beginning and at the end of any GC.
288  //
289  // This invariant guarantees that after flipping flag meaning at the
290  // beginning of scavenge all pages in use will be marked as having valid
291  // watermark.
292  static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
293
294  // Returns true if the page allocation watermark was not altered during
295  // scavenge.
296  inline bool IsWatermarkValid();
297
298  inline void InvalidateWatermark(bool value);
299
300  inline bool GetPageFlag(PageFlag flag);
301  inline void SetPageFlag(PageFlag flag, bool value);
302  inline void ClearPageFlags();
303
304  inline void ClearGCFields();
305
306  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
307  static const int kAllocationWatermarkOffsetBits  = kPageSizeBits + 1;
308  static const uint32_t kAllocationWatermarkOffsetMask =
309      ((1 << kAllocationWatermarkOffsetBits) - 1) <<
310      kAllocationWatermarkOffsetShift;
311
312  static const uint32_t kFlagsMask =
313    ((1 << kAllocationWatermarkOffsetShift) - 1);
314
315  STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
316               kAllocationWatermarkOffsetBits);
317
318  //---------------------------------------------------------------------------
319  // Page header description.
320  //
321  // If a page is not in the large object space, the first word,
322  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
323  // and the chunk number (0 ~ 8K-1).  Only MemoryAllocator should use
324  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
325  // or [next_page_start, next_page_end[. It cannot point to a valid address
326  // in the current page.  If a page is in the large object space, the first
327  // word *may* (if the page start and large object chunk start are the
328  // same) contain the address of the next large object chunk.
329  intptr_t opaque_header;
330
331  // If the page is not in the large object space, the low-order bit of the
332  // second word is set. If the page is in the large object space, the
333  // second word *may* (if the page start and large object chunk start are
334  // the same) contain the large object chunk size.  In either case, the
335  // low-order bit for large object pages will be cleared.
336  // For normal pages this word is used to store page flags and
337  // offset of allocation top.
338  intptr_t flags_;
339
340  // This field contains dirty marks for regions covering the page. Only dirty
341  // regions might contain intergenerational references.
342  // Only 32 dirty marks are supported, so for large object pages several
343  // regions might be mapped to a single dirty mark.
344  uint32_t dirty_regions_;
345
346  // The index of the page in its owner space.
347  int mc_page_index;
348
349  // During mark-compact collections this field contains the forwarding address
350  // of the first live object in this page.
351  // During scavenge collections this field is used to store the allocation
352  // watermark if it is altered during the scavenge.
353  Address mc_first_forwarded;
354
355  Heap* heap_;
356};
357
358
359// ----------------------------------------------------------------------------
360// Space is the abstract superclass for all allocation spaces.
361class Space : public Malloced {
362 public:
363  Space(Heap* heap, AllocationSpace id, Executability executable)
364      : heap_(heap), id_(id), executable_(executable) {}
365
366  virtual ~Space() {}
367
368  Heap* heap() const { return heap_; }
369
370  // Does the space need executable memory?
371  Executability executable() { return executable_; }
372
373  // Identity used in error reporting.
374  AllocationSpace identity() { return id_; }
375
376  // Returns allocated size.
377  virtual intptr_t Size() = 0;
378
379  // Returns size of objects. Can differ from the allocated size
380  // (e.g. see LargeObjectSpace).
381  virtual intptr_t SizeOfObjects() { return Size(); }
382
383#ifdef DEBUG
384  virtual void Print() = 0;
385#endif
386
387  // After calling this we can allocate a certain number of bytes using only
388  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
389  // without using freelists or causing a GC.  This is used by partial
390  // snapshots.  It returns true if space was reserved or false if a GC is
391  // needed.  For paged spaces the space requested must include the space wasted
392  // at the end of each page when allocating linearly.
393  virtual bool ReserveSpace(int bytes) = 0;
394
395 private:
396  Heap* heap_;
397  AllocationSpace id_;
398  Executability executable_;
399};
400
401
402// ----------------------------------------------------------------------------
403// All heap objects containing executable code (code objects) must be allocated
404// from a 2 GB range of memory, so that they can call each other using 32-bit
405// displacements.  This happens automatically on 32-bit platforms, where 32-bit
406// displacements cover the entire 4GB virtual address space.  On 64-bit
407// platforms, we support this using the CodeRange object, which reserves and
408// manages a range of virtual memory.
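//
// An illustrative usage sketch for the class below ('requested_size' and
// 'chunk_size' are illustrative; the real call sites are the heap setup code
// and MemoryAllocator, which allocates executable memory from the code range
// when one exists):
//
//   CodeRange code_range(isolate);
//   if (code_range.Setup(requested_size)) {
//     size_t allocated = 0;
//     void* chunk = code_range.AllocateRawMemory(chunk_size, &allocated);
//     ...
//     code_range.FreeRawMemory(chunk, allocated);
//   }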
409class CodeRange {
410 public:
411  explicit CodeRange(Isolate* isolate);
412  ~CodeRange() { TearDown(); }
413
414  // Reserves a range of virtual memory, but does not commit any of it.
415  // Can only be called once, at heap initialization time.
416  // Returns false on failure.
417  bool Setup(const size_t requested_size);
418
419  // Frees the range of virtual memory, and frees the data structures used to
420  // manage it.
421  void TearDown();
422
423  bool exists() { return this != NULL && code_range_ != NULL; }
424  bool contains(Address address) {
425    if (this == NULL || code_range_ == NULL) return false;
426    Address start = static_cast<Address>(code_range_->address());
427    return start <= address && address < start + code_range_->size();
428  }
429
430  // Allocates a chunk of memory from the large-object portion of
431  // the code range.  On platforms with no separate code range, should
432  // not be called.
433  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
434                                          size_t* allocated);
435  void FreeRawMemory(void* buf, size_t length);
436
437 private:
438  Isolate* isolate_;
439
440  // The reserved range of virtual memory that all code objects are put in.
441  VirtualMemory* code_range_;
442  // Plain old data class, just a struct plus a constructor.
443  class FreeBlock {
444   public:
445    FreeBlock(Address start_arg, size_t size_arg)
446        : start(start_arg), size(size_arg) {}
447    FreeBlock(void* start_arg, size_t size_arg)
448        : start(static_cast<Address>(start_arg)), size(size_arg) {}
449
450    Address start;
451    size_t size;
452  };
453
454  // Freed blocks of memory are added to the free list.  When the allocation
455  // list is exhausted, the free list is sorted and merged to make the new
456  // allocation list.
457  List<FreeBlock> free_list_;
458  // Memory is allocated from the free blocks on the allocation list.
459  // The block at current_allocation_block_index_ is the current block.
460  List<FreeBlock> allocation_list_;
461  int current_allocation_block_index_;
462
463  // Finds a block on the allocation list that contains at least the
464  // requested amount of memory.  If none is found, sorts and merges
465  // the existing free memory blocks, and searches again.
466  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
467  void GetNextAllocationBlock(size_t requested);
468  // Compares the start addresses of two free blocks.
469  static int CompareFreeBlockAddress(const FreeBlock* left,
470                                     const FreeBlock* right);
471
472  DISALLOW_COPY_AND_ASSIGN(CodeRange);
473};
474
475
476// ----------------------------------------------------------------------------
477// A space acquires chunks of memory from the operating system. The memory
478// allocator manages chunks for the paged heap spaces (old space and map
479// space).  A paged chunk consists of pages. Pages in a chunk have contiguous
480// addresses and are linked as a list.
481//
482// The allocator keeps an initial chunk which is used for the new space.  The
483// leftover regions of the initial chunk are used for the initial chunks of
484// old space and map space if they are big enough to hold at least one page.
485// The allocator assumes that there is one old space and one map space; each
486// is expanded by allocating kPagesPerChunk pages at a time, except for the
487// last expansion (before running out of space).  The first chunk may contain
488// fewer than kPagesPerChunk pages as well.
489//
490// The memory allocator also allocates chunks for the large object space, but
491// they are managed by the space itself.  The new space does not expand.
492//
493// The fact that pages for paged spaces are allocated and deallocated in chunks
494// induces a constraint on the order of pages in a linked list. We say that
495// pages are linked in chunk-order if and only if every two consecutive
496// pages from the same chunk are consecutive in the linked list.
497//
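// For example, if chunk X was split into pages x1, x2 and chunk Y into pages
// y1, y2, then the page lists
//   x1 -> x2 -> y1 -> y2   and   y1 -> y2 -> x1 -> x2
// are chunk-ordered, while x1 -> y1 -> x2 -> y2 is not, because x1 and x2
// come from the same chunk but are not adjacent in the list.
//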
498
499
500class MemoryAllocator {
501 public:
502  explicit MemoryAllocator(Isolate* isolate);
503
504  // Initializes its internal bookkeeping structures.
505  // Takes the max capacity of the total space and the executable memory limit.
506  bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
507
508  // Deletes valid chunks.
509  void TearDown();
510
511  // Reserves an initial address range of virtual memory to be split between
512  // the two new space semispaces, the old space, and the map space.  The
513  // memory is not yet committed or assigned to spaces and split into pages.
514  // The initial chunk is unmapped when the memory allocator is torn down.
515  // This function should only be called when there is not already a reserved
516  // initial chunk (initial_chunk_ should be NULL).  It returns the start
517  // address of the initial chunk if successful, with the side effect of
518  // setting the initial chunk; otherwise it returns NULL and leaves the
519  // initial chunk NULL.
520  void* ReserveInitialChunk(const size_t requested);
521
522  // Commits pages from an as-yet-unmanaged block of virtual memory into a
523  // paged space.  The block should be part of the initial chunk reserved via
524  // a call to ReserveInitialChunk.  The number of pages is always returned in
525  // the output parameter num_pages.  This function assumes that the start
526  // address is non-null and that it is big enough to hold at least one
527  // page-aligned page.  The call always succeeds, and num_pages is always
528  // greater than zero.
529  Page* CommitPages(Address start, size_t size, PagedSpace* owner,
530                    int* num_pages);
531
532  // Commit a contiguous block of memory from the initial chunk.  Assumes that
533  // the address is not NULL, the size is greater than zero, and that the
534  // block is contained in the initial chunk.  Returns true if it succeeded
535  // and false otherwise.
536  bool CommitBlock(Address start, size_t size, Executability executable);
537
538  // Uncommit a contiguous block of memory [start..(start+size)[.
539  // start is not NULL, the size is greater than zero, and the
540  // block is contained in the initial chunk.  Returns true if it succeeded
541  // and false otherwise.
542  bool UncommitBlock(Address start, size_t size);
543
544  // Zaps a contiguous block of memory [start..(start+size)[ thus
545  // filling it up with a recognizable non-NULL bit pattern.
546  void ZapBlock(Address start, size_t size);
547
548  // Attempts to allocate the requested (non-zero) number of pages from the
549  // OS.  Fewer pages might be allocated than requested. If it fails to
550  // allocate memory from the OS or cannot allocate a single page, this
551  // function returns an invalid page pointer (NULL). The caller must check
552  // whether the returned page is valid (by calling Page::is_valid()).  It is
553  // guaranteed that allocated pages have contiguous addresses.  The actual
554  // number of allocated pages is returned in the output parameter
555  // allocated_pages.  If the PagedSpace owner is executable and there is
556  // a code range, the pages are allocated from the code range.
557  Page* AllocatePages(int requested_pages, int* allocated_pages,
558                      PagedSpace* owner);
559
560  // Frees pages from a given page and after. Requires pages to be
561  // linked in chunk-order (see comment for class).
562  // If 'p' is the first page of a chunk, pages from 'p' are freed
563  // and this function returns an invalid page pointer.
564  // Otherwise, the function searches a page after 'p' that is
565  // the first page of a chunk. Pages after the found page
566  // are freed and the function returns 'p'.
567  Page* FreePages(Page* p);
568
569  // Frees all pages owned by given space.
570  void FreeAllPages(PagedSpace* space);
571
572  // Allocates and frees raw memory of certain size.
573  // These are just thin wrappers around OS::Allocate and OS::Free,
574  // but keep track of allocated bytes as part of heap.
575  // If the flag is EXECUTABLE and a code range exists, the requested
576  // memory is allocated from the code range.  If a code range exists
577  // and the freed memory is in it, the code range manages the freed memory.
578  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
579                                          size_t* allocated,
580                                          Executability executable);
581  void FreeRawMemory(void* buf,
582                     size_t length,
583                     Executability executable);
584  void PerformAllocationCallback(ObjectSpace space,
585                                 AllocationAction action,
586                                 size_t size);
587
588  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
589                                   ObjectSpace space,
590                                   AllocationAction action);
591  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
592  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
593
594  // Returns the maximum available bytes of the heap.
595  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
596
597  // Returns allocated spaces in bytes.
598  intptr_t Size() { return size_; }
599
600  // Returns the maximum available executable bytes of the heap.
601  intptr_t AvailableExecutable() {
602    if (capacity_executable_ < size_executable_) return 0;
603    return capacity_executable_ - size_executable_;
604  }
605
606  // Returns allocated executable spaces in bytes.
607  intptr_t SizeExecutable() { return size_executable_; }
608
609  // Returns maximum available bytes that the old space can have.
610  intptr_t MaxAvailable() {
611    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
612  }
613
614  // Links two pages.
615  inline void SetNextPage(Page* prev, Page* next);
616
617  // Returns the next page of a given page.
618  inline Page* GetNextPage(Page* p);
619
620  // Checks whether a page belongs to a space.
621  inline bool IsPageInSpace(Page* p, PagedSpace* space);
622
623  // Returns the space that owns the given page.
624  inline PagedSpace* PageOwner(Page* page);
625
626  // Finds the first/last page in the same chunk as a given page.
627  Page* FindFirstPageInSameChunk(Page* p);
628  Page* FindLastPageInSameChunk(Page* p);
629
630  // Relinks list of pages owned by space to make it chunk-ordered.
631  // Returns new first and last pages of space.
632  // Also returns the last page in the relinked list which has the
633  // WasInUseBeforeMC flag set.
634  void RelinkPageListInChunkOrder(PagedSpace* space,
635                                  Page** first_page,
636                                  Page** last_page,
637                                  Page** last_page_in_use);
638
639#ifdef DEBUG
640  // Reports statistic info of the space.
641  void ReportStatistics();
642#endif
643
644  // Due to encoding limitation, we can only have 8K chunks.
645  static const int kMaxNofChunks = 1 << kPageSizeBits;
646  // If a chunk has at least 16 pages, the maximum heap size is about
647  // 8K * 8K * 16 = 1G bytes.
648#ifdef V8_TARGET_ARCH_X64
649  static const int kPagesPerChunk = 32;
650  // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
651  static const int kChunkTableLevels = 4;
652  static const int kChunkTableBitsPerLevel = 12;
653#else
654  static const int kPagesPerChunk = 16;
655  // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
656  static const int kChunkTableLevels = 2;
657  static const int kChunkTableBitsPerLevel = 8;
658#endif
659
660 private:
661  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
662
663  Isolate* isolate_;
664
665  // Maximum space size in bytes.
666  intptr_t capacity_;
667  // Maximum subset of capacity_ that can be executable
668  intptr_t capacity_executable_;
669
670  // Allocated space size in bytes.
671  intptr_t size_;
672
673  // Allocated executable space size in bytes.
674  intptr_t size_executable_;
675
676  struct MemoryAllocationCallbackRegistration {
677    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
678                                         ObjectSpace space,
679                                         AllocationAction action)
680        : callback(callback), space(space), action(action) {
681    }
682    MemoryAllocationCallback callback;
683    ObjectSpace space;
684    AllocationAction action;
685  };
686  // A list of callbacks that are triggered when memory is allocated or freed.
687  List<MemoryAllocationCallbackRegistration>
688      memory_allocation_callbacks_;
689
690  // The initial chunk of virtual memory.
691  VirtualMemory* initial_chunk_;
692
693  // Allocated chunk info: chunk start address, chunk size, and owning space.
694  class ChunkInfo BASE_EMBEDDED {
695   public:
696    ChunkInfo() : address_(NULL),
697                  size_(0),
698                  owner_(NULL),
699                  executable_(NOT_EXECUTABLE),
700                  owner_identity_(FIRST_SPACE) {}
701    inline void init(Address a, size_t s, PagedSpace* o);
702    Address address() { return address_; }
703    size_t size() { return size_; }
704    PagedSpace* owner() { return owner_; }
705    // We save executability of the owner to allow using it
706    // when collecting stats after the owner has been destroyed.
707    Executability executable() const { return executable_; }
708    AllocationSpace owner_identity() const { return owner_identity_; }
709
710   private:
711    Address address_;
712    size_t size_;
713    PagedSpace* owner_;
714    Executability executable_;
715    AllocationSpace owner_identity_;
716  };
717
718  // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
719  List<ChunkInfo> chunks_;
720  List<int> free_chunk_ids_;
721  int max_nof_chunks_;
722  int top_;
723
724  // Push/pop a free chunk id onto/from the stack.
725  void Push(int free_chunk_id);
726  int Pop();
727  bool OutOfChunkIds() { return top_ == 0; }
728
729  // Frees a chunk.
730  void DeleteChunk(int chunk_id);
731
732  // Basic check whether a chunk id is in the valid range.
733  inline bool IsValidChunkId(int chunk_id);
734
735  // Checks whether a chunk id identifies an allocated chunk.
736  inline bool IsValidChunk(int chunk_id);
737
738  // Returns the chunk id that a page belongs to.
739  inline int GetChunkId(Page* p);
740
741  // True if the address lies in the initial chunk.
742  inline bool InInitialChunk(Address address);
743
744  // Initializes pages in a chunk. Returns the first page address.
745  // This function and GetChunkId() are provided for the mark-compact
746  // collector to rebuild page headers in the from space, which is
747  // used as a marking stack and its page headers are destroyed.
748  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
749                               PagedSpace* owner);
750
751  Page* RelinkPagesInChunk(int chunk_id,
752                           Address chunk_start,
753                           size_t chunk_size,
754                           Page* prev,
755                           Page** last_page_in_use);
756
757  DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
758};
759
760
761// -----------------------------------------------------------------------------
762// Interface for heap object iterator to be implemented by all object space
763// object iterators.
764//
765// NOTE: The space-specific object iterators also implement their own next()
766//       method, which is used to avoid virtual function calls when
767//       iterating a specific space.
768
769class ObjectIterator : public Malloced {
770 public:
771  virtual ~ObjectIterator() { }
772
773  virtual HeapObject* next_object() = 0;
774};
775
776
777// -----------------------------------------------------------------------------
778// Heap object iterator in new/old/map spaces.
779//
780// A HeapObjectIterator iterates objects from a given address to the
781// top of a space. The given address must be below the current
782// allocation pointer (space top). There are some caveats.
783//
784// (1) If the space top changes upward during iteration (because of
785//     allocating new objects), the iterator does not iterate objects
786//     above the original space top. The caller must create a new
787//     iterator starting from the old top in order to visit these new
788//     objects.
789//
790// (2) If new objects are allocated below the original allocation top
791//     (e.g., free-list allocation in paged spaces), the new objects
792//     may or may not be iterated depending on their position with
793//     respect to the current point of iteration.
794//
795// (3) The space top should not change downward during iteration,
796//     otherwise the iterator will return not-necessarily-valid
797//     objects.
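//
// An illustrative usage sketch ('heap' is illustrative, and next() is assumed
// to return NULL once the iteration is exhausted, as in the other iterators
// in this file):
//
//   HeapObjectIterator it(heap->old_pointer_space());
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... visit obj ...
//   }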
798
799class HeapObjectIterator: public ObjectIterator {
800 public:
801  // Creates a new object iterator in a given space. If a start
802  // address is not given, the iterator starts from the space bottom.
803  // If the size function is not given, the iterator calls the default
804  // Object::Size().
805  explicit HeapObjectIterator(PagedSpace* space);
806  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
807  HeapObjectIterator(PagedSpace* space, Address start);
808  HeapObjectIterator(PagedSpace* space,
809                     Address start,
810                     HeapObjectCallback size_func);
811  HeapObjectIterator(Page* page, HeapObjectCallback size_func);
812
813  inline HeapObject* next() {
814    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
815  }
816
817  // Implementation of ObjectIterator.
818  virtual HeapObject* next_object() { return next(); }
819
820 private:
821  Address cur_addr_;  // current iteration point
822  Address end_addr_;  // end iteration point
823  Address cur_limit_;  // current page limit
824  HeapObjectCallback size_func_;  // size function
825  Page* end_page_;  // caches the page of the end address
826
827  HeapObject* FromCurrentPage() {
828    ASSERT(cur_addr_ < cur_limit_);
829
830    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
831    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
832    ASSERT_OBJECT_SIZE(obj_size);
833
834    cur_addr_ += obj_size;
835    ASSERT(cur_addr_ <= cur_limit_);
836
837    return obj;
838  }
839
840  // Slow path of next, goes into the next page.
841  HeapObject* FromNextPage();
842
843  // Initializes fields.
844  void Initialize(Address start, Address end, HeapObjectCallback size_func);
845
846#ifdef DEBUG
847  // Verifies whether fields have valid values.
848  void Verify();
849#endif
850};
851
852
853// -----------------------------------------------------------------------------
854// A PageIterator iterates the pages in a paged space.
855//
856// The PageIterator class provides three modes for iterating pages in a space:
857//   PAGES_IN_USE iterates pages containing allocated objects.
858//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
859//                    mark-compact collection.
860//   ALL_PAGES iterates all pages in the space.
861//
862// There are some caveats.
863//
864// (1) If the space expands during iteration, new pages will not be
865//     returned by the iterator in any mode.
866//
867// (2) If new objects are allocated during iteration, they will appear
868//     in pages returned by the iterator.  Allocation may cause the
869//     allocation pointer or MC allocation pointer in the last page to
870//     change between constructing the iterator and iterating the last
871//     page.
872//
873// (3) The space should not shrink during iteration, otherwise the
874//     iterator will return deallocated pages.
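//
// An illustrative usage sketch ('space' is illustrative):
//
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... visit p ...
//   }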
875
876class PageIterator BASE_EMBEDDED {
877 public:
878  enum Mode {
879    PAGES_IN_USE,
880    PAGES_USED_BY_MC,
881    ALL_PAGES
882  };
883
884  PageIterator(PagedSpace* space, Mode mode);
885
886  inline bool has_next();
887  inline Page* next();
888
889 private:
890  PagedSpace* space_;
891  Page* prev_page_;  // Previous page returned.
892  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
893};
894
895
896// -----------------------------------------------------------------------------
897// A space has a list of pages. The next page can be accessed via
898// Page::next_page() call. The next page of the last page is an
899// invalid page pointer. A space can expand and shrink dynamically.
900
901// An abstraction of allocation and relocation pointers in a page-structured
902// space.
903class AllocationInfo {
904 public:
905  Address top;  // current allocation top
906  Address limit;  // current allocation limit
907
908#ifdef DEBUG
909  bool VerifyPagedAllocation() {
910    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
911        && (top <= limit);
912  }
913#endif
914};
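
// An illustrative sketch of bump-pointer allocation over an AllocationInfo
// ('alloc_info' and 'size_in_bytes' are illustrative; this is presumably what
// the fast-path allocation functions further below do):
//
//   Address new_top = alloc_info->top + size_in_bytes;
//   if (new_top <= alloc_info->limit) {
//     HeapObject* obj = HeapObject::FromAddress(alloc_info->top);
//     alloc_info->top = new_top;
//     // 'obj' is the newly allocated, still uninitialized, object.
//   }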
915
916
917// An abstraction of the accounting statistics of a page-structured space.
918// The 'capacity' of a space is the number of object-area bytes (i.e., not
919// including page bookkeeping structures) currently in the space. The 'size'
920// of a space is the number of allocated bytes, the 'waste' in the space is
921// the number of bytes that are not allocated and not available to
922// allocation without reorganizing the space via a GC (e.g., small blocks due
923// to internal fragmentation, top of page areas in map space), and
924// 'available' is the number of unallocated bytes that are not waste.  The
925// capacity is the sum of size, waste, and available.
926//
927// The stats are only set by functions that ensure they stay balanced. These
928// functions increase or decrease one of the non-capacity stats in
929// conjunction with capacity, or else they always balance increases and
930// decreases to the non-capacity stats.
931class AllocationStats BASE_EMBEDDED {
932 public:
933  AllocationStats() { Clear(); }
934
935  // Zero out all the allocation statistics (i.e., no capacity).
936  void Clear() {
937    capacity_ = 0;
938    available_ = 0;
939    size_ = 0;
940    waste_ = 0;
941  }
942
943  // Reset the allocation statistics (i.e., available = capacity with no
944  // wasted or allocated bytes).
945  void Reset() {
946    available_ = capacity_;
947    size_ = 0;
948    waste_ = 0;
949  }
950
951  // Accessors for the allocation statistics.
952  intptr_t Capacity() { return capacity_; }
953  intptr_t Available() { return available_; }
954  intptr_t Size() { return size_; }
955  intptr_t Waste() { return waste_; }
956
957  // Grow the space by adding available bytes.
958  void ExpandSpace(int size_in_bytes) {
959    capacity_ += size_in_bytes;
960    available_ += size_in_bytes;
961  }
962
963  // Shrink the space by removing available bytes.
964  void ShrinkSpace(int size_in_bytes) {
965    capacity_ -= size_in_bytes;
966    available_ -= size_in_bytes;
967  }
968
969  // Allocate from available bytes (available -> size).
970  void AllocateBytes(intptr_t size_in_bytes) {
971    available_ -= size_in_bytes;
972    size_ += size_in_bytes;
973  }
974
975  // Free allocated bytes, making them available (size -> available).
976  void DeallocateBytes(intptr_t size_in_bytes) {
977    size_ -= size_in_bytes;
978    available_ += size_in_bytes;
979  }
980
981  // Waste free bytes (available -> waste).
982  void WasteBytes(int size_in_bytes) {
983    available_ -= size_in_bytes;
984    waste_ += size_in_bytes;
985  }
986
987  // Consider the wasted bytes to be allocated, as they contain filler
988  // objects (waste -> size).
989  void FillWastedBytes(intptr_t size_in_bytes) {
990    waste_ -= size_in_bytes;
991    size_ += size_in_bytes;
992  }
993
994 private:
995  intptr_t capacity_;
996  intptr_t available_;
997  intptr_t size_;
998  intptr_t waste_;
999};
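
// An illustrative example of how an AllocationStats instance stays balanced:
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kObjectAreaSize);  // capacity and available grow
//   stats.AllocateBytes(128);                  // available -> size
//   stats.WasteBytes(32);                      // available -> waste
//
// At every point Capacity() == Size() + Waste() + Available().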
1000
1001
1002class PagedSpace : public Space {
1003 public:
1004  // Creates a space with a maximum capacity, and an id.
1005  PagedSpace(Heap* heap,
1006             intptr_t max_capacity,
1007             AllocationSpace id,
1008             Executability executable);
1009
1010  virtual ~PagedSpace() {}
1011
1012  // Set up the space using the given address range of virtual memory (from
1013  // the memory allocator's initial chunk) if possible.  If the block of
1014  // addresses is not big enough to contain a single page-aligned page, a
1015  // fresh chunk will be allocated.
1016  bool Setup(Address start, size_t size);
1017
1018  // Returns true if the space has been successfully set up and not
1019  // subsequently torn down.
1020  bool HasBeenSetup();
1021
1022  // Cleans up the space, frees all pages in this space except those belonging
1023  // to the initial chunk, uncommits addresses in the initial chunk.
1024  void TearDown();
1025
1026  // Checks whether an object/address is in this space.
1027  inline bool Contains(Address a);
1028  bool Contains(HeapObject* o) { return Contains(o->address()); }
1029  // Never crashes even if a is not a valid pointer.
1030  inline bool SafeContains(Address a);
1031
1032  // Given an address occupied by a live object, return that object if it is
1033  // in this space, or Failure::Exception() if it is not. The implementation
1034  // iterates over objects in the page containing the address; the cost is
1035  // linear in the number of objects in the page. It may be slow.
1036  MUST_USE_RESULT MaybeObject* FindObject(Address addr);
1037
1038  // Checks whether page is currently in use by this space.
1039  bool IsUsed(Page* page);
1040
1041  void MarkAllPagesClean();
1042
1043  // Prepares for a mark-compact GC.
1044  virtual void PrepareForMarkCompact(bool will_compact);
1045
1046  // The top of allocation in a page in this space. Undefined if page is unused.
1047  Address PageAllocationTop(Page* page) {
1048    return page == TopPageOf(allocation_info_) ? top()
1049        : PageAllocationLimit(page);
1050  }
1051
1052  // The limit of allocation for a page in this space.
1053  virtual Address PageAllocationLimit(Page* page) = 0;
1054
1055  void FlushTopPageWatermark() {
1056    AllocationTopPage()->SetCachedAllocationWatermark(top());
1057    AllocationTopPage()->InvalidateWatermark(true);
1058  }
1059
1060  // Current capacity without growing (Size() + Available() + Waste()).
1061  intptr_t Capacity() { return accounting_stats_.Capacity(); }
1062
1063  // Total amount of memory committed for this space.  For paged
1064  // spaces this equals the capacity.
1065  intptr_t CommittedMemory() { return Capacity(); }
1066
1067  // Available bytes without growing.
1068  intptr_t Available() { return accounting_stats_.Available(); }
1069
1070  // Allocated bytes in this space.
1071  virtual intptr_t Size() { return accounting_stats_.Size(); }
1072
1073  // Wasted bytes due to fragmentation and not recoverable until the
1074  // next GC of this space.
1075  intptr_t Waste() { return accounting_stats_.Waste(); }
1076
1077  // Returns the address of the first object in this space.
1078  Address bottom() { return first_page_->ObjectAreaStart(); }
1079
1080  // Returns the allocation pointer in this space.
1081  Address top() { return allocation_info_.top; }
1082
1083  // Allocate the requested number of bytes in the space if possible, return a
1084  // failure object if not.
1085  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
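
  // An illustrative sketch of the usual allocation idiom around AllocateRaw
  // ('space', 'size_in_bytes', and 'result' are illustrative; this assumes the
  // standard MaybeObject::ToObject() failure-propagation protocol):
  //
  //   Object* result;
  //   { MaybeObject* maybe_result = space->AllocateRaw(size_in_bytes);
  //     if (!maybe_result->ToObject(&result)) return maybe_result;
  //   }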
1086
1087  // Allocate the requested number of bytes for relocation during mark-compact
1088  // collection.
1089  MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
1090
1091  virtual bool ReserveSpace(int bytes);
1092
1093  // Used by ReserveSpace.
1094  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
1095
1096  // Free all pages in range from prev (exclusive) to last (inclusive).
1097  // Freed pages are moved to the end of page list.
1098  void FreePages(Page* prev, Page* last);
1099
1100  // Deallocates a block.
1101  virtual void DeallocateBlock(Address start,
1102                               int size_in_bytes,
1103                               bool add_to_freelist) = 0;
1104
1105  // Set space allocation info.
1106  void SetTop(Address top) {
1107    allocation_info_.top = top;
1108    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
1109  }
1110
1111  // ---------------------------------------------------------------------------
1112  // Mark-compact collection support functions
1113
1114  // Set the relocation point to the beginning of the space.
1115  void MCResetRelocationInfo();
1116
1117  // Writes relocation info to the top page.
1118  void MCWriteRelocationInfoToPage() {
1119    TopPageOf(mc_forwarding_info_)->
1120        SetAllocationWatermark(mc_forwarding_info_.top);
1121  }
1122
1123  // Computes the offset of a given address in this space from the beginning
1124  // of the space.
1125  int MCSpaceOffsetForAddress(Address addr);
1126
1127  // Updates the allocation pointer to the relocation top after a mark-compact
1128  // collection.
1129  virtual void MCCommitRelocationInfo() = 0;
1130
1131  // Releases half of unused pages.
1132  void Shrink();
1133
1134  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
1135  bool EnsureCapacity(int capacity);
1136
1137#ifdef DEBUG
1138  // Print meta info and objects in this space.
1139  virtual void Print();
1140
1141  // Verify integrity of this space.
1142  virtual void Verify(ObjectVisitor* visitor);
1143
1144  // Overridden by subclasses to verify space-specific object
1145  // properties (e.g., only maps or free-list nodes are in map space).
1146  virtual void VerifyObject(HeapObject* obj) {}
1147
1148  // Report code object related statistics
1149  void CollectCodeStatistics();
1150  static void ReportCodeStatistics();
1151  static void ResetCodeStatistics();
1152#endif
1153
1154  // Returns the page of the allocation pointer.
1155  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
1156
1157  void RelinkPageListInChunkOrder(bool deallocate_blocks);
1158
1159 protected:
1160  // Maximum capacity of this space.
1161  intptr_t max_capacity_;
1162
1163  // Accounting information for this space.
1164  AllocationStats accounting_stats_;
1165
1166  // The first page in this space.
1167  Page* first_page_;
1168
1169  // The last page in this space.  Initially set in Setup, updated in
1170  // Expand and Shrink.
1171  Page* last_page_;
1172
1173  // True if pages owned by this space are linked in chunk-order.
1174  // See comment for class MemoryAllocator for definition of chunk-order.
1175  bool page_list_is_chunk_ordered_;
1176
1177  // Normal allocation information.
1178  AllocationInfo allocation_info_;
1179
1180  // Relocation information during mark-compact collections.
1181  AllocationInfo mc_forwarding_info_;
1182
1183  // Bytes of each page that cannot be allocated.  Possibly non-zero
1184  // for pages in spaces with only fixed-size objects.  Always zero
1185  // for pages in spaces with variable sized objects (those pages are
1186  // padded with free-list nodes).
1187  int page_extra_;
1188
1189  // Sets allocation pointer to a page bottom.
1190  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
1191
1192  // Returns the top page specified by an allocation info structure.
1193  static Page* TopPageOf(AllocationInfo alloc_info) {
1194    return Page::FromAllocationTop(alloc_info.limit);
1195  }
1196
1197  int CountPagesToTop() {
1198    Page* p = Page::FromAllocationTop(allocation_info_.top);
1199    PageIterator it(this, PageIterator::ALL_PAGES);
1200    int counter = 1;
1201    while (it.has_next()) {
1202      if (it.next() == p) return counter;
1203      counter++;
1204    }
1205    UNREACHABLE();
1206    return -1;
1207  }
1208
1209  // Expands the space by allocating a fixed number of pages. Returns false if
1210  // it cannot allocate the requested number of pages from the OS. Newly
1211  // allocated pages are appended after last_page.
1212  bool Expand(Page* last_page);
1213
1214  // Generic fast case allocation function that tries linear allocation in
1215  // the top page of 'alloc_info'.  Returns NULL on failure.
1216  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
1217                                      int size_in_bytes);
1218
1219  // During normal allocation or deserialization, roll to the next page in
1220  // the space (there is assumed to be one) and allocate there.  This
1221  // function is space-dependent.
1222  virtual HeapObject* AllocateInNextPage(Page* current_page,
1223                                         int size_in_bytes) = 0;
1224
1225  // Slow path of AllocateRaw.  This function is space-dependent.
1226  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
1227
1228  // Slow path of MCAllocateRaw.
1229  MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
1230
1231#ifdef DEBUG
1232  // Returns the number of total pages in this space.
1233  int CountTotalPages();
1234#endif
1235 private:
1236
1237  // Returns a pointer to the page of the relocation pointer.
1238  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
1239
1240  friend class PageIterator;
1241};
1242
1243
1244class NumberAndSizeInfo BASE_EMBEDDED {
1245 public:
1246  NumberAndSizeInfo() : number_(0), bytes_(0) {}
1247
1248  int number() const { return number_; }
1249  void increment_number(int num) { number_ += num; }
1250
1251  int bytes() const { return bytes_; }
1252  void increment_bytes(int size) { bytes_ += size; }
1253
1254  void clear() {
1255    number_ = 0;
1256    bytes_ = 0;
1257  }
1258
1259 private:
1260  int number_;
1261  int bytes_;
1262};
1263
1264
1265// HistogramInfo class for recording a single "bar" of a histogram.  This
1266// class is used for collecting statistics to print to the log file.
1267class HistogramInfo: public NumberAndSizeInfo {
1268 public:
1269  HistogramInfo() : NumberAndSizeInfo() {}
1270
1271  const char* name() { return name_; }
1272  void set_name(const char* name) { name_ = name; }
1273
1274 private:
1275  const char* name_;
1276};
1277
1278
1279// -----------------------------------------------------------------------------
1280// SemiSpace in young generation
1281//
1282// A semispace is a contiguous chunk of memory. The mark-compact collector
1283// uses the memory in the from space as a marking stack when tracing live
1284// objects.
1285
1286class SemiSpace : public Space {
1287 public:
1288  // Constructor.
1289  explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
1290    start_ = NULL;
1291    age_mark_ = NULL;
1292  }
1293
1294  // Sets up the semispace using the given chunk.
1295  bool Setup(Address start, int initial_capacity, int maximum_capacity);
1296
1297  // Tear down the space.  Heap memory was not allocated by the space, so it
1298  // is not deallocated here.
1299  void TearDown();
1300
1301  // True if the space has been set up but not torn down.
1302  bool HasBeenSetup() { return start_ != NULL; }
1303
1304  // Grow the size of the semispace by committing extra virtual memory.
1305  // Assumes that the caller has checked that the semispace has not reached
1306  // its maximum capacity (and thus there is space available in the reserved
1307  // address range to grow).
1308  bool Grow();
1309
1310  // Grow the semispace to the new capacity.  The new capacity
1311  // requested must be larger than the current capacity.
1312  bool GrowTo(int new_capacity);
1313
1314  // Shrinks the semispace to the new capacity.  The new capacity
1315  // requested must be more than the amount of used memory in the
1316  // semispace and less than the current capacity.
1317  bool ShrinkTo(int new_capacity);
1318
1319  // Returns the start address of the space.
1320  Address low() { return start_; }
1321  // Returns one past the end address of the space.
1322  Address high() { return low() + capacity_; }
1323
1324  // Age mark accessors.
1325  Address age_mark() { return age_mark_; }
1326  void set_age_mark(Address mark) { age_mark_ = mark; }
1327
1328  // True if the address is in the address range of this semispace (not
1329  // necessarily below the allocation pointer).
1330  bool Contains(Address a) {
1331    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1332           == reinterpret_cast<uintptr_t>(start_);
1333  }
1334
1335  // True if the object is a heap object in the address range of this
1336  // semispace (not necessarily below the allocation pointer).
1337  bool Contains(Object* o) {
1338    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1339  }
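
  // The containment checks above avoid a range comparison: the reserved
  // semispace range is a power-of-two sized block whose start address is
  // aligned to that size, so a single mask suffices. The masks are presumably
  // set up in Setup() roughly as follows:
  //
  //   address_mask_    = ~(maximum_capacity - 1);
  //   object_mask_     = address_mask_ | kHeapObjectTagMask;
  //   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;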
1340
1341  // The offset of an address from the beginning of the space.
1342  int SpaceOffsetForAddress(Address addr) {
1343    return static_cast<int>(addr - low());
1344  }
1345
1346  // If we don't have these here then SemiSpace will be abstract.  However
1347  // they should never be called.
1348  virtual intptr_t Size() {
1349    UNREACHABLE();
1350    return 0;
1351  }
1352
1353  virtual bool ReserveSpace(int bytes) {
1354    UNREACHABLE();
1355    return false;
1356  }
1357
1358  bool is_committed() { return committed_; }
1359  bool Commit();
1360  bool Uncommit();
1361
1362#ifdef DEBUG
1363  virtual void Print();
1364  virtual void Verify();
1365#endif
1366
1367  // Returns the current capacity of the semi space.
1368  int Capacity() { return capacity_; }
1369
1370  // Returns the maximum capacity of the semi space.
1371  int MaximumCapacity() { return maximum_capacity_; }
1372
1373  // Returns the initial capacity of the semi space.
1374  int InitialCapacity() { return initial_capacity_; }
1375
1376 private:
1377  // The current and maximum capacity of the space.
1378  int capacity_;
1379  int maximum_capacity_;
1380  int initial_capacity_;
1381
1382  // The start address of the space.
1383  Address start_;
1384  // Used to govern object promotion during mark-compact collection.
1385  Address age_mark_;
1386
1387  // Masks and comparison values to test for containment in this semispace.
1388  uintptr_t address_mask_;
1389  uintptr_t object_mask_;
1390  uintptr_t object_expected_;
1391
1392  bool committed_;
1393
1394 public:
1395  TRACK_MEMORY("SemiSpace")
1396};
1397
1398
1399// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1400// semispace of the heap's new space.  It iterates over the objects in the
1401// semispace from a given start address (defaulting to the bottom of the
1402// semispace) to the top of the semispace.  New objects allocated after the
1403// iterator is created are not iterated.
1404class SemiSpaceIterator : public ObjectIterator {
1405 public:
1406  // Create an iterator over the objects in the given space.  If no start
1407  // address is given, the iterator starts from the bottom of the space.  If
1408  // no size function is given, the iterator calls HeapObject::Size().
1409  explicit SemiSpaceIterator(NewSpace* space);
1410  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1411  SemiSpaceIterator(NewSpace* space, Address start);
1412
1413  HeapObject* next() {
1414    if (current_ == limit_) return NULL;
1415
1416    HeapObject* object = HeapObject::FromAddress(current_);
1417    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1418
1419    current_ += size;
1420    return object;
1421  }
1422
1423  // Implementation of the ObjectIterator functions.
1424  virtual HeapObject* next_object() { return next(); }
1425
1426 private:
1427  void Initialize(NewSpace* space, Address start, Address end,
1428                  HeapObjectCallback size_func);
1429
1430  // The semispace.
1431  SemiSpace* space_;
1432  // The current iteration point.
1433  Address current_;
1434  // The end of iteration.
1435  Address limit_;
1436  // The callback function.
1437  HeapObjectCallback size_func_;
1438};
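
// A minimal usage sketch for SemiSpaceIterator (the caller code and the name
// "new_space" are hypothetical):
//
//   SemiSpaceIterator it(new_space);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // "obj" is an object in the active semispace that was allocated
//     // before the iterator was created.
//   }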
1439
1440
1441// -----------------------------------------------------------------------------
1442// The young generation space.
1443//
1444// The new space consists of a contiguous pair of semispaces.  It simply
1445// forwards most functions to the appropriate semispace.
1446
1447class NewSpace : public Space {
1448 public:
1449  // Constructor.
1450  explicit NewSpace(Heap* heap)
1451    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
1452      to_space_(heap),
1453      from_space_(heap) {}
1454
1455  // Sets up the new space using the given chunk.
1456  bool Setup(Address start, int size);
1457
1458  // Tears down the space.  Heap memory was not allocated by the space, so it
1459  // is not deallocated here.
1460  void TearDown();
1461
1462  // True if the space has been set up but not torn down.
1463  bool HasBeenSetup() {
1464    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
1465  }
1466
1467  // Flip the pair of spaces.
1468  void Flip();
1469
1470  // Grow the capacity of the semispaces.  Assumes that they are not at
1471  // their maximum capacity.
1472  void Grow();
1473
1474  // Shrink the capacity of the semispaces.
1475  void Shrink();
1476
1477  // True if the address or object lies in the address range of either
1478  // semispace (not necessarily below the allocation pointer).
1479  bool Contains(Address a) {
1480    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1481        == reinterpret_cast<uintptr_t>(start_);
1482  }
1483  bool Contains(Object* o) {
1484    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1485  }
1486
1487  // Return the allocated bytes in the active semispace.
1488  virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
1489  // The same, but returning an int.  We have to keep the intptr_t version
1490  // because it is inherited, but when we know we are dealing with the new
1491  // space, which cannot grow as large as the other spaces, an int suffices:
1492  int SizeAsInt() { return static_cast<int>(Size()); }
1493
1494  // Return the current capacity of a semispace.
1495  intptr_t Capacity() {
1496    ASSERT(to_space_.Capacity() == from_space_.Capacity());
1497    return to_space_.Capacity();
1498  }
1499
1500  // Return the total amount of memory committed for new space.
1501  intptr_t CommittedMemory() {
1502    if (from_space_.is_committed()) return 2 * Capacity();
1503    return Capacity();
1504  }
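  // For example (the capacity value is hypothetical): with a 1 MB semispace
  // capacity, CommittedMemory() reports 2 MB while from-space is committed
  // and 1 MB once UncommitFromSpace() has released it.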
1505
1506  // Return the available bytes without growing in the active semispace.
1507  intptr_t Available() { return Capacity() - Size(); }
1508
1509  // Return the maximum capacity of a semispace.
1510  int MaximumCapacity() {
1511    ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
1512    return to_space_.MaximumCapacity();
1513  }
1514
1515  // Returns the initial capacity of a semispace.
1516  int InitialCapacity() {
1517    ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
1518    return to_space_.InitialCapacity();
1519  }
1520
1521  // Return the address of the allocation pointer in the active semispace.
1522  Address top() { return allocation_info_.top; }
1523  // Return the address of the first object in the active semispace.
1524  Address bottom() { return to_space_.low(); }
1525
1526  // Get the age mark of the inactive semispace.
1527  Address age_mark() { return from_space_.age_mark(); }
1528  // Set the age mark in the active semispace.
1529  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
1530
1531  // The start address of the space and a bit mask. ANDing an address in the
1532  // new space with the mask will result in the start address.
1533  Address start() { return start_; }
1534  uintptr_t mask() { return address_mask_; }
1535
1536  // The allocation top and limit addresses.
1537  Address* allocation_top_address() { return &allocation_info_.top; }
1538  Address* allocation_limit_address() { return &allocation_info_.limit; }
1539
1540  MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
1541    return AllocateRawInternal(size_in_bytes, &allocation_info_);
1542  }
1543
1544  // Allocate the requested number of bytes for relocation during mark-compact
1545  // collection.
1546  MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
1547    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
1548  }
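  // A usage sketch for the allocation entry points above (the caller code is
  // hypothetical; "new_space" and "size_in_bytes" are assumed names, and the
  // MaybeObject unwrapping shown is one common pattern, not the only one):
  //
  //   Object* result;
  //   { MaybeObject* maybe = new_space->AllocateRaw(size_in_bytes);
  //     if (!maybe->ToObject(&result)) return maybe;  // propagate the failure
  //   }
  //   HeapObject* object = HeapObject::cast(result);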
1549
1550  // Reset the allocation pointer to the beginning of the active semispace.
1551  void ResetAllocationInfo();
1552  // Reset the relocation pointer to the bottom of the inactive semispace in
1553  // preparation for mark-compact collection.
1554  void MCResetRelocationInfo();
1555  // Update the allocation pointer in the active semispace after a
1556  // mark-compact collection.
1557  void MCCommitRelocationInfo();
1558
1559  // Get the extent of the inactive semispace (for use as a marking stack).
1560  Address FromSpaceLow() { return from_space_.low(); }
1561  Address FromSpaceHigh() { return from_space_.high(); }
1562
1563  // Get the extent of the active semispace (to sweep newly copied objects
1564  // during a scavenge collection).
1565  Address ToSpaceLow() { return to_space_.low(); }
1566  Address ToSpaceHigh() { return to_space_.high(); }
1567
1568  // Offsets from the beginning of the semispaces.
1569  int ToSpaceOffsetForAddress(Address a) {
1570    return to_space_.SpaceOffsetForAddress(a);
1571  }
1572  int FromSpaceOffsetForAddress(Address a) {
1573    return from_space_.SpaceOffsetForAddress(a);
1574  }
1575
1576  // True if the object is a heap object in the address range of the
1577  // respective semispace (not necessarily below the allocation pointer of the
1578  // semispace).
1579  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
1580  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
1581
1582  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
1583  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
1584
1585  virtual bool ReserveSpace(int bytes);
1586
1587  // Resizes a sequential string, which must be the most recently allocated
1588  // object in new space.
1589  template <typename StringType>
1590  inline void ShrinkStringAtAllocationBoundary(String* string, int len);
1591
1592#ifdef DEBUG
1593  // Verify the active semispace.
1594  virtual void Verify();
1595  // Print the active semispace.
1596  virtual void Print() { to_space_.Print(); }
1597#endif
1598
1599  // Iterates the active semispace to collect statistics.
1600  void CollectStatistics();
1601  // Reports previously collected statistics of the active semispace.
1602  void ReportStatistics();
1603  // Clears previously collected statistics.
1604  void ClearHistograms();
1605
1606  // Record the allocation or promotion of a heap object.  Note that we don't
1607  // record every single allocation, but only those that happen in the
1608  // to space during a scavenge GC.
1609  void RecordAllocation(HeapObject* obj);
1610  void RecordPromotion(HeapObject* obj);
1611
1612  // Return whether the operation succeeded.
1613  bool CommitFromSpaceIfNeeded() {
1614    if (from_space_.is_committed()) return true;
1615    return from_space_.Commit();
1616  }
1617
1618  bool UncommitFromSpace() {
1619    if (!from_space_.is_committed()) return true;
1620    return from_space_.Uncommit();
1621  }
1622
1623 private:
1624  // The semispaces.
1625  SemiSpace to_space_;
1626  SemiSpace from_space_;
1627
1628  // Start address and bit mask for containment testing.
1629  Address start_;
1630  uintptr_t address_mask_;
1631  uintptr_t object_mask_;
1632  uintptr_t object_expected_;
1633
1634  // Allocation pointer and limit for normal allocation and allocation during
1635  // mark-compact collection.
1636  AllocationInfo allocation_info_;
1637  AllocationInfo mc_forwarding_info_;
1638
1639  HistogramInfo* allocated_histogram_;
1640  HistogramInfo* promoted_histogram_;
1641
1642  // Implementation of AllocateRaw and MCAllocateRaw.
1643  MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
1644      int size_in_bytes,
1645      AllocationInfo* alloc_info);
1646
1647  friend class SemiSpaceIterator;
1648
1649 public:
1650  TRACK_MEMORY("NewSpace")
1651};
1652
1653
1654// -----------------------------------------------------------------------------
1655// Free lists for old object spaces
1656//
1657// Free-list nodes are free blocks in the heap.  They look like heap objects
1658// (free-list node pointers have the heap object tag, and they have a map like
1659// a heap object).  They have a size and a next pointer.  The next pointer is
1660// the raw address of the next free list node (or NULL).
1661class FreeListNode: public HeapObject {
1662 public:
1663  // Obtain a free-list node from a raw address.  This is not a cast because
1664  // it neither checks nor requires that the first word at the address is a
1665  // map pointer.
1666  static FreeListNode* FromAddress(Address address) {
1667    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1668  }
1669
1670  static inline bool IsFreeListNode(HeapObject* object);
1671
1672  // Set the size in bytes, which can be read with HeapObject::Size().  This
1673  // function also writes a map to the first word of the block so that it
1674  // looks like a heap object to the garbage collector and heap iteration
1675  // functions.
1676  void set_size(Heap* heap, int size_in_bytes);
1677
1678  // Accessors for the next field.
1679  inline Address next(Heap* heap);
1680  inline void set_next(Heap* heap, Address next);
1681
1682 private:
1683  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
1684
1685  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1686};
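
// A minimal sketch of how a freed block becomes a free-list node (the names
// "heap", "addr" and "old_head" and the 48-byte size are hypothetical):
//
//   FreeListNode* node = FreeListNode::FromAddress(addr);
//   node->set_size(heap, 48);        // also writes a map into the first word
//   node->set_next(heap, old_head);  // link in front of the previous head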
1687
1688
1689// The free list for the old space.
1690class OldSpaceFreeList BASE_EMBEDDED {
1691 public:
1692  OldSpaceFreeList(Heap* heap, AllocationSpace owner);
1693
1694  // Clear the free list.
1695  void Reset();
1696
1697  // Return the number of bytes available on the free list.
1698  intptr_t available() { return available_; }
1699
1700  // Place a node on the free list.  The block of size 'size_in_bytes'
1701  // starting at 'start' is placed on the free list.  The return value is the
1702  // number of bytes that have been lost due to internal fragmentation by
1703  // freeing the block.  Bookkeeping information will be written to the block,
1704  // i.e., its contents will be destroyed.  The start address should be word
1705  // aligned, and the size should be a non-zero multiple of the word size.
1706  int Free(Address start, int size_in_bytes);
1707
1708  // Allocate a block of size 'size_in_bytes' from the free list.  The block
1709  // is uninitialized.  A failure is returned if no block is available.  The
1710  // number of bytes lost to fragmentation is returned in the output parameter
1711  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
1712  MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
1713
1714  void MarkNodes();
1715
1716 private:
1717  // The size range of blocks, in bytes. (Smaller allocations are allowed, but
1718  // will always result in waste.)
1719  static const int kMinBlockSize = 2 * kPointerSize;
1720  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1721
1722  Heap* heap_;
1723
1724  // The identity of the owning space, for building allocation Failure
1725  // objects.
1726  AllocationSpace owner_;
1727
1728  // Total available bytes in all blocks on this free list.
1729  int available_;
1730
1731  // Blocks are put on exact free lists in an array, indexed by size in words.
1732  // The available sizes are kept in a list sorted by increasing size. Entries
1733  // corresponding to sizes < kMinBlockSize always have an empty free list
1734  // (but index kHead is used for the head of the size list).
1735  struct SizeNode {
1736    // Address of the head FreeListNode of the implied block size or NULL.
1737    Address head_node_;
1738    // Size (words) of the next larger available size if head_node_ != NULL.
1739    int next_size_;
1740  };
1741  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
1742  SizeNode free_[kFreeListsLength];
1743
1744  // Sentinels for the size list.  Real elements lie strictly between them.
1745  static const int kHead = kMinBlockSize / kPointerSize - 1;
1746  static const int kEnd = kMaxInt;
1747
1748  // We keep a "finger" in the size list to speed up a common pattern:
1749  // repeated requests for the same or increasing sizes.
1750  int finger_;
1751
1752  // Starting from *prev, find and return the smallest size >= index (words),
1753  // or kEnd. Update *prev to be the largest size < index, or kHead.
1754  int FindSize(int index, int* prev) {
1755    int cur = free_[*prev].next_size_;
1756    while (cur < index) {
1757      *prev = cur;
1758      cur = free_[cur].next_size_;
1759    }
1760    return cur;
1761  }
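  // Worked example for FindSize (block sizes are hypothetical): suppose blocks
  // of 3, 5 and 9 words are on the free list, so the size list is
  //   free_[kHead].next_size_ == 3, free_[3].next_size_ == 5,
  //   free_[5].next_size_ == 9 and free_[9].next_size_ == kEnd.
  // FindSize(4, &prev) starting with *prev == kHead steps past 3 (3 < 4),
  // leaves *prev == 3, and returns 5, the smallest listed size >= 4 words.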
1762
1763  // Remove an existing element from the size list.
1764  void RemoveSize(int index) {
1765    int prev = kHead;
1766    int cur = FindSize(index, &prev);
1767    ASSERT(cur == index);
1768    free_[prev].next_size_ = free_[cur].next_size_;
1769    finger_ = prev;
1770  }
1771
1772  // Insert a new element into the size list.
1773  void InsertSize(int index) {
1774    int prev = kHead;
1775    int cur = FindSize(index, &prev);
1776    ASSERT(cur != index);
1777    free_[prev].next_size_ = index;
1778    free_[index].next_size_ = cur;
1779  }
1780
1781  // The size list is not updated during a sequence of calls to Free, but is
1782  // rebuilt before the next allocation.
1783  void RebuildSizeList();
1784  bool needs_rebuild_;
1785
1786#ifdef DEBUG
1787  // Does this free list contain a free block located at the address of 'node'?
1788  bool Contains(FreeListNode* node);
1789#endif
1790
1791  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
1792};
1793
1794
1795// The free list for the map space.
1796class FixedSizeFreeList BASE_EMBEDDED {
1797 public:
1798  FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
1799
1800  // Clear the free list.
1801  void Reset();
1802
1803  // Return the number of bytes available on the free list.
1804  intptr_t available() { return available_; }
1805
1806  // Place a node on the free list.  The block starting at 'start' (assumed to
1807  // have size object_size_) is placed on the free list.  Bookkeeping
1808  // information will be written to the block, i.e., its contents will be
1809  // destroyed.  The start address should be word aligned.
1810  void Free(Address start);
1811
1812  // Allocate a fixed-size block from the free list.  The block is
1813  // uninitialized.  A failure is returned if no block is available.
1814  MUST_USE_RESULT MaybeObject* Allocate();
1815
1816  void MarkNodes();
1817
1818 private:
1819
1820  Heap* heap_;
1821
1822  // Available bytes on the free list.
1823  intptr_t available_;
1824
1825  // The head of the free list.
1826  Address head_;
1827
1828  // The tail of the free list.
1829  Address tail_;
1830
1831  // The identity of the owning space, for building allocation Failure
1832  // objects.
1833  AllocationSpace owner_;
1834
1835  // The size of the objects in this space.
1836  int object_size_;
1837
1838  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
1839};
1840
1841
1842// -----------------------------------------------------------------------------
1843// Old object space (excluding map objects)
1844
1845class OldSpace : public PagedSpace {
1846 public:
1847  // Creates an old space object with a given maximum capacity.
1848  // The constructor does not allocate pages from OS.
1849  OldSpace(Heap* heap,
1850           intptr_t max_capacity,
1851           AllocationSpace id,
1852           Executability executable)
1853      : PagedSpace(heap, max_capacity, id, executable),
1854        free_list_(heap, id) {
1855    page_extra_ = 0;
1856  }
1857
1858  // The bytes available on the free list (i.e., not above the linear
1859  // allocation pointer).
1860  intptr_t AvailableFree() { return free_list_.available(); }
1861
1862  // The limit of allocation for a page in this space.
1863  virtual Address PageAllocationLimit(Page* page) {
1864    return page->ObjectAreaEnd();
1865  }
1866
1867  // Give a block of memory to the space's free list.  It might be added to
1868  // the free list or accounted as waste.
1869  // If add_to_freelist is false, only the accounting stats are updated and
1870  // no attempt is made to add the area to the free list.
1871  void Free(Address start, int size_in_bytes, bool add_to_freelist) {
1872    accounting_stats_.DeallocateBytes(size_in_bytes);
1873
1874    if (add_to_freelist) {
1875      int wasted_bytes = free_list_.Free(start, size_in_bytes);
1876      accounting_stats_.WasteBytes(wasted_bytes);
1877    }
1878  }
1879
1880  virtual void DeallocateBlock(Address start,
1881                               int size_in_bytes,
1882                               bool add_to_freelist);
1883
1884  // Prepare for full garbage collection.  Resets the relocation pointer and
1885  // clears the free list.
1886  virtual void PrepareForMarkCompact(bool will_compact);
1887
1888  // Updates the allocation pointer to the relocation top after a mark-compact
1889  // collection.
1890  virtual void MCCommitRelocationInfo();
1891
1892  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1893
1894  void MarkFreeListNodes() { free_list_.MarkNodes(); }
1895
1896#ifdef DEBUG
1897  // Reports statistics for the space.
1898  void ReportStatistics();
1899#endif
1900
1901 protected:
1902  // Virtual function in the superclass.  Slow path of AllocateRaw.
1903  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
1904
1905  // Virtual function in the superclass.  Allocate linearly at the start of
1906  // the page after current_page (there is assumed to be one).
1907  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1908
1909 private:
1910  // The space's free list.
1911  OldSpaceFreeList free_list_;
1912
1913 public:
1914  TRACK_MEMORY("OldSpace")
1915};
1916
1917
1918// -----------------------------------------------------------------------------
1919// Old space for objects of a fixed size
1920
1921class FixedSpace : public PagedSpace {
1922 public:
1923  FixedSpace(Heap* heap,
1924             intptr_t max_capacity,
1925             AllocationSpace id,
1926             int object_size_in_bytes,
1927             const char* name)
1928      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
1929        object_size_in_bytes_(object_size_in_bytes),
1930        name_(name),
1931        free_list_(heap, id, object_size_in_bytes) {
1932    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1933  }
1934
1935  // The limit of allocation for a page in this space.
1936  virtual Address PageAllocationLimit(Page* page) {
1937    return page->ObjectAreaEnd() - page_extra_;
1938  }
1939
1940  int object_size_in_bytes() { return object_size_in_bytes_; }
1941
1942  // Give a fixed sized block of memory to the space's free list.
1943  // If add_to_freelist is false, only the accounting stats are updated and
1944  // no attempt is made to add the area to the free list.
1945  void Free(Address start, bool add_to_freelist) {
1946    if (add_to_freelist) {
1947      free_list_.Free(start);
1948    }
1949    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
1950  }
1951
1952  // Prepares for a mark-compact GC.
1953  virtual void PrepareForMarkCompact(bool will_compact);
1954
1955  // Updates the allocation pointer to the relocation top after a mark-compact
1956  // collection.
1957  virtual void MCCommitRelocationInfo();
1958
1959  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1960
1961  virtual void DeallocateBlock(Address start,
1962                               int size_in_bytes,
1963                               bool add_to_freelist);
1964
1965  void MarkFreeListNodes() { free_list_.MarkNodes(); }
1966
1967#ifdef DEBUG
1968  // Reports statistics for the space.
1969  void ReportStatistics();
1970#endif
1971
1972 protected:
1973  // Virtual function in the superclass.  Slow path of AllocateRaw.
1974  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
1975
1976  // Virtual function in the superclass.  Allocate linearly at the start of
1977  // the page after current_page (there is assumed to be one).
1978  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1979
1980  void ResetFreeList() {
1981    free_list_.Reset();
1982  }
1983
1984 private:
1985  // The size of objects in this space.
1986  int object_size_in_bytes_;
1987
1988  // The name of this space.
1989  const char* name_;
1990
1991  // The space's free list.
1992  FixedSizeFreeList free_list_;
1993};
1994
1995
1996// -----------------------------------------------------------------------------
1997// Old space for all map objects
1998
1999class MapSpace : public FixedSpace {
2000 public:
2001  // Creates a map space object with a maximum capacity.
2002  MapSpace(Heap* heap,
2003           intptr_t max_capacity,
2004           int max_map_space_pages,
2005           AllocationSpace id)
2006      : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
2007        max_map_space_pages_(max_map_space_pages) {
2008    ASSERT(max_map_space_pages < kMaxMapPageIndex);
2009  }
2010
2011  // Prepares for a mark-compact GC.
2012  virtual void PrepareForMarkCompact(bool will_compact);
2013
2014  // Given an index, returns the page address.
2015  Address PageAddress(int page_index) { return page_addresses_[page_index]; }
2016
2017  static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
2018
2019  // Are map pointers encodable into the map word?
2020  bool MapPointersEncodable() {
2021    if (!FLAG_use_big_map_space) {
2022      ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
2023      return true;
2024    }
2025    return CountPagesToTop() <= max_map_space_pages_;
2026  }
2027
2028  // Should be called after a forced sweep to find out if the map space needs
2029  // compaction.
2030  bool NeedsCompaction(int live_maps) {
2031    return !MapPointersEncodable() && live_maps <= CompactionThreshold();
2032  }
2033
2034  Address TopAfterCompaction(int live_maps) {
2035    ASSERT(NeedsCompaction(live_maps));
2036
2037    int pages_left = live_maps / kMapsPerPage;
2038    PageIterator it(this, PageIterator::ALL_PAGES);
2039    while (pages_left-- > 0) {
2040      ASSERT(it.has_next());
2041      it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
2042    }
2043    ASSERT(it.has_next());
2044    Page* top_page = it.next();
2045    top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
2046    ASSERT(top_page->is_valid());
2047
2048    int offset = live_maps % kMapsPerPage * Map::kSize;
2049    Address top = top_page->ObjectAreaStart() + offset;
2050    ASSERT(top < top_page->ObjectAreaEnd());
2051    ASSERT(Contains(top));
2052
2053    return top;
2054  }
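  // Worked example with hypothetical numbers (the real kMapsPerPage follows
  // from Page::kObjectAreaSize and Map::kSize and is not assumed here): if
  // kMapsPerPage were 80 and live_maps == 250, then 250 / 80 == 3 full pages
  // are skipped and the returned top lies 250 % 80 == 10 maps, i.e.
  // 10 * Map::kSize bytes, past the object area start of the fourth page.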
2055
2056  void FinishCompaction(Address new_top, int live_maps) {
2057    Page* top_page = Page::FromAddress(new_top);
2058    ASSERT(top_page->is_valid());
2059
2060    SetAllocationInfo(&allocation_info_, top_page);
2061    allocation_info_.top = new_top;
2062
2063    int new_size = live_maps * Map::kSize;
2064    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
2065    accounting_stats_.AllocateBytes(new_size);
2066
2067    // Flush allocation watermarks.
2068    for (Page* p = first_page_; p != top_page; p = p->next_page()) {
2069      p->SetAllocationWatermark(p->AllocationTop());
2070    }
2071    top_page->SetAllocationWatermark(new_top);
2072
2073#ifdef DEBUG
2074    if (FLAG_enable_slow_asserts) {
2075      intptr_t actual_size = 0;
2076      for (Page* p = first_page_; p != top_page; p = p->next_page())
2077        actual_size += kMapsPerPage * Map::kSize;
2078      actual_size += (new_top - top_page->ObjectAreaStart());
2079      ASSERT(accounting_stats_.Size() == actual_size);
2080    }
2081#endif
2082
2083    Shrink();
2084    ResetFreeList();
2085  }
2086
2087 protected:
2088#ifdef DEBUG
2089  virtual void VerifyObject(HeapObject* obj);
2090#endif
2091
2092 private:
2093  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
2094
2095  // Compact the map space only if there is at least a one-page gap.
2096  int CompactionThreshold() {
2097    return kMapsPerPage * (max_map_space_pages_ - 1);
2098  }
2099
2100  const int max_map_space_pages_;
2101
2102  // An array of page start addresses in the map space.
2103  Address page_addresses_[kMaxMapPageIndex];
2104
2105 public:
2106  TRACK_MEMORY("MapSpace")
2107};
2108
2109
2110// -----------------------------------------------------------------------------
2111// Old space for all global object property cell objects
2112
2113class CellSpace : public FixedSpace {
2114 public:
2115  // Creates a property cell space object with a maximum capacity.
2116  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2117      : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
2118  {}
2119
2120 protected:
2121#ifdef DEBUG
2122  virtual void VerifyObject(HeapObject* obj);
2123#endif
2124
2125 public:
2126  TRACK_MEMORY("CellSpace")
2127};
2128
2129
2130// -----------------------------------------------------------------------------
2131// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2132  // the large object space. A large object is allocated from the OS heap with
2133  // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2134  // A large object always starts at Page::kObjectStartOffset into its page.
2135// Large objects do not move during garbage collections.
2136
2137// A LargeObjectChunk holds exactly one large object page with exactly one
2138// large object.
2139class LargeObjectChunk {
2140 public:
2141  // Allocates a new LargeObjectChunk containing a large object page
2142  // (Page::kPageSize aligned) with at least size_in_bytes available for the
2143  // large object after the object area start of that page.
2144  static LargeObjectChunk* New(int size_in_bytes, Executability executable);
2145
2146  // Free the memory associated with the chunk.
2147  void Free(Executability executable);
2148
2149  // Interpret a raw address as a large object chunk.
2150  static LargeObjectChunk* FromAddress(Address address) {
2151    return reinterpret_cast<LargeObjectChunk*>(address);
2152  }
2153
2154  // Returns the address of this chunk.
2155  Address address() { return reinterpret_cast<Address>(this); }
2156
2157  Page* GetPage() {
2158    return Page::FromAddress(RoundUp(address(), Page::kPageSize));
2159  }
2160
2161  // Accessors for the fields of the chunk.
2162  LargeObjectChunk* next() { return next_; }
2163  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
2164  size_t size() { return size_ & ~Page::kPageFlagMask; }
2165
2166  // Compute the start address in the chunk.
2167  Address GetStartAddress() { return GetPage()->ObjectAreaStart(); }
2168
2169  // Returns the object in this chunk.
2170  HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
2171
2172  // Given a requested size, returns the physical size of a chunk to be
2173  // allocated.
2174  static int ChunkSizeFor(int size_in_bytes);
2175
2176  // Given a chunk size, returns the object size it can accommodate.  Used by
2177  // LargeObjectSpace::Available.
2178  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2179    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2180    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2181  }
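  // Example with a hypothetical chunk size (the page size is 8K as described
  // at the top of this file; Page::kObjectStartOffset is left symbolic):
  // ObjectSizeFor(64K) == 64K - 8K - Page::kObjectStartOffset, while any
  // chunk_size <= 8K + Page::kObjectStartOffset yields 0.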
2182
2183 private:
2184  // A pointer to the next large object chunk in the space or NULL.
2185  LargeObjectChunk* next_;
2186
2187  // The total size of this chunk.
2188  size_t size_;
2189
2190 public:
2191  TRACK_MEMORY("LargeObjectChunk")
2192};
2193
2194
2195class LargeObjectSpace : public Space {
2196 public:
2197  LargeObjectSpace(Heap* heap, AllocationSpace id);
2198  virtual ~LargeObjectSpace() {}
2199
2200  // Initializes internal data structures.
2201  bool Setup();
2202
2203  // Releases internal resources, frees objects in this space.
2204  void TearDown();
2205
2206  // Allocates a (non-FixedArray, non-Code) large object.
2207  MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
2208  // Allocates a large Code object.
2209  MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
2210  // Allocates a large FixedArray.
2211  MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
2212
2213  // Available bytes for objects in this space.
2214  inline intptr_t Available();
2215
2216  virtual intptr_t Size() {
2217    return size_;
2218  }
2219
2220  virtual intptr_t SizeOfObjects() {
2221    return objects_size_;
2222  }
2223
2224  int PageCount() {
2225    return page_count_;
2226  }
2227
2228  // Finds an object for a given address, returns Failure::Exception()
2229  // if it is not found. The function iterates through all objects in this
2230  // space, so it may be slow.
2231  MaybeObject* FindObject(Address a);
2232
2233  // Finds a large object page containing the given pc; returns NULL
2234  // if no such page exists.
2235  LargeObjectChunk* FindChunkContainingPc(Address pc);
2236
2237  // Iterates objects covered by dirty regions.
2238  void IterateDirtyRegions(ObjectSlotCallback func);
2239
2240  // Frees unmarked objects.
2241  void FreeUnmarkedObjects();
2242
2243  // Checks whether a heap object is in this space; O(1).
2244  bool Contains(HeapObject* obj);
2245
2246  // Checks whether the space is empty.
2247  bool IsEmpty() { return first_chunk_ == NULL; }
2248
2249  // See the comments for ReserveSpace in the Space class.  This has to be
2250  // called after ReserveSpace has been called on the paged spaces, since they
2251  // may use some memory, leaving less for large objects.
2252  virtual bool ReserveSpace(int bytes);
2253
2254#ifdef DEBUG
2255  virtual void Verify();
2256  virtual void Print();
2257  void ReportStatistics();
2258  void CollectCodeStatistics();
2259#endif
2260  // Checks whether an address is in the object area in this space.  It
2261  // iterates over all objects in the space, so it may be slow.
2262  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2263
2264 private:
2265  // The head of the linked list of large object chunks.
2266  LargeObjectChunk* first_chunk_;
2267  intptr_t size_;  // allocated bytes
2268  int page_count_;  // number of chunks
2269  intptr_t objects_size_;  // size of objects
2270
2271  // Shared implementation of AllocateRaw, AllocateRawCode and
2272  // AllocateRawFixedArray.
2273  MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
2274                                                   int object_size,
2275                                                   Executability executable);
2276
2277  friend class LargeObjectIterator;
2278
2279 public:
2280  TRACK_MEMORY("LargeObjectSpace")
2281};
2282
2283
2284class LargeObjectIterator: public ObjectIterator {
2285 public:
2286  explicit LargeObjectIterator(LargeObjectSpace* space);
2287  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2288
2289  HeapObject* next();
2290
2291  // Implementation of ObjectIterator.
2292  virtual HeapObject* next_object() { return next(); }
2293
2294 private:
2295  LargeObjectChunk* current_;
2296  HeapObjectCallback size_func_;
2297};
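
// A minimal usage sketch for LargeObjectIterator (the caller code and the
// name "lo_space" are hypothetical; as with the other iterators, next() is
// assumed to return NULL once iteration is complete):
//
//   LargeObjectIterator it(lo_space);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // "obj" is the single large object of one LargeObjectChunk.
//   }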
2298
2299
2300#ifdef DEBUG
2301struct CommentStatistic {
2302  const char* comment;
2303  int size;
2304  int count;
2305  void Clear() {
2306    comment = NULL;
2307    size = 0;
2308    count = 0;
2309  }
2310  // Must be small, since an iteration is used for lookup.
2311  static const int kMaxComments = 64;
2312};
2313#endif
2314
2315
2316} }  // namespace v8::internal
2317
2318#endif  // V8_SPACES_H_
2319