spaces.h revision 0d5e116f6aee03185f237311a943491bb079a768
1// Copyright 2006-2008 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#ifndef V8_SPACES_H_
29#define V8_SPACES_H_
30
31#include "list-inl.h"
32#include "log.h"
33
34namespace v8 {
35namespace internal {
36
37// -----------------------------------------------------------------------------
38// Heap structures:
39//
40// A JS heap consists of a young generation, an old generation, and a large
41// object space. The young generation is divided into two semispaces. A
42// scavenger implements Cheney's copying algorithm. The old generation is
43// separated into a map space and an old object space. The map space contains
44// all (and only) map objects; the rest of the old objects go into the old space.
45// The old generation is collected by a mark-sweep-compact collector.
46//
47// The semispaces of the young generation are contiguous.  The old and map
48// spaces consist of a list of pages. A page has a page header and an object
49// area. The page size is deliberately chosen to be 8K bytes.
50// The first word of a page is an opaque page header that holds the
51// address of the next page and its ownership information. The second word may
52// contain the allocation top address of this page. Heap objects are aligned to
53// the pointer size.
54//
55// There is a separate large object space for objects larger than
56// Page::kMaxHeapObjectSize, so that they do not have to move during
57// collection. The large object space is paged. Pages in large object space
58// may be larger than 8K.
59//
60// A card-marking write barrier is used to keep track of intergenerational
61// references. Old space pages are divided into regions of Page::kRegionSize
62// bytes. Each region has a corresponding dirty bit in the page header which is
63// set if the region might contain pointers to new space. For details about
64// the dirty-bit encoding see the comments in the
65// Page::GetRegionNumberForAddress() method body.
66//
67// During scavenges and mark-sweep collections we iterate intergenerational
68// pointers without decoding heap object maps, so if a page belongs to the old
69// pointer space or the large object space it is essential to guarantee that
70// the page does not contain any garbage pointers to new space: every pointer-
71// aligned word which satisfies the Heap::InNewSpace() predicate must be a
72// pointer to a live heap object in new space. Thus objects in the old pointer
73// and large object spaces should have a special layout (e.g. no bare integer
74// fields). This requirement does not apply to the map space, which is iterated
75// in a special fashion. However, we still require the pointer fields of dead
76// maps to be cleaned.
77//
78// To enable lazy cleaning of old space pages we use the notion of an allocation
79// watermark. Every pointer under the watermark is considered to be well formed.
80// The page allocation watermark is not necessarily equal to the page allocation
81// top, but all live objects on the page should reside under the watermark.
82// During a scavenge the allocation watermark might be bumped and invalid
83// pointers might appear below it. To avoid following them we store a valid
84// watermark in a special field in the page header and set the page's
85// WATERMARK_INVALIDATED flag. For details see the comments in the
86// Page::SetAllocationWatermark() method body.
87//
88
89// Some assertion macros used in the debugging mode.
90
91#define ASSERT_PAGE_ALIGNED(address)                                           \
92  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
93
94#define ASSERT_OBJECT_ALIGNED(address)                                         \
95  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
96
97#define ASSERT_MAP_ALIGNED(address)                                            \
98  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
99
100#define ASSERT_OBJECT_SIZE(size)                                               \
101  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
102
103#define ASSERT_PAGE_OFFSET(offset)                                             \
104  ASSERT((Page::kObjectStartOffset <= offset)                                  \
105      && (offset <= Page::kPageSize))
106
107#define ASSERT_MAP_PAGE_INDEX(index)                                           \
108  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
109
110
111class PagedSpace;
112class MemoryAllocator;
113class AllocationInfo;
114
115// -----------------------------------------------------------------------------
116// A page normally has 8K bytes. Large object pages may be larger.  A page
117// address is always aligned to the 8K page size.
118//
119// Each page starts with a header of Page::kPageHeaderSize size which contains
120// bookkeeping data.
121//
122// The mark-compact collector transforms a map pointer into a page index and a
123// page offset. The exact encoding is described in the comments for
124// class MapWord in objects.h.
125//
126// The only way to get a page pointer is by calling factory methods:
127//   Page* p = Page::FromAddress(addr); or
128//   Page* p = Page::FromAllocationTop(top);
129class Page {
130 public:
131  // Returns the page containing a given address. The address ranges
132  // from [page_addr .. page_addr + kPageSize[
133  //
134  // Note that this function only works for addresses in normal paged
135  // spaces and addresses in the first 8K of large object pages (i.e.,
136  // the start of large objects but not necessarily derived pointers
137  // within them).
138  INLINE(static Page* FromAddress(Address a)) {
139    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
140  }
141
142  // Returns the page containing an allocation top. Because an allocation
143  // top address can be the upper bound of the page, we need to subtract
144  // kPointerSize from it first. The address ranges from
145  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
146  INLINE(static Page* FromAllocationTop(Address top)) {
147    Page* p = FromAddress(top - kPointerSize);
148    ASSERT_PAGE_OFFSET(p->Offset(top));
149    return p;
150  }
151
152  // Returns the start address of this page.
153  Address address() { return reinterpret_cast<Address>(this); }
154
155  // Checks whether this is a valid page address.
156  bool is_valid() { return address() != NULL; }
157
158  // Returns the next page of this page.
159  inline Page* next_page();
160
161  // Return the end of allocation in this page. Undefined for unused pages.
162  inline Address AllocationTop();
163
164  // Return the allocation watermark for the page.
165  // For old space pages it is guaranteed that the area under the watermark
166  // does not contain any garbage pointers to new space.
167  inline Address AllocationWatermark();
168
169  // Return the allocation watermark offset from the beginning of the page.
170  inline uint32_t AllocationWatermarkOffset();
171
172  inline void SetAllocationWatermark(Address allocation_watermark);
173
174  inline void SetCachedAllocationWatermark(Address allocation_watermark);
175  inline Address CachedAllocationWatermark();
176
177  // Returns the start address of the object area in this page.
178  Address ObjectAreaStart() { return address() + kObjectStartOffset; }
179
180  // Returns the end address (exclusive) of the object area in this page.
181  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
182
183  // Checks whether an address is page aligned.
184  static bool IsAlignedToPageSize(Address a) {
185    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
186  }
187
188  // True if this page was in use before current compaction started.
189  // Result is valid only for pages owned by paged spaces and
190  // only after PagedSpace::PrepareForMarkCompact was called.
191  inline bool WasInUseBeforeMC();
192
193  inline void SetWasInUseBeforeMC(bool was_in_use);
194
195  // True if this page is a large object page.
196  inline bool IsLargeObjectPage();
197
198  inline void SetIsLargeObjectPage(bool is_large_object_page);
199
200  inline bool IsPageExecutable();
201
202  inline void SetIsPageExecutable(bool is_page_executable);
203
204  // Returns the offset of a given address to this page.
205  INLINE(int Offset(Address a)) {
206    int offset = static_cast<int>(a - address());
207    ASSERT_PAGE_OFFSET(offset);
208    return offset;
209  }
210
211  // Returns the address for a given offset in this page.
212  Address OffsetToAddress(int offset) {
213    ASSERT_PAGE_OFFSET(offset);
214    return address() + offset;
215  }
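
  // A minimal usage sketch of the address/offset round trip (illustrative only;
  // 'addr' is assumed to be an address inside a page of a normal paged space):
  //
  //   Page* p = Page::FromAddress(addr);
  //   int offset = p->Offset(addr);               // in [kObjectStartOffset, kPageSize]
  //   Address back = p->OffsetToAddress(offset);  // round-trips back to 'addr'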
216
217  // ---------------------------------------------------------------------
218  // Card marking support
219
220  static const uint32_t kAllRegionsCleanMarks = 0x0;
221  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
222
223  inline uint32_t GetRegionMarks();
224  inline void SetRegionMarks(uint32_t dirty);
225
226  inline uint32_t GetRegionMaskForAddress(Address addr);
227  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
228  inline int GetRegionNumberForAddress(Address addr);
229
230  inline void MarkRegionDirty(Address addr);
231  inline bool IsRegionDirty(Address addr);
232
233  inline void ClearRegionMarks(Address start,
234                               Address end,
235                               bool reaches_limit);
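
  // An illustrative write-barrier sketch (assumptions: 'slot' is the pointer-
  // aligned address of a field inside an old space object, and 'value' is the
  // object that was just stored into that field):
  //
  //   if (Heap::InNewSpace(value)) {
  //     Page::FromAddress(slot)->MarkRegionDirty(slot);
  //   }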
236
237  // Page size in bytes.  This must be a multiple of the OS page size.
238  static const int kPageSize = 1 << kPageSizeBits;
239
240  // Page size mask.
241  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
242
243  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
244    kIntSize + kPointerSize;
245
246  // The start offset of the object area in a page. Aligned to both maps and
247  // code alignment to be suitable for both.
248  static const int kObjectStartOffset =
249      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
250
251  // Object area size in bytes.
252  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
253
254  // Maximum object size that fits in a page.
255  static const int kMaxHeapObjectSize = kObjectAreaSize;
256
257  static const int kDirtyFlagOffset = 2 * kPointerSize;
258  static const int kRegionSizeLog2 = 8;
259  static const int kRegionSize = 1 << kRegionSizeLog2;
260  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
261
262  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
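
  // With the default 8K page (kPageSizeBits == 13) this yields 256-byte regions,
  // one per bit of the 32-bit dirty_regions_ field: 8192 / 32 == 256 == 1 << 8.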
263
264  enum PageFlag {
265    IS_NORMAL_PAGE = 0,
266    WAS_IN_USE_BEFORE_MC,
267
268    // The page allocation watermark was bumped by preallocation during scavenge.
269    // The correct watermark can be retrieved with CachedAllocationWatermark().
270    WATERMARK_INVALIDATED,
271    IS_EXECUTABLE,
272    NUM_PAGE_FLAGS  // Must be last
273  };
274  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
275
276  // To avoid an additional WATERMARK_INVALIDATED flag-clearing pass during
277  // scavenge we simply invalidate the watermark on each old space page after
278  // processing it. We then flip the meaning of the WATERMARK_INVALIDATED
279  // flag at the beginning of the next scavenge, so each page becomes marked as
280  // having a valid watermark.
281  //
282  // The following invariant must hold for pages in old pointer and map spaces:
283  //     If a page is in use then it is marked as having an invalid watermark at
284  //     the beginning and at the end of any GC.
285  //
286  // This invariant guarantees that after flipping flag meaning at the
287  // beginning of scavenge all pages in use will be marked as having valid
288  // watermark.
289  static inline void FlipMeaningOfInvalidatedWatermarkFlag();
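
  // A minimal sketch of the flip (an assumption about the implementation, not a
  // contract: the current meaning is kept in watermark_invalidated_mark_ and a
  // page's flag bit is compared against it):
  //
  //   // static
  //   void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
  //     watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
  //   }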
290
291  // Returns true if the page allocation watermark was not altered during
292  // scavenge.
293  inline bool IsWatermarkValid();
294
295  inline void InvalidateWatermark(bool value);
296
297  inline bool GetPageFlag(PageFlag flag);
298  inline void SetPageFlag(PageFlag flag, bool value);
299  inline void ClearPageFlags();
300
301  inline void ClearGCFields();
302
303  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
304  static const int kAllocationWatermarkOffsetBits  = kPageSizeBits + 1;
305  static const uint32_t kAllocationWatermarkOffsetMask =
306      ((1 << kAllocationWatermarkOffsetBits) - 1) <<
307      kAllocationWatermarkOffsetShift;
308
309  static const uint32_t kFlagsMask =
310    ((1 << kAllocationWatermarkOffsetShift) - 1);
311
312  STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
313               kAllocationWatermarkOffsetBits);
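
  // Illustrative decoding sketch (assumption: the watermark offset is stored in
  // flags_ above the flag bits, as the masks above suggest):
  //
  //   uint32_t offset = (flags_ & kAllocationWatermarkOffsetMask) >>
  //                     kAllocationWatermarkOffsetShift;
  //   Address watermark = address() + offset;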
314
315  // This field contains the meaning of the WATERMARK_INVALIDATED flag.
316  // Instead of clearing this flag from all pages we just flip
317  // its meaning at the beginning of a scavenge.
318  static intptr_t watermark_invalidated_mark_;
319
320  //---------------------------------------------------------------------------
321  // Page header description.
322  //
323  // If a page is not in the large object space, the first word,
324  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
325  // and the chunk number (0 ~ 8K-1).  Only MemoryAllocator should use
326  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
327  // or [next_page_start, next_page_end[. It cannot point to a valid address
328  // in the current page.  If a page is in the large object space, the first
329  // word *may* (if the page start and large object chunk start are the
330  // same) contain the address of the next large object chunk.
331  intptr_t opaque_header;
332
333  // If the page is not in the large object space, the low-order bit of the
334  // second word is set. If the page is in the large object space, the
335  // second word *may* (if the page start and large object chunk start are
336  // the same) contain the large object chunk size.  In either case, the
337  // low-order bit for large object pages will be cleared.
338  // For normal pages this word is used to store page flags and
339  // offset of allocation top.
340  intptr_t flags_;
341
342  // This field contains dirty marks for regions covering the page. Only dirty
343  // regions might contain intergenerational references.
344  // Only 32 dirty marks are supported so for large object pages several regions
345  // might be mapped to a single dirty mark.
346  uint32_t dirty_regions_;
347
348  // The index of the page in its owner space.
349  int mc_page_index;
350
351  // During mark-compact collections this field contains the forwarding address
352  // of the first live object in this page.
353  // During scavenge collections this field is used to store the allocation
354  // watermark if it is altered during the scavenge.
355  Address mc_first_forwarded;
356};
357
358
359// ----------------------------------------------------------------------------
360// Space is the abstract superclass for all allocation spaces.
361class Space : public Malloced {
362 public:
363  Space(AllocationSpace id, Executability executable)
364      : id_(id), executable_(executable) {}
365
366  virtual ~Space() {}
367
368  // Does the space need executable memory?
369  Executability executable() { return executable_; }
370
371  // Identity used in error reporting.
372  AllocationSpace identity() { return id_; }
373
374  virtual int Size() = 0;
375
376#ifdef ENABLE_HEAP_PROTECTION
377  // Protect/unprotect the space by marking it read-only/writable.
378  virtual void Protect() = 0;
379  virtual void Unprotect() = 0;
380#endif
381
382#ifdef DEBUG
383  virtual void Print() = 0;
384#endif
385
386  // After calling this we can allocate a certain number of bytes using only
387  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
388  // without using freelists or causing a GC.  This is used by partial
389  // snapshots.  It returns true if space was reserved or false if a GC is
390  // needed.  For paged spaces the space requested must include the space wasted
391  // at the end of each page when allocating linearly.
392  virtual bool ReserveSpace(int bytes) = 0;
393
394 private:
395  AllocationSpace id_;
396  Executability executable_;
397};
398
399
400// ----------------------------------------------------------------------------
401// All heap objects containing executable code (code objects) must be allocated
402// from a 2 GB range of memory, so that they can call each other using 32-bit
403// displacements.  This happens automatically on 32-bit platforms, where 32-bit
404// displacements cover the entire 4GB virtual address space.  On 64-bit
405// platforms, we support this using the CodeRange object, which reserves and
406// manages a range of virtual memory.
407class CodeRange : public AllStatic {
408 public:
409  // Reserves a range of virtual memory, but does not commit any of it.
410  // Can only be called once, at heap initialization time.
411  // Returns false on failure.
412  static bool Setup(const size_t requested_size);
413
414  // Frees the range of virtual memory, and frees the data structures used to
415  // manage it.
416  static void TearDown();
417
418  static bool exists() { return code_range_ != NULL; }
419  static bool contains(Address address) {
420    if (code_range_ == NULL) return false;
421    Address start = static_cast<Address>(code_range_->address());
422    return start <= address && address < start + code_range_->size();
423  }
424
425  // Allocates a chunk of memory from the large-object portion of
426  // the code range.  On platforms with no separate code range, should
427  // not be called.
428  static void* AllocateRawMemory(const size_t requested, size_t* allocated);
429  static void FreeRawMemory(void* buf, size_t length);
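
  // Illustrative usage sketch (hypothetical size; only meaningful when a code
  // range has been set up, e.g. on 64-bit targets):
  //
  //   if (CodeRange::exists()) {
  //     size_t allocated = 0;
  //     void* base = CodeRange::AllocateRawMemory(64 * 1024, &allocated);
  //     // ... commit the memory and place code objects in it ...
  //     CodeRange::FreeRawMemory(base, allocated);
  //   }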
430
431 private:
432  // The reserved range of virtual memory that all code objects are put in.
433  static VirtualMemory* code_range_;
434  // Plain old data class, just a struct plus a constructor.
435  class FreeBlock {
436   public:
437    FreeBlock(Address start_arg, size_t size_arg)
438        : start(start_arg), size(size_arg) {}
439    FreeBlock(void* start_arg, size_t size_arg)
440        : start(static_cast<Address>(start_arg)), size(size_arg) {}
441
442    Address start;
443    size_t size;
444  };
445
446  // Freed blocks of memory are added to the free list.  When the allocation
447  // list is exhausted, the free list is sorted and merged to make the new
448  // allocation list.
449  static List<FreeBlock> free_list_;
450  // Memory is allocated from the free blocks on the allocation list.
451  // The block at current_allocation_block_index_ is the current block.
452  static List<FreeBlock> allocation_list_;
453  static int current_allocation_block_index_;
454
455  // Finds a block on the allocation list that contains at least the
456  // requested amount of memory.  If none is found, sorts and merges
457  // the existing free memory blocks, and searches again.
458  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
459  static void GetNextAllocationBlock(size_t requested);
460  // Compares the start addresses of two free blocks.
461  static int CompareFreeBlockAddress(const FreeBlock* left,
462                                     const FreeBlock* right);
463};
464
465
466// ----------------------------------------------------------------------------
467// A space acquires chunks of memory from the operating system. The memory
468// allocator manages chunks for the paged heap spaces (old space and map
469// space).  A paged chunk consists of pages. Pages in a chunk have contiguous
470// addresses and are linked as a list.
471//
472// The allocator keeps an initial chunk which is used for the new space.  The
473// leftover regions of the initial chunk are used for the initial chunks of
474// old space and map space if they are big enough to hold at least one page.
475// The allocator assumes that there is one old space and one map space; each
476// is expanded by allocating kPagesPerChunk pages at a time, except for the
477// last expansion (before running out of space).  The first chunk may contain
478// fewer than kPagesPerChunk pages as well.
479//
480// The memory allocator also allocates chunks for the large object space, but
481// they are managed by the space itself.  The new space does not expand.
482//
483// The fact that pages for paged spaces are allocated and deallocated in chunks
484// induces a constraint on the order of pages in a linked list. We say that
485// pages are linked in chunk-order if and only if every two consecutive
486// pages from the same chunk are consecutive in the linked list.
487//
488
489
490class MemoryAllocator : public AllStatic {
491 public:
492  // Initializes its internal bookkeeping structures.
493  // Max capacity of the total space.
494  static bool Setup(int max_capacity);
495
496  // Deletes valid chunks.
497  static void TearDown();
498
499  // Reserves an initial address range of virtual memory to be split between
500  // the two new space semispaces, the old space, and the map space.  The
501  // memory is not yet committed or assigned to spaces and split into pages.
502  // The initial chunk is unmapped when the memory allocator is torn down.
503  // This function should only be called when there is not already a reserved
504  // initial chunk (initial_chunk_ should be NULL).  If successful, it returns
505  // the start address of the initial chunk, with the side effect of setting
506  // the initial chunk; otherwise it returns NULL and leaves the initial chunk
507  // NULL.
508  static void* ReserveInitialChunk(const size_t requested);
509
510  // Commits pages from an as-yet-unmanaged block of virtual memory into a
511  // paged space.  The block should be part of the initial chunk reserved via
512  // a call to ReserveInitialChunk.  The number of pages is always returned in
513  // the output parameter num_pages.  This function assumes that the start
514  // address is non-null and that it is big enough to hold at least one
515  // page-aligned page.  The call always succeeds, and num_pages is always
516  // greater than zero.
517  static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
518                           int* num_pages);
519
520  // Commit a contiguous block of memory from the initial chunk.  Assumes that
521  // the address is not NULL, the size is greater than zero, and that the
522  // block is contained in the initial chunk.  Returns true if it succeeded
523  // and false otherwise.
524  static bool CommitBlock(Address start, size_t size, Executability executable);
525
526  // Uncommit a contiguous block of memory [start..(start+size)[.
527  // start is not NULL, the size is greater than zero, and the
528  // block is contained in the initial chunk.  Returns true if it succeeded
529  // and false otherwise.
530  static bool UncommitBlock(Address start, size_t size);
531
532  // Zaps a contiguous block of memory [start..(start+size)[ thus
533  // filling it up with a recognizable non-NULL bit pattern.
534  static void ZapBlock(Address start, size_t size);
535
536  // Attempts to allocate the requested (non-zero) number of pages from the
537  // OS.  Fewer pages might be allocated than requested. If it fails to
538  // allocate memory from the OS or cannot allocate a single page, this
539  // function returns an invalid page pointer (NULL). The caller must check
540  // whether the returned page is valid (by calling Page::is_valid()).  It is
541  // guaranteed that allocated pages have contiguous addresses.  The actual
542  // number of allocated pages is returned in the output parameter
543  // allocated_pages.  If the PagedSpace owner is executable and there is
544  // a code range, the pages are allocated from the code range.
545  static Page* AllocatePages(int requested_pages, int* allocated_pages,
546                             PagedSpace* owner);
547
548  // Frees pages from a given page and after. Requires pages to be
549  // linked in chunk-order (see comment for class).
550  // If 'p' is the first page of a chunk, pages from 'p' are freed
551  // and this function returns an invalid page pointer.
552  // Otherwise, the function searches a page after 'p' that is
553  // the first page of a chunk. Pages after the found page
554  // are freed and the function returns 'p'.
555  static Page* FreePages(Page* p);
556
557  // Frees all pages owned by given space.
558  static void FreeAllPages(PagedSpace* space);
559
560  // Allocates and frees raw memory of certain size.
561  // These are just thin wrappers around OS::Allocate and OS::Free,
562  // but keep track of allocated bytes as part of heap.
563  // If the flag is EXECUTABLE and a code range exists, the requested
564  // memory is allocated from the code range.  If a code range exists
565  // and the freed memory is in it, the code range manages the freed memory.
566  static void* AllocateRawMemory(const size_t requested,
567                                 size_t* allocated,
568                                 Executability executable);
569  static void FreeRawMemory(void* buf,
570                            size_t length,
571                            Executability executable);
572  static void PerformAllocationCallback(ObjectSpace space,
573                                        AllocationAction action,
574                                        size_t size);
575
576  static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
577                                          ObjectSpace space,
578                                          AllocationAction action);
579  static void RemoveMemoryAllocationCallback(
580      MemoryAllocationCallback callback);
581  static bool MemoryAllocationCallbackRegistered(
582      MemoryAllocationCallback callback);
583
584  // Returns the maximum available bytes of heaps.
585  static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
586
587  // Returns allocated spaces in bytes.
588  static int Size() { return size_; }
589
590  // Returns allocated executable spaces in bytes.
591  static int SizeExecutable() { return size_executable_; }
592
593  // Returns maximum available bytes that the old space can have.
594  static int MaxAvailable() {
595    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
596  }
597
598  // Links two pages.
599  static inline void SetNextPage(Page* prev, Page* next);
600
601  // Returns the next page of a given page.
602  static inline Page* GetNextPage(Page* p);
603
604  // Checks whether a page belongs to a space.
605  static inline bool IsPageInSpace(Page* p, PagedSpace* space);
606
607  // Returns the space that owns the given page.
608  static inline PagedSpace* PageOwner(Page* page);
609
610  // Finds the first/last page in the same chunk as a given page.
611  static Page* FindFirstPageInSameChunk(Page* p);
612  static Page* FindLastPageInSameChunk(Page* p);
613
614  // Relinks list of pages owned by space to make it chunk-ordered.
615  // Returns new first and last pages of space.
616  // Also returns the last page in the relinked list which has the
617  // WasInUseBeforeMC flag set.
618  static void RelinkPageListInChunkOrder(PagedSpace* space,
619                                         Page** first_page,
620                                         Page** last_page,
621                                         Page** last_page_in_use);
622
623#ifdef ENABLE_HEAP_PROTECTION
624  // Protect/unprotect a block of memory by marking it read-only/writable.
625  static inline void Protect(Address start, size_t size);
626  static inline void Unprotect(Address start, size_t size,
627                               Executability executable);
628
629  // Protect/unprotect a chunk given a page in the chunk.
630  static inline void ProtectChunkFromPage(Page* page);
631  static inline void UnprotectChunkFromPage(Page* page);
632#endif
633
634#ifdef DEBUG
635  // Reports statistic info of the space.
636  static void ReportStatistics();
637#endif
638
639  // Due to encoding limitation, we can only have 8K chunks.
640  static const int kMaxNofChunks = 1 << kPageSizeBits;
641  // If a chunk has at least 16 pages, the maximum heap size is about
642  // 8K * 8K * 16 = 1G bytes.
643#ifdef V8_TARGET_ARCH_X64
644  static const int kPagesPerChunk = 32;
645#else
646  static const int kPagesPerChunk = 16;
647#endif
648  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
649
650 private:
651  // Maximum space size in bytes.
652  static int capacity_;
653
654  // Allocated space size in bytes.
655  static int size_;
656  // Allocated executable space size in bytes.
657  static int size_executable_;
658
659  struct MemoryAllocationCallbackRegistration {
660    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
661                                         ObjectSpace space,
662                                         AllocationAction action)
663        : callback(callback), space(space), action(action) {
664    }
665    MemoryAllocationCallback callback;
666    ObjectSpace space;
667    AllocationAction action;
668  };
669  // A list of callbacks that are triggered when memory is allocated or freed.
670  static List<MemoryAllocationCallbackRegistration>
671      memory_allocation_callbacks_;
672
673  // The initial chunk of virtual memory.
674  static VirtualMemory* initial_chunk_;
675
676  // Allocated chunk info: chunk start address, chunk size, and owning space.
677  class ChunkInfo BASE_EMBEDDED {
678   public:
679    ChunkInfo() : address_(NULL),
680                  size_(0),
681                  owner_(NULL),
682                  executable_(NOT_EXECUTABLE) {}
683    inline void init(Address a, size_t s, PagedSpace* o);
684    Address address() { return address_; }
685    size_t size() { return size_; }
686    PagedSpace* owner() { return owner_; }
687    // We save executability of the owner to allow using it
688    // when collecting stats after the owner has been destroyed.
689    Executability executable() const { return executable_; }
690
691   private:
692    Address address_;
693    size_t size_;
694    PagedSpace* owner_;
695    Executability executable_;
696  };
697
698  // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
699  static List<ChunkInfo> chunks_;
700  static List<int> free_chunk_ids_;
701  static int max_nof_chunks_;
702  static int top_;
703
704  // Push/pop a free chunk id onto/from the stack.
705  static void Push(int free_chunk_id);
706  static int Pop();
707  static bool OutOfChunkIds() { return top_ == 0; }
708
709  // Frees a chunk.
710  static void DeleteChunk(int chunk_id);
711
712  // Basic check whether a chunk id is in the valid range.
713  static inline bool IsValidChunkId(int chunk_id);
714
715  // Checks whether a chunk id identifies an allocated chunk.
716  static inline bool IsValidChunk(int chunk_id);
717
718  // Returns the chunk id that a page belongs to.
719  static inline int GetChunkId(Page* p);
720
721  // True if the address lies in the initial chunk.
722  static inline bool InInitialChunk(Address address);
723
724  // Initializes pages in a chunk. Returns the first page address.
725  // This function and GetChunkId() are provided for the mark-compact
726  // collector to rebuild page headers in the from space, which is
727  // used as a marking stack and its page headers are destroyed.
728  static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
729                                      PagedSpace* owner);
730
731  static Page* RelinkPagesInChunk(int chunk_id,
732                                  Address chunk_start,
733                                  size_t chunk_size,
734                                  Page* prev,
735                                  Page** last_page_in_use);
736};
737
738
739// -----------------------------------------------------------------------------
740// Interface for heap object iterator to be implemented by all object space
741// object iterators.
742//
743// NOTE: The space-specific object iterators also implement their own next()
744//       method, which is used to avoid virtual function calls when
745//       iterating a specific space.
746
747class ObjectIterator : public Malloced {
748 public:
749  virtual ~ObjectIterator() { }
750
751  virtual HeapObject* next_object() = 0;
752};
753
754
755// -----------------------------------------------------------------------------
756// Heap object iterator in new/old/map spaces.
757//
758// A HeapObjectIterator iterates objects from a given address to the
759// top of a space. The given address must be below the current
760// allocation pointer (space top). There are some caveats.
761//
762// (1) If the space top changes upward during iteration (because of
763//     allocating new objects), the iterator does not iterate objects
764//     above the original space top. The caller must create a new
765//     iterator starting from the old top in order to visit these new
766//     objects.
767//
768// (2) If new objects are allocated below the original allocation top
769//     (e.g., free-list allocation in paged spaces), the new objects
770//     may or may not be iterated depending on their position with
771//     respect to the current point of iteration.
772//
773// (3) The space top should not change downward during iteration,
774//     otherwise the iterator will return not-necessarily-valid
775//     objects.
776
777class HeapObjectIterator: public ObjectIterator {
778 public:
779  // Creates a new object iterator in a given space. If a start
780  // address is not given, the iterator starts from the space bottom.
781  // If the size function is not given, the iterator calls the default
782  // Object::Size().
783  explicit HeapObjectIterator(PagedSpace* space);
784  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
785  HeapObjectIterator(PagedSpace* space, Address start);
786  HeapObjectIterator(PagedSpace* space,
787                     Address start,
788                     HeapObjectCallback size_func);
789  HeapObjectIterator(Page* page, HeapObjectCallback size_func);
790
791  inline HeapObject* next() {
792    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
793  }
794
795  // implementation of ObjectIterator.
796  virtual HeapObject* next_object() { return next(); }
797
798 private:
799  Address cur_addr_;  // current iteration point
800  Address end_addr_;  // end iteration point
801  Address cur_limit_;  // current page limit
802  HeapObjectCallback size_func_;  // size function
803  Page* end_page_;  // caches the page of the end address
804
805  HeapObject* FromCurrentPage() {
806    ASSERT(cur_addr_ < cur_limit_);
807
808    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
809    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
810    ASSERT_OBJECT_SIZE(obj_size);
811
812    cur_addr_ += obj_size;
813    ASSERT(cur_addr_ <= cur_limit_);
814
815    return obj;
816  }
817
818  // Slow path of next, goes into the next page.
819  HeapObject* FromNextPage();
820
821  // Initializes fields.
822  void Initialize(Address start, Address end, HeapObjectCallback size_func);
823
824#ifdef DEBUG
825  // Verifies whether fields have valid values.
826  void Verify();
827#endif
828};
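
// A typical iteration sketch (illustrative; 'space' is assumed to be a set-up
// PagedSpace, and next() is assumed to return NULL once the space is exhausted):
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... visit obj ...
//   }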
829
830
831// -----------------------------------------------------------------------------
832// A PageIterator iterates the pages in a paged space.
833//
834// The PageIterator class provides three modes for iterating pages in a space:
835//   PAGES_IN_USE iterates pages containing allocated objects.
836//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
837//                    mark-compact collection.
838//   ALL_PAGES iterates all pages in the space.
839//
840// There are some caveats.
841//
842// (1) If the space expands during iteration, new pages will not be
843//     returned by the iterator in any mode.
844//
845// (2) If new objects are allocated during iteration, they will appear
846//     in pages returned by the iterator.  Allocation may cause the
847//     allocation pointer or MC allocation pointer in the last page to
848//     change between constructing the iterator and iterating the last
849//     page.
850//
851// (3) The space should not shrink during iteration, otherwise the
852//     iterator will return deallocated pages.
853
854class PageIterator BASE_EMBEDDED {
855 public:
856  enum Mode {
857    PAGES_IN_USE,
858    PAGES_USED_BY_MC,
859    ALL_PAGES
860  };
861
862  PageIterator(PagedSpace* space, Mode mode);
863
864  inline bool has_next();
865  inline Page* next();
866
867 private:
868  PagedSpace* space_;
869  Page* prev_page_;  // Previous page returned.
870  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
871};
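
// A typical usage sketch (illustrative; mirrors how the iterator is used below
// in PagedSpace::CountPagesToTop):
//
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... process p ...
//   }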
872
873
874// -----------------------------------------------------------------------------
875// A space has a list of pages. The next page can be accessed via the
876// Page::next_page() call. The next page of the last page is an
877// invalid page pointer. A space can expand and shrink dynamically.
878
879// An abstraction of allocation and relocation pointers in a page-structured
880// space.
881class AllocationInfo {
882 public:
883  Address top;  // current allocation top
884  Address limit;  // current allocation limit
885
886#ifdef DEBUG
887  bool VerifyPagedAllocation() {
888    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
889        && (top <= limit);
890  }
891#endif
892};
893
894
895// An abstraction of the accounting statistics of a page-structured space.
896// The 'capacity' of a space is the number of object-area bytes (ie, not
897// including page bookkeeping structures) currently in the space. The 'size'
898// of a space is the number of allocated bytes, the 'waste' in the space is
899// the number of bytes that are not allocated and not available to
900// allocation without reorganizing the space via a GC (eg, small blocks due
901// to internal fragmentation, top of page areas in map space), and the bytes
902// 'available' is the number of unallocated bytes that are not waste.  The
903// capacity is the sum of size, waste, and available.
904//
905// The stats are only set by functions that ensure they stay balanced. These
906// functions increase or decrease one of the non-capacity stats in
907// conjunction with capacity, or else they always balance increases and
908// decreases to the non-capacity stats.
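//
// For example (a sketch of the intended balance): ExpandSpace(n) adds n bytes
// to both capacity and available; a subsequent AllocateBytes(k) moves k bytes
// from available to size, so capacity == size + waste + available holds
// throughout.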
909class AllocationStats BASE_EMBEDDED {
910 public:
911  AllocationStats() { Clear(); }
912
913  // Zero out all the allocation statistics (ie, no capacity).
914  void Clear() {
915    capacity_ = 0;
916    available_ = 0;
917    size_ = 0;
918    waste_ = 0;
919  }
920
921  // Reset the allocation statistics (ie, available = capacity with no
922  // wasted or allocated bytes).
923  void Reset() {
924    available_ = capacity_;
925    size_ = 0;
926    waste_ = 0;
927  }
928
929  // Accessors for the allocation statistics.
930  int Capacity() { return capacity_; }
931  int Available() { return available_; }
932  int Size() { return size_; }
933  int Waste() { return waste_; }
934
935  // Grow the space by adding available bytes.
936  void ExpandSpace(int size_in_bytes) {
937    capacity_ += size_in_bytes;
938    available_ += size_in_bytes;
939  }
940
941  // Shrink the space by removing available bytes.
942  void ShrinkSpace(int size_in_bytes) {
943    capacity_ -= size_in_bytes;
944    available_ -= size_in_bytes;
945  }
946
947  // Allocate from available bytes (available -> size).
948  void AllocateBytes(int size_in_bytes) {
949    available_ -= size_in_bytes;
950    size_ += size_in_bytes;
951  }
952
953  // Free allocated bytes, making them available (size -> available).
954  void DeallocateBytes(int size_in_bytes) {
955    size_ -= size_in_bytes;
956    available_ += size_in_bytes;
957  }
958
959  // Waste free bytes (available -> waste).
960  void WasteBytes(int size_in_bytes) {
961    available_ -= size_in_bytes;
962    waste_ += size_in_bytes;
963  }
964
965  // Consider the wasted bytes to be allocated, as they contain filler
966  // objects (waste -> size).
967  void FillWastedBytes(int size_in_bytes) {
968    waste_ -= size_in_bytes;
969    size_ += size_in_bytes;
970  }
971
972 private:
973  int capacity_;
974  int available_;
975  int size_;
976  int waste_;
977};
978
979
980class PagedSpace : public Space {
981 public:
982  // Creates a space with a maximum capacity, and an id.
983  PagedSpace(int max_capacity, AllocationSpace id, Executability executable);
984
985  virtual ~PagedSpace() {}
986
987  // Set up the space using the given address range of virtual memory (from
988  // the memory allocator's initial chunk) if possible.  If the block of
989  // addresses is not big enough to contain a single page-aligned page, a
990  // fresh chunk will be allocated.
991  bool Setup(Address start, size_t size);
992
993  // Returns true if the space has been successfully set up and not
994  // subsequently torn down.
995  bool HasBeenSetup();
996
997  // Cleans up the space, frees all pages in this space except those belonging
998  // to the initial chunk, uncommits addresses in the initial chunk.
999  void TearDown();
1000
1001  // Checks whether an object/address is in this space.
1002  inline bool Contains(Address a);
1003  bool Contains(HeapObject* o) { return Contains(o->address()); }
1004
1005  // Given an address occupied by a live object, return that object if it is
1006  // in this space, or Failure::Exception() if it is not. The implementation
1007  // iterates over objects in the page containing the address; the cost is
1008  // linear in the number of objects in the page. It may be slow.
1009  Object* FindObject(Address addr);
1010
1011  // Checks whether page is currently in use by this space.
1012  bool IsUsed(Page* page);
1013
1014  void MarkAllPagesClean();
1015
1016  // Prepares for a mark-compact GC.
1017  virtual void PrepareForMarkCompact(bool will_compact);
1018
1019  // The top of allocation in a page in this space. Undefined if page is unused.
1020  Address PageAllocationTop(Page* page) {
1021    return page == TopPageOf(allocation_info_) ? top()
1022        : PageAllocationLimit(page);
1023  }
1024
1025  // The limit of allocation for a page in this space.
1026  virtual Address PageAllocationLimit(Page* page) = 0;
1027
1028  void FlushTopPageWatermark() {
1029    AllocationTopPage()->SetCachedAllocationWatermark(top());
1030    AllocationTopPage()->InvalidateWatermark(true);
1031  }
1032
1033  // Current capacity without growing (Size() + Available() + Waste()).
1034  int Capacity() { return accounting_stats_.Capacity(); }
1035
1036  // Total amount of memory committed for this space.  For paged
1037  // spaces this equals the capacity.
1038  int CommittedMemory() { return Capacity(); }
1039
1040  // Available bytes without growing.
1041  int Available() { return accounting_stats_.Available(); }
1042
1043  // Allocated bytes in this space.
1044  virtual int Size() { return accounting_stats_.Size(); }
1045
1046  // Wasted bytes due to fragmentation and not recoverable until the
1047  // next GC of this space.
1048  int Waste() { return accounting_stats_.Waste(); }
1049
1050  // Returns the address of the first object in this space.
1051  Address bottom() { return first_page_->ObjectAreaStart(); }
1052
1053  // Returns the allocation pointer in this space.
1054  Address top() { return allocation_info_.top; }
1055
1056  // Allocate the requested number of bytes in the space if possible, return a
1057  // failure object if not.
1058  inline Object* AllocateRaw(int size_in_bytes);
1059
1060  // Allocate the requested number of bytes for relocation during mark-compact
1061  // collection.
1062  inline Object* MCAllocateRaw(int size_in_bytes);
1063
1064  virtual bool ReserveSpace(int bytes);
1065
1066  // Used by ReserveSpace.
1067  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
1068
1069  // Free all pages in range from prev (exclusive) to last (inclusive).
1070  // Freed pages are moved to the end of page list.
1071  void FreePages(Page* prev, Page* last);
1072
1073  // Deallocates a block.
1074  virtual void DeallocateBlock(Address start,
1075                               int size_in_bytes,
1076                               bool add_to_freelist) = 0;
1077
1078  // Set space allocation info.
1079  void SetTop(Address top) {
1080    allocation_info_.top = top;
1081    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
1082  }
1083
1084  // ---------------------------------------------------------------------------
1085  // Mark-compact collection support functions
1086
1087  // Set the relocation point to the beginning of the space.
1088  void MCResetRelocationInfo();
1089
1090  // Writes relocation info to the top page.
1091  void MCWriteRelocationInfoToPage() {
1092    TopPageOf(mc_forwarding_info_)->
1093        SetAllocationWatermark(mc_forwarding_info_.top);
1094  }
1095
1096  // Computes the offset of a given address in this space to the beginning
1097  // of the space.
1098  int MCSpaceOffsetForAddress(Address addr);
1099
1100  // Updates the allocation pointer to the relocation top after a mark-compact
1101  // collection.
1102  virtual void MCCommitRelocationInfo() = 0;
1103
1104  // Releases half of unused pages.
1105  void Shrink();
1106
1107  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
1108  bool EnsureCapacity(int capacity);
1109
1110#ifdef ENABLE_HEAP_PROTECTION
1111  // Protect/unprotect the space by marking it read-only/writable.
1112  void Protect();
1113  void Unprotect();
1114#endif
1115
1116#ifdef DEBUG
1117  // Print meta info and objects in this space.
1118  virtual void Print();
1119
1120  // Verify integrity of this space.
1121  virtual void Verify(ObjectVisitor* visitor);
1122
1123  // Overridden by subclasses to verify space-specific object
1124  // properties (e.g., only maps or free-list nodes are in map space).
1125  virtual void VerifyObject(HeapObject* obj) {}
1126
1127  // Report code object related statistics
1128  void CollectCodeStatistics();
1129  static void ReportCodeStatistics();
1130  static void ResetCodeStatistics();
1131#endif
1132
1133  // Returns the page of the allocation pointer.
1134  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
1135
1136  void RelinkPageListInChunkOrder(bool deallocate_blocks);
1137
1138 protected:
1139  // Maximum capacity of this space.
1140  int max_capacity_;
1141
1142  // Accounting information for this space.
1143  AllocationStats accounting_stats_;
1144
1145  // The first page in this space.
1146  Page* first_page_;
1147
1148  // The last page in this space.  Initially set in Setup, updated in
1149  // Expand and Shrink.
1150  Page* last_page_;
1151
1152  // True if pages owned by this space are linked in chunk-order.
1153  // See comment for class MemoryAllocator for definition of chunk-order.
1154  bool page_list_is_chunk_ordered_;
1155
1156  // Normal allocation information.
1157  AllocationInfo allocation_info_;
1158
1159  // Relocation information during mark-compact collections.
1160  AllocationInfo mc_forwarding_info_;
1161
1162  // Bytes of each page that cannot be allocated.  Possibly non-zero
1163  // for pages in spaces with only fixed-size objects.  Always zero
1164  // for pages in spaces with variable sized objects (those pages are
1165  // padded with free-list nodes).
1166  int page_extra_;
1167
1168  // Sets allocation pointer to a page bottom.
1169  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
1170
1171  // Returns the top page specified by an allocation info structure.
1172  static Page* TopPageOf(AllocationInfo alloc_info) {
1173    return Page::FromAllocationTop(alloc_info.limit);
1174  }
1175
1176  int CountPagesToTop() {
1177    Page* p = Page::FromAllocationTop(allocation_info_.top);
1178    PageIterator it(this, PageIterator::ALL_PAGES);
1179    int counter = 1;
1180    while (it.has_next()) {
1181      if (it.next() == p) return counter;
1182      counter++;
1183    }
1184    UNREACHABLE();
1185    return -1;
1186  }
1187
1188  // Expands the space by allocating a fixed number of pages. Returns false if
1189  // it cannot allocate the requested number of pages from the OS. Newly
1190  // allocated pages are appended to the last_page.
1191  bool Expand(Page* last_page);
1192
1193  // Generic fast case allocation function that tries linear allocation in
1194  // the top page of 'alloc_info'.  Returns NULL on failure.
1195  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
1196                                      int size_in_bytes);
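
  // A minimal bump-pointer sketch of what the fast path does (an assumption
  // about the implementation, shown for illustration only):
  //
  //   Address new_top = alloc_info->top + size_in_bytes;
  //   if (new_top > alloc_info->limit) return NULL;
  //   HeapObject* object = HeapObject::FromAddress(alloc_info->top);
  //   alloc_info->top = new_top;
  //   return object;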
1197
1198  // During normal allocation or deserialization, roll to the next page in
1199  // the space (there is assumed to be one) and allocate there.  This
1200  // function is space-dependent.
1201  virtual HeapObject* AllocateInNextPage(Page* current_page,
1202                                         int size_in_bytes) = 0;
1203
1204  // Slow path of AllocateRaw.  This function is space-dependent.
1205  virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
1206
1207  // Slow path of MCAllocateRaw.
1208  HeapObject* SlowMCAllocateRaw(int size_in_bytes);
1209
1210#ifdef DEBUG
1211  // Returns the number of total pages in this space.
1212  int CountTotalPages();
1213#endif
1214 private:
1215
1216  // Returns a pointer to the page of the relocation pointer.
1217  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
1218
1219  friend class PageIterator;
1220};
1221
1222
1223#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1224class NumberAndSizeInfo BASE_EMBEDDED {
1225 public:
1226  NumberAndSizeInfo() : number_(0), bytes_(0) {}
1227
1228  int number() const { return number_; }
1229  void increment_number(int num) { number_ += num; }
1230
1231  int bytes() const { return bytes_; }
1232  void increment_bytes(int size) { bytes_ += size; }
1233
1234  void clear() {
1235    number_ = 0;
1236    bytes_ = 0;
1237  }
1238
1239 private:
1240  int number_;
1241  int bytes_;
1242};
1243
1244
1245// HistogramInfo class for recording a single "bar" of a histogram.  This
1246// class is used for collecting statistics to print to stdout (when compiled
1247// with DEBUG) or to the log file (when compiled with
1248// ENABLE_LOGGING_AND_PROFILING).
1249class HistogramInfo: public NumberAndSizeInfo {
1250 public:
1251  HistogramInfo() : NumberAndSizeInfo() {}
1252
1253  const char* name() { return name_; }
1254  void set_name(const char* name) { name_ = name; }
1255
1256 private:
1257  const char* name_;
1258};
1259#endif
1260
1261
1262// -----------------------------------------------------------------------------
1263// SemiSpace in young generation
1264//
1265// A semispace is a contiguous chunk of memory. The mark-compact collector
1266// uses the memory in the from space as a marking stack when tracing live
1267// objects.
1268
1269class SemiSpace : public Space {
1270 public:
1271  // Constructor.
1272  SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
1273    start_ = NULL;
1274    age_mark_ = NULL;
1275  }
1276
1277  // Sets up the semispace using the given chunk.
1278  bool Setup(Address start, int initial_capacity, int maximum_capacity);
1279
1280  // Tear down the space.  Heap memory was not allocated by the space, so it
1281  // is not deallocated here.
1282  void TearDown();
1283
1284  // True if the space has been set up but not torn down.
1285  bool HasBeenSetup() { return start_ != NULL; }
1286
1287  // Grow the size of the semispace by committing extra virtual memory.
1288  // Assumes that the caller has checked that the semispace has not reached
1289  // its maximum capacity (and thus there is space available in the reserved
1290  // address range to grow).
1291  bool Grow();
1292
1293  // Grow the semispace to the new capacity.  The new capacity
1294  // requested must be larger than the current capacity.
1295  bool GrowTo(int new_capacity);
1296
1297  // Shrinks the semispace to the new capacity.  The new capacity
1298  // requested must be more than the amount of used memory in the
1299  // semispace and less than the current capacity.
1300  bool ShrinkTo(int new_capacity);
1301
1302  // Returns the start address of the space.
1303  Address low() { return start_; }
1304  // Returns one past the end address of the space.
1305  Address high() { return low() + capacity_; }
1306
1307  // Age mark accessors.
1308  Address age_mark() { return age_mark_; }
1309  void set_age_mark(Address mark) { age_mark_ = mark; }
1310
1311  // True if the address is in the address range of this semispace (not
1312  // necessarily below the allocation pointer).
1313  bool Contains(Address a) {
1314    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1315           == reinterpret_cast<uintptr_t>(start_);
1316  }
1317
1318  // True if the object is a heap object in the address range of this
1319  // semispace (not necessarily below the allocation pointer).
1320  bool Contains(Object* o) {
1321    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1322  }
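
  // Both containment tests rely on the semispace being a power-of-two-sized,
  // aligned block (an assumption implied by the masks below): masking with
  // address_mask_ (or object_mask_ for tagged pointers) clears the in-space
  // offset bits, so the result equals start_ (or its expected tagged form)
  // exactly when the value lies in this semispace.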
1323
1324  // The offset of an address from the beginning of the space.
1325  int SpaceOffsetForAddress(Address addr) {
1326    return static_cast<int>(addr - low());
1327  }
1328
1329  // If we don't have these here then SemiSpace will be abstract.  However
1330  // they should never be called.
1331  virtual int Size() {
1332    UNREACHABLE();
1333    return 0;
1334  }
1335
1336  virtual bool ReserveSpace(int bytes) {
1337    UNREACHABLE();
1338    return false;
1339  }
1340
1341  bool is_committed() { return committed_; }
1342  bool Commit();
1343  bool Uncommit();
1344
1345#ifdef ENABLE_HEAP_PROTECTION
1346  // Protect/unprotect the space by marking it read-only/writable.
1347  virtual void Protect() {}
1348  virtual void Unprotect() {}
1349#endif
1350
1351#ifdef DEBUG
1352  virtual void Print();
1353  virtual void Verify();
1354#endif
1355
1356  // Returns the current capacity of the semi space.
1357  int Capacity() { return capacity_; }
1358
1359  // Returns the maximum capacity of the semi space.
1360  int MaximumCapacity() { return maximum_capacity_; }
1361
1362  // Returns the initial capacity of the semi space.
1363  int InitialCapacity() { return initial_capacity_; }
1364
1365 private:
1366  // The current and maximum capacity of the space.
1367  int capacity_;
1368  int maximum_capacity_;
1369  int initial_capacity_;
1370
1371  // The start address of the space.
1372  Address start_;
1373  // Used to govern object promotion during mark-compact collection.
1374  Address age_mark_;
1375
1376  // Masks and comparison values to test for containment in this semispace.
1377  uintptr_t address_mask_;
1378  uintptr_t object_mask_;
1379  uintptr_t object_expected_;
1380
1381  bool committed_;
1382
1383 public:
1384  TRACK_MEMORY("SemiSpace")
1385};
1386
1387
1388// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1389// semispace of the heap's new space.  It iterates over the objects in the
1390// semispace from a given start address (defaulting to the bottom of the
1391// semispace) to the top of the semispace.  New objects allocated after the
1392// iterator is created are not iterated.
1393class SemiSpaceIterator : public ObjectIterator {
1394 public:
1395  // Create an iterator over the objects in the given space.  If no start
1396  // address is given, the iterator starts from the bottom of the space.  If
1397  // no size function is given, the iterator calls Object::Size().
1398  explicit SemiSpaceIterator(NewSpace* space);
1399  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1400  SemiSpaceIterator(NewSpace* space, Address start);
1401
1402  HeapObject* next() {
1403    if (current_ == limit_) return NULL;
1404
1405    HeapObject* object = HeapObject::FromAddress(current_);
1406    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1407
1408    current_ += size;
1409    return object;
1410  }
1411
1412  // Implementation of the ObjectIterator functions.
1413  virtual HeapObject* next_object() { return next(); }
1414
1415 private:
1416  void Initialize(NewSpace* space, Address start, Address end,
1417                  HeapObjectCallback size_func);
1418
1419  // The semispace.
1420  SemiSpace* space_;
1421  // The current iteration point.
1422  Address current_;
1423  // The end of iteration.
1424  Address limit_;
1425  // The callback function.
1426  HeapObjectCallback size_func_;
1427};
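
// Usage sketch (illustrative; assumes a set-up heap and that Heap::new_space()
// returns the NewSpace, as elsewhere in the heap code):
//   SemiSpaceIterator it(Heap::new_space());
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... visit obj ...
//   }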
1428
1429
1430// -----------------------------------------------------------------------------
1431// The young generation space.
1432//
1433// The new space consists of a contiguous pair of semispaces.  It simply
1434// forwards most functions to the appropriate semispace.
1435
1436class NewSpace : public Space {
1437 public:
1438  // Constructor.
1439  NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
1440
1441  // Sets up the new space using the given chunk.
1442  bool Setup(Address start, int size);
1443
1444  // Tears down the space.  Heap memory was not allocated by the space, so it
1445  // is not deallocated here.
1446  void TearDown();
1447
1448  // True if the space has been set up but not torn down.
1449  bool HasBeenSetup() {
1450    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
1451  }
1452
1453  // Flip the pair of spaces.
1454  void Flip();
1455
1456  // Grow the capacity of the semispaces.  Assumes that they are not at
1457  // their maximum capacity.
1458  void Grow();
1459
1460  // Shrink the capacity of the semispaces.
1461  void Shrink();
1462
1463  // True if the address or object lies in the address range of either
1464  // semispace (not necessarily below the allocation pointer).
1465  bool Contains(Address a) {
1466    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1467        == reinterpret_cast<uintptr_t>(start_);
1468  }
1469  bool Contains(Object* o) {
1470    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1471  }
1472
1473  // Return the allocated bytes in the active semispace.
1474  virtual int Size() { return static_cast<int>(top() - bottom()); }
1475
1476  // Return the current capacity of a semispace.
1477  int Capacity() {
1478    ASSERT(to_space_.Capacity() == from_space_.Capacity());
1479    return to_space_.Capacity();
1480  }
1481
1482  // Return the total amount of memory committed for new space.
1483  int CommittedMemory() {
1484    if (from_space_.is_committed()) return 2 * Capacity();
1485    return Capacity();
1486  }
1487
1488  // Return the available bytes without growing in the active semispace.
1489  int Available() { return Capacity() - Size(); }
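  // Worked example (hypothetical numbers): with a 512 KB capacity per
  // semispace, 96 KB allocated in to-space and from-space committed,
  //   Size()            == 96 KB
  //   Available()       == 416 KB
  //   CommittedMemory() == 1024 KB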
1490
1491  // Return the maximum capacity of a semispace.
1492  int MaximumCapacity() {
1493    ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
1494    return to_space_.MaximumCapacity();
1495  }
1496
1497  // Returns the initial capacity of a semispace.
1498  int InitialCapacity() {
1499    ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
1500    return to_space_.InitialCapacity();
1501  }
1502
1503  // Return the address of the allocation pointer in the active semispace.
1504  Address top() { return allocation_info_.top; }
1505  // Return the address of the first object in the active semispace.
1506  Address bottom() { return to_space_.low(); }
1507
1508  // Get the age mark of the inactive semispace.
1509  Address age_mark() { return from_space_.age_mark(); }
1510  // Set the age mark in the active semispace.
1511  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
1512
1513  // The start address of the space and a bit mask. ANDing an address in the
1514  // new space with the mask will result in the start address.
1515  Address start() { return start_; }
1516  uintptr_t mask() { return address_mask_; }
1517
1518  // The allocation top and limit addresses.
1519  Address* allocation_top_address() { return &allocation_info_.top; }
1520  Address* allocation_limit_address() { return &allocation_info_.limit; }
1521
1522  Object* AllocateRaw(int size_in_bytes) {
1523    return AllocateRawInternal(size_in_bytes, &allocation_info_);
1524  }
1525
1526  // Allocate the requested number of bytes for relocation during mark-compact
1527  // collection.
1528  Object* MCAllocateRaw(int size_in_bytes) {
1529    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
1530  }
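  // Allocation sketch (illustrative caller, not part of this interface): the
  // raw allocators return a Failure object rather than NULL when the space is
  // exhausted, so callers test with IsFailure() before casting:
  //   Object* result = new_space->AllocateRaw(FixedArray::SizeFor(10));
  //   if (result->IsFailure()) { /* e.g. trigger a scavenge and retry */ }
  //   HeapObject* raw = HeapObject::cast(result);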
1531
1532  // Reset the allocation pointer to the beginning of the active semispace.
1533  void ResetAllocationInfo();
1534  // Reset the relocation pointer to the bottom of the inactive semispace in
1535  // preparation for mark-compact collection.
1536  void MCResetRelocationInfo();
1537  // Update the allocation pointer in the active semispace after a
1538  // mark-compact collection.
1539  void MCCommitRelocationInfo();
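  // Scavenge-time sketch (illustrative ordering only; the real collector
  // lives in heap.cc):
  //   Flip();                  // the old to-space becomes the from-space
  //   ResetAllocationInfo();   // allocate from the bottom of the new to-space
  //   // ... copy live objects out of from-space ...
  //   set_age_mark(top());     // objects below the mark are candidates for
  //                            // promotion at the next scavenge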
1540
1541  // Get the extent of the inactive semispace (for use as a marking stack).
1542  Address FromSpaceLow() { return from_space_.low(); }
1543  Address FromSpaceHigh() { return from_space_.high(); }
1544
1545  // Get the extent of the active semispace (to sweep newly copied objects
1546  // during a scavenge collection).
1547  Address ToSpaceLow() { return to_space_.low(); }
1548  Address ToSpaceHigh() { return to_space_.high(); }
1549
1550  // Offsets from the beginning of the semispaces.
1551  int ToSpaceOffsetForAddress(Address a) {
1552    return to_space_.SpaceOffsetForAddress(a);
1553  }
1554  int FromSpaceOffsetForAddress(Address a) {
1555    return from_space_.SpaceOffsetForAddress(a);
1556  }
1557
1558  // True if the object is a heap object in the address range of the
1559  // respective semispace (not necessarily below the allocation pointer of the
1560  // semispace).
1561  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
1562  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
1563
1564  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
1565  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
1566
1567  virtual bool ReserveSpace(int bytes);
1568
1569#ifdef ENABLE_HEAP_PROTECTION
1570  // Protect/unprotect the space by marking it read-only/writable.
1571  virtual void Protect();
1572  virtual void Unprotect();
1573#endif
1574
1575#ifdef DEBUG
1576  // Verify the active semispace.
1577  virtual void Verify();
1578  // Print the active semispace.
1579  virtual void Print() { to_space_.Print(); }
1580#endif
1581
1582#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1583  // Iterates the active semispace to collect statistics.
1584  void CollectStatistics();
1585  // Reports previously collected statistics of the active semispace.
1586  void ReportStatistics();
1587  // Clears previously collected statistics.
1588  void ClearHistograms();
1589
1590  // Record the allocation or promotion of a heap object.  Note that we don't
1591  // record every single allocation, but only those that happen in the
1592  // to space during a scavenge GC.
1593  void RecordAllocation(HeapObject* obj);
1594  void RecordPromotion(HeapObject* obj);
1595#endif
1596
1597  // Return whether the operation succeeded.
1598  bool CommitFromSpaceIfNeeded() {
1599    if (from_space_.is_committed()) return true;
1600    return from_space_.Commit();
1601  }
1602
1603  bool UncommitFromSpace() {
1604    if (!from_space_.is_committed()) return true;
1605    return from_space_.Uncommit();
1606  }
1607
1608 private:
1609  // The semispaces.
1610  SemiSpace to_space_;
1611  SemiSpace from_space_;
1612
1613  // Start address and bit mask for containment testing.
1614  Address start_;
1615  uintptr_t address_mask_;
1616  uintptr_t object_mask_;
1617  uintptr_t object_expected_;
1618
1619  // Allocation pointer and limit for normal allocation and allocation during
1620  // mark-compact collection.
1621  AllocationInfo allocation_info_;
1622  AllocationInfo mc_forwarding_info_;
1623
1624#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1625  HistogramInfo* allocated_histogram_;
1626  HistogramInfo* promoted_histogram_;
1627#endif
1628
1629  // Implementation of AllocateRaw and MCAllocateRaw.
1630  inline Object* AllocateRawInternal(int size_in_bytes,
1631                                     AllocationInfo* alloc_info);
1632
1633  friend class SemiSpaceIterator;
1634
1635 public:
1636  TRACK_MEMORY("NewSpace")
1637};
1638
1639
1640// -----------------------------------------------------------------------------
1641// Free lists for old object spaces
1642//
1643// Free-list nodes are free blocks in the heap.  They look like heap objects
1644// (free-list node pointers have the heap object tag, and they have a map like
1645// a heap object).  They have a size and a next pointer.  The next pointer is
1646// the raw address of the next free list node (or NULL).
1647class FreeListNode: public HeapObject {
1648 public:
1649  // Obtain a free-list node from a raw address.  This is not a cast because
1650  // it neither checks nor requires that the first word at the address is a map
1651  // pointer.
1652  static FreeListNode* FromAddress(Address address) {
1653    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1654  }
1655
1656  static inline bool IsFreeListNode(HeapObject* object);
1657
1658  // Set the size in bytes, which can be read with HeapObject::Size().  This
1659  // function also writes a map to the first word of the block so that it
1660  // looks like a heap object to the garbage collector and heap iteration
1661  // functions.
1662  void set_size(int size_in_bytes);
1663
1664  // Accessors for the next field.
1665  inline Address next();
1666  inline void set_next(Address next);
1667
1668 private:
1669  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
1670
1671  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1672};
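
// Layout sketch (illustrative, one possible encoding): a freed block of
// 5 words at address 'a' is made to look like a heap object so that heap
// iteration can step over it:
//   a + 0 * kPointerSize : map word (written by set_size())
//   a + 1 * kPointerSize : length word encoding the block size
//   a + kNextOffset      : raw address of the next free block, or NULL
// FreeListNode::FromAddress(a)->set_size(5 * kPointerSize) writes the first
// two words; set_next() fills in the next pointer.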
1673
1674
1675// The free list for the old space.
1676class OldSpaceFreeList BASE_EMBEDDED {
1677 public:
1678  explicit OldSpaceFreeList(AllocationSpace owner);
1679
1680  // Clear the free list.
1681  void Reset();
1682
1683  // Return the number of bytes available on the free list.
1684  int available() { return available_; }
1685
1686  // Place a node on the free list.  The block of size 'size_in_bytes'
1687  // starting at 'start' is placed on the free list.  The return value is the
1688  // number of bytes that have been lost due to internal fragmentation by
1689  // freeing the block.  Bookkeeping information will be written to the block,
1690  // ie, its contents will be destroyed.  The start address should be word
1691  // aligned, and the size should be a non-zero multiple of the word size.
1692  int Free(Address start, int size_in_bytes);
1693
1694  // Allocate a block of size 'size_in_bytes' from the free list.  The block
1695  // is uninitialized.  A failure is returned if no block is available.  The
1696  // number of bytes lost to fragmentation is returned in the output parameter
1697  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
1698  Object* Allocate(int size_in_bytes, int* wasted_bytes);
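  // Usage sketch (illustrative; block_start and the sizes are assumptions):
  //   OldSpaceFreeList list(OLD_POINTER_SPACE);
  //   int lost = list.Free(block_start, 64);   // put a 64-byte block on the
  //                                            // list; 'lost' is wasted bytes
  //   int wasted_bytes;
  //   Object* result = list.Allocate(32, &wasted_bytes);
  //   if (!result->IsFailure()) {
  //     // result is an uninitialized 32-byte block carved from the 64-byte one
  //   }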
1699
1700 private:
1701  // The size range of blocks, in bytes. (Smaller allocations are allowed, but
1702  // will always result in waste.)
1703  static const int kMinBlockSize = 2 * kPointerSize;
1704  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1705
1706  // The identity of the owning space, for building allocation Failure
1707  // objects.
1708  AllocationSpace owner_;
1709
1710  // Total available bytes in all blocks on this free list.
1711  int available_;
1712
1713  // Blocks are put on exact free lists in an array, indexed by size in words.
1714  // The available sizes are kept in an increasingly ordered list. Entries
1715  // corresponding to sizes < kMinBlockSize always have an empty free list
1716  // (but index kHead is used for the head of the size list).
1717  struct SizeNode {
1718    // Address of the head FreeListNode of the implied block size or NULL.
1719    Address head_node_;
1720    // Size (words) of the next larger available size if head_node_ != NULL.
1721    int next_size_;
1722  };
1723  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
1724  SizeNode free_[kFreeListsLength];
1725
1726  // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
1727  static const int kHead = kMinBlockSize / kPointerSize - 1;
1728  static const int kEnd = kMaxInt;
1729
1730  // We keep a "finger" in the size list to speed up a common pattern:
1731  // repeated requests for the same or increasing sizes.
1732  int finger_;
1733
1734  // Starting from *prev, find and return the smallest size >= index (words),
1735  // or kEnd. Update *prev to be the largest size < index, or kHead.
1736  int FindSize(int index, int* prev) {
1737    int cur = free_[*prev].next_size_;
1738    while (cur < index) {
1739      *prev = cur;
1740      cur = free_[cur].next_size_;
1741    }
1742    return cur;
1743  }
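
  // Walk sketch (illustrative sizes): with free blocks of 2, 5 and 9 words on
  // the list, free_[kHead].next_size_ == 2, free_[2].next_size_ == 5,
  // free_[5].next_size_ == 9 and free_[9].next_size_ == kEnd.  Then
  //   int prev = kHead;
  //   int cur = FindSize(4, &prev);   // cur == 5, prev == 2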
1744
1745  // Remove an existing element from the size list.
1746  void RemoveSize(int index) {
1747    int prev = kHead;
1748    int cur = FindSize(index, &prev);
1749    ASSERT(cur == index);
1750    free_[prev].next_size_ = free_[cur].next_size_;
1751    finger_ = prev;
1752  }
1753
1754  // Insert a new element into the size list.
1755  void InsertSize(int index) {
1756    int prev = kHead;
1757    int cur = FindSize(index, &prev);
1758    ASSERT(cur != index);
1759    free_[prev].next_size_ = index;
1760    free_[index].next_size_ = cur;
1761  }
1762
1763  // The size list is not updated during a sequence of calls to Free, but is
1764  // rebuilt before the next allocation.
1765  void RebuildSizeList();
1766  bool needs_rebuild_;
1767
1768#ifdef DEBUG
1769  // Does this free list contain a free block located at the address of 'node'?
1770  bool Contains(FreeListNode* node);
1771#endif
1772
1773  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
1774};
1775
1776
1777// The free list for the map space.
1778class FixedSizeFreeList BASE_EMBEDDED {
1779 public:
1780  FixedSizeFreeList(AllocationSpace owner, int object_size);
1781
1782  // Clear the free list.
1783  void Reset();
1784
1785  // Return the number of bytes available on the free list.
1786  int available() { return available_; }
1787
1788  // Place a node on the free list.  The block starting at 'start' (assumed to
1789  // have size object_size_) is placed on the free list.  Bookkeeping
1790  // information will be written to the block, i.e., its contents will be
1791  // destroyed.  The start address should be word aligned.
1792  void Free(Address start);
1793
1794  // Allocate a fixed-size block from the free list.  The block is
1795  // uninitialized.  A failure is returned if no block is available.
1796  Object* Allocate();
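
  // Usage sketch (illustrative; map_block_start is an assumption):
  //   FixedSizeFreeList list(MAP_SPACE, Map::kSize);
  //   list.Free(map_block_start);          // block assumed to be Map::kSize
  //   Object* result = list.Allocate();    // a Failure if the list is empty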
1797
1798 private:
1799  // Available bytes on the free list.
1800  int available_;
1801
1802  // The head of the free list.
1803  Address head_;
1804
1805  // The tail of the free list.
1806  Address tail_;
1807
1808  // The identity of the owning space, for building allocation Failure
1809  // objects.
1810  AllocationSpace owner_;
1811
1812  // The size of the objects in this space.
1813  int object_size_;
1814
1815  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
1816};
1817
1818
1819// -----------------------------------------------------------------------------
1820// Old object space (excluding map objects)
1821
1822class OldSpace : public PagedSpace {
1823 public:
1824  // Creates an old space object with a given maximum capacity.
1825  // The constructor does not allocate pages from OS.
1826  explicit OldSpace(int max_capacity,
1827                    AllocationSpace id,
1828                    Executability executable)
1829      : PagedSpace(max_capacity, id, executable), free_list_(id) {
1830    page_extra_ = 0;
1831  }
1832
1833  // The bytes available on the free list (i.e., not above the linear allocation
1834  // pointer).
1835  int AvailableFree() { return free_list_.available(); }
1836
1837  // The limit of allocation for a page in this space.
1838  virtual Address PageAllocationLimit(Page* page) {
1839    return page->ObjectAreaEnd();
1840  }
1841
1842  // Give a block of memory to the space's free list.  It might be added to
1843  // the free list or accounted as waste.
1844  // If add_to_freelist is false, only the accounting stats are updated and
1845  // no attempt is made to add the area to the free list.
1846  void Free(Address start, int size_in_bytes, bool add_to_freelist) {
1847    accounting_stats_.DeallocateBytes(size_in_bytes);
1848
1849    if (add_to_freelist) {
1850      int wasted_bytes = free_list_.Free(start, size_in_bytes);
1851      accounting_stats_.WasteBytes(wasted_bytes);
1852    }
1853  }
1854
1855  virtual void DeallocateBlock(Address start,
1856                               int size_in_bytes,
1857                               bool add_to_freelist);
1858
1859  // Prepare for full garbage collection.  Resets the relocation pointer and
1860  // clears the free list.
1861  virtual void PrepareForMarkCompact(bool will_compact);
1862
1863  // Updates the allocation pointer to the relocation top after a mark-compact
1864  // collection.
1865  virtual void MCCommitRelocationInfo();
1866
1867  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1868
1869#ifdef DEBUG
1870  // Reports statistics for the space
1871  void ReportStatistics();
1872#endif
1873
1874 protected:
1875  // Virtual function in the superclass.  Slow path of AllocateRaw.
1876  HeapObject* SlowAllocateRaw(int size_in_bytes);
1877
1878  // Virtual function in the superclass.  Allocate linearly at the start of
1879  // the page after current_page (there is assumed to be one).
1880  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1881
1882 private:
1883  // The space's free list.
1884  OldSpaceFreeList free_list_;
1885
1886 public:
1887  TRACK_MEMORY("OldSpace")
1888};
1889
1890
1891// -----------------------------------------------------------------------------
1892// Old space for objects of a fixed size
1893
1894class FixedSpace : public PagedSpace {
1895 public:
1896  FixedSpace(int max_capacity,
1897             AllocationSpace id,
1898             int object_size_in_bytes,
1899             const char* name)
1900      : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
1901        object_size_in_bytes_(object_size_in_bytes),
1902        name_(name),
1903        free_list_(id, object_size_in_bytes) {
1904    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1905  }
1906
1907  // The limit of allocation for a page in this space.
1908  virtual Address PageAllocationLimit(Page* page) {
1909    return page->ObjectAreaEnd() - page_extra_;
1910  }
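
  // Worked example (hypothetical numbers): if the object area of a page were
  // 8160 bytes and object_size_in_bytes_ were 44, then
  //   page_extra_ == 8160 % 44 == 20
  // and PageAllocationLimit() would end 20 bytes short of ObjectAreaEnd(),
  // since a 44-byte object cannot fit in the remaining 20 bytes.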
1911
1912  int object_size_in_bytes() { return object_size_in_bytes_; }
1913
1914  // Give a fixed sized block of memory to the space's free list.
1915  // If add_to_freelist is false, only the accounting stats are updated and
1916  // no attempt is made to add the area to the free list.
1917  void Free(Address start, bool add_to_freelist) {
1918    if (add_to_freelist) {
1919      free_list_.Free(start);
1920    }
1921    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
1922  }
1923
1924  // Prepares for a mark-compact GC.
1925  virtual void PrepareForMarkCompact(bool will_compact);
1926
1927  // Updates the allocation pointer to the relocation top after a mark-compact
1928  // collection.
1929  virtual void MCCommitRelocationInfo();
1930
1931  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1932
1933  virtual void DeallocateBlock(Address start,
1934                               int size_in_bytes,
1935                               bool add_to_freelist);
1936#ifdef DEBUG
1937  // Reports statistics for the space
1938  void ReportStatistics();
1939#endif
1940
1941 protected:
1942  // Virtual function in the superclass.  Slow path of AllocateRaw.
1943  HeapObject* SlowAllocateRaw(int size_in_bytes);
1944
1945  // Virtual function in the superclass.  Allocate linearly at the start of
1946  // the page after current_page (there is assumed to be one).
1947  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1948
1949  void ResetFreeList() {
1950    free_list_.Reset();
1951  }
1952
1953 private:
1954  // The size of objects in this space.
1955  int object_size_in_bytes_;
1956
1957  // The name of this space.
1958  const char* name_;
1959
1960  // The space's free list.
1961  FixedSizeFreeList free_list_;
1962};
1963
1964
1965// -----------------------------------------------------------------------------
1966// Old space for all map objects
1967
1968class MapSpace : public FixedSpace {
1969 public:
1970  // Creates a map space object with a maximum capacity.
1971  MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id)
1972      : FixedSpace(max_capacity, id, Map::kSize, "map"),
1973        max_map_space_pages_(max_map_space_pages) {
1974    ASSERT(max_map_space_pages < kMaxMapPageIndex);
1975  }
1976
1977  // Prepares for a mark-compact GC.
1978  virtual void PrepareForMarkCompact(bool will_compact);
1979
1980  // Given an index, returns the page address.
1981  Address PageAddress(int page_index) { return page_addresses_[page_index]; }
1982
1983  static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
1984
1985  // Are map pointers encodable into map word?
1986  bool MapPointersEncodable() {
1987    if (!FLAG_use_big_map_space) {
1988      ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
1989      return true;
1990    }
1991    return CountPagesToTop() <= max_map_space_pages_;
1992  }
1993
1994  // Should be called after forced sweep to find out if map space needs
1995  // compaction.
1996  bool NeedsCompaction(int live_maps) {
1997    return !MapPointersEncodable() && live_maps <= CompactionThreshold();
1998  }
1999
2000  Address TopAfterCompaction(int live_maps) {
2001    ASSERT(NeedsCompaction(live_maps));
2002
2003    int pages_left = live_maps / kMapsPerPage;
2004    PageIterator it(this, PageIterator::ALL_PAGES);
2005    while (pages_left-- > 0) {
2006      ASSERT(it.has_next());
2007      it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
2008    }
2009    ASSERT(it.has_next());
2010    Page* top_page = it.next();
2011    top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
2012    ASSERT(top_page->is_valid());
2013
2014    int offset = live_maps % kMapsPerPage * Map::kSize;
2015    Address top = top_page->ObjectAreaStart() + offset;
2016    ASSERT(top < top_page->ObjectAreaEnd());
2017    ASSERT(Contains(top));
2018
2019    return top;
2020  }
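
  // Offset sketch (illustrative count): with live_maps == 2 * kMapsPerPage + 3,
  // two full pages are kept, the third page becomes the top page, and the new
  // top is that page's ObjectAreaStart() + 3 * Map::kSize.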
2021
2022  void FinishCompaction(Address new_top, int live_maps) {
2023    Page* top_page = Page::FromAddress(new_top);
2024    ASSERT(top_page->is_valid());
2025
2026    SetAllocationInfo(&allocation_info_, top_page);
2027    allocation_info_.top = new_top;
2028
2029    int new_size = live_maps * Map::kSize;
2030    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
2031    accounting_stats_.AllocateBytes(new_size);
2032
2033#ifdef DEBUG
2034    if (FLAG_enable_slow_asserts) {
2035      intptr_t actual_size = 0;
2036      for (Page* p = first_page_; p != top_page; p = p->next_page())
2037        actual_size += kMapsPerPage * Map::kSize;
2038      actual_size += (new_top - top_page->ObjectAreaStart());
2039      ASSERT(accounting_stats_.Size() == actual_size);
2040    }
2041#endif
2042
2043    Shrink();
2044    ResetFreeList();
2045  }
2046
2047 protected:
2048#ifdef DEBUG
2049  virtual void VerifyObject(HeapObject* obj);
2050#endif
2051
2052 private:
2053  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
2054
2055  // Do map space compaction if there is a page gap.
2056  int CompactionThreshold() {
2057    return kMapsPerPage * (max_map_space_pages_ - 1);
2058  }
2059
2060  const int max_map_space_pages_;
2061
2062  // An array of page start addresses in the map space.
2063  Address page_addresses_[kMaxMapPageIndex];
2064
2065 public:
2066  TRACK_MEMORY("MapSpace")
2067};
2068
2069
2070// -----------------------------------------------------------------------------
2071// Old space for all global object property cell objects
2072
2073class CellSpace : public FixedSpace {
2074 public:
2075  // Creates a property cell space object with a maximum capacity.
2076  CellSpace(int max_capacity, AllocationSpace id)
2077      : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
2078
2079 protected:
2080#ifdef DEBUG
2081  virtual void VerifyObject(HeapObject* obj);
2082#endif
2083
2084 public:
2085  TRACK_MEMORY("CellSpace")
2086};
2087
2088
2089// -----------------------------------------------------------------------------
2090// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2091// the large object space. A large object is allocated from OS heap with
2092// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2093  // A large object always starts at Page::kObjectStartOffset within its page.
2094// Large objects do not move during garbage collections.
2095
2096// A LargeObjectChunk holds exactly one large object page with exactly one
2097// large object.
2098class LargeObjectChunk {
2099 public:
2100  // Allocates a new LargeObjectChunk that contains a large object page
2101  // (Page::kPageSize aligned) that provides at least size_in_bytes of usable
2102  // space for a large object after the object area start of that page.
2103  // The allocated chunk size is set in the output parameter chunk_size.
2104  static LargeObjectChunk* New(int size_in_bytes,
2105                               size_t* chunk_size,
2106                               Executability executable);
2107
2108  // Interpret a raw address as a large object chunk.
2109  static LargeObjectChunk* FromAddress(Address address) {
2110    return reinterpret_cast<LargeObjectChunk*>(address);
2111  }
2112
2113  // Returns the address of this chunk.
2114  Address address() { return reinterpret_cast<Address>(this); }
2115
2116  // Accessors for the fields of the chunk.
2117  LargeObjectChunk* next() { return next_; }
2118  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
2119
2120  size_t size() { return size_ & ~Page::kPageFlagMask; }
2121  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
2122
2123  // Returns the object in this chunk.
2124  inline HeapObject* GetObject();
2125
2126  // Given a requested size returns the physical size of a chunk to be
2127  // allocated.
2128  static int ChunkSizeFor(int size_in_bytes);
2129
2130  // Given a chunk size, returns the object size it can accommodate.  Used by
2131  // LargeObjectSpace::Available.
2132  static int ObjectSizeFor(int chunk_size) {
2133    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2134    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2135  }
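
  // Worked example (hypothetical numbers): with an 8 KB page size and a
  // 256-byte Page::kObjectStartOffset, a 32 KB chunk could hold an object of
  // up to ObjectSizeFor(32768) == 32768 - 8192 - 256 == 24320 bytes.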
2136
2137 private:
2138  // A pointer to the next large object chunk in the space or NULL.
2139  LargeObjectChunk* next_;
2140
2141  // The size of this chunk.
2142  size_t size_;
2143
2144 public:
2145  TRACK_MEMORY("LargeObjectChunk")
2146};
2147
2148
2149class LargeObjectSpace : public Space {
2150 public:
2151  explicit LargeObjectSpace(AllocationSpace id);
2152  virtual ~LargeObjectSpace() {}
2153
2154  // Initializes internal data structures.
2155  bool Setup();
2156
2157  // Releases internal resources, frees objects in this space.
2158  void TearDown();
2159
2160  // Allocates a (non-FixedArray, non-Code) large object.
2161  Object* AllocateRaw(int size_in_bytes);
2162  // Allocates a large Code object.
2163  Object* AllocateRawCode(int size_in_bytes);
2164  // Allocates a large FixedArray.
2165  Object* AllocateRawFixedArray(int size_in_bytes);
2166
2167  // Available bytes for objects in this space.
2168  int Available() {
2169    return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
2170  }
2171
2172  virtual int Size() {
2173    return size_;
2174  }
2175
2176  int PageCount() {
2177    return page_count_;
2178  }
2179
2180  // Finds an object for a given address, returns Failure::Exception()
2181  // if it is not found. The function iterates through all objects in this
2182  // space, so it may be slow.
2183  Object* FindObject(Address a);
2184
2185  // Finds a large object page containing the given pc, returns NULL
2186  // if such a page doesn't exist.
2187  LargeObjectChunk* FindChunkContainingPc(Address pc);
2188
2189
2190  // Iterates objects covered by dirty regions.
2191  void IterateDirtyRegions(ObjectSlotCallback func);
2192
2193  // Frees unmarked objects.
2194  void FreeUnmarkedObjects();
2195
2196  // Checks whether a heap object is in this space; O(1).
2197  bool Contains(HeapObject* obj);
2198
2199  // Checks whether the space is empty.
2200  bool IsEmpty() { return first_chunk_ == NULL; }
2201
2202  // See the comments for ReserveSpace in the Space class.  This has to be
2203  // called after ReserveSpace has been called on the paged spaces, since they
2204  // may use some memory, leaving less for large objects.
2205  virtual bool ReserveSpace(int bytes);
2206
2207#ifdef ENABLE_HEAP_PROTECTION
2208  // Protect/unprotect the space by marking it read-only/writable.
2209  void Protect();
2210  void Unprotect();
2211#endif
2212
2213#ifdef DEBUG
2214  virtual void Verify();
2215  virtual void Print();
2216  void ReportStatistics();
2217  void CollectCodeStatistics();
2218#endif
2219  // Checks whether an address is in the object area in this space.  It
2220  // iterates over all objects in the space, so it may be slow.
2221  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
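
  // Lookup sketch (illustrative; lo_space and addr are assumptions of the
  // sketch): FindObject() returns a Failure rather than NULL, so a
  // containment-style check reads
  //   Object* found = lo_space->FindObject(addr);
  //   if (!found->IsFailure()) {
  //     HeapObject* obj = HeapObject::cast(found);  // object covering 'addr'
  //   }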
2222
2223 private:
2224  // The head of the linked list of large object chunks.
2225  LargeObjectChunk* first_chunk_;
2226  int size_;  // allocated bytes
2227  int page_count_;  // number of chunks
2228
2229
2230  // Shared implementation of AllocateRaw, AllocateRawCode and
2231  // AllocateRawFixedArray.
2232  Object* AllocateRawInternal(int requested_size,
2233                              int object_size,
2234                              Executability executable);
2235
2236  friend class LargeObjectIterator;
2237
2238 public:
2239  TRACK_MEMORY("LargeObjectSpace")
2240};
2241
2242
2243class LargeObjectIterator: public ObjectIterator {
2244 public:
2245  explicit LargeObjectIterator(LargeObjectSpace* space);
2246  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2247
2248  HeapObject* next();
2249
2250  // implementation of ObjectIterator.
2251  virtual HeapObject* next_object() { return next(); }
2252
2253 private:
2254  LargeObjectChunk* current_;
2255  HeapObjectCallback size_func_;
2256};
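
// Usage sketch (illustrative; assumes an initialized large object space such
// as Heap::lo_space()):
//   LargeObjectIterator it(Heap::lo_space());
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... visit obj ...
//   }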
2257
2258
2259} }  // namespace v8::internal
2260
2261#endif  // V8_SPACES_H_
2262