spaces.h revision 3ef787dbeca8a5fb1086949cda830dccee07bfbd
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "allocation.h"
#include "hashmap.h"
#include "list.h"
#include "log.h"

namespace v8 {
namespace internal {

class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of lists of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of
// intergenerational references. See store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old pointer space or large object
// space it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old pointer and large object spaces should have
// a special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However, we
// still require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one
// page has a special garbage section, and if the top and limit are equal
// then there is no special garbage section.
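//
// Illustrative sketch (editor's addition, not part of the original header):
// linear allocation against the top and limit fields described above. Names
// are simplified; the real fast path is PagedSpace::AllocateRaw() further
// down in this file.
//
//   Address top = allocation_info.top;
//   if (top + size_in_bytes <= allocation_info.limit) {
//     allocation_info.top = top + size_in_bytes;  // bump-pointer hit
//     return HeapObject::FromAddress(top);
//   }
//   // Miss: fall back to the free list or trigger a GC.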
// Some assertion macros used in the debugging mode.

#define ASSERT_PAGE_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size) \
  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset) \
  ASSERT((Page::kObjectStartOffset <= offset) \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index) \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
class Space;
class FreeList;
class MemoryChunk;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask, bool data_only)
      : cell_(cell), mask_(mask), data_only_(data_only) { }

  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  inline bool data_only() { return data_only_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1, data_only_);
    } else {
      return MarkBit(cell_, new_mask, data_only_);
    }
  }

 private:
  CellType* cell_;
  CellType mask_;
  // This boolean indicates that the object is in a data-only space with no
  // pointers. This enables some optimizations when marking.
  // It is expected that this field is inlined and turned into control flow
  // at the place where the MarkBit object is created.
  bool data_only_;
};
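
// Illustrative example (editor's addition): Next() advances within the
// current 32-bit cell by shifting the mask; when the shift overflows to
// zero it moves to bit 0 of the following cell.
//
//   MarkBit::CellType cells[2] = {0, 0};
//   MarkBit bit(&cells[0], 0x80000000u, false);  // highest bit of cell 0
//   MarkBit next = bit.Next();  // cell() == &cells[1], mask() == 1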

// Bitmap is a sequence of cells, each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength =
      (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() {
    return CellsForLength(kLength);
  }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) {
    return reinterpret_cast<Address>(this);
  }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask, data_only);
  }

  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n",
               seq_start,
               seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) return false;
    }
    return true;
  }
};


class SkipList;
class SlotsBuffer;
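
// Worked example (editor's illustration): one mark bit covers one pointer-
// sized word, so a chunk of (1 << kPageSizeBits) bytes needs kLength bits
// and kSize bytes of bitmap. Assuming 1MB chunks (kPageSizeBits == 20) and
// 4-byte pointers (kPointerSizeLog2 == 2, kBitsPerByteLog2 == 3):
//
//   kLength == (1 << 20) >> 2 == 262144 bits (one per word)
//   kSize   == (1 << 20) >> (2 + 3) == 32768 bytes (32KB per chunk)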

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }

  // Only works for addresses in pointer spaces, not data or code spaces.
  static inline MemoryChunk* FromAnyPointerAddress(Address addr);

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  MemoryChunk* next_chunk() const { return next_chunk_; }
  MemoryChunk* prev_chunk() const { return prev_chunk_; }

  void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
        kFailureTag) {
      return reinterpret_cast<Space*>(owner_ - kFailureTag);
    } else {
      return NULL;
    }
  }

  void set_owner(Space* space) {
    ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kFailureTag;
    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
           kFailureTag);
  }
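
  // Editor's note (illustrative, based only on the code above): owner_
  // stores the Space* with kFailureTag added, so this header word can be
  // told apart from an ordinary object pointer. Round trip:
  //
  //   chunk->set_owner(space);    // owner_ == space + kFailureTag
  //   Space* s = chunk->owner();  // tag checked and stripped -> space
  //   // An untagged word makes owner() return NULL instead.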

  VirtualMemory* reserved_memory() {
    return &reservation_;
  }

  void InitializeReservedMemory() {
    reservation_.Reset();
  }

  void set_reserved_memory(VirtualMemory* reservation) {
    ASSERT_NOT_NULL(reservation);
    reservation_.TakeControl(reservation);
  }

  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
  void initialize_scan_on_scavenge(bool scan) {
    if (scan) {
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
  inline void set_scan_on_scavenge(bool scan);

  int store_buffer_counter() { return store_buffer_counter_; }
  void set_store_buffer_counter(int counter) {
    store_buffer_counter_ = counter;
  }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether addr can be a limit of addresses in this page. It's a
  // limit if it's in the page, or if it's just after the last byte of the
  // page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    CONTAINS_ONLY_DATA,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,

    // Pages swept precisely can be iterated, hitting only the live objects,
    // whereas those swept conservatively cannot be iterated over. Both flags
    // indicate that marking bits have been cleared by the sweeper; otherwise
    // marking bits are still intact.
    WAS_SWEPT_PRECISELY,
    WAS_SWEPT_CONSERVATIVELY,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };


  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask =
      1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) |
      (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) |
      (1 << IN_TO_SPACE);


  void SetFlag(int flag) {
    flags_ |= static_cast<uintptr_t>(1) << flag;
  }

  void ClearFlag(int flag) {
    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
  }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in "flags", the rest retain the current value in flags_.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }

  // Manage live byte count (count of bytes known to be live,
  // because they are marked black).
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n",
             static_cast<void*>(this), live_byte_count_);
    }
    live_byte_count_ = 0;
  }
  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
             static_cast<void*>(this), live_byte_count_,
             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
  }
  int LiveBytes() {
    ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
    return live_byte_count_;
  }

  static void IncrementLiveBytesFromGC(Address address, int by) {
    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
  }

  static void IncrementLiveBytesFromMutator(Address address, int by);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = kPointerSize + kPointerSize;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
      kPointerSize + kPointerSize +
      kPointerSize + kPointerSize + kPointerSize + kIntSize;

  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;

  static const size_t kHeaderSize =
      kSlotsBufferOffset + kPointerSize + kPointerSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32-bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset = kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
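
  // Worked example (editor's illustration): the expression above rounds
  // kBodyOffset up to the next multiple of kObjectStartAlignment, leaving
  // it unchanged if already aligned. With kObjectStartAlignment == 128
  // (32 * 4-byte pointers) and a hypothetical kBodyOffset of 300:
  //
  //   299 + (128 - 299 % 128) == 299 + 85 == 384 == 3 * 128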

  size_t size() const { return size_; }

  void set_size(size_t size) {
    size_ = size;
  }

  void SetArea(Address area_start, Address area_end) {
    area_start_ = area_start;
    area_end_ = area_end;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool ContainsOnlyData() {
    return IsFlagSet(CONTAINS_ONLY_DATA);
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() {
    return IsFlagSet(IN_TO_SPACE);
  }

  bool InFromSpace() {
    return IsFlagSet(IN_FROM_SPACE);
  }

  // ---------------------------------------------------------------------
  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset =
        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;

    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

  inline Heap* heap() { return heap_; }

  static const int kFlagsOffset = kPointerSize * 3;

  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() {
    return skip_list_;
  }

  inline void set_skip_list(SkipList* skip_list) {
    skip_list_ = skip_list;
  }

  inline SlotsBuffer* slots_buffer() {
    return slots_buffer_;
  }

  inline SlotsBuffer** slots_buffer_address() {
    return &slots_buffer_;
  }

  void MarkEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() {
    return static_cast<int>(area_end() - area_start());
  }

 protected:
  MemoryChunk* next_chunk_;
  MemoryChunk* prev_chunk_;
  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;
  // The identity of the owning space. This is tagged as a failure pointer,
  // but no failure can be in an object, so this can be distinguished from
  // any entry in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark
  // scan-on-scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;

  static MemoryChunk* Initialize(Heap* heap,
                                 Address base,
                                 size_t size,
                                 Address area_start,
                                 Address area_end,
                                 Executability executable,
                                 Space* owner);

  friend class MemoryAllocator;
};

STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
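
// Illustrative usage (editor's sketch, using only interfaces declared
// above): find and set the mark bit for a heap address.
//
//   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
//   uint32_t index = chunk->AddressToMarkbitIndex(addr);
//   MarkBit mark_bit = chunk->markbits()->MarkBitFromIndex(index);
//   mark_bit.Set();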

// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[. This only works if the
  // object is in fact in a page. See also MemoryChunk::FromAddress() and
  // MemoryChunk::FromAnyPointerAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we subtract kPointerSize
  // from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }

  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page();
  inline Page* prev_page();
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address relative to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Object area size in bytes.
  static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
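
  // Worked example (editor's illustration, with 1MB pages): FromAddress()
  // masks off the low kPageSizeBits bits, so any interior address maps to
  // the page start; FromAllocationTop() first subtracts kPointerSize, so an
  // allocation top equal to page_start + kPageSize still maps to this page.
  //
  //   Page* p = Page::FromAddress(page_start + 0x12345);
  //   Page* q = Page::FromAllocationTop(page_start + kPageSize);
  //   // p == q: both name the page beginning at page_start.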

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap,
                                 MemoryChunk* chunk,
                                 Executability executable,
                                 PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }

  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }

  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }

#ifdef DEBUG
  void Print();
#endif  // DEBUG

  friend class MemoryAllocator;
};


STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);


class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() {
    return HeapObject::FromAddress(area_start());
  }

  inline LargePage* next_page() const {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) {
    set_next_chunk(page);
  }

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};

STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an
  // AlwaysAllocateScope) without using freelists or causing a GC. This is
  // used by partial snapshots. It returns true if space was reserved or
  // false if a GC is needed. For paged spaces the space requested must
  // include the space wasted at the end of each page when allocating
  // linearly.
  virtual bool ReserveSpace(int bytes) = 0;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};

// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be
// allocated from a 2 GB range of memory, so that they can call each other
// using 32-bit displacements. This happens automatically on 32-bit
// platforms, where 32-bit displacements cover the entire 4GB virtual address
// space. On 64-bit platforms, we support this using the CodeRange object,
// which reserves and manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  bool exists() { return this != NULL && code_range_ != NULL; }
  bool contains(Address address) {
    if (this == NULL || code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
                                            size_t* allocated);
  void FreeRawMemory(Address buf, size_t length);

 private:
  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};


class SkipList {
 public:
  SkipList() {
    Clear();
  }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) {
    return starts_[RegionNumber(addr)];
  }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};
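
// Worked example (editor's illustration): with kRegionSizeLog2 == 13 each
// region covers 8KB, so a 1MB page has kSize == 128 region slots. An object
// at page offset 0x5FF0 with size 0x40 spans regions 2 and 3
// (0x5FF0 >> 13 == 2, (0x5FF0 + 0x40 - kPointerSize) >> 13 == 3, assuming
// 4-byte pointers), so AddObject() may lower both region start slots.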

// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and
// large pages for large object space.
//
// Each space has to manage its own pages.
//
class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  void TearDown();

  Page* AllocatePage(PagedSpace* owner, Executability executable);

  LargePage* AllocateLargePage(intptr_t object_size,
                               Executability executable,
                               Space* owner);

  void Free(MemoryChunk* chunk);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated spaces in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable spaces in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
  }

#ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
#endif

  MemoryChunk* AllocateChunk(intptr_t body_size,
                             Executability executable,
                             Space* space);

  Address ReserveAlignedMemory(size_t requested,
                               size_t alignment,
                               VirtualMemory* controller);
  Address AllocateAlignedMemory(size_t requested,
                                size_t alignment,
                                Executability executable,
                                VirtualMemory* controller);

  void FreeMemory(VirtualMemory* reservation, Executability executable);
  void FreeMemory(Address addr, size_t size, Executability executable);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[, thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  void PerformAllocationCallback(ObjectSpace space,
                                 AllocationAction action,
                                 size_t size);

  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                   ObjectSpace space,
                                   AllocationAction action);

  void RemoveMemoryAllocationCallback(
      MemoryAllocationCallback callback);

  bool MemoryAllocationCallbackRegistered(
      MemoryAllocationCallback callback);

  static int CodePageGuardStartOffset();

  static int CodePageGuardSize();

  static int CodePageAreaStartOffset();

  static int CodePageAreaEndOffset();

  static int CodePageAreaSize() {
    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
  }

  static bool CommitCodePage(VirtualMemory* vm, Address start, size_t size);

 private:
  Isolate* isolate_;

  // Maximum space size in bytes.
  size_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  size_t capacity_executable_;

  // Allocated space size in bytes.
  size_t size_;
  // Allocated executable space size in bytes.
  size_t size_executable_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };

  // A list of callbacks that are triggered when memory is allocated or freed.
  List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own next()
// method, which is used to avoid virtual function calls when iterating a
// specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from the bottom of the given space
// to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns NULL when the iteration has ended.
  inline HeapObject* Next() {
    do {
      HeapObject* next_obj = FromCurrentPage();
      if (next_obj != NULL) return next_obj;
    } while (AdvanceToNextPage());
    return NULL;
  }

  virtual HeapObject* next_object() {
    return Next();
  }

 private:
  enum PageMode { kOnePageOnly, kAllPagesInSpace };

  Address cur_addr_;              // Current iteration point.
  Address cur_end_;               // End iteration point.
  HeapObjectCallback size_func_;  // Size function or NULL.
  PagedSpace* space_;
  PageMode page_mode_;

  // Fast (inlined) path of next().
  inline HeapObject* FromCurrentPage();

  // Slow path of next(), goes into the next page. Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  // Initializes fields.
  inline void Initialize(PagedSpace* owner,
                         Address start,
                         Address end,
                         PageMode mode,
                         HeapObjectCallback size_func);
};


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.

class PageIterator BASE_EMBEDDED {
 public:
  explicit inline PageIterator(PagedSpace* space);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  Page* next_page_;
};
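
// Illustrative usage (editor's sketch, using only the iterators declared
// above; Visit() is hypothetical):
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     Visit(obj);
//   }
//
//   PageIterator pages(space);
//   while (pages.has_next()) {
//     Page* page = pages.next();
//   }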

// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// the Page::next_page() call.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  AllocationInfo() : top(NULL), limit(NULL) {
  }

  Address top;    // Current allocation top.
  Address limit;  // Current allocation limit.

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g. small blocks due
// to internal fragmentation, top of page areas in map space), and the bytes
// 'available' is the number of unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  void ClearSizeWaste() {
    size_ = capacity_;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t Size() { return size_; }
  intptr_t Waste() { return waste_; }

  // Grow the space by adding available bytes. They are initially marked as
  // being in use (part of the size), but will normally be immediately freed,
  // putting them on the free list and removing them from size_.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    size_ += size_in_bytes;
    ASSERT(size_ >= 0);
  }

  // Shrink the space by removing available bytes. Since shrinking is done
  // during sweeping, bytes have been marked as being in use (part of the
  // size) and are hereby freed.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    size_ -= size_in_bytes;
    ASSERT(size_ >= 0);
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    size_ += size_in_bytes;
    ASSERT(size_ >= 0);
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
    ASSERT(size_ >= 0);
  }
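
  // Worked example (editor's illustration): the stats keep the invariant
  // capacity == size + waste + available. Growing the space by a 16KB page
  // area that is immediately freed onto the free list:
  //
  //   ExpandSpace(16384);      // capacity += 16384, size += 16384
  //   DeallocateBytes(16384);  // size -= 16384 -> the 16KB is available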
  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    size_ -= size_in_bytes;
    waste_ += size_in_bytes;
    ASSERT(size_ >= 0);
  }

 private:
  intptr_t capacity_;
  intptr_t size_;
  intptr_t waste_;
};


// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap. They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object). They have a size and a next pointer. The next pointer is
// the raw address of the next free list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address. This is not a cast because
  // it does not check nor require that the first word at the address is a
  // map pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  static inline bool IsFreeListNode(HeapObject* object);

  // Set the size in bytes, which can be read with HeapObject::Size(). This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(Heap* heap, int size_in_bytes);

  // Accessors for the next field.
  inline FreeListNode* next();
  inline FreeListNode** next_address();
  inline void set_next(FreeListNode* next);

  inline void Zap();

 private:
  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};


// The free list for the old space. The free list is organized in such a way
// as to encourage objects allocated around the same time to be near each
// other. The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer. When the limit is hit we need to
// find a new space to allocate from. This is done with the free list, which
// is divided up into rough categories to cut down on waste. Having finer
// categories would scatter allocation more.

// The old space free list is organized in categories.
// 1-31 words: Such small free areas are discarded for efficiency reasons.
//     They can be reclaimed by the compactor. However the distance between
//     top and limit may be this small.
// 32-255 words: There is a list of spaces this large. It is used for top and
//     limit when the object we need to allocate is 1-31 words in size. These
//     spaces are called small.
// 256-2047 words: There is a list of spaces this large. It is used for top
//     and limit when the object we need to allocate is 32-255 words in size.
//     These spaces are called medium.
// 2048-16383 words: There is a list of spaces this large. It is used for top
//     and limit when the object we need to allocate is 256-2047 words in
//     size. These spaces are called large.
// At least 16384 words: This list is for objects of 2048 words or larger.
//     Empty pages are added to this list. These spaces are called huge.
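//
// Worked example (editor's illustration): an allocation request of 100
// words falls in the 32-255 word range, so top and limit are set from a
// free space on the medium list (spaces of 256-2047 words); a request of
// 5000 words (>= 2048) is served from the huge list. This matches
// kSmallAllocationMax, kMediumAllocationMax and kLargeAllocationMax in
// FreeList below.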
class FreeList BASE_EMBEDDED {
 public:
  explicit FreeList(PagedSpace* owner);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  intptr_t available() { return available_; }

  // Place a node on the free list. The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list. The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block. Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed. The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // Allocate a block of size 'size_in_bytes' from the free list. The block
  // is uninitialized. A failure is returned if no block is available. The
  // size should be a non-zero multiple of the word size.
  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);

#ifdef DEBUG
  void Zap();
  static intptr_t SumFreeList(FreeListNode* node);
  static int FreeListLength(FreeListNode* cur);
  intptr_t SumFreeLists();
  bool IsVeryLong();
#endif

  struct SizeStats {
    intptr_t Total() {
      return small_size_ + medium_size_ + large_size_ + huge_size_;
    }

    intptr_t small_size_;
    intptr_t medium_size_;
    intptr_t large_size_;
    intptr_t huge_size_;
  };

  void CountFreeListItems(Page* p, SizeStats* sizes);

  intptr_t EvictFreeListItems(Page* p);

 private:
  // The size range of blocks, in bytes.
  static const int kMinBlockSize = 3 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;

  FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);

  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);

  PagedSpace* owner_;
  Heap* heap_;

  // Total available bytes in all blocks on this free list.
  int available_;

  static const int kSmallListMin = 0x20 * kPointerSize;
  static const int kSmallListMax = 0xff * kPointerSize;
  static const int kMediumListMax = 0x7ff * kPointerSize;
  static const int kLargeListMax = 0x3fff * kPointerSize;
  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
  static const int kMediumAllocationMax = kSmallListMax;
  static const int kLargeAllocationMax = kMediumListMax;
  FreeListNode* small_list_;
  FreeListNode* medium_list_;
  FreeListNode* large_list_;
  FreeListNode* huge_list_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
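
// Illustrative usage (editor's sketch): returning a block to the free list
// and allocating from it again. 'start' and the sizes are hypothetical.
//
//   int wasted = free_list->Free(start, 256 * kPointerSize);
//   // The block is now a FreeListNode on the medium list (256 words).
//   HeapObject* obj = free_list->Allocate(100 * kPointerSize);
//   // A 100-word request is within kMediumAllocationMax, so the medium
//   // list is a candidate; the call fails if nothing suitable is found.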

class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(Heap* heap,
             intptr_t max_capacity,
             AllocationSpace id,
             Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool SetUp();

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetUp();

  // Cleans up the space, frees all pages in this space except those
  // belonging to the initial chunk, uncommits addresses in the initial
  // chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address; the cost is
  // linear in the number of objects in the page. It may be slow.
  MUST_USE_RESULT MaybeObject* FindObject(Address addr);

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  intptr_t Capacity() { return accounting_stats_.Capacity(); }

  // Total amount of memory committed for this space. For paged
  // spaces this equals the capacity.
  intptr_t CommittedMemory() { return Capacity(); }

  // Sets the capacity, the available space and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the
  // capacity and the size when it is encountered. As free spaces are
  // discovered during the sweeping they are subtracted from the size and
  // added to the available and wasted totals.
  void ClearStats() {
    accounting_stats_.ClearSizeWaste();
  }

  // Available bytes without growing. These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation. New pages are
  // immediately added to the free list so they show up here.
  intptr_t Available() { return free_list_.available(); }

  // Allocated bytes in this space. Garbage bytes that were not found due to
  // lazy sweeping are counted as being allocated! The bytes in the current
  // linear allocation area (between top and limit) are also counted here.
  virtual intptr_t Size() { return accounting_stats_.Size(); }

  // As Size(), but the bytes in lazily swept pages are estimated and the
  // bytes in the current linear allocation area are not included.
  virtual intptr_t SizeOfObjects() {
    ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
    return Size() - unswept_free_bytes_ - (limit() - top());
  }

  // Wasted bytes in this space. These are just the bytes that were thrown
  // away due to being too small to use for allocation. They do not include
  // the free bytes that were not found at all due to lazy sweeping.
  virtual intptr_t Waste() { return accounting_stats_.Waste(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top; }
  Address limit() { return allocation_info_.limit; }

  // Allocate the requested number of bytes in the space if possible, return
  // a failure object if not.
  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);

  virtual bool ReserveSpace(int bytes);

  // Give a block of memory to the space's free list. It might be added to
  // the free list or accounted as waste.
  int Free(Address start, int size_in_bytes) {
    int wasted = free_list_.Free(start, size_in_bytes);
    accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
    return size_in_bytes - wasted;
  }
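
  // Worked example (editor's illustration): freeing a 4096-byte block where
  // free_list_.Free() reports 32 bytes as too small to use:
  //
  //   int freed = space->Free(start, 4096);  // 'start' is hypothetical
  //   // wasted == 32, so 4064 bytes are accounted as deallocated and
  //   // Free() returns 4064.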

  // Set space allocation info.
  void SetTop(Address top, Address limit) {
    ASSERT(top == limit ||
           Page::FromAddress(top) == Page::FromAddress(limit - 1));
    allocation_info_.top = top;
    allocation_info_.limit = limit;
  }

  void Allocate(int bytes) {
    accounting_stats_.AllocateBytes(bytes);
  }

  void IncreaseCapacity(int size) {
    accounting_stats_.ExpandSpace(size);
  }

  // Releases an unused page and shrinks the space.
  void ReleasePage(Page* page);

  // Releases all of the unused pages.
  void ReleaseAllUnusedPages();

  // The dummy page that anchors the linked list of pages.
  Page* anchor() { return &anchor_; }

#ifdef DEBUG
  // Print meta info and objects in this space.
  virtual void Print();

  // Verify integrity of this space.
  virtual void Verify(ObjectVisitor* visitor);

  // Reports statistics for the space.
  void ReportStatistics();

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}

  // Report code object related statistics.
  void CollectCodeStatistics();
  static void ReportCodeStatistics();
  static void ResetCodeStatistics();
#endif

  bool was_swept_conservatively() { return was_swept_conservatively_; }
  void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }

  // Evacuation candidates are swept by the evacuator. Needs to return a
  // valid result before _and_ after evacuation has finished.
  static bool ShouldBeSweptLazily(Page* p) {
    return !p->IsEvacuationCandidate() &&
           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
           !p->WasSweptPrecisely();
  }

  void SetPagesToSweep(Page* first) {
    ASSERT(unswept_free_bytes_ == 0);
    if (first == &anchor_) first = NULL;
    first_unswept_page_ = first;
  }

  void IncrementUnsweptFreeBytes(int by) {
    unswept_free_bytes_ += by;
  }

  void IncreaseUnsweptFreeBytes(Page* p) {
    ASSERT(ShouldBeSweptLazily(p));
    unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
  }

  void DecreaseUnsweptFreeBytes(Page* p) {
    ASSERT(ShouldBeSweptLazily(p));
    unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
  }

  bool AdvanceSweeper(intptr_t bytes_to_sweep);

  bool IsSweepingComplete() {
    return !first_unswept_page_->is_valid();
  }

  Page* FirstPage() { return anchor_.next_page(); }
  Page* LastPage() { return anchor_.prev_page(); }

  void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
    free_list_.CountFreeListItems(p, sizes);
  }

  void EvictEvacuationCandidatesFromFreeLists();

  bool CanExpand();

  // Returns the number of total pages in this space.
  int CountTotalPages();

  // Return size of allocatable area on a page in this space.
  inline int AreaSize() {
    return area_size_;
  }

 protected:
  int area_size_;

  // Maximum capacity of this space.
  intptr_t max_capacity_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The dummy page that anchors the double linked list of pages.
  Page anchor_;

  // The space's free list.
  FreeList free_list_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // Bytes of each page that cannot be allocated. Possibly non-zero
  // for pages in spaces with only fixed-size objects. Always zero
  // for pages in spaces with variable sized objects (those pages are
  // padded with free-list nodes).
  int page_extra_;

  bool was_swept_conservatively_;

  // The first page to be swept when the lazy sweeper advances. Set
  // to NULL when all pages have been swept.
  Page* first_unswept_page_;

  // The number of free bytes which could be reclaimed by advancing the
  // lazy sweeper. This is only an estimation because lazy sweeping is
  // done conservatively.
  intptr_t unswept_free_bytes_;

  // Expands the space by allocating a fixed number of pages. Returns false
  // if it cannot allocate the requested number of pages from the OS, or if
  // the hard heap size limit has been hit.
  bool Expand();

  // Generic fast case allocation function that tries linear allocation at
  // the address denoted by top in allocation_info_.
  inline HeapObject* AllocateLinearly(int size_in_bytes);

  // Slow path of AllocateRaw. This function is space-dependent.
  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);

  friend class PageIterator;
};
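
// Worked example (editor's illustration) for SizeOfObjects() above: with
// Size() == 10MB accounted, unswept_free_bytes_ == 1MB estimated free on
// lazily swept pages, and a 16KB linear allocation area (limit() - top()),
// SizeOfObjects() reports 10MB - 1MB - 16KB of live object bytes.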

class NumberAndSizeInfo BASE_EMBEDDED {
 public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}

  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  int number_;
  int bytes_;
};


// HistogramInfo class for recording a single "bar" of a histogram. This
// class is used for collecting statistics to print to the log file.
class HistogramInfo: public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo() {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  const char* name_;
};


enum SemiSpaceId {
  kFromSpace = 0,
  kToSpace = 1
};


class SemiSpace;
  static const int kAreaSize = Page::kNonCodeObjectAreaSize;

  inline NewSpacePage* next_page() const {
    return static_cast<NewSpacePage*>(next_chunk());
  }

  inline void set_next_page(NewSpacePage* page) {
    set_next_chunk(page);
  }

  inline NewSpacePage* prev_page() const {
    return static_cast<NewSpacePage*>(prev_chunk());
  }

  inline void set_prev_page(NewSpacePage* page) {
    set_prev_chunk(page);
  }

  SemiSpace* semi_space() {
    return reinterpret_cast<SemiSpace*>(owner());
  }

  bool is_anchor() { return !this->InNewSpace(); }

  static bool IsAtStart(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
        == kObjectStartOffset;
  }

  static bool IsAtEnd(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
  }

  Address address() {
    return reinterpret_cast<Address>(this);
  }

  // Finds the NewSpacePage containing the given address.
  static inline NewSpacePage* FromAddress(Address address_in_page) {
    Address page_start = reinterpret_cast<Address>(
        reinterpret_cast<uintptr_t>(address_in_page) &
        ~Page::kPageAlignmentMask);
    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
    return page;
  }

  // Find the page for a limit address. A limit address is either an address
  // inside a page, or the address right after the last byte of a page.
  static inline NewSpacePage* FromLimit(Address address_limit) {
    return NewSpacePage::FromAddress(address_limit - 1);
  }

 private:
  // Create a NewSpacePage object that is only used as anchor
  // for the doubly-linked list of real pages.
  explicit NewSpacePage(SemiSpace* owner) {
    InitializeAsAnchor(owner);
  }

  static NewSpacePage* Initialize(Heap* heap,
                                  Address start,
                                  SemiSpace* semi_space);

  // Initialize a fake NewSpacePage used as sentinel at the ends
  // of a doubly-linked list of real NewSpacePages.
  // Only uses the prev/next links, and sets flags to not be in new-space.
  void InitializeAsAnchor(SemiSpace* owner);

  friend class SemiSpace;
  friend class SemiSpaceIterator;
};
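// The address computations in NewSpacePage rely on pages being allocated at
// page-aligned addresses, so the low bits of any interior address encode the
// offset within the page. A worked example (illustrative numbers, assuming
// 1MB pages and kObjectStartOffset == 0x100):
//
//   addr         = 0x40321080
//   addr & ~mask = 0x40300000   // FromAddress: start of the page.
//   addr & mask  = 0x00021080   // Offset within the page.
//   IsAtStart(a) => offset == kObjectStartOffset (first object slot).
//   IsAtEnd(a)   => offset == 0 (one past the previous page's last byte).
//
// FromLimit(a) maps a limit address back to its page by inspecting a - 1,
// so a limit sitting exactly on a page boundary resolves to the page it
// terminates rather than the page it begins.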
// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A semispace is a contiguous chunk of memory holding page-like memory
// chunks. The mark-compact collector uses the memory of the first page in
// the from space as a marking stack when tracing live objects.

class SemiSpace : public Space {
 public:
  // Constructor.
  SemiSpace(Heap* heap, SemiSpaceId semispace)
      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
        start_(NULL),
        age_mark_(NULL),
        id_(semispace),
        anchor_(this),
        current_page_(NULL) { }

  // Sets up the semispace using the given chunk.
  void SetUp(Address start, int initial_capacity, int maximum_capacity);

  // Tear down the space. Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetUp() { return start_ != NULL; }

  // Grow the semispace to the new capacity. The new capacity
  // requested must be larger than the current capacity and less than
  // the maximum capacity.
  bool GrowTo(int new_capacity);

  // Shrinks the semispace to the new capacity. The new capacity
  // requested must be more than the amount of used memory in the
  // semispace and less than the current capacity.
  bool ShrinkTo(int new_capacity);

  // Returns the start address of the first page of the space.
  Address space_start() {
    ASSERT(anchor_.next_page() != &anchor_);
    return anchor_.next_page()->area_start();
  }

  // Returns the start address of the current page of the space.
  Address page_low() {
    return current_page_->area_start();
  }

  // Returns one past the end address of the space.
  Address space_end() {
    return anchor_.prev_page()->area_end();
  }

  // Returns one past the end address of the current page of the space.
  Address page_high() {
    return current_page_->area_end();
  }

  bool AdvancePage() {
    NewSpacePage* next_page = current_page_->next_page();
    if (next_page == anchor()) return false;
    current_page_ = next_page;
    return true;
  }

  // Resets the space to using the first page.
  void Reset();

  // Age mark accessors.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark);

  // True if the address is in the address range of this semispace (not
  // necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
        == reinterpret_cast<uintptr_t>(start_);
  }

  // True if the object is a heap object in the address range of this
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // If we didn't have these here then SemiSpace would be abstract. However,
  // they should never be called.
  virtual intptr_t Size() {
    UNREACHABLE();
    return 0;
  }

  virtual bool ReserveSpace(int bytes) {
    UNREACHABLE();
    return false;
  }

  bool is_committed() { return committed_; }
  bool Commit();
  bool Uncommit();

  NewSpacePage* first_page() { return anchor_.next_page(); }
  NewSpacePage* current_page() { return current_page_; }

#ifdef DEBUG
  virtual void Print();
  virtual void Verify();
  // Validate a range of addresses in a SemiSpace.
  // The "from" address must be on a page prior to the "to" address,
  // in the linked page order, or it must be earlier on the same page.
  static void AssertValidRange(Address from, Address to);
#else
  // Do nothing.
  inline static void AssertValidRange(Address from, Address to) {}
#endif

  // Returns the current capacity of the semispace.
  int Capacity() { return capacity_; }

  // Returns the maximum capacity of the semispace.
  int MaximumCapacity() { return maximum_capacity_; }

  // Returns the initial capacity of the semispace.
  int InitialCapacity() { return initial_capacity_; }

  SemiSpaceId id() { return id_; }

  static void Swap(SemiSpace* from, SemiSpace* to);
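  // Swap() exchanges the roles of the two semispaces when NewSpace::Flip()
  // is called, with kCopyOnFlipFlagsMask governing which page flags follow
  // the role change. Conceptually (a sketch only, not the actual
  // implementation):
  //
  //   SemiSpace tmp = *from;  // Exchange page lists, start_, masks, id_...
  //   *from = *to;
  //   *to = tmp;
  //   // ...then re-stamp the masked GC flags on every page of each space.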
 private:
  // Flips the semispace between being from-space and to-space.
  // Copies the flags into the masked positions on all pages in the space.
  void FlipPages(intptr_t flags, intptr_t flag_mask);

  NewSpacePage* anchor() { return &anchor_; }

  // The current and maximum capacity of the space.
  int capacity_;
  int maximum_capacity_;
  int initial_capacity_;

  // The start address of the space.
  Address start_;
  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  // Masks and comparison values to test for containment in this semispace.
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  bool committed_;
  SemiSpaceId id_;

  NewSpacePage anchor_;
  NewSpacePage* current_page_;

  friend class SemiSpaceIterator;
  friend class NewSpacePageIterator;
 public:
  TRACK_MEMORY("SemiSpace")
};


// A SemiSpaceIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space. It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace. New objects allocated after the
// iterator is created are not iterated.
class SemiSpaceIterator : public ObjectIterator {
 public:
  // Create an iterator over the objects in the given space. If no start
  // address is given, the iterator starts from the bottom of the space. If
  // no size function is given, the iterator calls Object::Size().

  // Iterate over all of the allocated to-space.
  explicit SemiSpaceIterator(NewSpace* space);
  // Iterate over all of the allocated to-space, with a custom size function.
  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
  // Iterate over part of the allocated to-space, from start to the end
  // of allocation.
  SemiSpaceIterator(NewSpace* space, Address start);
  // Iterate from one address to another in the same semi-space.
  SemiSpaceIterator(Address from, Address to);

  HeapObject* Next() {
    if (current_ == limit_) return NULL;
    if (NewSpacePage::IsAtEnd(current_)) {
      NewSpacePage* page = NewSpacePage::FromLimit(current_);
      page = page->next_page();
      ASSERT(!page->is_anchor());
      current_ = page->area_start();
      if (current_ == limit_) return NULL;
    }

    HeapObject* object = HeapObject::FromAddress(current_);
    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);

    current_ += size;
    return object;
  }

  // Implementation of the ObjectIterator functions.
  virtual HeapObject* next_object() { return Next(); }

 private:
  void Initialize(Address start,
                  Address end,
                  HeapObjectCallback size_func);

  // The current iteration point.
  Address current_;
  // The end of iteration.
  Address limit_;
  // The callback function.
  HeapObjectCallback size_func_;
};
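// Typical usage of SemiSpaceIterator (an illustrative sketch; objects are
// laid out contiguously, so Next() walks from one object header to the next
// by adding each object's size):
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // Process obj. No allocation must happen during the walk.
//   }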
// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a semi-space.
class NewSpacePageIterator BASE_EMBEDDED {
 public:
  // Make an iterator that runs over all pages in to-space.
  explicit inline NewSpacePageIterator(NewSpace* space);

  // Make an iterator that runs over all pages in the given semispace,
  // even those not used in allocation.
  explicit inline NewSpacePageIterator(SemiSpace* space);

  // Make an iterator that iterates from the page containing start
  // to the page that contains limit in the same semispace.
  inline NewSpacePageIterator(Address start, Address limit);

  inline bool has_next();
  inline NewSpacePage* next();

 private:
  NewSpacePage* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  NewSpacePage* next_page_;
  // Last page returned.
  NewSpacePage* last_page_;
};


// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.

class NewSpace : public Space {
 public:
  // Constructor.
  explicit NewSpace(Heap* heap)
      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
        to_space_(heap, kToSpace),
        from_space_(heap, kFromSpace),
        reservation_(),
        inline_allocation_limit_step_(0) {}

  // Sets up the new space using the given chunk.
  bool SetUp(int reserved_semispace_size_, int max_semispace_size);

  // Tears down the space. Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetUp() {
    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
  }

  // Flip the pair of spaces.
  void Flip();

  // Grow the capacity of the semispaces. Assumes that they are not at
  // their maximum capacity.
  void Grow();

  // Shrink the capacity of the semispaces.
  void Shrink();

  // True if the address or object lies in the address range of either
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
        == reinterpret_cast<uintptr_t>(start_);
  }

  bool Contains(Object* o) {
    Address a = reinterpret_cast<Address>(o);
    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
  }

  // Return the allocated bytes in the active semispace.
  virtual intptr_t Size() {
    return pages_used_ * NewSpacePage::kAreaSize +
        static_cast<int>(top() - to_space_.page_low());
  }

  // The same, but returning an int. We have to have the version that returns
  // intptr_t because it is inherited, but if we know we are dealing with the
  // new space, which cannot get as big as the other spaces, then this is
  // useful:
  int SizeAsInt() { return static_cast<int>(Size()); }

  // Return the current effective (allocatable) capacity of a semispace,
  // i.e. excluding per-page header overhead.
  intptr_t EffectiveCapacity() {
    SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
    return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
  }

  // Return the current capacity of a semispace.
  intptr_t Capacity() {
    ASSERT(to_space_.Capacity() == from_space_.Capacity());
    return to_space_.Capacity();
  }

  // Return the total amount of memory committed for new space.
  intptr_t CommittedMemory() {
    if (from_space_.is_committed()) return 2 * Capacity();
    return Capacity();
  }
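  // A worked example of the size accounting above (illustrative numbers):
  // with a semispace of eight pages, EffectiveCapacity() is
  // 8 * NewSpacePage::kAreaSize, i.e. the raw Capacity() of
  // 8 * Page::kPageSize minus every page's header overhead. Available()
  // below is then simply
  //
  //   Available() == Capacity() - Size()
  //
  // where Size() counts the fully used pages plus the bump-pointer offset
  // on the current page.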
  // Return the available bytes without growing.
  intptr_t Available() {
    return Capacity() - Size();
  }

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() {
    ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
    return to_space_.MaximumCapacity();
  }

  // Returns the initial capacity of a semispace.
  int InitialCapacity() {
    ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
    return to_space_.InitialCapacity();
  }

  // Return the address of the allocation pointer in the active semispace.
  Address top() {
    ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
    return allocation_info_.top;
  }
  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.space_start(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The start address of the space and a bit mask. ANDing an address in the
  // new space with the mask will result in the start address.
  Address start() { return start_; }
  uintptr_t mask() { return address_mask_; }

  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
    ASSERT(Contains(addr));
    ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
           IsAligned(OffsetFrom(addr) - 1, kPointerSize));
    return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
  }

  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
    return reinterpret_cast<Address>(index << kPointerSizeLog2);
  }

  // The allocation top and limit addresses.
  Address* allocation_top_address() { return &allocation_info_.top; }
  Address* allocation_limit_address() { return &allocation_info_.limit; }

  MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();

  void LowerInlineAllocationLimit(intptr_t step) {
    inline_allocation_limit_step_ = step;
    if (step == 0) {
      allocation_info_.limit = to_space_.page_high();
    } else {
      allocation_info_.limit = Min(
          allocation_info_.top + inline_allocation_limit_step_,
          allocation_info_.limit);
    }
    top_on_previous_step_ = allocation_info_.top;
  }

  // Get the extent of the inactive semispace (for use as a marking stack,
  // or to zap it). Note: space addresses are not necessarily on the
  // same page, so FromSpaceStart() might be above FromSpaceEnd().
  Address FromSpacePageLow() { return from_space_.page_low(); }
  Address FromSpacePageHigh() { return from_space_.page_high(); }
  Address FromSpaceStart() { return from_space_.space_start(); }
  Address FromSpaceEnd() { return from_space_.space_end(); }
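  // The top/limit pointers exposed by allocation_top_address() and
  // allocation_limit_address() above are what compiled code uses for inline
  // bump-pointer allocation. Schematically (a sketch of the fast path, not
  // the generated code itself):
  //
  //   Address top = *top_address;
  //   if (top + size_in_bytes <= *limit_address) {
  //     *top_address = top + size_in_bytes;      // Bump the pointer.
  //     result = HeapObject::FromAddress(top);
  //   } else {
  //     // Fall back to AllocateRaw(), which may add a fresh page or GC.
  //   }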
  // Get the extent of the active semispace's pages' memory.
  Address ToSpaceStart() { return to_space_.space_start(); }
  Address ToSpaceEnd() { return to_space_.space_end(); }

  inline bool ToSpaceContains(Address address) {
    return to_space_.Contains(address);
  }
  inline bool FromSpaceContains(Address address) {
    return from_space_.Contains(address);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of
  // the semispace).
  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  // Try to switch the active semispace to a new, empty, page.
  // Returns false if this isn't possible or reasonable (i.e., there
  // are no pages, or the current page is already empty), or true
  // if successful.
  bool AddFreshPage();

  virtual bool ReserveSpace(int bytes);

  // Resizes a sequential string which must be the most recent thing that was
  // allocated in new space.
  template <typename StringType>
  inline void ShrinkStringAtAllocationBoundary(String* string, int len);

#ifdef DEBUG
  // Verify the active semispace.
  virtual void Verify();
  // Print the active semispace.
  virtual void Print() { to_space_.Print(); }
#endif

  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object. Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

  inline intptr_t inline_allocation_limit_step() {
    return inline_allocation_limit_step_;
  }

  SemiSpace* active_space() { return &to_space_; }

 private:
  // Update allocation info to match the current to-space page.
  void UpdateAllocationInfo();

  Address chunk_base_;
  uintptr_t chunk_size_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;
  VirtualMemory reservation_;
  int pages_used_;

  // Start address and bit mask for containment testing.
  Address start_;
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;

  // When incremental marking is active we will set allocation_info_.limit
  // to be lower than the actual limit and then will gradually increase it
  // in steps to guarantee that we do incremental marking steps even
  // when all allocation is performed from inlined generated code.
  intptr_t inline_allocation_limit_step_;
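  // For example (illustrative numbers): with a 512-byte step,
  // LowerInlineAllocationLimit(512) clamps the published limit to
  //
  //   limit = Min(top + 512, real_page_limit);
  //
  // so inline allocation trips the slow path after at most 512 bytes, the
  // runtime performs a marking step, and the limit advances by another
  // step. Marking therefore makes progress even if every allocation would
  // otherwise stay on the inline fast path.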
  Address top_on_previous_step_;

  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;

  MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);

  friend class SemiSpaceIterator;

 public:
  TRACK_MEMORY("NewSpace")
};


// -----------------------------------------------------------------------------
// Old object space (excluding map objects)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object with a given maximum capacity.
  // The constructor does not allocate pages from the OS.
  OldSpace(Heap* heap,
           intptr_t max_capacity,
           AllocationSpace id,
           Executability executable)
      : PagedSpace(heap, max_capacity, id, executable) {
    page_extra_ = 0;
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) {
    return page->area_end();
  }

 public:
  TRACK_MEMORY("OldSpace")
};


// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_ASSERT((space).page_low() <= (info).top        \
              && (info).top <= (space).page_high()    \
              && (info).limit <= (space).page_high())


// -----------------------------------------------------------------------------
// Old space for objects of a fixed size

class FixedSpace : public PagedSpace {
 public:
  FixedSpace(Heap* heap,
             intptr_t max_capacity,
             AllocationSpace id,
             int object_size_in_bytes,
             const char* name)
      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
        object_size_in_bytes_(object_size_in_bytes),
        name_(name) {
    page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) {
    return page->area_end() - page_extra_;
  }

  int object_size_in_bytes() { return object_size_in_bytes_; }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact();

 protected:
  void ResetFreeList() {
    free_list_.Reset();
  }

 private:
  // The size of objects in this space.
  int object_size_in_bytes_;

  // The name of this space.
  const char* name_;
};
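// The page_extra_ computation above reserves the tail of each page that
// cannot hold a whole fixed-size object. A worked example (small
// illustrative numbers, not the real constants): with a 1000-byte object
// area and 48-byte objects,
//
//   page_extra_ = 1000 % 48 == 40
//
// so allocation on each page stops 40 bytes short of area_end(), and those
// bytes are never handed out.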
// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public FixedSpace {
 public:
  // Creates a map space object with a maximum capacity.
  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
      : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
        max_map_space_pages_(kMaxMapPageIndex - 1) {
  }

  // TODO(1600): this limit is artificial just to keep code compilable
  static const int kMaxMapPageIndex = 1 << 16;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (IsPowerOf2(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }

 protected:
#ifdef DEBUG
  virtual void VerifyObject(HeapObject* obj);
#endif

 private:
  static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;

  // Do map space compaction if there is a page gap.
  int CompactionThreshold() {
    return kMapsPerPage * (max_map_space_pages_ - 1);
  }

  const int max_map_space_pages_;

 public:
  TRACK_MEMORY("MapSpace")
};


// -----------------------------------------------------------------------------
// Old space for all global object property cell objects

class CellSpace : public FixedSpace {
 public:
  // Creates a property cell space object with a maximum capacity.
  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
      : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
  {}

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
      return RoundDown(size, JSGlobalPropertyCell::kSize);
    } else {
      return (size / JSGlobalPropertyCell::kSize) *
          JSGlobalPropertyCell::kSize;
    }
  }

 protected:
#ifdef DEBUG
  virtual void VerifyObject(HeapObject* obj);
#endif

 public:
  TRACK_MEMORY("CellSpace")
};
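// Both RoundSizeDownToObjectAlignment overrides above compute the same
// thing: the largest multiple of the object size that fits in `size`. The
// power-of-two branch merely lets the compiler use a mask instead of a
// division. A worked example (illustrative numbers):
//
//   object size 8 (power of two):   RoundDown(30, 8) == 30 & ~7 == 24
//   object size 12 (not a power):   (30 / 12) * 12   == 24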
// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from the OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset into a page.
// Large objects do not move during garbage collections.

class LargeObjectSpace : public Space {
 public:
  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
  virtual ~LargeObjectSpace() {}

  // Initializes internal data structures.
  bool SetUp();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
                                           Executability executable);

  // Available bytes for objects in this space.
  inline intptr_t Available();

  virtual intptr_t Size() {
    return size_;
  }

  virtual intptr_t SizeOfObjects() {
    return objects_size_;
  }

  int PageCount() {
    return page_count_;
  }

  // Finds an object for a given address, returns Failure::Exception()
  // if it is not found. The function iterates through all objects in this
  // space, so it may be slow.
  MaybeObject* FindObject(Address a);

  // Finds a large object page containing the given address, returns NULL
  // if such a page doesn't exist.
  LargePage* FindPage(Address a);

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page_ == NULL; }

  // See the comments for ReserveSpace in the Space class. This has to be
  // called after ReserveSpace has been called on the paged spaces, since they
  // may use some memory, leaving less for large objects.
  virtual bool ReserveSpace(int bytes);

  LargePage* first_page() { return first_page_; }

#ifdef DEBUG
  virtual void Verify();
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();
#endif
  // Checks whether an address is in the object area of this space. It
  // iterates over all objects in the space, so it may be slow.
  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }

 private:
  intptr_t max_capacity_;
  // The head of the linked list of large object chunks.
  LargePage* first_page_;
  intptr_t size_;          // Allocated bytes.
  int page_count_;         // Number of chunks.
  intptr_t objects_size_;  // Size of objects.
  // Maps MemoryChunk::kAlignment-aligned chunks to the large pages that
  // cover them.
  HashMap chunk_map_;

  friend class LargeObjectIterator;

 public:
  TRACK_MEMORY("LargeObjectSpace")
};


class LargeObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  HeapObject* Next();

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return Next(); }

 private:
  LargePage* current_;
  HeapObjectCallback size_func_;
};
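// The padding arithmetic in ObjectSizeFor() above, worked through with
// illustrative numbers: assuming 1MB (0x100000) pages and
// kObjectStartOffset == 0x100, a chunk of 0x300100 bytes can hold an object
// of at most
//
//   0x300100 - 0x100000 - 0x100 == 0x200000 bytes (2MB),
//
// and any chunk no larger than kPageSize + kObjectStartOffset cannot hold a
// large object at all, which is why the function returns 0 for it.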
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space.
class PointerChunkIterator BASE_EMBEDDED {
 public:
  inline explicit PointerChunkIterator(Heap* heap);

  // Return NULL when the iterator is done.
  MemoryChunk* next() {
    switch (state_) {
      case kOldPointerState: {
        if (old_pointer_iterator_.has_next()) {
          return old_pointer_iterator_.next();
        }
        state_ = kMapState;
        // Fall through.
      }
      case kMapState: {
        if (map_iterator_.has_next()) {
          return map_iterator_.next();
        }
        state_ = kLargeObjectState;
        // Fall through.
      }
      case kLargeObjectState: {
        HeapObject* heap_object;
        do {
          heap_object = lo_iterator_.Next();
          if (heap_object == NULL) {
            state_ = kFinishedState;
            return NULL;
          }
          // Fixed arrays are the only pointer-containing objects in large
          // object space.
        } while (!heap_object->IsFixedArray());
        MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
        return answer;
      }
      case kFinishedState:
        return NULL;
      default:
        break;
    }
    UNREACHABLE();
    return NULL;
  }

 private:
  enum State {
    kOldPointerState,
    kMapState,
    kLargeObjectState,
    kFinishedState
  };
  State state_;
  PageIterator old_pointer_iterator_;
  PageIterator map_iterator_;
  LargeObjectIterator lo_iterator_;
};


#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif


} }  // namespace v8::internal

#endif  // V8_SPACES_H_