// serialize.cc revision c7cc028aaeedbbfa11c11d0b7b243b3d9e837ed9
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 28#include "v8.h" 29 30#include "accessors.h" 31#include "api.h" 32#include "bootstrapper.h" 33#include "execution.h" 34#include "global-handles.h" 35#include "ic-inl.h" 36#include "natives.h" 37#include "platform.h" 38#include "runtime.h" 39#include "serialize.h" 40#include "stub-cache.h" 41#include "v8threads.h" 42 43namespace v8 { 44namespace internal { 45 46 47// ----------------------------------------------------------------------------- 48// Coding of external references. 49 50// The encoding of an external reference. The type is in the high word. 51// The id is in the low word. 52static uint32_t EncodeExternal(TypeCode type, uint16_t id) { 53 return static_cast<uint32_t>(type) << 16 | id; 54} 55 56 57static int* GetInternalPointer(StatsCounter* counter) { 58 // All counters refer to dummy_counter, if deserializing happens without 59 // setting up counters. 60 static int dummy_counter = 0; 61 return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter; 62} 63 64 65ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) { 66 ExternalReferenceTable* external_reference_table = 67 isolate->external_reference_table(); 68 if (external_reference_table == NULL) { 69 external_reference_table = new ExternalReferenceTable(isolate); 70 isolate->set_external_reference_table(external_reference_table); 71 } 72 return external_reference_table; 73} 74 75 76void ExternalReferenceTable::AddFromId(TypeCode type, 77 uint16_t id, 78 const char* name, 79 Isolate* isolate) { 80 Address address; 81 switch (type) { 82 case C_BUILTIN: { 83 ExternalReference ref(static_cast<Builtins::CFunctionId>(id), isolate); 84 address = ref.address(); 85 break; 86 } 87 case BUILTIN: { 88 ExternalReference ref(static_cast<Builtins::Name>(id), isolate); 89 address = ref.address(); 90 break; 91 } 92 case RUNTIME_FUNCTION: { 93 ExternalReference ref(static_cast<Runtime::FunctionId>(id), isolate); 94 address = ref.address(); 95 break; 96 } 97 case IC_UTILITY: { 98 
ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)), 99 isolate); 100 address = ref.address(); 101 break; 102 } 103 default: 104 UNREACHABLE(); 105 return; 106 } 107 Add(address, type, id, name); 108} 109 110 111void ExternalReferenceTable::Add(Address address, 112 TypeCode type, 113 uint16_t id, 114 const char* name) { 115 ASSERT_NE(NULL, address); 116 ExternalReferenceEntry entry; 117 entry.address = address; 118 entry.code = EncodeExternal(type, id); 119 entry.name = name; 120 ASSERT_NE(0, entry.code); 121 refs_.Add(entry); 122 if (id > max_id_[type]) max_id_[type] = id; 123} 124 125 126void ExternalReferenceTable::PopulateTable(Isolate* isolate) { 127 for (int type_code = 0; type_code < kTypeCodeCount; type_code++) { 128 max_id_[type_code] = 0; 129 } 130 131 // The following populates all of the different type of external references 132 // into the ExternalReferenceTable. 133 // 134 // NOTE: This function was originally 100k of code. It has since been 135 // rewritten to be mostly table driven, as the callback macro style tends to 136 // very easily cause code bloat. Please be careful in the future when adding 137 // new references. 
138 139 struct RefTableEntry { 140 TypeCode type; 141 uint16_t id; 142 const char* name; 143 }; 144 145 static const RefTableEntry ref_table[] = { 146 // Builtins 147#define DEF_ENTRY_C(name, ignored) \ 148 { C_BUILTIN, \ 149 Builtins::c_##name, \ 150 "Builtins::" #name }, 151 152 BUILTIN_LIST_C(DEF_ENTRY_C) 153#undef DEF_ENTRY_C 154 155#define DEF_ENTRY_C(name, ignored) \ 156 { BUILTIN, \ 157 Builtins::k##name, \ 158 "Builtins::" #name }, 159#define DEF_ENTRY_A(name, kind, state, extra) DEF_ENTRY_C(name, ignored) 160 161 BUILTIN_LIST_C(DEF_ENTRY_C) 162 BUILTIN_LIST_A(DEF_ENTRY_A) 163 BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A) 164#undef DEF_ENTRY_C 165#undef DEF_ENTRY_A 166 167 // Runtime functions 168#define RUNTIME_ENTRY(name, nargs, ressize) \ 169 { RUNTIME_FUNCTION, \ 170 Runtime::k##name, \ 171 "Runtime::" #name }, 172 173 RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY) 174#undef RUNTIME_ENTRY 175 176 // IC utilities 177#define IC_ENTRY(name) \ 178 { IC_UTILITY, \ 179 IC::k##name, \ 180 "IC::" #name }, 181 182 IC_UTIL_LIST(IC_ENTRY) 183#undef IC_ENTRY 184 }; // end of ref_table[]. 
185 186 for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) { 187 AddFromId(ref_table[i].type, 188 ref_table[i].id, 189 ref_table[i].name, 190 isolate); 191 } 192 193#ifdef ENABLE_DEBUGGER_SUPPORT 194 // Debug addresses 195 Add(Debug_Address(Debug::k_after_break_target_address).address(isolate), 196 DEBUG_ADDRESS, 197 Debug::k_after_break_target_address << kDebugIdShift, 198 "Debug::after_break_target_address()"); 199 Add(Debug_Address(Debug::k_debug_break_slot_address).address(isolate), 200 DEBUG_ADDRESS, 201 Debug::k_debug_break_slot_address << kDebugIdShift, 202 "Debug::debug_break_slot_address()"); 203 Add(Debug_Address(Debug::k_debug_break_return_address).address(isolate), 204 DEBUG_ADDRESS, 205 Debug::k_debug_break_return_address << kDebugIdShift, 206 "Debug::debug_break_return_address()"); 207 Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(isolate), 208 DEBUG_ADDRESS, 209 Debug::k_restarter_frame_function_pointer << kDebugIdShift, 210 "Debug::restarter_frame_function_pointer_address()"); 211#endif 212 213 // Stat counters 214 struct StatsRefTableEntry { 215 StatsCounter* (Counters::*counter)(); 216 uint16_t id; 217 const char* name; 218 }; 219 220 const StatsRefTableEntry stats_ref_table[] = { 221#define COUNTER_ENTRY(name, caption) \ 222 { &Counters::name, \ 223 Counters::k_##name, \ 224 "Counters::" #name }, 225 226 STATS_COUNTER_LIST_1(COUNTER_ENTRY) 227 STATS_COUNTER_LIST_2(COUNTER_ENTRY) 228#undef COUNTER_ENTRY 229 }; // end of stats_ref_table[]. 
230 231 Counters* counters = isolate->counters(); 232 for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) { 233 Add(reinterpret_cast<Address>(GetInternalPointer( 234 (counters->*(stats_ref_table[i].counter))())), 235 STATS_COUNTER, 236 stats_ref_table[i].id, 237 stats_ref_table[i].name); 238 } 239 240 // Top addresses 241 242 const char* AddressNames[] = { 243#define BUILD_NAME_LITERAL(CamelName, hacker_name) \ 244 "Isolate::" #hacker_name "_address", 245 FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL) 246 NULL 247#undef C 248 }; 249 250 for (uint16_t i = 0; i < Isolate::kIsolateAddressCount; ++i) { 251 Add(isolate->get_address_from_id((Isolate::AddressId)i), 252 TOP_ADDRESS, i, AddressNames[i]); 253 } 254 255 // Accessors 256#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \ 257 Add((Address)&Accessors::name, \ 258 ACCESSOR, \ 259 Accessors::k##name, \ 260 "Accessors::" #name); 261 262 ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION) 263#undef ACCESSOR_DESCRIPTOR_DECLARATION 264 265 StubCache* stub_cache = isolate->stub_cache(); 266 267 // Stub cache tables 268 Add(stub_cache->key_reference(StubCache::kPrimary).address(), 269 STUB_CACHE_TABLE, 270 1, 271 "StubCache::primary_->key"); 272 Add(stub_cache->value_reference(StubCache::kPrimary).address(), 273 STUB_CACHE_TABLE, 274 2, 275 "StubCache::primary_->value"); 276 Add(stub_cache->key_reference(StubCache::kSecondary).address(), 277 STUB_CACHE_TABLE, 278 3, 279 "StubCache::secondary_->key"); 280 Add(stub_cache->value_reference(StubCache::kSecondary).address(), 281 STUB_CACHE_TABLE, 282 4, 283 "StubCache::secondary_->value"); 284 285 // Runtime entries 286 Add(ExternalReference::perform_gc_function(isolate).address(), 287 RUNTIME_ENTRY, 288 1, 289 "Runtime::PerformGC"); 290 Add(ExternalReference::fill_heap_number_with_random_function( 291 isolate).address(), 292 RUNTIME_ENTRY, 293 2, 294 "V8::FillHeapNumberWithRandom"); 295 Add(ExternalReference::random_uint32_function(isolate).address(), 296 
RUNTIME_ENTRY, 297 3, 298 "V8::Random"); 299 Add(ExternalReference::delete_handle_scope_extensions(isolate).address(), 300 RUNTIME_ENTRY, 301 4, 302 "HandleScope::DeleteExtensions"); 303 Add(ExternalReference:: 304 incremental_marking_record_write_function(isolate).address(), 305 RUNTIME_ENTRY, 306 5, 307 "IncrementalMarking::RecordWrite"); 308 Add(ExternalReference::store_buffer_overflow_function(isolate).address(), 309 RUNTIME_ENTRY, 310 6, 311 "StoreBuffer::StoreBufferOverflow"); 312 Add(ExternalReference:: 313 incremental_evacuation_record_write_function(isolate).address(), 314 RUNTIME_ENTRY, 315 7, 316 "IncrementalMarking::RecordWrite"); 317 318 319 320 // Miscellaneous 321 Add(ExternalReference::roots_array_start(isolate).address(), 322 UNCLASSIFIED, 323 3, 324 "Heap::roots_array_start()"); 325 Add(ExternalReference::address_of_stack_limit(isolate).address(), 326 UNCLASSIFIED, 327 4, 328 "StackGuard::address_of_jslimit()"); 329 Add(ExternalReference::address_of_real_stack_limit(isolate).address(), 330 UNCLASSIFIED, 331 5, 332 "StackGuard::address_of_real_jslimit()"); 333#ifndef V8_INTERPRETED_REGEXP 334 Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(), 335 UNCLASSIFIED, 336 6, 337 "RegExpStack::limit_address()"); 338 Add(ExternalReference::address_of_regexp_stack_memory_address( 339 isolate).address(), 340 UNCLASSIFIED, 341 7, 342 "RegExpStack::memory_address()"); 343 Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(), 344 UNCLASSIFIED, 345 8, 346 "RegExpStack::memory_size()"); 347 Add(ExternalReference::address_of_static_offsets_vector(isolate).address(), 348 UNCLASSIFIED, 349 9, 350 "OffsetsVector::static_offsets_vector"); 351#endif // V8_INTERPRETED_REGEXP 352 Add(ExternalReference::new_space_start(isolate).address(), 353 UNCLASSIFIED, 354 10, 355 "Heap::NewSpaceStart()"); 356 Add(ExternalReference::new_space_mask(isolate).address(), 357 UNCLASSIFIED, 358 11, 359 "Heap::NewSpaceMask()"); 360 
Add(ExternalReference::heap_always_allocate_scope_depth(isolate).address(), 361 UNCLASSIFIED, 362 12, 363 "Heap::always_allocate_scope_depth()"); 364 Add(ExternalReference::new_space_allocation_limit_address(isolate).address(), 365 UNCLASSIFIED, 366 14, 367 "Heap::NewSpaceAllocationLimitAddress()"); 368 Add(ExternalReference::new_space_allocation_top_address(isolate).address(), 369 UNCLASSIFIED, 370 15, 371 "Heap::NewSpaceAllocationTopAddress()"); 372#ifdef ENABLE_DEBUGGER_SUPPORT 373 Add(ExternalReference::debug_break(isolate).address(), 374 UNCLASSIFIED, 375 16, 376 "Debug::Break()"); 377 Add(ExternalReference::debug_step_in_fp_address(isolate).address(), 378 UNCLASSIFIED, 379 17, 380 "Debug::step_in_fp_addr()"); 381#endif 382 Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(), 383 UNCLASSIFIED, 384 18, 385 "add_two_doubles"); 386 Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(), 387 UNCLASSIFIED, 388 19, 389 "sub_two_doubles"); 390 Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(), 391 UNCLASSIFIED, 392 20, 393 "mul_two_doubles"); 394 Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(), 395 UNCLASSIFIED, 396 21, 397 "div_two_doubles"); 398 Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(), 399 UNCLASSIFIED, 400 22, 401 "mod_two_doubles"); 402 Add(ExternalReference::compare_doubles(isolate).address(), 403 UNCLASSIFIED, 404 23, 405 "compare_doubles"); 406#ifndef V8_INTERPRETED_REGEXP 407 Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(), 408 UNCLASSIFIED, 409 24, 410 "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()"); 411 Add(ExternalReference::re_check_stack_guard_state(isolate).address(), 412 UNCLASSIFIED, 413 25, 414 "RegExpMacroAssembler*::CheckStackGuardState()"); 415 Add(ExternalReference::re_grow_stack(isolate).address(), 416 UNCLASSIFIED, 417 26, 418 "NativeRegExpMacroAssembler::GrowStack()"); 419 
Add(ExternalReference::re_word_character_map().address(), 420 UNCLASSIFIED, 421 27, 422 "NativeRegExpMacroAssembler::word_character_map"); 423#endif // V8_INTERPRETED_REGEXP 424 // Keyed lookup cache. 425 Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(), 426 UNCLASSIFIED, 427 28, 428 "KeyedLookupCache::keys()"); 429 Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(), 430 UNCLASSIFIED, 431 29, 432 "KeyedLookupCache::field_offsets()"); 433 Add(ExternalReference::transcendental_cache_array_address(isolate).address(), 434 UNCLASSIFIED, 435 30, 436 "TranscendentalCache::caches()"); 437 Add(ExternalReference::handle_scope_next_address().address(), 438 UNCLASSIFIED, 439 31, 440 "HandleScope::next"); 441 Add(ExternalReference::handle_scope_limit_address().address(), 442 UNCLASSIFIED, 443 32, 444 "HandleScope::limit"); 445 Add(ExternalReference::handle_scope_level_address().address(), 446 UNCLASSIFIED, 447 33, 448 "HandleScope::level"); 449 Add(ExternalReference::new_deoptimizer_function(isolate).address(), 450 UNCLASSIFIED, 451 34, 452 "Deoptimizer::New()"); 453 Add(ExternalReference::compute_output_frames_function(isolate).address(), 454 UNCLASSIFIED, 455 35, 456 "Deoptimizer::ComputeOutputFrames()"); 457 Add(ExternalReference::address_of_min_int().address(), 458 UNCLASSIFIED, 459 36, 460 "LDoubleConstant::min_int"); 461 Add(ExternalReference::address_of_one_half().address(), 462 UNCLASSIFIED, 463 37, 464 "LDoubleConstant::one_half"); 465 Add(ExternalReference::isolate_address().address(), 466 UNCLASSIFIED, 467 38, 468 "isolate"); 469 Add(ExternalReference::address_of_minus_zero().address(), 470 UNCLASSIFIED, 471 39, 472 "LDoubleConstant::minus_zero"); 473 Add(ExternalReference::address_of_negative_infinity().address(), 474 UNCLASSIFIED, 475 40, 476 "LDoubleConstant::negative_infinity"); 477 Add(ExternalReference::power_double_double_function(isolate).address(), 478 UNCLASSIFIED, 479 41, 480 "power_double_double_function"); 481 
Add(ExternalReference::power_double_int_function(isolate).address(), 482 UNCLASSIFIED, 483 42, 484 "power_double_int_function"); 485 Add(ExternalReference::store_buffer_top(isolate).address(), 486 UNCLASSIFIED, 487 43, 488 "store_buffer_top"); 489 Add(ExternalReference::address_of_canonical_non_hole_nan().address(), 490 UNCLASSIFIED, 491 44, 492 "canonical_nan"); 493 Add(ExternalReference::address_of_the_hole_nan().address(), 494 UNCLASSIFIED, 495 45, 496 "the_hole_nan"); 497} 498 499 500ExternalReferenceEncoder::ExternalReferenceEncoder() 501 : encodings_(Match), 502 isolate_(Isolate::Current()) { 503 ExternalReferenceTable* external_references = 504 ExternalReferenceTable::instance(isolate_); 505 for (int i = 0; i < external_references->size(); ++i) { 506 Put(external_references->address(i), i); 507 } 508} 509 510 511uint32_t ExternalReferenceEncoder::Encode(Address key) const { 512 int index = IndexOf(key); 513 ASSERT(key == NULL || index >= 0); 514 return index >=0 ? 515 ExternalReferenceTable::instance(isolate_)->code(index) : 0; 516} 517 518 519const char* ExternalReferenceEncoder::NameOfAddress(Address key) const { 520 int index = IndexOf(key); 521 return index >= 0 ? 522 ExternalReferenceTable::instance(isolate_)->name(index) : NULL; 523} 524 525 526int ExternalReferenceEncoder::IndexOf(Address key) const { 527 if (key == NULL) return -1; 528 HashMap::Entry* entry = 529 const_cast<HashMap&>(encodings_).Lookup(key, Hash(key), false); 530 return entry == NULL 531 ? 
-1 532 : static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); 533} 534 535 536void ExternalReferenceEncoder::Put(Address key, int index) { 537 HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true); 538 entry->value = reinterpret_cast<void*>(index); 539} 540 541 542ExternalReferenceDecoder::ExternalReferenceDecoder() 543 : encodings_(NewArray<Address*>(kTypeCodeCount)), 544 isolate_(Isolate::Current()) { 545 ExternalReferenceTable* external_references = 546 ExternalReferenceTable::instance(isolate_); 547 for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) { 548 int max = external_references->max_id(type) + 1; 549 encodings_[type] = NewArray<Address>(max + 1); 550 } 551 for (int i = 0; i < external_references->size(); ++i) { 552 Put(external_references->code(i), external_references->address(i)); 553 } 554} 555 556 557ExternalReferenceDecoder::~ExternalReferenceDecoder() { 558 for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) { 559 DeleteArray(encodings_[type]); 560 } 561 DeleteArray(encodings_); 562} 563 564 565bool Serializer::serialization_enabled_ = false; 566bool Serializer::too_late_to_enable_now_ = false; 567 568 569Deserializer::Deserializer(SnapshotByteSource* source) 570 : isolate_(NULL), 571 source_(source), 572 external_reference_decoder_(NULL) { 573} 574 575 576// This routine both allocates a new object, and also keeps 577// track of where objects have been allocated so that we can 578// fix back references when deserializing. 
579Address Deserializer::Allocate(int space_index, Space* space, int size) { 580 Address address; 581 if (!SpaceIsLarge(space_index)) { 582 ASSERT(!SpaceIsPaged(space_index) || 583 size <= Page::kPageSize - Page::kObjectStartOffset); 584 MaybeObject* maybe_new_allocation; 585 if (space_index == NEW_SPACE) { 586 maybe_new_allocation = 587 reinterpret_cast<NewSpace*>(space)->AllocateRaw(size); 588 } else { 589 maybe_new_allocation = 590 reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size); 591 } 592 ASSERT(!maybe_new_allocation->IsFailure()); 593 Object* new_allocation = maybe_new_allocation->ToObjectUnchecked(); 594 HeapObject* new_object = HeapObject::cast(new_allocation); 595 address = new_object->address(); 596 high_water_[space_index] = address + size; 597 } else { 598 ASSERT(SpaceIsLarge(space_index)); 599 LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space); 600 Object* new_allocation; 601 if (space_index == kLargeData || space_index == kLargeFixedArray) { 602 new_allocation = 603 lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked(); 604 } else { 605 ASSERT_EQ(kLargeCode, space_index); 606 new_allocation = 607 lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked(); 608 } 609 HeapObject* new_object = HeapObject::cast(new_allocation); 610 // Record all large objects in the same space. 611 address = new_object->address(); 612 pages_[LO_SPACE].Add(address); 613 } 614 last_object_address_ = address; 615 return address; 616} 617 618 619// This returns the address of an object that has been described in the 620// snapshot as being offset bytes back in a particular space. 
621HeapObject* Deserializer::GetAddressFromEnd(int space) { 622 int offset = source_->GetInt(); 623 ASSERT(!SpaceIsLarge(space)); 624 offset <<= kObjectAlignmentBits; 625 return HeapObject::FromAddress(high_water_[space] - offset); 626} 627 628 629// This returns the address of an object that has been described in the 630// snapshot as being offset bytes into a particular space. 631HeapObject* Deserializer::GetAddressFromStart(int space) { 632 int offset = source_->GetInt(); 633 if (SpaceIsLarge(space)) { 634 // Large spaces have one object per 'page'. 635 return HeapObject::FromAddress(pages_[LO_SPACE][offset]); 636 } 637 offset <<= kObjectAlignmentBits; 638 if (space == NEW_SPACE) { 639 // New space has only one space - numbered 0. 640 return HeapObject::FromAddress(pages_[space][0] + offset); 641 } 642 ASSERT(SpaceIsPaged(space)); 643 int page_of_pointee = offset >> kPageSizeBits; 644 Address object_address = pages_[space][page_of_pointee] + 645 (offset & Page::kPageAlignmentMask); 646 return HeapObject::FromAddress(object_address); 647} 648 649 650void Deserializer::Deserialize() { 651 isolate_ = Isolate::Current(); 652 ASSERT(isolate_ != NULL); 653 // Don't GC while deserializing - just expand the heap. 654 AlwaysAllocateScope always_allocate; 655 // Don't use the free lists while deserializing. 656 LinearAllocationScope allocate_linearly; 657 // No active threads. 658 ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse()); 659 // No active handles. 660 ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty()); 661 // Make sure the entire partial snapshot cache is traversed, filling it with 662 // valid object pointers. 
663 isolate_->set_serialize_partial_snapshot_cache_length( 664 Isolate::kPartialSnapshotCacheCapacity); 665 ASSERT_EQ(NULL, external_reference_decoder_); 666 external_reference_decoder_ = new ExternalReferenceDecoder(); 667 isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG); 668 isolate_->heap()->IterateWeakRoots(this, VISIT_ALL); 669 670 isolate_->heap()->set_global_contexts_list( 671 isolate_->heap()->undefined_value()); 672 673 // Update data pointers to the external strings containing natives sources. 674 for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { 675 Object* source = isolate_->heap()->natives_source_cache()->get(i); 676 if (!source->IsUndefined()) { 677 ExternalAsciiString::cast(source)->update_data_cache(); 678 } 679 } 680} 681 682 683void Deserializer::DeserializePartial(Object** root) { 684 isolate_ = Isolate::Current(); 685 // Don't GC while deserializing - just expand the heap. 686 AlwaysAllocateScope always_allocate; 687 // Don't use the free lists while deserializing. 688 LinearAllocationScope allocate_linearly; 689 if (external_reference_decoder_ == NULL) { 690 external_reference_decoder_ = new ExternalReferenceDecoder(); 691 } 692 VisitPointer(root); 693} 694 695 696Deserializer::~Deserializer() { 697 ASSERT(source_->AtEOF()); 698 if (external_reference_decoder_) { 699 delete external_reference_decoder_; 700 external_reference_decoder_ = NULL; 701 } 702} 703 704 705// This is called on the roots. It is the driver of the deserialization 706// process. It is also called on the body of each function. 707void Deserializer::VisitPointers(Object** start, Object** end) { 708 // The space must be new space. Any other space would cause ReadChunk to try 709 // to update the remembered using NULL as the address. 710 ReadChunk(start, end, NEW_SPACE, NULL); 711} 712 713 714// This routine writes the new object into the pointer provided and then 715// returns true if the new object was in young space and false otherwise. 
716// The reason for this strange interface is that otherwise the object is 717// written very late, which means the FreeSpace map is not set up by the 718// time we need to use it to mark the space at the end of a page free. 719void Deserializer::ReadObject(int space_number, 720 Space* space, 721 Object** write_back) { 722 int size = source_->GetInt() << kObjectAlignmentBits; 723 Address address = Allocate(space_number, space, size); 724 *write_back = HeapObject::FromAddress(address); 725 Object** current = reinterpret_cast<Object**>(address); 726 Object** limit = current + (size >> kPointerSizeLog2); 727 if (FLAG_log_snapshot_positions) { 728 LOG(isolate_, SnapshotPositionEvent(address, source_->position())); 729 } 730 ReadChunk(current, limit, space_number, address); 731#ifdef DEBUG 732 bool is_codespace = (space == HEAP->code_space()) || 733 ((space == HEAP->lo_space()) && (space_number == kLargeCode)); 734 ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace); 735#endif 736} 737 738 739// This macro is always used with a constant argument so it should all fold 740// away to almost nothing in the generated code. It might be nicer to do this 741// with the ternary operator but there are type issues with that. 
742#define ASSIGN_DEST_SPACE(space_number) \ 743 Space* dest_space; \ 744 if (space_number == NEW_SPACE) { \ 745 dest_space = isolate->heap()->new_space(); \ 746 } else if (space_number == OLD_POINTER_SPACE) { \ 747 dest_space = isolate->heap()->old_pointer_space(); \ 748 } else if (space_number == OLD_DATA_SPACE) { \ 749 dest_space = isolate->heap()->old_data_space(); \ 750 } else if (space_number == CODE_SPACE) { \ 751 dest_space = isolate->heap()->code_space(); \ 752 } else if (space_number == MAP_SPACE) { \ 753 dest_space = isolate->heap()->map_space(); \ 754 } else if (space_number == CELL_SPACE) { \ 755 dest_space = isolate->heap()->cell_space(); \ 756 } else { \ 757 ASSERT(space_number >= LO_SPACE); \ 758 dest_space = isolate->heap()->lo_space(); \ 759 } 760 761 762static const int kUnknownOffsetFromStart = -1; 763 764 765void Deserializer::ReadChunk(Object** current, 766 Object** limit, 767 int source_space, 768 Address current_object_address) { 769 Isolate* const isolate = isolate_; 770 bool write_barrier_needed = (current_object_address != NULL && 771 source_space != NEW_SPACE && 772 source_space != CELL_SPACE && 773 source_space != CODE_SPACE && 774 source_space != OLD_DATA_SPACE); 775 while (current < limit) { 776 int data = source_->Get(); 777 switch (data) { 778#define CASE_STATEMENT(where, how, within, space_number) \ 779 case where + how + within + space_number: \ 780 ASSERT((where & ~kPointedToMask) == 0); \ 781 ASSERT((how & ~kHowToCodeMask) == 0); \ 782 ASSERT((within & ~kWhereToPointMask) == 0); \ 783 ASSERT((space_number & ~kSpaceMask) == 0); 784 785#define CASE_BODY(where, how, within, space_number_if_any, offset_from_start) \ 786 { \ 787 bool emit_write_barrier = false; \ 788 bool current_was_incremented = false; \ 789 int space_number = space_number_if_any == kAnyOldSpace ? 
\ 790 (data & kSpaceMask) : space_number_if_any; \ 791 if (where == kNewObject && how == kPlain && within == kStartOfObject) {\ 792 ASSIGN_DEST_SPACE(space_number) \ 793 ReadObject(space_number, dest_space, current); \ 794 emit_write_barrier = (space_number == NEW_SPACE); \ 795 } else { \ 796 Object* new_object = NULL; /* May not be a real Object pointer. */ \ 797 if (where == kNewObject) { \ 798 ASSIGN_DEST_SPACE(space_number) \ 799 ReadObject(space_number, dest_space, &new_object); \ 800 } else if (where == kRootArray) { \ 801 int root_id = source_->GetInt(); \ 802 new_object = isolate->heap()->roots_array_start()[root_id]; \ 803 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ 804 } else if (where == kPartialSnapshotCache) { \ 805 int cache_index = source_->GetInt(); \ 806 new_object = isolate->serialize_partial_snapshot_cache() \ 807 [cache_index]; \ 808 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ 809 } else if (where == kExternalReference) { \ 810 int reference_id = source_->GetInt(); \ 811 Address address = external_reference_decoder_-> \ 812 Decode(reference_id); \ 813 new_object = reinterpret_cast<Object*>(address); \ 814 } else if (where == kBackref) { \ 815 emit_write_barrier = (space_number == NEW_SPACE); \ 816 new_object = GetAddressFromEnd(data & kSpaceMask); \ 817 } else { \ 818 ASSERT(where == kFromStart); \ 819 if (offset_from_start == kUnknownOffsetFromStart) { \ 820 emit_write_barrier = (space_number == NEW_SPACE); \ 821 new_object = GetAddressFromStart(data & kSpaceMask); \ 822 } else { \ 823 Address object_address = pages_[space_number][0] + \ 824 (offset_from_start << kObjectAlignmentBits); \ 825 new_object = HeapObject::FromAddress(object_address); \ 826 } \ 827 } \ 828 if (within == kFirstInstruction) { \ 829 Code* new_code_object = reinterpret_cast<Code*>(new_object); \ 830 new_object = reinterpret_cast<Object*>( \ 831 new_code_object->instruction_start()); \ 832 } \ 833 if (how == kFromCode) { \ 834 
Address location_of_branch_data = \ 835 reinterpret_cast<Address>(current); \ 836 Assembler::set_target_at(location_of_branch_data, \ 837 reinterpret_cast<Address>(new_object)); \ 838 if (within == kFirstInstruction) { \ 839 location_of_branch_data += Assembler::kCallTargetSize; \ 840 current = reinterpret_cast<Object**>(location_of_branch_data); \ 841 current_was_incremented = true; \ 842 } \ 843 } else { \ 844 *current = new_object; \ 845 } \ 846 } \ 847 if (emit_write_barrier && write_barrier_needed) { \ 848 Address current_address = reinterpret_cast<Address>(current); \ 849 isolate->heap()->RecordWrite( \ 850 current_object_address, \ 851 static_cast<int>(current_address - current_object_address)); \ 852 } \ 853 if (!current_was_incremented) { \ 854 current++; \ 855 } \ 856 break; \ 857 } \ 858 859// This generates a case and a body for each space. The large object spaces are 860// very rare in snapshots so they are grouped in one body. 861#define ONE_PER_SPACE(where, how, within) \ 862 CASE_STATEMENT(where, how, within, NEW_SPACE) \ 863 CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \ 864 CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \ 865 CASE_BODY(where, how, within, OLD_DATA_SPACE, kUnknownOffsetFromStart) \ 866 CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \ 867 CASE_BODY(where, how, within, OLD_POINTER_SPACE, kUnknownOffsetFromStart) \ 868 CASE_STATEMENT(where, how, within, CODE_SPACE) \ 869 CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \ 870 CASE_STATEMENT(where, how, within, CELL_SPACE) \ 871 CASE_BODY(where, how, within, CELL_SPACE, kUnknownOffsetFromStart) \ 872 CASE_STATEMENT(where, how, within, MAP_SPACE) \ 873 CASE_BODY(where, how, within, MAP_SPACE, kUnknownOffsetFromStart) \ 874 CASE_STATEMENT(where, how, within, kLargeData) \ 875 CASE_STATEMENT(where, how, within, kLargeCode) \ 876 CASE_STATEMENT(where, how, within, kLargeFixedArray) \ 877 CASE_BODY(where, how, within, kAnyOldSpace, 
kUnknownOffsetFromStart) 878 879// This generates a case and a body for the new space (which has to do extra 880// write barrier handling) and handles the other spaces with 8 fall-through 881// cases and one body. 882#define ALL_SPACES(where, how, within) \ 883 CASE_STATEMENT(where, how, within, NEW_SPACE) \ 884 CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \ 885 CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \ 886 CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \ 887 CASE_STATEMENT(where, how, within, CODE_SPACE) \ 888 CASE_STATEMENT(where, how, within, CELL_SPACE) \ 889 CASE_STATEMENT(where, how, within, MAP_SPACE) \ 890 CASE_STATEMENT(where, how, within, kLargeData) \ 891 CASE_STATEMENT(where, how, within, kLargeCode) \ 892 CASE_STATEMENT(where, how, within, kLargeFixedArray) \ 893 CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart) 894 895#define ONE_PER_CODE_SPACE(where, how, within) \ 896 CASE_STATEMENT(where, how, within, CODE_SPACE) \ 897 CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \ 898 CASE_STATEMENT(where, how, within, kLargeCode) \ 899 CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart) 900 901#define FOUR_CASES(byte_code) \ 902 case byte_code: \ 903 case byte_code + 1: \ 904 case byte_code + 2: \ 905 case byte_code + 3: 906 907#define SIXTEEN_CASES(byte_code) \ 908 FOUR_CASES(byte_code) \ 909 FOUR_CASES(byte_code + 4) \ 910 FOUR_CASES(byte_code + 8) \ 911 FOUR_CASES(byte_code + 12) 912 913 // We generate 15 cases and bodies that process special tags that combine 914 // the raw data tag and the length into one byte. 
#define RAW_CASE(index, size)                                      \
      case kRawData + index: {                                     \
        byte* raw_data_out = reinterpret_cast<byte*>(current);     \
        source_->CopyRaw(raw_data_out, size);                      \
        current = reinterpret_cast<Object**>(raw_data_out + size); \
        break;                                                     \
      }
      COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE

      // Deserialize a chunk of raw data that doesn't have one of the popular
      // lengths.
      case kRawData: {
        int size = source_->GetInt();
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        source_->CopyRaw(raw_data_out, size);
        current = reinterpret_cast<Object**>(raw_data_out + size);
        break;
      }

      // Single-byte encodings of frequently used root-array constants: the
      // root index is packed into the tag byte itself.
      SIXTEEN_CASES(kRootArrayLowConstants)
      SIXTEEN_CASES(kRootArrayHighConstants) {
        int root_id = RootArrayConstantFromByteCode(data);
        Object* object = isolate->heap()->roots_array_start()[root_id];
        ASSERT(!isolate->heap()->InNewSpace(object));
        *current++ = object;
        break;
      }

      // Repeat the previously written pointer |repeats| times (repeat count
      // read as a varint).  Repeats skip the write barrier, hence the
      // not-in-new-space assert.
      case kRepeat: {
        int repeats = source_->GetInt();
        Object* object = current[-1];
        ASSERT(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) current[i] = object;
        current += repeats;
        break;
      }

      STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
                    Heap::kOldSpaceRoots);
      STATIC_ASSERT(kMaxRepeats == 12);
      // Like kRepeat, but the repeat count is packed into the tag byte.
      FOUR_CASES(kConstantRepeat)
      FOUR_CASES(kConstantRepeat + 4)
      FOUR_CASES(kConstantRepeat + 8) {
        int repeats = RepeatsForCode(data);
        Object* object = current[-1];
        ASSERT(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) current[i] = object;
        current += repeats;
        break;
      }

      // Deserialize a new object and write a pointer to it to the current
      // object.
      ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
      // Support for direct instruction pointers in functions
      ONE_PER_CODE_SPACE(kNewObject, kPlain, kFirstInstruction)
      // Deserialize a new code object and write a pointer to its first
      // instruction to the current code object.
      ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction)
      // Find a recently deserialized object using its offset from the current
      // allocation point and write a pointer to it to the current object.
      ALL_SPACES(kBackref, kPlain, kStartOfObject)
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to its first instruction
      // to the current code object or the instruction pointer in a function
      // object.
      ALL_SPACES(kBackref, kFromCode, kFirstInstruction)
      ALL_SPACES(kBackref, kPlain, kFirstInstruction)
      // Find an already deserialized object using its offset from the start
      // and write a pointer to it to the current object.
      ALL_SPACES(kFromStart, kPlain, kStartOfObject)
      ALL_SPACES(kFromStart, kPlain, kFirstInstruction)
      // Find an already deserialized code object using its offset from the
      // start and write a pointer to its first instruction to the current code
      // object.
      ALL_SPACES(kFromStart, kFromCode, kFirstInstruction)
      // Find an object in the roots array and write a pointer to it to the
      // current object.
      CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
      CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart)
      // Find an object in the partial snapshots cache and write a pointer to it
      // to the current object.
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
                kStartOfObject,
                0,
                kUnknownOffsetFromStart)
      // Find an code entry in the partial snapshots cache and
      // write a pointer to it to the current object.
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kFirstInstruction, 0)
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
                kFirstInstruction,
                0,
                kUnknownOffsetFromStart)
      // Find an external reference and write a pointer to it to the current
      // object.
      CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
      CASE_BODY(kExternalReference,
                kPlain,
                kStartOfObject,
                0,
                kUnknownOffsetFromStart)
      // Find an external reference and write a pointer to it in the current
      // code object.
      CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
      CASE_BODY(kExternalReference,
                kFromCode,
                kStartOfObject,
                0,
                kUnknownOffsetFromStart)

#undef CASE_STATEMENT
#undef CASE_BODY
#undef ONE_PER_SPACE
#undef ALL_SPACES
#undef ASSIGN_DEST_SPACE

      // A page boundary was crossed during serialization: record the start of
      // the new page and, for code pages, flush the instruction cache for the
      // freshly written code.
      case kNewPage: {
        int space = source_->Get();
        pages_[space].Add(last_object_address_);
        if (space == CODE_SPACE) {
          CPU::FlushICache(last_object_address_, Page::kPageSize);
        }
        break;
      }

      // Leave the current slot untouched (it was intentionally not
      // serialized, e.g. the store buffer top) and move on.
      case kSkip: {
        current++;
        break;
      }

      // Reconstruct the external string resource backing a natives script
      // from the statically linked natives sources.
      case kNativesStringResource: {
        int index = source_->Get();
        Vector<const char> source_vector = Natives::GetRawScriptSource(index);
        NativesExternalStringResource* resource =
            new NativesExternalStringResource(isolate->bootstrapper(),
                                              source_vector.start(),
                                              source_vector.length());
        *current++ = reinterpret_cast<Object*>(resource);
        break;
      }

      case kSynchronize: {
        // If we get here then that indicates that you have a mismatch between
        // the number of GC roots when serializing and deserializing.
1063 UNREACHABLE(); 1064 } 1065 1066 default: 1067 UNREACHABLE(); 1068 } 1069 } 1070 ASSERT_EQ(current, limit); 1071} 1072 1073 1074void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) { 1075 const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7; 1076 for (int shift = max_shift; shift > 0; shift -= 7) { 1077 if (integer >= static_cast<uintptr_t>(1u) << shift) { 1078 Put((static_cast<int>((integer >> shift)) & 0x7f) | 0x80, "IntPart"); 1079 } 1080 } 1081 PutSection(static_cast<int>(integer & 0x7f), "IntLastPart"); 1082} 1083 1084 1085Serializer::Serializer(SnapshotByteSink* sink) 1086 : sink_(sink), 1087 current_root_index_(0), 1088 external_reference_encoder_(new ExternalReferenceEncoder), 1089 large_object_total_(0), 1090 root_index_wave_front_(0) { 1091 isolate_ = Isolate::Current(); 1092 // The serializer is meant to be used only to generate initial heap images 1093 // from a context in which there is only one isolate. 1094 ASSERT(isolate_->IsDefaultIsolate()); 1095 for (int i = 0; i <= LAST_SPACE; i++) { 1096 fullness_[i] = 0; 1097 } 1098} 1099 1100 1101Serializer::~Serializer() { 1102 delete external_reference_encoder_; 1103} 1104 1105 1106void StartupSerializer::SerializeStrongReferences() { 1107 Isolate* isolate = Isolate::Current(); 1108 // No active threads. 1109 CHECK_EQ(NULL, Isolate::Current()->thread_manager()->FirstThreadStateInUse()); 1110 // No active or weak handles. 1111 CHECK(isolate->handle_scope_implementer()->blocks()->is_empty()); 1112 CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles()); 1113 // We don't support serializing installed extensions. 
1114 CHECK(!isolate->has_installed_extensions()); 1115 1116 HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG); 1117} 1118 1119 1120void PartialSerializer::Serialize(Object** object) { 1121 this->VisitPointer(object); 1122 Isolate* isolate = Isolate::Current(); 1123 1124 // After we have done the partial serialization the partial snapshot cache 1125 // will contain some references needed to decode the partial snapshot. We 1126 // fill it up with undefineds so it has a predictable length so the 1127 // deserialization code doesn't need to know the length. 1128 for (int index = isolate->serialize_partial_snapshot_cache_length(); 1129 index < Isolate::kPartialSnapshotCacheCapacity; 1130 index++) { 1131 isolate->serialize_partial_snapshot_cache()[index] = 1132 isolate->heap()->undefined_value(); 1133 startup_serializer_->VisitPointer( 1134 &isolate->serialize_partial_snapshot_cache()[index]); 1135 } 1136 isolate->set_serialize_partial_snapshot_cache_length( 1137 Isolate::kPartialSnapshotCacheCapacity); 1138} 1139 1140 1141void Serializer::VisitPointers(Object** start, Object** end) { 1142 Isolate* isolate = Isolate::Current(); 1143 1144 for (Object** current = start; current < end; current++) { 1145 if (start == isolate->heap()->roots_array_start()) { 1146 root_index_wave_front_ = 1147 Max(root_index_wave_front_, static_cast<intptr_t>(current - start)); 1148 } 1149 if (reinterpret_cast<Address>(current) == 1150 isolate->heap()->store_buffer()->TopAddress()) { 1151 sink_->Put(kSkip, "Skip"); 1152 } else if ((*current)->IsSmi()) { 1153 sink_->Put(kRawData, "RawData"); 1154 sink_->PutInt(kPointerSize, "length"); 1155 for (int i = 0; i < kPointerSize; i++) { 1156 sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte"); 1157 } 1158 } else { 1159 SerializeObject(*current, kPlain, kStartOfObject); 1160 } 1161 } 1162} 1163 1164 1165// This ensures that the partial snapshot cache keeps things alive during GC and 1166// tracks their movement. 
When it is called during serialization of the startup 1167// snapshot the partial snapshot is empty, so nothing happens. When the partial 1168// (context) snapshot is created, this array is populated with the pointers that 1169// the partial snapshot will need. As that happens we emit serialized objects to 1170// the startup snapshot that correspond to the elements of this cache array. On 1171// deserialization we therefore need to visit the cache array. This fills it up 1172// with pointers to deserialized objects. 1173void SerializerDeserializer::Iterate(ObjectVisitor* visitor) { 1174 Isolate* isolate = Isolate::Current(); 1175 visitor->VisitPointers( 1176 isolate->serialize_partial_snapshot_cache(), 1177 &isolate->serialize_partial_snapshot_cache()[ 1178 isolate->serialize_partial_snapshot_cache_length()]); 1179} 1180 1181 1182// When deserializing we need to set the size of the snapshot cache. This means 1183// the root iteration code (above) will iterate over array elements, writing the 1184// references to deserialized objects in them. 1185void SerializerDeserializer::SetSnapshotCacheSize(int size) { 1186 Isolate::Current()->set_serialize_partial_snapshot_cache_length(size); 1187} 1188 1189 1190int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) { 1191 Isolate* isolate = Isolate::Current(); 1192 1193 for (int i = 0; 1194 i < isolate->serialize_partial_snapshot_cache_length(); 1195 i++) { 1196 Object* entry = isolate->serialize_partial_snapshot_cache()[i]; 1197 if (entry == heap_object) return i; 1198 } 1199 1200 // We didn't find the object in the cache. So we add it to the cache and 1201 // then visit the pointer so that it becomes part of the startup snapshot 1202 // and we can refer to it from the partial snapshot. 
1203 int length = isolate->serialize_partial_snapshot_cache_length(); 1204 CHECK(length < Isolate::kPartialSnapshotCacheCapacity); 1205 isolate->serialize_partial_snapshot_cache()[length] = heap_object; 1206 startup_serializer_->VisitPointer( 1207 &isolate->serialize_partial_snapshot_cache()[length]); 1208 // We don't recurse from the startup snapshot generator into the partial 1209 // snapshot generator. 1210 ASSERT(length == isolate->serialize_partial_snapshot_cache_length()); 1211 isolate->set_serialize_partial_snapshot_cache_length(length + 1); 1212 return length; 1213} 1214 1215 1216int Serializer::RootIndex(HeapObject* heap_object) { 1217 Heap* heap = HEAP; 1218 if (heap->InNewSpace(heap_object)) return kInvalidRootIndex; 1219 for (int i = 0; i < root_index_wave_front_; i++) { 1220 Object* root = heap->roots_array_start()[i]; 1221 if (!root->IsSmi() && root == heap_object) return i; 1222 } 1223 return kInvalidRootIndex; 1224} 1225 1226 1227// Encode the location of an already deserialized object in order to write its 1228// location into a later object. We can encode the location as an offset from 1229// the start of the deserialized objects or as an offset backwards from the 1230// current allocation pointer. 1231void Serializer::SerializeReferenceToPreviousObject( 1232 int space, 1233 int address, 1234 HowToCode how_to_code, 1235 WhereToPoint where_to_point) { 1236 int offset = CurrentAllocationAddress(space) - address; 1237 bool from_start = true; 1238 if (SpaceIsPaged(space)) { 1239 // For paged space it is simple to encode back from current allocation if 1240 // the object is on the same page as the current allocation pointer. 1241 if ((CurrentAllocationAddress(space) >> kPageSizeBits) == 1242 (address >> kPageSizeBits)) { 1243 from_start = false; 1244 address = offset; 1245 } 1246 } else if (space == NEW_SPACE) { 1247 // For new space it is always simple to encode back from current allocation. 
1248 if (offset < address) { 1249 from_start = false; 1250 address = offset; 1251 } 1252 } 1253 // If we are actually dealing with real offsets (and not a numbering of 1254 // all objects) then we should shift out the bits that are always 0. 1255 if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits; 1256 if (from_start) { 1257 sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer"); 1258 sink_->PutInt(address, "address"); 1259 } else { 1260 sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer"); 1261 sink_->PutInt(address, "address"); 1262 } 1263} 1264 1265 1266void StartupSerializer::SerializeObject( 1267 Object* o, 1268 HowToCode how_to_code, 1269 WhereToPoint where_to_point) { 1270 CHECK(o->IsHeapObject()); 1271 HeapObject* heap_object = HeapObject::cast(o); 1272 1273 int root_index; 1274 if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) { 1275 PutRoot(root_index, heap_object, how_to_code, where_to_point); 1276 return; 1277 } 1278 1279 if (address_mapper_.IsMapped(heap_object)) { 1280 int space = SpaceOfAlreadySerializedObject(heap_object); 1281 int address = address_mapper_.MappedTo(heap_object); 1282 SerializeReferenceToPreviousObject(space, 1283 address, 1284 how_to_code, 1285 where_to_point); 1286 } else { 1287 // Object has not yet been serialized. Serialize it here. 
    ObjectSerializer object_serializer(this,
                                       heap_object,
                                       sink_,
                                       how_to_code,
                                       where_to_point);
    object_serializer.Serialize();
  }
}


// Pad the unused tail of the partial snapshot cache with references to the
// undefined root, then serialize the weak roots.
void StartupSerializer::SerializeWeakReferences() {
  for (int i = Isolate::Current()->serialize_partial_snapshot_cache_length();
       i < Isolate::kPartialSnapshotCacheCapacity;
       i++) {
    sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization");
    sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
  }
  HEAP->IterateWeakRoots(this, VISIT_ALL);
}


// Emit a reference to a root: a single-byte constant encoding when the root
// qualifies (plain pointer, start-of-object, low index, not in new space),
// otherwise a kRootArray tag followed by the index as a varint.
void Serializer::PutRoot(int root_index,
                         HeapObject* object,
                         SerializerDeserializer::HowToCode how_to_code,
                         SerializerDeserializer::WhereToPoint where_to_point) {
  if (how_to_code == kPlain &&
      where_to_point == kStartOfObject &&
      root_index < kRootArrayNumberOfConstantEncodings &&
      !HEAP->InNewSpace(object)) {
    if (root_index < kRootArrayNumberOfLowConstantEncodings) {
      sink_->Put(kRootArrayLowConstants + root_index, "RootLoConstant");
    } else {
      sink_->Put(kRootArrayHighConstants + root_index -
                     kRootArrayNumberOfLowConstantEncodings,
                 "RootHiConstant");
    }
  } else {
    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
    sink_->PutInt(root_index, "root_index");
  }
}


// Serialize |o| into the partial (context) snapshot.  Objects shared with
// the startup snapshot are referenced via the root array or the partial
// snapshot cache rather than being serialized again.
void PartialSerializer::SerializeObject(
    Object* o,
    HowToCode how_to_code,
    WhereToPoint where_to_point) {
  CHECK(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);

  if (heap_object->IsMap()) {
    // The code-caches link to context-specific code objects, which
    // the startup and context serializes cannot currently handle.
    ASSERT(Map::cast(heap_object)->code_cache() ==
           heap_object->GetHeap()->raw_unchecked_empty_fixed_array());
  }

  int root_index;
  if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
    PutRoot(root_index, heap_object, how_to_code, where_to_point);
    return;
  }

  if (ShouldBeInThePartialSnapshotCache(heap_object)) {
    int cache_index = PartialSnapshotCacheIndex(heap_object);
    sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
               "PartialSnapshotCache");
    sink_->PutInt(cache_index, "partial_snapshot_cache_index");
    return;
  }

  // Pointers from the partial snapshot to the objects in the startup snapshot
  // should go through the root array or through the partial snapshot cache.
  // If this is not the case you may have to add something to the root array.
  ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
  // All the symbols that the partial snapshot needs should be either in the
  // root table or in the partial snapshot cache.
  ASSERT(!heap_object->IsSymbol());

  if (address_mapper_.IsMapped(heap_object)) {
    int space = SpaceOfAlreadySerializedObject(heap_object);
    int address = address_mapper_.MappedTo(heap_object);
    SerializeReferenceToPreviousObject(space,
                                       address,
                                       how_to_code,
                                       where_to_point);
  } else {
    // Object has not yet been serialized.  Serialize it here.
    ObjectSerializer serializer(this,
                                heap_object,
                                sink_,
                                how_to_code,
                                where_to_point);
    serializer.Serialize();
  }
}


// Serialize one heap object: emit the tag+size header, record the simulated
// allocation so later references can point back at it, then serialize the
// map word followed by the object body.
void Serializer::ObjectSerializer::Serialize() {
  int space = Serializer::SpaceOfObject(object_);
  int size = object_->Size();

  sink_->Put(kNewObject + reference_representation_ + space,
             "ObjectSerialization");
  sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");

  LOG(i::Isolate::Current(),
      SnapshotPositionEvent(object_->address(), sink_->Position()));

  // Mark this object as already serialized.
  bool start_new_page;
  int offset = serializer_->Allocate(space, size, &start_new_page);
  serializer_->address_mapper()->AddMapping(object_, offset);
  if (start_new_page) {
    sink_->Put(kNewPage, "NewPage");
    sink_->PutSection(space, "NewPageSpace");
  }

  // Serialize the map (first word of the object).
  serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject);

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;
  object_->IterateBody(object_->map()->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}


// Serialize a range of pointer slots inside the current object.  Runs of
// Smis are flushed as raw data by OutputRawData; runs of identical root
// constants are compressed with the repeat encoding.
void Serializer::ObjectSerializer::VisitPointers(Object** start,
                                                 Object** end) {
  Object** current = start;
  while (current < end) {
    while (current < end && (*current)->IsSmi()) current++;
    if (current < end) OutputRawData(reinterpret_cast<Address>(current));

    while (current < end && !(*current)->IsSmi()) {
      HeapObject* current_contents = HeapObject::cast(*current);
      int root_index = serializer_->RootIndex(current_contents);
      // Repeats are not subject to the write barrier so there are only some
      // objects that can be used in a repeat encoding.  These are the early
      // ones in the root array that are never in new space.
1430 if (current != start && 1431 root_index != kInvalidRootIndex && 1432 root_index < kRootArrayNumberOfConstantEncodings && 1433 current_contents == current[-1]) { 1434 ASSERT(!HEAP->InNewSpace(current_contents)); 1435 int repeat_count = 1; 1436 while (current < end - 1 && current[repeat_count] == current_contents) { 1437 repeat_count++; 1438 } 1439 current += repeat_count; 1440 bytes_processed_so_far_ += repeat_count * kPointerSize; 1441 if (repeat_count > kMaxRepeats) { 1442 sink_->Put(kRepeat, "SerializeRepeats"); 1443 sink_->PutInt(repeat_count, "SerializeRepeats"); 1444 } else { 1445 sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats"); 1446 } 1447 } else { 1448 serializer_->SerializeObject(current_contents, kPlain, kStartOfObject); 1449 bytes_processed_so_far_ += kPointerSize; 1450 current++; 1451 } 1452 } 1453 } 1454} 1455 1456 1457void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) { 1458 Object** current = rinfo->target_object_address(); 1459 1460 OutputRawData(rinfo->target_address_address()); 1461 HowToCode representation = rinfo->IsCodedSpecially() ? 
kFromCode : kPlain; 1462 serializer_->SerializeObject(*current, representation, kStartOfObject); 1463 bytes_processed_so_far_ += rinfo->target_address_size(); 1464} 1465 1466 1467void Serializer::ObjectSerializer::VisitExternalReferences(Address* start, 1468 Address* end) { 1469 Address references_start = reinterpret_cast<Address>(start); 1470 OutputRawData(references_start); 1471 1472 for (Address* current = start; current < end; current++) { 1473 sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef"); 1474 int reference_id = serializer_->EncodeExternalReference(*current); 1475 sink_->PutInt(reference_id, "reference id"); 1476 } 1477 bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize); 1478} 1479 1480 1481void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) { 1482 Address references_start = rinfo->target_address_address(); 1483 OutputRawData(references_start); 1484 1485 Address* current = rinfo->target_reference_address(); 1486 int representation = rinfo->IsCodedSpecially() ? 1487 kFromCode + kStartOfObject : kPlain + kStartOfObject; 1488 sink_->Put(kExternalReference + representation, "ExternalRef"); 1489 int reference_id = serializer_->EncodeExternalReference(*current); 1490 sink_->PutInt(reference_id, "reference id"); 1491 bytes_processed_so_far_ += rinfo->target_address_size(); 1492} 1493 1494 1495void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) { 1496 Address target_start = rinfo->target_address_address(); 1497 OutputRawData(target_start); 1498 Address target = rinfo->target_address(); 1499 uint32_t encoding = serializer_->EncodeExternalReference(target); 1500 CHECK(target == NULL ? encoding == 0 : encoding != 0); 1501 int representation; 1502 // Can't use a ternary operator because of gcc. 
  if (rinfo->IsCodedSpecially()) {
    representation = kStartOfObject + kFromCode;
  } else {
    representation = kStartOfObject + kPlain;
  }
  sink_->Put(kExternalReference + representation, "ExternalReference");
  sink_->PutInt(encoding, "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}


// Serialize a call/jump target in the instruction stream as a reference to
// the target code object's first instruction.
void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
  CHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
  Address target_start = rinfo->target_address_address();
  OutputRawData(target_start);
  Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
  serializer_->SerializeObject(target, kFromCode, kFirstInstruction);
  bytes_processed_so_far_ += rinfo->target_address_size();
}


// Serialize a direct code entry slot (e.g. in a JSFunction) as a plain
// pointer to the target code object's first instruction.
void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
  Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
  OutputRawData(entry_address);
  serializer_->SerializeObject(target, kPlain, kFirstInstruction);
  bytes_processed_so_far_ += kPointerSize;
}


void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) {
  // We shouldn't have any global property cell references in code
  // objects in the snapshot.
  UNREACHABLE();
}


// Serialize the resource pointer of an external ASCII string.  Only the
// statically linked natives sources can be external at snapshot time; the
// matching natives index is emitted instead of the raw pointer.
void Serializer::ObjectSerializer::VisitExternalAsciiString(
    v8::String::ExternalAsciiStringResource** resource_pointer) {
  Address references_start = reinterpret_cast<Address>(resource_pointer);
  OutputRawData(references_start);
  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
    Object* source = HEAP->natives_source_cache()->get(i);
    if (!source->IsUndefined()) {
      ExternalAsciiString* string = ExternalAsciiString::cast(source);
      typedef v8::String::ExternalAsciiStringResource Resource;
      const Resource* resource = string->resource();
      if (resource == *resource_pointer) {
        sink_->Put(kNativesStringResource, "NativesStringResource");
        sink_->PutSection(i, "NativesStringResourceEnd");
        // Advance past the resource pointer field (sizeof a pointer).
        bytes_processed_so_far_ += sizeof(resource);
        return;
      }
    }
  }
  // One of the strings in the natives cache should match the resource.  We
  // can't serialize any other kinds of external strings.
  UNREACHABLE();
}


// Flush the raw (non-pointer) bytes of the object between the last processed
// position and |up_to| into the sink.
void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
  Address object_start = object_->address();
  int up_to_offset = static_cast<int>(up_to - object_start);
  int skipped = up_to_offset - bytes_processed_so_far_;
  // This assert will fail if the reloc info gives us the target_address_address
  // locations in a non-ascending order.  Luckily that doesn't happen.
  ASSERT(skipped >= 0);
  if (skipped != 0) {
    Address base = object_start + bytes_processed_so_far_;
    // Common lengths get a one-byte tag; anything else is tag + varint.
#define RAW_CASE(index, length)                                      \
    if (skipped == length) {                                         \
      sink_->PutSection(kRawData + index, "RawDataFixed");           \
    } else  /* NOLINT */
    COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
    {  /* NOLINT */
      sink_->Put(kRawData, "RawData");
      sink_->PutInt(skipped, "length");
    }
    for (int i = 0; i < skipped; i++) {
      unsigned int data = base[i];
      sink_->PutSection(data, "Byte");
    }
    bytes_processed_so_far_ += skipped;
  }
}


// Map a heap object to the serializer's space encoding.  Large objects do
// not share one space tag: they are classified as code, fixed array or data
// so the deserializer can pick the right large-object allocation.
int Serializer::SpaceOfObject(HeapObject* object) {
  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    AllocationSpace s = static_cast<AllocationSpace>(i);
    if (HEAP->InSpace(object, s)) {
      if (i == LO_SPACE) {
        if (object->IsCode()) {
          return kLargeCode;
        } else if (object->IsFixedArray()) {
          return kLargeFixedArray;
        } else {
          return kLargeData;
        }
      }
      return i;
    }
  }
  UNREACHABLE();
  return 0;
}


// As above, but back references do not distinguish the large object kinds,
// so the plain space index is enough.
int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    AllocationSpace s = static_cast<AllocationSpace>(i);
    if (HEAP->InSpace(object, s)) {
      return i;
    }
  }
  UNREACHABLE();
  return 0;
}


// Simulate an allocation of |size| bytes in |space| and return the simulated
// address (or object number for large object space).  *new_page is set when
// the allocation starts a fresh page.
int Serializer::Allocate(int space, int size, bool* new_page) {
  CHECK(space >= 0 && space < kNumberOfSpaces);
  if (SpaceIsLarge(space)) {
    // In large object space we merely number the objects instead of trying to
    // determine some sort of address.
    *new_page = true;
    large_object_total_ += size;
    return fullness_[LO_SPACE]++;
  }
  *new_page = false;
  if (fullness_[space] == 0) {
    *new_page = true;
  }
  if (SpaceIsPaged(space)) {
    // Paged spaces are a little special.  We encode their addresses as if the
    // pages were all contiguous and each page were filled up in the range
    // 0 - Page::kObjectAreaSize.  In practice the pages may not be contiguous
    // and allocation does not start at offset 0 in the page, but this scheme
    // means the deserializer can get the page number quickly by shifting the
    // serialized address.
    CHECK(IsPowerOf2(Page::kPageSize));
    int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
    CHECK(size <= SpaceAreaSize(space));
    if (used_in_this_page + size > SpaceAreaSize(space)) {
      // Doesn't fit: round the cursor up to the next page boundary.
      *new_page = true;
      fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
    }
  }
  int allocation_address = fullness_[space];
  fullness_[space] = allocation_address + size;
  return allocation_address;
}


// Usable object area of one page; code pages have their own (smaller) area.
int Serializer::SpaceAreaSize(int space) {
  if (space == CODE_SPACE) {
    return isolate_->memory_allocator()->CodePageAreaSize();
  } else {
    return Page::kPageSize - Page::kObjectStartOffset;
  }
}


} }  // namespace v8::internal