// serialize.cc revision 592a9fc1d8ea420377a2e7efd0600e20b058be2b
1// Copyright 2011 the V8 project authors. All rights reserved. 2// Redistribution and use in source and binary forms, with or without 3// modification, are permitted provided that the following conditions are 4// met: 5// 6// * Redistributions of source code must retain the above copyright 7// notice, this list of conditions and the following disclaimer. 8// * Redistributions in binary form must reproduce the above 9// copyright notice, this list of conditions and the following 10// disclaimer in the documentation and/or other materials provided 11// with the distribution. 12// * Neither the name of Google Inc. nor the names of its 13// contributors may be used to endorse or promote products derived 14// from this software without specific prior written permission. 15// 16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 28#include "v8.h" 29 30#include "accessors.h" 31#include "api.h" 32#include "bootstrapper.h" 33#include "execution.h" 34#include "global-handles.h" 35#include "ic-inl.h" 36#include "natives.h" 37#include "platform.h" 38#include "runtime.h" 39#include "serialize.h" 40#include "stub-cache.h" 41#include "v8threads.h" 42 43namespace v8 { 44namespace internal { 45 46 47// ----------------------------------------------------------------------------- 48// Coding of external references. 49 50// The encoding of an external reference. The type is in the high word. 51// The id is in the low word. 52static uint32_t EncodeExternal(TypeCode type, uint16_t id) { 53 return static_cast<uint32_t>(type) << 16 | id; 54} 55 56 57static int* GetInternalPointer(StatsCounter* counter) { 58 // All counters refer to dummy_counter, if deserializing happens without 59 // setting up counters. 60 static int dummy_counter = 0; 61 return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter; 62} 63 64 65ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) { 66 ExternalReferenceTable* external_reference_table = 67 isolate->external_reference_table(); 68 if (external_reference_table == NULL) { 69 external_reference_table = new ExternalReferenceTable(isolate); 70 isolate->set_external_reference_table(external_reference_table); 71 } 72 return external_reference_table; 73} 74 75 76void ExternalReferenceTable::AddFromId(TypeCode type, 77 uint16_t id, 78 const char* name, 79 Isolate* isolate) { 80 Address address; 81 switch (type) { 82 case C_BUILTIN: { 83 ExternalReference ref(static_cast<Builtins::CFunctionId>(id), isolate); 84 address = ref.address(); 85 break; 86 } 87 case BUILTIN: { 88 ExternalReference ref(static_cast<Builtins::Name>(id), isolate); 89 address = ref.address(); 90 break; 91 } 92 case RUNTIME_FUNCTION: { 93 ExternalReference ref(static_cast<Runtime::FunctionId>(id), isolate); 94 address = ref.address(); 95 break; 96 } 97 case IC_UTILITY: { 98 
ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)), 99 isolate); 100 address = ref.address(); 101 break; 102 } 103 default: 104 UNREACHABLE(); 105 return; 106 } 107 Add(address, type, id, name); 108} 109 110 111void ExternalReferenceTable::Add(Address address, 112 TypeCode type, 113 uint16_t id, 114 const char* name) { 115 ASSERT_NE(NULL, address); 116 ExternalReferenceEntry entry; 117 entry.address = address; 118 entry.code = EncodeExternal(type, id); 119 entry.name = name; 120 ASSERT_NE(0, entry.code); 121 refs_.Add(entry); 122 if (id > max_id_[type]) max_id_[type] = id; 123} 124 125 126void ExternalReferenceTable::PopulateTable(Isolate* isolate) { 127 for (int type_code = 0; type_code < kTypeCodeCount; type_code++) { 128 max_id_[type_code] = 0; 129 } 130 131 // The following populates all of the different type of external references 132 // into the ExternalReferenceTable. 133 // 134 // NOTE: This function was originally 100k of code. It has since been 135 // rewritten to be mostly table driven, as the callback macro style tends to 136 // very easily cause code bloat. Please be careful in the future when adding 137 // new references. 
138 139 struct RefTableEntry { 140 TypeCode type; 141 uint16_t id; 142 const char* name; 143 }; 144 145 static const RefTableEntry ref_table[] = { 146 // Builtins 147#define DEF_ENTRY_C(name, ignored) \ 148 { C_BUILTIN, \ 149 Builtins::c_##name, \ 150 "Builtins::" #name }, 151 152 BUILTIN_LIST_C(DEF_ENTRY_C) 153#undef DEF_ENTRY_C 154 155#define DEF_ENTRY_C(name, ignored) \ 156 { BUILTIN, \ 157 Builtins::k##name, \ 158 "Builtins::" #name }, 159#define DEF_ENTRY_A(name, kind, state, extra) DEF_ENTRY_C(name, ignored) 160 161 BUILTIN_LIST_C(DEF_ENTRY_C) 162 BUILTIN_LIST_A(DEF_ENTRY_A) 163 BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A) 164#undef DEF_ENTRY_C 165#undef DEF_ENTRY_A 166 167 // Runtime functions 168#define RUNTIME_ENTRY(name, nargs, ressize) \ 169 { RUNTIME_FUNCTION, \ 170 Runtime::k##name, \ 171 "Runtime::" #name }, 172 173 RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY) 174#undef RUNTIME_ENTRY 175 176 // IC utilities 177#define IC_ENTRY(name) \ 178 { IC_UTILITY, \ 179 IC::k##name, \ 180 "IC::" #name }, 181 182 IC_UTIL_LIST(IC_ENTRY) 183#undef IC_ENTRY 184 }; // end of ref_table[]. 
185 186 for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) { 187 AddFromId(ref_table[i].type, 188 ref_table[i].id, 189 ref_table[i].name, 190 isolate); 191 } 192 193#ifdef ENABLE_DEBUGGER_SUPPORT 194 // Debug addresses 195 Add(Debug_Address(Debug::k_after_break_target_address).address(isolate), 196 DEBUG_ADDRESS, 197 Debug::k_after_break_target_address << kDebugIdShift, 198 "Debug::after_break_target_address()"); 199 Add(Debug_Address(Debug::k_debug_break_slot_address).address(isolate), 200 DEBUG_ADDRESS, 201 Debug::k_debug_break_slot_address << kDebugIdShift, 202 "Debug::debug_break_slot_address()"); 203 Add(Debug_Address(Debug::k_debug_break_return_address).address(isolate), 204 DEBUG_ADDRESS, 205 Debug::k_debug_break_return_address << kDebugIdShift, 206 "Debug::debug_break_return_address()"); 207 Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(isolate), 208 DEBUG_ADDRESS, 209 Debug::k_restarter_frame_function_pointer << kDebugIdShift, 210 "Debug::restarter_frame_function_pointer_address()"); 211#endif 212 213 // Stat counters 214 struct StatsRefTableEntry { 215 StatsCounter* (Counters::*counter)(); 216 uint16_t id; 217 const char* name; 218 }; 219 220 const StatsRefTableEntry stats_ref_table[] = { 221#define COUNTER_ENTRY(name, caption) \ 222 { &Counters::name, \ 223 Counters::k_##name, \ 224 "Counters::" #name }, 225 226 STATS_COUNTER_LIST_1(COUNTER_ENTRY) 227 STATS_COUNTER_LIST_2(COUNTER_ENTRY) 228#undef COUNTER_ENTRY 229 }; // end of stats_ref_table[]. 
230 231 Counters* counters = isolate->counters(); 232 for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) { 233 Add(reinterpret_cast<Address>(GetInternalPointer( 234 (counters->*(stats_ref_table[i].counter))())), 235 STATS_COUNTER, 236 stats_ref_table[i].id, 237 stats_ref_table[i].name); 238 } 239 240 // Top addresses 241 242 const char* AddressNames[] = { 243#define BUILD_NAME_LITERAL(CamelName, hacker_name) \ 244 "Isolate::" #hacker_name "_address", 245 FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL) 246 NULL 247#undef C 248 }; 249 250 for (uint16_t i = 0; i < Isolate::kIsolateAddressCount; ++i) { 251 Add(isolate->get_address_from_id((Isolate::AddressId)i), 252 TOP_ADDRESS, i, AddressNames[i]); 253 } 254 255 // Accessors 256#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \ 257 Add((Address)&Accessors::name, \ 258 ACCESSOR, \ 259 Accessors::k##name, \ 260 "Accessors::" #name); 261 262 ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION) 263#undef ACCESSOR_DESCRIPTOR_DECLARATION 264 265 StubCache* stub_cache = isolate->stub_cache(); 266 267 // Stub cache tables 268 Add(stub_cache->key_reference(StubCache::kPrimary).address(), 269 STUB_CACHE_TABLE, 270 1, 271 "StubCache::primary_->key"); 272 Add(stub_cache->value_reference(StubCache::kPrimary).address(), 273 STUB_CACHE_TABLE, 274 2, 275 "StubCache::primary_->value"); 276 Add(stub_cache->key_reference(StubCache::kSecondary).address(), 277 STUB_CACHE_TABLE, 278 3, 279 "StubCache::secondary_->key"); 280 Add(stub_cache->value_reference(StubCache::kSecondary).address(), 281 STUB_CACHE_TABLE, 282 4, 283 "StubCache::secondary_->value"); 284 285 // Runtime entries 286 Add(ExternalReference::perform_gc_function(isolate).address(), 287 RUNTIME_ENTRY, 288 1, 289 "Runtime::PerformGC"); 290 Add(ExternalReference::fill_heap_number_with_random_function( 291 isolate).address(), 292 RUNTIME_ENTRY, 293 2, 294 "V8::FillHeapNumberWithRandom"); 295 Add(ExternalReference::random_uint32_function(isolate).address(), 296 
RUNTIME_ENTRY, 297 3, 298 "V8::Random"); 299 Add(ExternalReference::delete_handle_scope_extensions(isolate).address(), 300 RUNTIME_ENTRY, 301 4, 302 "HandleScope::DeleteExtensions"); 303 Add(ExternalReference:: 304 incremental_marking_record_write_function(isolate).address(), 305 RUNTIME_ENTRY, 306 5, 307 "IncrementalMarking::RecordWrite"); 308 Add(ExternalReference::store_buffer_overflow_function(isolate).address(), 309 RUNTIME_ENTRY, 310 6, 311 "StoreBuffer::StoreBufferOverflow"); 312 Add(ExternalReference:: 313 incremental_evacuation_record_write_function(isolate).address(), 314 RUNTIME_ENTRY, 315 7, 316 "IncrementalMarking::RecordWrite"); 317 318 319 320 // Miscellaneous 321 Add(ExternalReference::roots_array_start(isolate).address(), 322 UNCLASSIFIED, 323 3, 324 "Heap::roots_array_start()"); 325 Add(ExternalReference::address_of_stack_limit(isolate).address(), 326 UNCLASSIFIED, 327 4, 328 "StackGuard::address_of_jslimit()"); 329 Add(ExternalReference::address_of_real_stack_limit(isolate).address(), 330 UNCLASSIFIED, 331 5, 332 "StackGuard::address_of_real_jslimit()"); 333#ifndef V8_INTERPRETED_REGEXP 334 Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(), 335 UNCLASSIFIED, 336 6, 337 "RegExpStack::limit_address()"); 338 Add(ExternalReference::address_of_regexp_stack_memory_address( 339 isolate).address(), 340 UNCLASSIFIED, 341 7, 342 "RegExpStack::memory_address()"); 343 Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(), 344 UNCLASSIFIED, 345 8, 346 "RegExpStack::memory_size()"); 347 Add(ExternalReference::address_of_static_offsets_vector(isolate).address(), 348 UNCLASSIFIED, 349 9, 350 "OffsetsVector::static_offsets_vector"); 351#endif // V8_INTERPRETED_REGEXP 352 Add(ExternalReference::new_space_start(isolate).address(), 353 UNCLASSIFIED, 354 10, 355 "Heap::NewSpaceStart()"); 356 Add(ExternalReference::new_space_mask(isolate).address(), 357 UNCLASSIFIED, 358 11, 359 "Heap::NewSpaceMask()"); 360 
Add(ExternalReference::heap_always_allocate_scope_depth(isolate).address(), 361 UNCLASSIFIED, 362 12, 363 "Heap::always_allocate_scope_depth()"); 364 Add(ExternalReference::new_space_allocation_limit_address(isolate).address(), 365 UNCLASSIFIED, 366 14, 367 "Heap::NewSpaceAllocationLimitAddress()"); 368 Add(ExternalReference::new_space_allocation_top_address(isolate).address(), 369 UNCLASSIFIED, 370 15, 371 "Heap::NewSpaceAllocationTopAddress()"); 372#ifdef ENABLE_DEBUGGER_SUPPORT 373 Add(ExternalReference::debug_break(isolate).address(), 374 UNCLASSIFIED, 375 16, 376 "Debug::Break()"); 377 Add(ExternalReference::debug_step_in_fp_address(isolate).address(), 378 UNCLASSIFIED, 379 17, 380 "Debug::step_in_fp_addr()"); 381#endif 382 Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(), 383 UNCLASSIFIED, 384 18, 385 "add_two_doubles"); 386 Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(), 387 UNCLASSIFIED, 388 19, 389 "sub_two_doubles"); 390 Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(), 391 UNCLASSIFIED, 392 20, 393 "mul_two_doubles"); 394 Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(), 395 UNCLASSIFIED, 396 21, 397 "div_two_doubles"); 398 Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(), 399 UNCLASSIFIED, 400 22, 401 "mod_two_doubles"); 402 Add(ExternalReference::compare_doubles(isolate).address(), 403 UNCLASSIFIED, 404 23, 405 "compare_doubles"); 406#ifndef V8_INTERPRETED_REGEXP 407 Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(), 408 UNCLASSIFIED, 409 24, 410 "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()"); 411 Add(ExternalReference::re_check_stack_guard_state(isolate).address(), 412 UNCLASSIFIED, 413 25, 414 "RegExpMacroAssembler*::CheckStackGuardState()"); 415 Add(ExternalReference::re_grow_stack(isolate).address(), 416 UNCLASSIFIED, 417 26, 418 "NativeRegExpMacroAssembler::GrowStack()"); 419 
Add(ExternalReference::re_word_character_map().address(), 420 UNCLASSIFIED, 421 27, 422 "NativeRegExpMacroAssembler::word_character_map"); 423#endif // V8_INTERPRETED_REGEXP 424 // Keyed lookup cache. 425 Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(), 426 UNCLASSIFIED, 427 28, 428 "KeyedLookupCache::keys()"); 429 Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(), 430 UNCLASSIFIED, 431 29, 432 "KeyedLookupCache::field_offsets()"); 433 Add(ExternalReference::transcendental_cache_array_address(isolate).address(), 434 UNCLASSIFIED, 435 30, 436 "TranscendentalCache::caches()"); 437 Add(ExternalReference::handle_scope_next_address().address(), 438 UNCLASSIFIED, 439 31, 440 "HandleScope::next"); 441 Add(ExternalReference::handle_scope_limit_address().address(), 442 UNCLASSIFIED, 443 32, 444 "HandleScope::limit"); 445 Add(ExternalReference::handle_scope_level_address().address(), 446 UNCLASSIFIED, 447 33, 448 "HandleScope::level"); 449 Add(ExternalReference::new_deoptimizer_function(isolate).address(), 450 UNCLASSIFIED, 451 34, 452 "Deoptimizer::New()"); 453 Add(ExternalReference::compute_output_frames_function(isolate).address(), 454 UNCLASSIFIED, 455 35, 456 "Deoptimizer::ComputeOutputFrames()"); 457 Add(ExternalReference::address_of_min_int().address(), 458 UNCLASSIFIED, 459 36, 460 "LDoubleConstant::min_int"); 461 Add(ExternalReference::address_of_one_half().address(), 462 UNCLASSIFIED, 463 37, 464 "LDoubleConstant::one_half"); 465 Add(ExternalReference::isolate_address().address(), 466 UNCLASSIFIED, 467 38, 468 "isolate"); 469 Add(ExternalReference::address_of_minus_zero().address(), 470 UNCLASSIFIED, 471 39, 472 "LDoubleConstant::minus_zero"); 473 Add(ExternalReference::address_of_negative_infinity().address(), 474 UNCLASSIFIED, 475 40, 476 "LDoubleConstant::negative_infinity"); 477 Add(ExternalReference::power_double_double_function(isolate).address(), 478 UNCLASSIFIED, 479 41, 480 "power_double_double_function"); 481 
Add(ExternalReference::power_double_int_function(isolate).address(), 482 UNCLASSIFIED, 483 42, 484 "power_double_int_function"); 485 Add(ExternalReference::store_buffer_top(isolate).address(), 486 UNCLASSIFIED, 487 43, 488 "store_buffer_top"); 489 Add(ExternalReference::address_of_canonical_non_hole_nan().address(), 490 UNCLASSIFIED, 491 44, 492 "canonical_nan"); 493 Add(ExternalReference::address_of_the_hole_nan().address(), 494 UNCLASSIFIED, 495 45, 496 "the_hole_nan"); 497} 498 499 500ExternalReferenceEncoder::ExternalReferenceEncoder() 501 : encodings_(Match), 502 isolate_(Isolate::Current()) { 503 ExternalReferenceTable* external_references = 504 ExternalReferenceTable::instance(isolate_); 505 for (int i = 0; i < external_references->size(); ++i) { 506 Put(external_references->address(i), i); 507 } 508} 509 510 511uint32_t ExternalReferenceEncoder::Encode(Address key) const { 512 int index = IndexOf(key); 513 ASSERT(key == NULL || index >= 0); 514 return index >=0 ? 515 ExternalReferenceTable::instance(isolate_)->code(index) : 0; 516} 517 518 519const char* ExternalReferenceEncoder::NameOfAddress(Address key) const { 520 int index = IndexOf(key); 521 return index >= 0 ? 522 ExternalReferenceTable::instance(isolate_)->name(index) : NULL; 523} 524 525 526int ExternalReferenceEncoder::IndexOf(Address key) const { 527 if (key == NULL) return -1; 528 HashMap::Entry* entry = 529 const_cast<HashMap&>(encodings_).Lookup(key, Hash(key), false); 530 return entry == NULL 531 ? 
-1 532 : static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); 533} 534 535 536void ExternalReferenceEncoder::Put(Address key, int index) { 537 HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true); 538 entry->value = reinterpret_cast<void*>(index); 539} 540 541 542ExternalReferenceDecoder::ExternalReferenceDecoder() 543 : encodings_(NewArray<Address*>(kTypeCodeCount)), 544 isolate_(Isolate::Current()) { 545 ExternalReferenceTable* external_references = 546 ExternalReferenceTable::instance(isolate_); 547 for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) { 548 int max = external_references->max_id(type) + 1; 549 encodings_[type] = NewArray<Address>(max + 1); 550 } 551 for (int i = 0; i < external_references->size(); ++i) { 552 Put(external_references->code(i), external_references->address(i)); 553 } 554} 555 556 557ExternalReferenceDecoder::~ExternalReferenceDecoder() { 558 for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) { 559 DeleteArray(encodings_[type]); 560 } 561 DeleteArray(encodings_); 562} 563 564 565bool Serializer::serialization_enabled_ = false; 566bool Serializer::too_late_to_enable_now_ = false; 567 568 569Deserializer::Deserializer(SnapshotByteSource* source) 570 : isolate_(NULL), 571 source_(source), 572 external_reference_decoder_(NULL) { 573} 574 575 576// This routine both allocates a new object, and also keeps 577// track of where objects have been allocated so that we can 578// fix back references when deserializing. 
// Allocates raw space for a deserialized object of `size` bytes in the given
// space, and records the bookkeeping needed to resolve back references:
// high_water_ per non-large space, and pages_[LO_SPACE] for large objects.
// Allocation is assumed to succeed (deserialization runs under
// AlwaysAllocateScope), hence the unchecked MaybeObject conversions.
Address Deserializer::Allocate(int space_index, Space* space, int size) {
  Address address;
  if (!SpaceIsLarge(space_index)) {
    // Paged-space objects must fit on a single page.
    ASSERT(!SpaceIsPaged(space_index) ||
           size <= Page::kPageSize - Page::kObjectStartOffset);
    MaybeObject* maybe_new_allocation;
    if (space_index == NEW_SPACE) {
      maybe_new_allocation =
          reinterpret_cast<NewSpace*>(space)->AllocateRaw(size);
    } else {
      maybe_new_allocation =
          reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
    }
    // Cannot fail here; GC is disabled during deserialization.
    ASSERT(!maybe_new_allocation->IsFailure());
    Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
    HeapObject* new_object = HeapObject::cast(new_allocation);
    address = new_object->address();
    // Track the top of allocation so back references (offsets from the end)
    // can be resolved later.
    high_water_[space_index] = address + size;
  } else {
    ASSERT(SpaceIsLarge(space_index));
    LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
    Object* new_allocation;
    // Executability depends on which pseudo large-object space was encoded.
    if (space_index == kLargeData || space_index == kLargeFixedArray) {
      new_allocation =
          lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked();
    } else {
      ASSERT_EQ(kLargeCode, space_index);
      new_allocation =
          lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked();
    }
    HeapObject* new_object = HeapObject::cast(new_allocation);
    // Record all large objects in the same space.
    address = new_object->address();
    pages_[LO_SPACE].Add(address);
  }
  last_object_address_ = address;
  return address;
}


// This returns the address of an object that has been described in the
// snapshot as being offset bytes back in a particular space.
// Resolves a back reference: the snapshot encodes a word offset back from
// the current allocation point (high_water_) of a non-large space.
HeapObject* Deserializer::GetAddressFromEnd(int space) {
  int offset = source_->GetInt();
  ASSERT(!SpaceIsLarge(space));
  // Offsets are stored in words; convert to bytes.
  offset <<= kObjectAlignmentBits;
  return HeapObject::FromAddress(high_water_[space] - offset);
}


// This returns the address of an object that has been described in the
// snapshot as being offset bytes into a particular space.
HeapObject* Deserializer::GetAddressFromStart(int space) {
  int offset = source_->GetInt();
  if (SpaceIsLarge(space)) {
    // Large spaces have one object per 'page', so the "offset" is really an
    // index into the list of recorded large-object addresses.
    return HeapObject::FromAddress(pages_[LO_SPACE][offset]);
  }
  // Offsets are stored in words; convert to bytes.
  offset <<= kObjectAlignmentBits;
  if (space == NEW_SPACE) {
    // New space has only one space - numbered 0.
    return HeapObject::FromAddress(pages_[space][0] + offset);
  }
  ASSERT(SpaceIsPaged(space));
  // Paged spaces: split the offset into a page index and an offset within
  // that page.
  int page_of_pointee = offset >> kPageSizeBits;
  Address object_address = pages_[space][page_of_pointee] +
                           (offset & Page::kPageAlignmentMask);
  return HeapObject::FromAddress(object_address);
}


// Deserializes the complete startup snapshot into the current isolate's heap
// by visiting all strong and weak roots.  Must run on a pristine heap with no
// active threads or handles.
void Deserializer::Deserialize() {
  isolate_ = Isolate::Current();
  ASSERT(isolate_ != NULL);
  // Don't GC while deserializing - just expand the heap.
  AlwaysAllocateScope always_allocate;
  // Don't use the free lists while deserializing.
  LinearAllocationScope allocate_linearly;
  // No active threads.
  ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
  // No active handles.
  ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
  // Make sure the entire partial snapshot cache is traversed, filling it with
  // valid object pointers.
  isolate_->set_serialize_partial_snapshot_cache_length(
      Isolate::kPartialSnapshotCacheCapacity);
  ASSERT_EQ(NULL, external_reference_decoder_);
  external_reference_decoder_ = new ExternalReferenceDecoder();
  isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
  isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);

  // The global contexts list is rebuilt at runtime; reset it to undefined.
  isolate_->heap()->set_global_contexts_list(
      isolate_->heap()->undefined_value());

  // Update data pointers to the external strings containing natives sources.
  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
    Object* source = isolate_->heap()->natives_source_cache()->get(i);
    if (!source->IsUndefined()) {
      ExternalAsciiString::cast(source)->update_data_cache();
    }
  }
}


// Deserializes a partial snapshot (e.g. a context), writing the single
// resulting root object through `root`.  Reuses an existing external
// reference decoder if one was already created.
void Deserializer::DeserializePartial(Object** root) {
  isolate_ = Isolate::Current();
  // Don't GC while deserializing - just expand the heap.
  AlwaysAllocateScope always_allocate;
  // Don't use the free lists while deserializing.
  LinearAllocationScope allocate_linearly;
  if (external_reference_decoder_ == NULL) {
    external_reference_decoder_ = new ExternalReferenceDecoder();
  }
  VisitPointer(root);
}


Deserializer::~Deserializer() {
  // The entire snapshot byte stream must have been consumed.
  ASSERT(source_->AtEOF());
  if (external_reference_decoder_) {
    delete external_reference_decoder_;
    external_reference_decoder_ = NULL;
  }
}


// This is called on the roots.  It is the driver of the deserialization
// process.  It is also called on the body of each function.
void Deserializer::VisitPointers(Object** start, Object** end) {
  // The space must be new space.  Any other space would cause ReadChunk to try
  // to update the remembered using NULL as the address.
  ReadChunk(start, end, NEW_SPACE, NULL);
}


// This routine writes the new object into the pointer provided and then
// returns true if the new object was in young space and false otherwise.
// The reason for this strange interface is that otherwise the object is
// written very late, which means the FreeSpace map is not set up by the
// time we need to use it to mark the space at the end of a page free.
//
// Reads one object from the snapshot stream: allocates space for it in the
// given space, publishes its tagged address through `write_back` immediately,
// then fills the body via ReadChunk (which may recurse into nested objects).
void Deserializer::ReadObject(int space_number,
                              Space* space,
                              Object** write_back) {
  // The stream stores the object size in words.
  int size = source_->GetInt() << kObjectAlignmentBits;
  Address address = Allocate(space_number, space, size);
  *write_back = HeapObject::FromAddress(address);
  Object** current = reinterpret_cast<Object**>(address);
  Object** limit = current + (size >> kPointerSizeLog2);
  if (FLAG_log_snapshot_positions) {
    LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
  }
  ReadChunk(current, limit, space_number, address);
#ifdef DEBUG
  // Sanity check: code objects must end up in a code space and vice versa.
  bool is_codespace = (space == HEAP->code_space()) ||
      ((space == HEAP->lo_space()) && (space_number == kLargeCode));
  ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace);
#endif
}


// This macro is always used with a constant argument so it should all fold
// away to almost nothing in the generated code.  It might be nicer to do this
// with the ternary operator but there are type issues with that.
742#define ASSIGN_DEST_SPACE(space_number) \ 743 Space* dest_space; \ 744 if (space_number == NEW_SPACE) { \ 745 dest_space = isolate->heap()->new_space(); \ 746 } else if (space_number == OLD_POINTER_SPACE) { \ 747 dest_space = isolate->heap()->old_pointer_space(); \ 748 } else if (space_number == OLD_DATA_SPACE) { \ 749 dest_space = isolate->heap()->old_data_space(); \ 750 } else if (space_number == CODE_SPACE) { \ 751 dest_space = isolate->heap()->code_space(); \ 752 } else if (space_number == MAP_SPACE) { \ 753 dest_space = isolate->heap()->map_space(); \ 754 } else if (space_number == CELL_SPACE) { \ 755 dest_space = isolate->heap()->cell_space(); \ 756 } else { \ 757 ASSERT(space_number >= LO_SPACE); \ 758 dest_space = isolate->heap()->lo_space(); \ 759 } 760 761 762static const int kUnknownOffsetFromStart = -1; 763 764 765void Deserializer::ReadChunk(Object** current, 766 Object** limit, 767 int source_space, 768 Address current_object_address) { 769 Isolate* const isolate = isolate_; 770 bool write_barrier_needed = (current_object_address != NULL && 771 source_space != NEW_SPACE && 772 source_space != CELL_SPACE && 773 source_space != CODE_SPACE && 774 source_space != OLD_DATA_SPACE); 775 while (current < limit) { 776 int data = source_->Get(); 777 switch (data) { 778#define CASE_STATEMENT(where, how, within, space_number) \ 779 case where + how + within + space_number: \ 780 ASSERT((where & ~kPointedToMask) == 0); \ 781 ASSERT((how & ~kHowToCodeMask) == 0); \ 782 ASSERT((within & ~kWhereToPointMask) == 0); \ 783 ASSERT((space_number & ~kSpaceMask) == 0); 784 785#define CASE_BODY(where, how, within, space_number_if_any, offset_from_start) \ 786 { \ 787 bool emit_write_barrier = false; \ 788 bool current_was_incremented = false; \ 789 int space_number = space_number_if_any == kAnyOldSpace ? 
\ 790 (data & kSpaceMask) : space_number_if_any; \ 791 if (where == kNewObject && how == kPlain && within == kStartOfObject) {\ 792 ASSIGN_DEST_SPACE(space_number) \ 793 ReadObject(space_number, dest_space, current); \ 794 emit_write_barrier = (space_number == NEW_SPACE); \ 795 } else { \ 796 Object* new_object = NULL; /* May not be a real Object pointer. */ \ 797 if (where == kNewObject) { \ 798 ASSIGN_DEST_SPACE(space_number) \ 799 ReadObject(space_number, dest_space, &new_object); \ 800 } else if (where == kRootArray) { \ 801 int root_id = source_->GetInt(); \ 802 new_object = isolate->heap()->roots_array_start()[root_id]; \ 803 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ 804 } else if (where == kPartialSnapshotCache) { \ 805 int cache_index = source_->GetInt(); \ 806 new_object = isolate->serialize_partial_snapshot_cache() \ 807 [cache_index]; \ 808 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ 809 } else if (where == kExternalReference) { \ 810 int reference_id = source_->GetInt(); \ 811 Address address = external_reference_decoder_-> \ 812 Decode(reference_id); \ 813 new_object = reinterpret_cast<Object*>(address); \ 814 } else if (where == kBackref) { \ 815 emit_write_barrier = (space_number == NEW_SPACE); \ 816 new_object = GetAddressFromEnd(data & kSpaceMask); \ 817 } else { \ 818 ASSERT(where == kFromStart); \ 819 if (offset_from_start == kUnknownOffsetFromStart) { \ 820 emit_write_barrier = (space_number == NEW_SPACE); \ 821 new_object = GetAddressFromStart(data & kSpaceMask); \ 822 } else { \ 823 Address object_address = pages_[space_number][0] + \ 824 (offset_from_start << kObjectAlignmentBits); \ 825 new_object = HeapObject::FromAddress(object_address); \ 826 } \ 827 } \ 828 if (within == kFirstInstruction) { \ 829 Code* new_code_object = reinterpret_cast<Code*>(new_object); \ 830 new_object = reinterpret_cast<Object*>( \ 831 new_code_object->instruction_start()); \ 832 } \ 833 if (how == kFromCode) { \ 834 
Address location_of_branch_data = \ 835 reinterpret_cast<Address>(current); \ 836 Assembler::set_target_at(location_of_branch_data, \ 837 reinterpret_cast<Address>(new_object)); \ 838 if (within == kFirstInstruction) { \ 839 location_of_branch_data += Assembler::kCallTargetSize; \ 840 current = reinterpret_cast<Object**>(location_of_branch_data); \ 841 current_was_incremented = true; \ 842 } \ 843 } else { \ 844 *current = new_object; \ 845 } \ 846 } \ 847 if (emit_write_barrier && write_barrier_needed) { \ 848 Address current_address = reinterpret_cast<Address>(current); \ 849 isolate->heap()->RecordWrite( \ 850 current_object_address, \ 851 static_cast<int>(current_address - current_object_address)); \ 852 } \ 853 if (!current_was_incremented) { \ 854 current++; \ 855 } \ 856 break; \ 857 } \ 858 859// This generates a case and a body for each space. The large object spaces are 860// very rare in snapshots so they are grouped in one body. 861#define ONE_PER_SPACE(where, how, within) \ 862 CASE_STATEMENT(where, how, within, NEW_SPACE) \ 863 CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \ 864 CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \ 865 CASE_BODY(where, how, within, OLD_DATA_SPACE, kUnknownOffsetFromStart) \ 866 CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \ 867 CASE_BODY(where, how, within, OLD_POINTER_SPACE, kUnknownOffsetFromStart) \ 868 CASE_STATEMENT(where, how, within, CODE_SPACE) \ 869 CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \ 870 CASE_STATEMENT(where, how, within, CELL_SPACE) \ 871 CASE_BODY(where, how, within, CELL_SPACE, kUnknownOffsetFromStart) \ 872 CASE_STATEMENT(where, how, within, MAP_SPACE) \ 873 CASE_BODY(where, how, within, MAP_SPACE, kUnknownOffsetFromStart) \ 874 CASE_STATEMENT(where, how, within, kLargeData) \ 875 CASE_STATEMENT(where, how, within, kLargeCode) \ 876 CASE_STATEMENT(where, how, within, kLargeFixedArray) \ 877 CASE_BODY(where, how, within, kAnyOldSpace, 
kUnknownOffsetFromStart) 878 879// This generates a case and a body for the new space (which has to do extra 880// write barrier handling) and handles the other spaces with 8 fall-through 881// cases and one body. 882#define ALL_SPACES(where, how, within) \ 883 CASE_STATEMENT(where, how, within, NEW_SPACE) \ 884 CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \ 885 CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \ 886 CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \ 887 CASE_STATEMENT(where, how, within, CODE_SPACE) \ 888 CASE_STATEMENT(where, how, within, CELL_SPACE) \ 889 CASE_STATEMENT(where, how, within, MAP_SPACE) \ 890 CASE_STATEMENT(where, how, within, kLargeData) \ 891 CASE_STATEMENT(where, how, within, kLargeCode) \ 892 CASE_STATEMENT(where, how, within, kLargeFixedArray) \ 893 CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart) 894 895#define ONE_PER_CODE_SPACE(where, how, within) \ 896 CASE_STATEMENT(where, how, within, CODE_SPACE) \ 897 CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \ 898 CASE_STATEMENT(where, how, within, kLargeCode) \ 899 CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart) 900 901#define FOUR_CASES(byte_code) \ 902 case byte_code: \ 903 case byte_code + 1: \ 904 case byte_code + 2: \ 905 case byte_code + 3: 906 907#define SIXTEEN_CASES(byte_code) \ 908 FOUR_CASES(byte_code) \ 909 FOUR_CASES(byte_code + 4) \ 910 FOUR_CASES(byte_code + 8) \ 911 FOUR_CASES(byte_code + 12) 912 913 // We generate 15 cases and bodies that process special tags that combine 914 // the raw data tag and the length into one byte. 
915#define RAW_CASE(index, size) \ 916 case kRawData + index: { \ 917 byte* raw_data_out = reinterpret_cast<byte*>(current); \ 918 source_->CopyRaw(raw_data_out, size); \ 919 current = reinterpret_cast<Object**>(raw_data_out + size); \ 920 break; \ 921 } 922 COMMON_RAW_LENGTHS(RAW_CASE) 923#undef RAW_CASE 924 925 // Deserialize a chunk of raw data that doesn't have one of the popular 926 // lengths. 927 case kRawData: { 928 int size = source_->GetInt(); 929 byte* raw_data_out = reinterpret_cast<byte*>(current); 930 source_->CopyRaw(raw_data_out, size); 931 current = reinterpret_cast<Object**>(raw_data_out + size); 932 break; 933 } 934 935 SIXTEEN_CASES(kRootArrayLowConstants) 936 SIXTEEN_CASES(kRootArrayHighConstants) { 937 int root_id = RootArrayConstantFromByteCode(data); 938 Object* object = isolate->heap()->roots_array_start()[root_id]; 939 ASSERT(!isolate->heap()->InNewSpace(object)); 940 *current++ = object; 941 break; 942 } 943 944 case kRepeat: { 945 int repeats = source_->GetInt(); 946 Object* object = current[-1]; 947 ASSERT(!isolate->heap()->InNewSpace(object)); 948 for (int i = 0; i < repeats; i++) current[i] = object; 949 current += repeats; 950 break; 951 } 952 953 STATIC_ASSERT(kRootArrayNumberOfConstantEncodings == 954 Heap::kOldSpaceRoots); 955 STATIC_ASSERT(kMaxRepeats == 12); 956 FOUR_CASES(kConstantRepeat) 957 FOUR_CASES(kConstantRepeat + 4) 958 FOUR_CASES(kConstantRepeat + 8) { 959 int repeats = RepeatsForCode(data); 960 Object* object = current[-1]; 961 ASSERT(!isolate->heap()->InNewSpace(object)); 962 for (int i = 0; i < repeats; i++) current[i] = object; 963 current += repeats; 964 break; 965 } 966 967 // Deserialize a new object and write a pointer to it to the current 968 // object. 
969 ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject) 970 // Support for direct instruction pointers in functions 971 ONE_PER_CODE_SPACE(kNewObject, kPlain, kFirstInstruction) 972 // Deserialize a new code object and write a pointer to its first 973 // instruction to the current code object. 974 ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction) 975 // Find a recently deserialized object using its offset from the current 976 // allocation point and write a pointer to it to the current object. 977 ALL_SPACES(kBackref, kPlain, kStartOfObject) 978 // Find a recently deserialized code object using its offset from the 979 // current allocation point and write a pointer to its first instruction 980 // to the current code object or the instruction pointer in a function 981 // object. 982 ALL_SPACES(kBackref, kFromCode, kFirstInstruction) 983 ALL_SPACES(kBackref, kPlain, kFirstInstruction) 984 // Find an already deserialized object using its offset from the start 985 // and write a pointer to it to the current object. 986 ALL_SPACES(kFromStart, kPlain, kStartOfObject) 987 ALL_SPACES(kFromStart, kPlain, kFirstInstruction) 988 // Find an already deserialized code object using its offset from the 989 // start and write a pointer to its first instruction to the current code 990 // object. 991 ALL_SPACES(kFromStart, kFromCode, kFirstInstruction) 992 // Find an object in the roots array and write a pointer to it to the 993 // current object. 994 CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0) 995 CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart) 996 // Find an object in the partial snapshots cache and write a pointer to it 997 // to the current object. 998 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0) 999 CASE_BODY(kPartialSnapshotCache, 1000 kPlain, 1001 kStartOfObject, 1002 0, 1003 kUnknownOffsetFromStart) 1004 // Find an code entry in the partial snapshots cache and 1005 // write a pointer to it to the current object. 
1006 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kFirstInstruction, 0) 1007 CASE_BODY(kPartialSnapshotCache, 1008 kPlain, 1009 kFirstInstruction, 1010 0, 1011 kUnknownOffsetFromStart) 1012 // Find an external reference and write a pointer to it to the current 1013 // object. 1014 CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0) 1015 CASE_BODY(kExternalReference, 1016 kPlain, 1017 kStartOfObject, 1018 0, 1019 kUnknownOffsetFromStart) 1020 // Find an external reference and write a pointer to it in the current 1021 // code object. 1022 CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0) 1023 CASE_BODY(kExternalReference, 1024 kFromCode, 1025 kStartOfObject, 1026 0, 1027 kUnknownOffsetFromStart) 1028 1029#undef CASE_STATEMENT 1030#undef CASE_BODY 1031#undef ONE_PER_SPACE 1032#undef ALL_SPACES 1033#undef ASSIGN_DEST_SPACE 1034 1035 case kNewPage: { 1036 int space = source_->Get(); 1037 pages_[space].Add(last_object_address_); 1038 if (space == CODE_SPACE) { 1039 CPU::FlushICache(last_object_address_, Page::kPageSize); 1040 } 1041 break; 1042 } 1043 1044 case kSkip: { 1045 current++; 1046 break; 1047 } 1048 1049 case kNativesStringResource: { 1050 int index = source_->Get(); 1051 Vector<const char> source_vector = Natives::GetRawScriptSource(index); 1052 NativesExternalStringResource* resource = 1053 new NativesExternalStringResource(isolate->bootstrapper(), 1054 source_vector.start(), 1055 source_vector.length()); 1056 *current++ = reinterpret_cast<Object*>(resource); 1057 break; 1058 } 1059 1060 case kSynchronize: { 1061 // If we get here then that indicates that you have a mismatch between 1062 // the number of GC roots when serializing and deserializing. 
        UNREACHABLE();
      }

      default:
        // Any other byte code indicates a corrupt or incompatible snapshot.
        UNREACHABLE();
    }
  }
  ASSERT_EQ(current, limit);
}


// Writes an unsigned integer in a variable-length format: 7 payload bits per
// byte, most-significant group first.  Every byte except the last has the
// 0x80 continuation bit set; the final byte carries the low 7 bits alone.
void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
  // Largest multiple of 7 that fits in the bit width of uintptr_t.
  const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7;
  for (int shift = max_shift; shift > 0; shift -= 7) {
    // Leading all-zero groups are suppressed to keep the encoding short.
    if (integer >= static_cast<uintptr_t>(1u) << shift) {
      Put((static_cast<int>((integer >> shift)) & 0x7f) | 0x80, "IntPart");
    }
  }
  PutSection(static_cast<int>(integer & 0x7f), "IntLastPart");
}

#ifdef DEBUG

// Debug-only consistency check: reads the kSynchronize tag and the
// NUL-terminated tag string written by Serializer::Synchronize, optionally
// echoing it under --debug-serialization.
void Deserializer::Synchronize(const char* tag) {
  int data = source_->Get();
  // If this assert fails then that indicates that you have a mismatch between
  // the number of GC roots when serializing and deserializing.
  ASSERT_EQ(kSynchronize, data);
  do {
    int character = source_->Get();
    if (character == 0) break;
    if (FLAG_debug_serialization) {
      PrintF("%c", character);
    }
  } while (true);
  if (FLAG_debug_serialization) {
    PrintF("\n");
  }
}


// Debug-only counterpart of Deserializer::Synchronize: emits the kSynchronize
// byte code followed by the tag as a NUL-terminated character sequence.
void Serializer::Synchronize(const char* tag) {
  sink_->Put(kSynchronize, tag);
  int character;
  do {
    character = *tag++;
    sink_->PutSection(character, "TagCharacter");
  } while (character != 0);
}

#endif

Serializer::Serializer(SnapshotByteSink* sink)
    : sink_(sink),
      current_root_index_(0),
      external_reference_encoder_(new ExternalReferenceEncoder),
      large_object_total_(0),
      root_index_wave_front_(0) {
  isolate_ = Isolate::Current();
  // The serializer is meant to be used only to generate initial heap images
  // from a context in which there is only one isolate.
  ASSERT(isolate_->IsDefaultIsolate());
  // No allocation has been simulated in any space yet.
  for (int i = 0; i <= LAST_SPACE; i++) {
    fullness_[i] = 0;
  }
}


Serializer::~Serializer() {
  // Owned by this class: allocated in the constructor's initializer list.
  delete external_reference_encoder_;
}


// Serializes all strong GC roots.  Requires a quiescent, pristine isolate:
// no other threads, no live handles and no installed extensions, as none of
// those can be represented in the startup snapshot.
void StartupSerializer::SerializeStrongReferences() {
  Isolate* isolate = Isolate::Current();
  // No active threads.
  CHECK_EQ(NULL, Isolate::Current()->thread_manager()->FirstThreadStateInUse());
  // No active or weak handles.
  CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
  CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
  // We don't support serializing installed extensions.
  CHECK(!isolate->has_installed_extensions());

  HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}


void PartialSerializer::Serialize(Object** object) {
  this->VisitPointer(object);
  Isolate* isolate = Isolate::Current();

  // After we have done the partial serialization the partial snapshot cache
  // will contain some references needed to decode the partial snapshot.  We
  // fill it up with undefineds so it has a predictable length so the
  // deserialization code doesn't need to know the length.
  for (int index = isolate->serialize_partial_snapshot_cache_length();
       index < Isolate::kPartialSnapshotCacheCapacity;
       index++) {
    isolate->serialize_partial_snapshot_cache()[index] =
        isolate->heap()->undefined_value();
    // Let the startup serializer emit the cache entry so the partial
    // snapshot can refer to it by index.
    startup_serializer_->VisitPointer(
        &isolate->serialize_partial_snapshot_cache()[index]);
  }
  isolate->set_serialize_partial_snapshot_cache_length(
      Isolate::kPartialSnapshotCacheCapacity);
}


void Serializer::VisitPointers(Object** start, Object** end) {
  Isolate* isolate = Isolate::Current();

  for (Object** current = start; current < end; current++) {
    if (start == isolate->heap()->roots_array_start()) {
      // Track how far into the root array we have serialized; RootIndex()
      // only consults roots below this wave front.
      root_index_wave_front_ =
          Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
    }
    if (reinterpret_cast<Address>(current) ==
        isolate->heap()->store_buffer()->TopAddress()) {
      // The store buffer top is not a heap reference; tell the deserializer
      // to leave this slot untouched (kSkip just advances past it).
      sink_->Put(kSkip, "Skip");
    } else if ((*current)->IsSmi()) {
      // Smis are immediates: copy their raw bytes directly.
      sink_->Put(kRawData, "RawData");
      sink_->PutInt(kPointerSize, "length");
      for (int i = 0; i < kPointerSize; i++) {
        sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
      }
    } else {
      SerializeObject(*current, kPlain, kStartOfObject);
    }
  }
}


// This ensures that the partial snapshot cache keeps things alive during GC and
// tracks their movement.  When it is called during serialization of the startup
// snapshot the partial snapshot is empty, so nothing happens.  When the partial
// (context) snapshot is created, this array is populated with the pointers that
// the partial snapshot will need.  As that happens we emit serialized objects to
// the startup snapshot that correspond to the elements of this cache array.  On
// deserialization we therefore need to visit the cache array.  This fills it up
// with pointers to deserialized objects.
void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
  Isolate* isolate = Isolate::Current();
  // Visit every cache slot that is currently in use.
  visitor->VisitPointers(
      isolate->serialize_partial_snapshot_cache(),
      &isolate->serialize_partial_snapshot_cache()[
          isolate->serialize_partial_snapshot_cache_length()]);
}


// When deserializing we need to set the size of the snapshot cache.  This means
// the root iteration code (above) will iterate over array elements, writing the
// references to deserialized objects in them.
void SerializerDeserializer::SetSnapshotCacheSize(int size) {
  Isolate::Current()->set_serialize_partial_snapshot_cache_length(size);
}


// Returns the index of heap_object in the partial snapshot cache, adding it
// (and emitting it into the startup snapshot) if it is not there yet.
int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
  Isolate* isolate = Isolate::Current();

  // Linear scan over the entries used so far.
  for (int i = 0;
       i < isolate->serialize_partial_snapshot_cache_length();
       i++) {
    Object* entry = isolate->serialize_partial_snapshot_cache()[i];
    if (entry == heap_object) return i;
  }

  // We didn't find the object in the cache.  So we add it to the cache and
  // then visit the pointer so that it becomes part of the startup snapshot
  // and we can refer to it from the partial snapshot.
  int length = isolate->serialize_partial_snapshot_cache_length();
  CHECK(length < Isolate::kPartialSnapshotCacheCapacity);
  isolate->serialize_partial_snapshot_cache()[length] = heap_object;
  startup_serializer_->VisitPointer(
      &isolate->serialize_partial_snapshot_cache()[length]);
  // We don't recurse from the startup snapshot generator into the partial
  // snapshot generator.
  ASSERT(length == isolate->serialize_partial_snapshot_cache_length());
  isolate->set_serialize_partial_snapshot_cache_length(length + 1);
  return length;
}


// Returns the root-list index of heap_object, or kInvalidRootIndex if it is
// not a root reached so far (only indices below root_index_wave_front_ are
// considered).  New-space objects are never treated as roots here.
int Serializer::RootIndex(HeapObject* heap_object) {
  Heap* heap = HEAP;
  if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
  for (int i = 0; i < root_index_wave_front_; i++) {
    Object* root = heap->roots_array_start()[i];
    // Smi roots cannot be heap objects, so skip them.
    if (!root->IsSmi() && root == heap_object) return i;
  }
  return kInvalidRootIndex;
}


// Encode the location of an already deserialized object in order to write its
// location into a later object.  We can encode the location as an offset from
// the start of the deserialized objects or as an offset backwards from the
// current allocation pointer.
void Serializer::SerializeReferenceToPreviousObject(
    int space,
    int address,
    HowToCode how_to_code,
    WhereToPoint where_to_point) {
  int offset = CurrentAllocationAddress(space) - address;
  bool from_start = true;
  if (SpaceIsPaged(space)) {
    // For paged space it is simple to encode back from current allocation if
    // the object is on the same page as the current allocation pointer.
    if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
        (address >> kPageSizeBits)) {
      from_start = false;
      address = offset;
    }
  } else if (space == NEW_SPACE) {
    // For new space it is always simple to encode back from current allocation.
    if (offset < address) {
      from_start = false;
      address = offset;
    }
  }
  // If we are actually dealing with real offsets (and not a numbering of
  // all objects) then we should shift out the bits that are always 0.
  if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
  if (from_start) {
    sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
    sink_->PutInt(address, "address");
  } else {
    sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
    sink_->PutInt(address, "address");
  }
}


// Serializes an object for the startup snapshot.  Roots are emitted as root
// references, previously serialized objects as references to their earlier
// occurrence, and anything else is serialized in full.
void StartupSerializer::SerializeObject(
    Object* o,
    HowToCode how_to_code,
    WhereToPoint where_to_point) {
  CHECK(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);

  int root_index;
  if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
    PutRoot(root_index, heap_object, how_to_code, where_to_point);
    return;
  }

  if (address_mapper_.IsMapped(heap_object)) {
    // Already serialized: emit a reference to the previous occurrence.
    int space = SpaceOfAlreadySerializedObject(heap_object);
    int address = address_mapper_.MappedTo(heap_object);
    SerializeReferenceToPreviousObject(space,
                                       address,
                                       how_to_code,
                                       where_to_point);
  } else {
    // Object has not yet been serialized.  Serialize it here.
    ObjectSerializer object_serializer(this,
                                       heap_object,
                                       sink_,
                                       how_to_code,
                                       where_to_point);
    object_serializer.Serialize();
  }
}


// Pads the unused tail of the partial snapshot cache with references to the
// undefined value root so the cache has its full capacity, then serializes
// the weak roots.
void StartupSerializer::SerializeWeakReferences() {
  for (int i = Isolate::Current()->serialize_partial_snapshot_cache_length();
       i < Isolate::kPartialSnapshotCacheCapacity;
       i++) {
    sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization");
    sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
  }
  HEAP->IterateWeakRoots(this, VISIT_ALL);
}


// Emits a reference to an entry in the root array.  Uses the compact
// single-byte constant encodings when the reference is a plain pointer to the
// start of a non-new-space object with a small enough root index; otherwise
// falls back to the generic kRootArray encoding with an explicit index.
void Serializer::PutRoot(int root_index,
                         HeapObject* object,
                         SerializerDeserializer::HowToCode how_to_code,
                         SerializerDeserializer::WhereToPoint where_to_point) {
  if (how_to_code == kPlain &&
      where_to_point == kStartOfObject &&
      root_index < kRootArrayNumberOfConstantEncodings &&
      !HEAP->InNewSpace(object)) {
    if (root_index < kRootArrayNumberOfLowConstantEncodings) {
      sink_->Put(kRootArrayLowConstants + root_index, "RootLoConstant");
    } else {
      sink_->Put(kRootArrayHighConstants + root_index -
                 kRootArrayNumberOfLowConstantEncodings,
                 "RootHiConstant");
    }
  } else {
    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
    sink_->PutInt(root_index, "root_index");
  }
}


// Serializes an object reachable from a context (partial) snapshot.  Objects
// that belong to the startup snapshot are referenced via the root array or
// the partial snapshot cache rather than being serialized again.
void PartialSerializer::SerializeObject(
    Object* o,
    HowToCode how_to_code,
    WhereToPoint where_to_point) {
  CHECK(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);

  int root_index;
  if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
    PutRoot(root_index, heap_object, how_to_code, where_to_point);
    return;
  }

  if (ShouldBeInThePartialSnapshotCache(heap_object)) {
    int cache_index = PartialSnapshotCacheIndex(heap_object);
    sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
               "PartialSnapshotCache");
    sink_->PutInt(cache_index, "partial_snapshot_cache_index");
    return;
  }

  // Pointers from the partial snapshot to the objects in the startup snapshot
  // should go through the root array or through the partial snapshot cache.
  // If this is not the case you may have to add something to the root array.
  ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
  // All the symbols that the partial snapshot needs should be either in the
  // root table or in the partial snapshot cache.
  ASSERT(!heap_object->IsSymbol());

  if (address_mapper_.IsMapped(heap_object)) {
    // Already emitted into this partial snapshot: reference it.
    int space = SpaceOfAlreadySerializedObject(heap_object);
    int address = address_mapper_.MappedTo(heap_object);
    SerializeReferenceToPreviousObject(space,
                                       address,
                                       how_to_code,
                                       where_to_point);
  } else {
    // Object has not yet been serialized.  Serialize it here.
    ObjectSerializer serializer(this,
                                heap_object,
                                sink_,
                                how_to_code,
                                where_to_point);
    serializer.Serialize();
  }
}


// Writes the tag, size and contents of object_ to the sink and records its
// simulated allocation so later references can be encoded compactly.
void Serializer::ObjectSerializer::Serialize() {
  int space = Serializer::SpaceOfObject(object_);
  int size = object_->Size();

  sink_->Put(kNewObject + reference_representation_ + space,
             "ObjectSerialization");
  // Objects are pointer-aligned, so the size is transmitted in words.
  sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");

  LOG(i::Isolate::Current(),
      SnapshotPositionEvent(object_->address(), sink_->Position()));

  // Mark this object as already serialized.
  bool start_new_page;
  int offset = serializer_->Allocate(space, size, &start_new_page);
  serializer_->address_mapper()->AddMapping(object_, offset);
  if (start_new_page) {
    sink_->Put(kNewPage, "NewPage");
    sink_->PutSection(space, "NewPageSpace");
  }

  // Serialize the map (first word of the object).
  serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject);

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;
  object_->IterateBody(object_->map()->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}


// Serializes a range of tagged slots.  Runs of smis are flushed as raw bytes
// via OutputRawData; heap pointers are serialized individually, except that
// consecutive repeats of certain root constants are compressed (below).
void Serializer::ObjectSerializer::VisitPointers(Object** start,
                                                 Object** end) {
  Object** current = start;
  while (current < end) {
    while (current < end && (*current)->IsSmi()) current++;
    if (current < end) OutputRawData(reinterpret_cast<Address>(current));

    while (current < end && !(*current)->IsSmi()) {
      HeapObject* current_contents = HeapObject::cast(*current);
      int root_index = serializer_->RootIndex(current_contents);
      // Repeats are not subject to the write barrier so there are only some
      // objects that can be used in a repeat encoding.  These are the early
      // ones in the root array that are never in new space.
1453 if (current != start && 1454 root_index != kInvalidRootIndex && 1455 root_index < kRootArrayNumberOfConstantEncodings && 1456 current_contents == current[-1]) { 1457 ASSERT(!HEAP->InNewSpace(current_contents)); 1458 int repeat_count = 1; 1459 while (current < end - 1 && current[repeat_count] == current_contents) { 1460 repeat_count++; 1461 } 1462 current += repeat_count; 1463 bytes_processed_so_far_ += repeat_count * kPointerSize; 1464 if (repeat_count > kMaxRepeats) { 1465 sink_->Put(kRepeat, "SerializeRepeats"); 1466 sink_->PutInt(repeat_count, "SerializeRepeats"); 1467 } else { 1468 sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats"); 1469 } 1470 } else { 1471 serializer_->SerializeObject(current_contents, kPlain, kStartOfObject); 1472 bytes_processed_so_far_ += kPointerSize; 1473 current++; 1474 } 1475 } 1476 } 1477} 1478 1479 1480void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) { 1481 Object** current = rinfo->target_object_address(); 1482 1483 OutputRawData(rinfo->target_address_address()); 1484 HowToCode representation = rinfo->IsCodedSpecially() ? 
kFromCode : kPlain; 1485 serializer_->SerializeObject(*current, representation, kStartOfObject); 1486 bytes_processed_so_far_ += rinfo->target_address_size(); 1487} 1488 1489 1490void Serializer::ObjectSerializer::VisitExternalReferences(Address* start, 1491 Address* end) { 1492 Address references_start = reinterpret_cast<Address>(start); 1493 OutputRawData(references_start); 1494 1495 for (Address* current = start; current < end; current++) { 1496 sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef"); 1497 int reference_id = serializer_->EncodeExternalReference(*current); 1498 sink_->PutInt(reference_id, "reference id"); 1499 } 1500 bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize); 1501} 1502 1503 1504void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) { 1505 Address references_start = rinfo->target_address_address(); 1506 OutputRawData(references_start); 1507 1508 Address* current = rinfo->target_reference_address(); 1509 int representation = rinfo->IsCodedSpecially() ? 1510 kFromCode + kStartOfObject : kPlain + kStartOfObject; 1511 sink_->Put(kExternalReference + representation, "ExternalRef"); 1512 int reference_id = serializer_->EncodeExternalReference(*current); 1513 sink_->PutInt(reference_id, "reference id"); 1514 bytes_processed_so_far_ += rinfo->target_address_size(); 1515} 1516 1517 1518void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) { 1519 Address target_start = rinfo->target_address_address(); 1520 OutputRawData(target_start); 1521 Address target = rinfo->target_address(); 1522 uint32_t encoding = serializer_->EncodeExternalReference(target); 1523 CHECK(target == NULL ? encoding == 0 : encoding != 0); 1524 int representation; 1525 // Can't use a ternary operator because of gcc. 
1526 if (rinfo->IsCodedSpecially()) { 1527 representation = kStartOfObject + kFromCode; 1528 } else { 1529 representation = kStartOfObject + kPlain; 1530 } 1531 sink_->Put(kExternalReference + representation, "ExternalReference"); 1532 sink_->PutInt(encoding, "reference id"); 1533 bytes_processed_so_far_ += rinfo->target_address_size(); 1534} 1535 1536 1537void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) { 1538 CHECK(RelocInfo::IsCodeTarget(rinfo->rmode())); 1539 Address target_start = rinfo->target_address_address(); 1540 OutputRawData(target_start); 1541 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); 1542 serializer_->SerializeObject(target, kFromCode, kFirstInstruction); 1543 bytes_processed_so_far_ += rinfo->target_address_size(); 1544} 1545 1546 1547void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) { 1548 Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address)); 1549 OutputRawData(entry_address); 1550 serializer_->SerializeObject(target, kPlain, kFirstInstruction); 1551 bytes_processed_so_far_ += kPointerSize; 1552} 1553 1554 1555void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) { 1556 // We shouldn't have any global property cell references in code 1557 // objects in the snapshot. 
  UNREACHABLE();
}


// Serializes an external ASCII string resource pointer.  Only the natives
// source cache resources are supported: the matching builtin index is written
// so the deserializer can recreate the resource from the natives sources.
void Serializer::ObjectSerializer::VisitExternalAsciiString(
    v8::String::ExternalAsciiStringResource** resource_pointer) {
  Address references_start = reinterpret_cast<Address>(resource_pointer);
  OutputRawData(references_start);
  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
    Object* source = HEAP->natives_source_cache()->get(i);
    if (!source->IsUndefined()) {
      ExternalAsciiString* string = ExternalAsciiString::cast(source);
      typedef v8::String::ExternalAsciiStringResource Resource;
      const Resource* resource = string->resource();
      if (resource == *resource_pointer) {
        sink_->Put(kNativesStringResource, "NativesStringResource");
        sink_->PutSection(i, "NativesStringResourceEnd");
        // Account for the pointer-sized resource field we just covered.
        bytes_processed_so_far_ += sizeof(resource);
        return;
      }
    }
  }
  // One of the strings in the natives cache should match the resource.  We
  // can't serialize any other kinds of external strings.
  UNREACHABLE();
}


// Flushes the object's raw (non-pointer) bytes between the last processed
// position and up_to.  Common lengths get a one-byte combined tag; other
// lengths use the generic kRawData tag with an explicit length.
void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
  Address object_start = object_->address();
  int up_to_offset = static_cast<int>(up_to - object_start);
  int skipped = up_to_offset - bytes_processed_so_far_;
  // This assert will fail if the reloc info gives us the target_address_address
  // locations in a non-ascending order.  Luckily that doesn't happen.
  ASSERT(skipped >= 0);
  if (skipped != 0) {
    Address base = object_start + bytes_processed_so_far_;
#define RAW_CASE(index, length)                                                \
    if (skipped == length) {                                                   \
      sink_->PutSection(kRawData + index, "RawDataFixed");                     \
    } else  /* NOLINT */
    COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
    {  /* NOLINT */
      // Length is not one of the common ones: emit it explicitly.
      sink_->Put(kRawData, "RawData");
      sink_->PutInt(skipped, "length");
    }
    for (int i = 0; i < skipped; i++) {
      unsigned int data = base[i];
      sink_->PutSection(data, "Byte");
    }
    bytes_processed_so_far_ += skipped;
  }
}


// Maps an object to the serializer's space encoding.  Large object space is
// split by object type (code / fixed array / other data) since the
// deserializer treats those differently.
int Serializer::SpaceOfObject(HeapObject* object) {
  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    AllocationSpace s = static_cast<AllocationSpace>(i);
    if (HEAP->InSpace(object, s)) {
      if (i == LO_SPACE) {
        if (object->IsCode()) {
          return kLargeCode;
        } else if (object->IsFixedArray()) {
          return kLargeFixedArray;
        } else {
          return kLargeData;
        }
      }
      return i;
    }
  }
  UNREACHABLE();
  return 0;
}


// Like SpaceOfObject, but large object space is not split by object type.
int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    AllocationSpace s = static_cast<AllocationSpace>(i);
    if (HEAP->InSpace(object, s)) {
      return i;
    }
  }
  UNREACHABLE();
  return 0;
}


// Simulates an allocation of size bytes in the given space and returns the
// offset the deserializer will allocate the object at.  *new_page is set when
// the object must start a fresh page.
int Serializer::Allocate(int space, int size, bool* new_page) {
  CHECK(space >= 0 && space < kNumberOfSpaces);
  if (SpaceIsLarge(space)) {
    // In large object space we merely number the objects instead of trying to
    // determine some sort of address.
    *new_page = true;
    large_object_total_ += size;
    return fullness_[LO_SPACE]++;
  }
  *new_page = false;
  if (fullness_[space] == 0) {
    *new_page = true;
  }
  if (SpaceIsPaged(space)) {
    // Paged spaces are a little special.  We encode their addresses as if the
    // pages were all contiguous and each page were filled up in the range
    // 0 - Page::kObjectAreaSize.  In practice the pages may not be contiguous
    // and allocation does not start at offset 0 in the page, but this scheme
    // means the deserializer can get the page number quickly by shifting the
    // serialized address.
    CHECK(IsPowerOf2(Page::kPageSize));
    int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
    CHECK(size <= SpaceAreaSize(space));
    if (used_in_this_page + size > SpaceAreaSize(space)) {
      // The object does not fit on the current page: round the simulated
      // allocation pointer up to the next page boundary.
      *new_page = true;
      fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
    }
  }
  int allocation_address = fullness_[space];
  fullness_[space] = allocation_address + size;
  return allocation_address;
}


// Usable object area of a page; code pages reserve extra space, so their
// area size comes from the memory allocator.
int Serializer::SpaceAreaSize(int space) {
  if (space == CODE_SPACE) {
    return isolate_->memory_allocator()->CodePageAreaSize();
  } else {
    return Page::kPageSize - Page::kObjectStartOffset;
  }
}


} }  // namespace v8::internal