serialize.cc revision 5913587db4c6bab03d97bfe44b06289fd6d7270d
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "natives.h"
#include "platform.h"
#include "runtime.h"
#include "serialize.h"
#include "stub-cache.h"
#include "v8threads.h"
#include "top.h"
#include "bootstrapper.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Coding of external references.

// The encoding of an external reference. The type is in the high word.
// The id is in the low word.
static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
  return static_cast<uint32_t>(type) << 16 | id;
}
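
// For illustration (an added sketch, derived from the line above): the packed
// code splits cleanly back into its two half-words, e.g.
//
//   uint32_t code = EncodeExternal(RUNTIME_FUNCTION, 7);
//   TypeCode type = static_cast<TypeCode>(code >> 16);   // RUNTIME_FUNCTION
//   uint16_t id = static_cast<uint16_t>(code & 0xFFFF);  // 7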


static int* GetInternalPointer(StatsCounter* counter) {
  // All counters refer to dummy_counter, if deserializing happens without
  // setting up counters.
  static int dummy_counter = 0;
  return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter;
}


// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
class ExternalReferenceTable {
 public:
  static ExternalReferenceTable* instance() {
    if (!instance_) instance_ = new ExternalReferenceTable();
    return instance_;
  }

  int size() const { return refs_.length(); }

  Address address(int i) { return refs_[i].address; }

  uint32_t code(int i) { return refs_[i].code; }

  const char* name(int i) { return refs_[i].name; }

  int max_id(int code) { return max_id_[code]; }

 private:
  static ExternalReferenceTable* instance_;

  ExternalReferenceTable() : refs_(64) { PopulateTable(); }
  ~ExternalReferenceTable() { }

  struct ExternalReferenceEntry {
    Address address;
    uint32_t code;
    const char* name;
  };

  void PopulateTable();

  // For a few types of references, we can get their address from their id.
  void AddFromId(TypeCode type, uint16_t id, const char* name);

  // For other types of references, the caller will figure out the address.
  void Add(Address address, TypeCode type, uint16_t id, const char* name);

  List<ExternalReferenceEntry> refs_;
  int max_id_[kTypeCodeCount];
};


ExternalReferenceTable* ExternalReferenceTable::instance_ = NULL;


void ExternalReferenceTable::AddFromId(TypeCode type,
                                       uint16_t id,
                                       const char* name) {
  Address address;
  switch (type) {
    case C_BUILTIN: {
      ExternalReference ref(static_cast<Builtins::CFunctionId>(id));
      address = ref.address();
      break;
    }
    case BUILTIN: {
      ExternalReference ref(static_cast<Builtins::Name>(id));
      address = ref.address();
      break;
    }
    case RUNTIME_FUNCTION: {
      ExternalReference ref(static_cast<Runtime::FunctionId>(id));
      address = ref.address();
      break;
    }
    case IC_UTILITY: {
      ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)));
      address = ref.address();
      break;
    }
    default:
      UNREACHABLE();
      return;
  }
  Add(address, type, id, name);
}


void ExternalReferenceTable::Add(Address address,
                                 TypeCode type,
                                 uint16_t id,
                                 const char* name) {
  ASSERT_NE(NULL, address);
  ExternalReferenceEntry entry;
  entry.address = address;
  entry.code = EncodeExternal(type, id);
  entry.name = name;
  ASSERT_NE(0, entry.code);
  refs_.Add(entry);
  if (id > max_id_[type]) max_id_[type] = id;
}


void ExternalReferenceTable::PopulateTable() {
  for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
    max_id_[type_code] = 0;
  }

  // The following populates all of the different types of external references
  // into the ExternalReferenceTable.
  //
  // NOTE: This function was originally 100k of code. It has since been
  // rewritten to be mostly table driven, as the callback macro style tends to
  // very easily cause code bloat. Please be careful in the future when adding
  // new references.

  struct RefTableEntry {
    TypeCode type;
    uint16_t id;
    const char* name;
  };

  static const RefTableEntry ref_table[] = {
  // Builtins
#define DEF_ENTRY_C(name, ignored) \
  { C_BUILTIN, \
    Builtins::c_##name, \
    "Builtins::" #name },

  BUILTIN_LIST_C(DEF_ENTRY_C)
#undef DEF_ENTRY_C

#define DEF_ENTRY_C(name, ignored) \
  { BUILTIN, \
    Builtins::name, \
    "Builtins::" #name },
#define DEF_ENTRY_A(name, kind, state) DEF_ENTRY_C(name, ignored)

  BUILTIN_LIST_C(DEF_ENTRY_C)
  BUILTIN_LIST_A(DEF_ENTRY_A)
  BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
#undef DEF_ENTRY_C
#undef DEF_ENTRY_A

  // Runtime functions
#define RUNTIME_ENTRY(name, nargs, ressize) \
  { RUNTIME_FUNCTION, \
    Runtime::k##name, \
    "Runtime::" #name },

  RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY

  // IC utilities
#define IC_ENTRY(name) \
  { IC_UTILITY, \
    IC::k##name, \
    "IC::" #name },

  IC_UTIL_LIST(IC_ENTRY)
#undef IC_ENTRY
  };  // end of ref_table[].
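
  // For illustration (Foo is a hypothetical name; the real entries come from
  // the lists, which are defined elsewhere): each list macro above expands
  // its entry macro once per element, so a builtin named Foo would contribute
  // an initializer of the form
  //   { C_BUILTIN, Builtins::c_Foo, "Builtins::Foo" },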

  for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
    AddFromId(ref_table[i].type, ref_table[i].id, ref_table[i].name);
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Debug addresses
  Add(Debug_Address(Debug::k_after_break_target_address).address(),
      DEBUG_ADDRESS,
      Debug::k_after_break_target_address << kDebugIdShift,
      "Debug::after_break_target_address()");
  Add(Debug_Address(Debug::k_debug_break_slot_address).address(),
      DEBUG_ADDRESS,
      Debug::k_debug_break_slot_address << kDebugIdShift,
      "Debug::debug_break_slot_address()");
  Add(Debug_Address(Debug::k_debug_break_return_address).address(),
      DEBUG_ADDRESS,
      Debug::k_debug_break_return_address << kDebugIdShift,
      "Debug::debug_break_return_address()");
  Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(),
      DEBUG_ADDRESS,
      Debug::k_restarter_frame_function_pointer << kDebugIdShift,
      "Debug::restarter_frame_function_pointer_address()");
#endif

  // Stat counters
  struct StatsRefTableEntry {
    StatsCounter* counter;
    uint16_t id;
    const char* name;
  };

  static const StatsRefTableEntry stats_ref_table[] = {
#define COUNTER_ENTRY(name, caption) \
  { &Counters::name, \
    Counters::k_##name, \
    "Counters::" #name },

  STATS_COUNTER_LIST_1(COUNTER_ENTRY)
  STATS_COUNTER_LIST_2(COUNTER_ENTRY)
#undef COUNTER_ENTRY
  };  // end of stats_ref_table[].

  for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
    Add(reinterpret_cast<Address>(
            GetInternalPointer(stats_ref_table[i].counter)),
        STATS_COUNTER,
        stats_ref_table[i].id,
        stats_ref_table[i].name);
  }

  // Top addresses
  const char* top_address_format = "Top::%s";

  const char* AddressNames[] = {
#define C(name) #name,
    TOP_ADDRESS_LIST(C)
    TOP_ADDRESS_LIST_PROF(C)
    NULL
#undef C
  };

  int top_format_length = StrLength(top_address_format) - 2;
  for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
    const char* address_name = AddressNames[i];
    Vector<char> name =
        Vector<char>::New(top_format_length + StrLength(address_name) + 1);
    const char* chars = name.start();
    OS::SNPrintF(name, top_address_format, address_name);
    Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars);
  }

  // Extensions
  Add(FUNCTION_ADDR(GCExtension::GC), EXTENSION, 1,
      "GCExtension::GC");

  // Accessors
#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
  Add((Address)&Accessors::name, \
      ACCESSOR, \
      Accessors::k##name, \
      "Accessors::" #name);

  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
#undef ACCESSOR_DESCRIPTOR_DECLARATION

  // Stub cache tables
  Add(SCTableReference::keyReference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      1,
      "StubCache::primary_->key");
  Add(SCTableReference::valueReference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      2,
      "StubCache::primary_->value");
  Add(SCTableReference::keyReference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      3,
      "StubCache::secondary_->key");
  Add(SCTableReference::valueReference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      4,
      "StubCache::secondary_->value");

  // Runtime entries
  Add(ExternalReference::perform_gc_function().address(),
      RUNTIME_ENTRY,
      1,
      "Runtime::PerformGC");
  Add(ExternalReference::fill_heap_number_with_random_function().address(),
      RUNTIME_ENTRY,
      2,
      "V8::FillHeapNumberWithRandom");

  Add(ExternalReference::random_uint32_function().address(),
      RUNTIME_ENTRY,
      3,
      "V8::Random");

  Add(ExternalReference::delete_handle_scope_extensions().address(),
      RUNTIME_ENTRY,
      4,
      "HandleScope::DeleteExtensions");

  // Miscellaneous
  Add(ExternalReference::the_hole_value_location().address(),
      UNCLASSIFIED,
      2,
      "Factory::the_hole_value().location()");
  Add(ExternalReference::roots_address().address(),
      UNCLASSIFIED,
      3,
      "Heap::roots_address()");
  Add(ExternalReference::address_of_stack_limit().address(),
      UNCLASSIFIED,
      4,
      "StackGuard::address_of_jslimit()");
  Add(ExternalReference::address_of_real_stack_limit().address(),
      UNCLASSIFIED,
      5,
      "StackGuard::address_of_real_jslimit()");
#ifndef V8_INTERPRETED_REGEXP
  Add(ExternalReference::address_of_regexp_stack_limit().address(),
      UNCLASSIFIED,
      6,
      "RegExpStack::limit_address()");
  Add(ExternalReference::address_of_regexp_stack_memory_address().address(),
      UNCLASSIFIED,
      7,
      "RegExpStack::memory_address()");
  Add(ExternalReference::address_of_regexp_stack_memory_size().address(),
      UNCLASSIFIED,
      8,
      "RegExpStack::memory_size()");
  Add(ExternalReference::address_of_static_offsets_vector().address(),
      UNCLASSIFIED,
      9,
      "OffsetsVector::static_offsets_vector");
#endif  // V8_INTERPRETED_REGEXP
  Add(ExternalReference::new_space_start().address(),
      UNCLASSIFIED,
      10,
      "Heap::NewSpaceStart()");
  Add(ExternalReference::new_space_mask().address(),
      UNCLASSIFIED,
      11,
      "Heap::NewSpaceMask()");
  Add(ExternalReference::heap_always_allocate_scope_depth().address(),
      UNCLASSIFIED,
      12,
      "Heap::always_allocate_scope_depth()");
  Add(ExternalReference::new_space_allocation_limit_address().address(),
      UNCLASSIFIED,
      13,
      "Heap::NewSpaceAllocationLimitAddress()");
  Add(ExternalReference::new_space_allocation_top_address().address(),
      UNCLASSIFIED,
      14,
      "Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
  Add(ExternalReference::debug_break().address(),
      UNCLASSIFIED,
      15,
      "Debug::Break()");
  Add(ExternalReference::debug_step_in_fp_address().address(),
      UNCLASSIFIED,
      16,
      "Debug::step_in_fp_addr()");
#endif
  Add(ExternalReference::double_fp_operation(Token::ADD).address(),
      UNCLASSIFIED,
      17,
      "add_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::SUB).address(),
      UNCLASSIFIED,
      18,
      "sub_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::MUL).address(),
      UNCLASSIFIED,
      19,
      "mul_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::DIV).address(),
      UNCLASSIFIED,
      20,
      "div_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::MOD).address(),
      UNCLASSIFIED,
      21,
      "mod_two_doubles");
  Add(ExternalReference::compare_doubles().address(),
      UNCLASSIFIED,
      22,
      "compare_doubles");
#ifndef V8_INTERPRETED_REGEXP
  Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
      UNCLASSIFIED,
      23,
      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
  Add(ExternalReference::re_check_stack_guard_state().address(),
      UNCLASSIFIED,
      24,
      "RegExpMacroAssembler*::CheckStackGuardState()");
  Add(ExternalReference::re_grow_stack().address(),
      UNCLASSIFIED,
      25,
      "NativeRegExpMacroAssembler::GrowStack()");
"NativeRegExpMacroAssembler::GrowStack()"); 447 Add(ExternalReference::re_word_character_map().address(), 448 UNCLASSIFIED, 449 26, 450 "NativeRegExpMacroAssembler::word_character_map"); 451#endif // V8_INTERPRETED_REGEXP 452 // Keyed lookup cache. 453 Add(ExternalReference::keyed_lookup_cache_keys().address(), 454 UNCLASSIFIED, 455 27, 456 "KeyedLookupCache::keys()"); 457 Add(ExternalReference::keyed_lookup_cache_field_offsets().address(), 458 UNCLASSIFIED, 459 28, 460 "KeyedLookupCache::field_offsets()"); 461 Add(ExternalReference::transcendental_cache_array_address().address(), 462 UNCLASSIFIED, 463 29, 464 "TranscendentalCache::caches()"); 465 Add(ExternalReference::handle_scope_next_address().address(), 466 UNCLASSIFIED, 467 30, 468 "HandleScope::next"); 469 Add(ExternalReference::handle_scope_limit_address().address(), 470 UNCLASSIFIED, 471 31, 472 "HandleScope::limit"); 473 Add(ExternalReference::handle_scope_level_address().address(), 474 UNCLASSIFIED, 475 32, 476 "HandleScope::level"); 477} 478 479 480ExternalReferenceEncoder::ExternalReferenceEncoder() 481 : encodings_(Match) { 482 ExternalReferenceTable* external_references = 483 ExternalReferenceTable::instance(); 484 for (int i = 0; i < external_references->size(); ++i) { 485 Put(external_references->address(i), i); 486 } 487} 488 489 490uint32_t ExternalReferenceEncoder::Encode(Address key) const { 491 int index = IndexOf(key); 492 ASSERT(key == NULL || index >= 0); 493 return index >=0 ? ExternalReferenceTable::instance()->code(index) : 0; 494} 495 496 497const char* ExternalReferenceEncoder::NameOfAddress(Address key) const { 498 int index = IndexOf(key); 499 return index >=0 ? ExternalReferenceTable::instance()->name(index) : NULL; 500} 501 502 503int ExternalReferenceEncoder::IndexOf(Address key) const { 504 if (key == NULL) return -1; 505 HashMap::Entry* entry = 506 const_cast<HashMap &>(encodings_).Lookup(key, Hash(key), false); 507 return entry == NULL 508 ? -1 509 : static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); 510} 511 512 513void ExternalReferenceEncoder::Put(Address key, int index) { 514 HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true); 515 entry->value = reinterpret_cast<void*>(index); 516} 517 518 519ExternalReferenceDecoder::ExternalReferenceDecoder() 520 : encodings_(NewArray<Address*>(kTypeCodeCount)) { 521 ExternalReferenceTable* external_references = 522 ExternalReferenceTable::instance(); 523 for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) { 524 int max = external_references->max_id(type) + 1; 525 encodings_[type] = NewArray<Address>(max + 1); 526 } 527 for (int i = 0; i < external_references->size(); ++i) { 528 Put(external_references->code(i), external_references->address(i)); 529 } 530} 531 532 533ExternalReferenceDecoder::~ExternalReferenceDecoder() { 534 for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) { 535 DeleteArray(encodings_[type]); 536 } 537 DeleteArray(encodings_); 538} 539 540 541bool Serializer::serialization_enabled_ = false; 542bool Serializer::too_late_to_enable_now_ = false; 543ExternalReferenceDecoder* Deserializer::external_reference_decoder_ = NULL; 544 545 546Deserializer::Deserializer(SnapshotByteSource* source) : source_(source) { 547} 548 549 550// This routine both allocates a new object, and also keeps 551// track of where objects have been allocated so that we can 552// fix back references when deserializing. 


bool Serializer::serialization_enabled_ = false;
bool Serializer::too_late_to_enable_now_ = false;
ExternalReferenceDecoder* Deserializer::external_reference_decoder_ = NULL;


Deserializer::Deserializer(SnapshotByteSource* source) : source_(source) {
}


// This routine both allocates a new object, and also keeps
// track of where objects have been allocated so that we can
// fix back references when deserializing.
Address Deserializer::Allocate(int space_index, Space* space, int size) {
  Address address;
  if (!SpaceIsLarge(space_index)) {
    ASSERT(!SpaceIsPaged(space_index) ||
           size <= Page::kPageSize - Page::kObjectStartOffset);
    MaybeObject* maybe_new_allocation;
    if (space_index == NEW_SPACE) {
      maybe_new_allocation =
          reinterpret_cast<NewSpace*>(space)->AllocateRaw(size);
    } else {
      maybe_new_allocation =
          reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
    }
    Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
    HeapObject* new_object = HeapObject::cast(new_allocation);
    address = new_object->address();
    high_water_[space_index] = address + size;
  } else {
    ASSERT(SpaceIsLarge(space_index));
    ASSERT(size > Page::kPageSize - Page::kObjectStartOffset);
    LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
    Object* new_allocation;
    if (space_index == kLargeData) {
      new_allocation = lo_space->AllocateRaw(size)->ToObjectUnchecked();
    } else if (space_index == kLargeFixedArray) {
      new_allocation =
          lo_space->AllocateRawFixedArray(size)->ToObjectUnchecked();
    } else {
      ASSERT_EQ(kLargeCode, space_index);
      new_allocation = lo_space->AllocateRawCode(size)->ToObjectUnchecked();
    }
    HeapObject* new_object = HeapObject::cast(new_allocation);
    // Record all large objects in the same space.
    address = new_object->address();
    pages_[LO_SPACE].Add(address);
  }
  last_object_address_ = address;
  return address;
}


// This returns the address of an object that has been described in the
// snapshot as being offset bytes back in a particular space.
HeapObject* Deserializer::GetAddressFromEnd(int space) {
  int offset = source_->GetInt();
  ASSERT(!SpaceIsLarge(space));
  offset <<= kObjectAlignmentBits;
  return HeapObject::FromAddress(high_water_[space] - offset);
}


// This returns the address of an object that has been described in the
// snapshot as being offset bytes into a particular space.
HeapObject* Deserializer::GetAddressFromStart(int space) {
  int offset = source_->GetInt();
  if (SpaceIsLarge(space)) {
    // Large spaces have one object per 'page'.
    return HeapObject::FromAddress(pages_[LO_SPACE][offset]);
  }
  offset <<= kObjectAlignmentBits;
  if (space == NEW_SPACE) {
    // New space has only one 'page' - numbered 0.
    return HeapObject::FromAddress(pages_[space][0] + offset);
  }
  ASSERT(SpaceIsPaged(space));
  int page_of_pointee = offset >> kPageSizeBits;
  Address object_address = pages_[space][page_of_pointee] +
                           (offset & Page::kPageAlignmentMask);
  return HeapObject::FromAddress(object_address);
}
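

// For illustration (hypothetical numbers, assuming 8K pages so that
// kPageSizeBits == 13): in a paged space the offset decoded above, once
// shifted left by kObjectAlignmentBits, carries the page index in its high
// bits and the intra-page offset in its low bits, so an offset of 0x4008
// would mean byte 8 on the space's third page:
//
//   int page_of_pointee = 0x4008 >> kPageSizeBits;    // 2
//   int in_page = 0x4008 & Page::kPageAlignmentMask;  // 8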


void Deserializer::Deserialize() {
  // Don't GC while deserializing - just expand the heap.
  AlwaysAllocateScope always_allocate;
  // Don't use the free lists while deserializing.
  LinearAllocationScope allocate_linearly;
  // No active threads.
  ASSERT_EQ(NULL, ThreadState::FirstInUse());
  // No active handles.
  ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
  // Make sure the entire partial snapshot cache is traversed, filling it with
  // valid object pointers.
  partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
  ASSERT_EQ(NULL, external_reference_decoder_);
  external_reference_decoder_ = new ExternalReferenceDecoder();
  Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
  Heap::IterateWeakRoots(this, VISIT_ALL);

  Heap::set_global_contexts_list(Heap::undefined_value());
}


void Deserializer::DeserializePartial(Object** root) {
  // Don't GC while deserializing - just expand the heap.
  AlwaysAllocateScope always_allocate;
  // Don't use the free lists while deserializing.
  LinearAllocationScope allocate_linearly;
  if (external_reference_decoder_ == NULL) {
    external_reference_decoder_ = new ExternalReferenceDecoder();
  }
  VisitPointer(root);
}


Deserializer::~Deserializer() {
  ASSERT(source_->AtEOF());
  if (external_reference_decoder_ != NULL) {
    delete external_reference_decoder_;
    external_reference_decoder_ = NULL;
  }
}


// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
void Deserializer::VisitPointers(Object** start, Object** end) {
  // The space must be new space. Any other space would cause ReadChunk to try
  // to update the remembered set using NULL as the address.
  ReadChunk(start, end, NEW_SPACE, NULL);
}


// This routine writes the new object into the pointer provided rather than
// returning it. The reason for this strange interface is that otherwise the
// object is written very late, which means the ByteArray map is not set up
// by the time we need to use it to mark the space at the end of a page free
// (by making it into a byte array).
void Deserializer::ReadObject(int space_number,
                              Space* space,
                              Object** write_back) {
  int size = source_->GetInt() << kObjectAlignmentBits;
  Address address = Allocate(space_number, space, size);
  *write_back = HeapObject::FromAddress(address);
  Object** current = reinterpret_cast<Object**>(address);
  Object** limit = current + (size >> kPointerSizeLog2);
  if (FLAG_log_snapshot_positions) {
    LOG(SnapshotPositionEvent(address, source_->position()));
  }
  ReadChunk(current, limit, space_number, address);
}
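

// For illustration: object sizes travel in words, matching the writer side
// (ObjectSerializer::Serialize below puts "size >> kObjectAlignmentBits"),
// so the reader above reconstructs the byte size with
//   int size = source_->GetInt() << kObjectAlignmentBits;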


// This macro is always used with a constant argument so it should all fold
// away to almost nothing in the generated code. It might be nicer to do this
// with the ternary operator but there are type issues with that.
#define ASSIGN_DEST_SPACE(space_number) \
  Space* dest_space; \
  if (space_number == NEW_SPACE) { \
    dest_space = Heap::new_space(); \
  } else if (space_number == OLD_POINTER_SPACE) { \
    dest_space = Heap::old_pointer_space(); \
  } else if (space_number == OLD_DATA_SPACE) { \
    dest_space = Heap::old_data_space(); \
  } else if (space_number == CODE_SPACE) { \
    dest_space = Heap::code_space(); \
  } else if (space_number == MAP_SPACE) { \
    dest_space = Heap::map_space(); \
  } else if (space_number == CELL_SPACE) { \
    dest_space = Heap::cell_space(); \
  } else { \
    ASSERT(space_number >= LO_SPACE); \
    dest_space = Heap::lo_space(); \
  }


static const int kUnknownOffsetFromStart = -1;


void Deserializer::ReadChunk(Object** current,
                             Object** limit,
                             int source_space,
                             Address address) {
  while (current < limit) {
    int data = source_->Get();
    switch (data) {
#define CASE_STATEMENT(where, how, within, space_number) \
      case where + how + within + space_number: \
        ASSERT((where & ~kPointedToMask) == 0); \
        ASSERT((how & ~kHowToCodeMask) == 0); \
        ASSERT((within & ~kWhereToPointMask) == 0); \
        ASSERT((space_number & ~kSpaceMask) == 0);

#define CASE_BODY(where, how, within, space_number_if_any, offset_from_start) \
      { \
        bool emit_write_barrier = false; \
        bool current_was_incremented = false; \
        int space_number = space_number_if_any == kAnyOldSpace ? \
                           (data & kSpaceMask) : space_number_if_any; \
        if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
          ASSIGN_DEST_SPACE(space_number) \
          ReadObject(space_number, dest_space, current); \
          emit_write_barrier = \
              (space_number == NEW_SPACE && source_space != NEW_SPACE); \
        } else { \
          Object* new_object = NULL;  /* May not be a real Object pointer. */ \
          if (where == kNewObject) { \
            ASSIGN_DEST_SPACE(space_number) \
            ReadObject(space_number, dest_space, &new_object); \
          } else if (where == kRootArray) { \
            int root_id = source_->GetInt(); \
            new_object = Heap::roots_address()[root_id]; \
          } else if (where == kPartialSnapshotCache) { \
            int cache_index = source_->GetInt(); \
            new_object = partial_snapshot_cache_[cache_index]; \
          } else if (where == kExternalReference) { \
            int reference_id = source_->GetInt(); \
            Address address = \
                external_reference_decoder_->Decode(reference_id); \
            new_object = reinterpret_cast<Object*>(address); \
          } else if (where == kBackref) { \
            emit_write_barrier = \
                (space_number == NEW_SPACE && source_space != NEW_SPACE); \
            new_object = GetAddressFromEnd(data & kSpaceMask); \
          } else { \
            ASSERT(where == kFromStart); \
            if (offset_from_start == kUnknownOffsetFromStart) { \
              emit_write_barrier = \
                  (space_number == NEW_SPACE && source_space != NEW_SPACE); \
              new_object = GetAddressFromStart(data & kSpaceMask); \
            } else { \
              Address object_address = pages_[space_number][0] + \
                  (offset_from_start << kObjectAlignmentBits); \
              new_object = HeapObject::FromAddress(object_address); \
            } \
          } \
          if (within == kFirstInstruction) { \
            Code* new_code_object = reinterpret_cast<Code*>(new_object); \
            new_object = reinterpret_cast<Object*>( \
                new_code_object->instruction_start()); \
          } \
          if (how == kFromCode) { \
            Address location_of_branch_data = \
                reinterpret_cast<Address>(current); \
            Assembler::set_target_at(location_of_branch_data, \
                                     reinterpret_cast<Address>(new_object)); \
            if (within == kFirstInstruction) { \
              location_of_branch_data += Assembler::kCallTargetSize; \
              current = reinterpret_cast<Object**>(location_of_branch_data); \
              current_was_incremented = true; \
            } \
          } else { \
            *current = new_object; \
          } \
        } \
        if (emit_write_barrier) { \
          Heap::RecordWrite(address, static_cast<int>( \
              reinterpret_cast<Address>(current) - address)); \
        } \
        if (!current_was_incremented) { \
          current++;  /* Increment current if it wasn't done above. */ \
        } \
        break; \
      } \
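
// For illustration: each case tag is the sum of four disjoint bit fields (the
// ASSERTs in CASE_STATEMENT check each field stays within its mask), so a
// single byte such as
//   kNewObject + kPlain + kStartOfObject + CODE_SPACE
// selects "allocate a new code-space object here and write a plain pointer
// to its start into the current slot".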

// This generates a case and a body for each space. The large object spaces are
// very rare in snapshots so they are grouped in one body.
#define ONE_PER_SPACE(where, how, within) \
  CASE_STATEMENT(where, how, within, NEW_SPACE) \
  CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
  CASE_BODY(where, how, within, OLD_DATA_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
  CASE_BODY(where, how, within, OLD_POINTER_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, CELL_SPACE) \
  CASE_BODY(where, how, within, CELL_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, MAP_SPACE) \
  CASE_BODY(where, how, within, MAP_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, kLargeData) \
  CASE_STATEMENT(where, how, within, kLargeCode) \
  CASE_STATEMENT(where, how, within, kLargeFixedArray) \
  CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)

// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with 8 fall-through
// cases and one body.
#define ALL_SPACES(where, how, within) \
  CASE_STATEMENT(where, how, within, NEW_SPACE) \
  CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
  CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_STATEMENT(where, how, within, CELL_SPACE) \
  CASE_STATEMENT(where, how, within, MAP_SPACE) \
  CASE_STATEMENT(where, how, within, kLargeData) \
  CASE_STATEMENT(where, how, within, kLargeCode) \
  CASE_STATEMENT(where, how, within, kLargeFixedArray) \
  CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)

#define ONE_PER_CODE_SPACE(where, how, within) \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
  CASE_STATEMENT(where, how, within, kLargeCode) \
  CASE_BODY(where, how, within, LO_SPACE, kUnknownOffsetFromStart)

#define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number, \
                                       space_number, \
                                       offset_from_start) \
  CASE_STATEMENT(kFromStart, kPlain, kStartOfObject, pseudo_space_number) \
  CASE_BODY(kFromStart, kPlain, kStartOfObject, space_number, offset_from_start)

      // We generate 15 cases and bodies that process special tags that combine
      // the raw data tag and the length into one byte.
#define RAW_CASE(index, size) \
      case kRawData + index: { \
        byte* raw_data_out = reinterpret_cast<byte*>(current); \
        source_->CopyRaw(raw_data_out, size); \
        current = reinterpret_cast<Object**>(raw_data_out + size); \
        break; \
      }
      COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE

      // Deserialize a chunk of raw data that doesn't have one of the popular
      // lengths.
      case kRawData: {
        int size = source_->GetInt();
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        source_->CopyRaw(raw_data_out, size);
        current = reinterpret_cast<Object**>(raw_data_out + size);
        break;
      }
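
      // For illustration (hypothetical entry; the real list is defined
      // elsewhere): an entry like (1, kPointerSize) in COMMON_RAW_LENGTHS
      // would expand RAW_CASE to "case kRawData + 1", copying kPointerSize
      // bytes with no length prefix to read.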

      // Deserialize a new object and write a pointer to it to the current
      // object.
      ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
      // Support for direct instruction pointers in functions
      ONE_PER_CODE_SPACE(kNewObject, kPlain, kFirstInstruction)
      // Deserialize a new code object and write a pointer to its first
      // instruction to the current code object.
      ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction)
      // Find a recently deserialized object using its offset from the current
      // allocation point and write a pointer to it to the current object.
      ALL_SPACES(kBackref, kPlain, kStartOfObject)
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to its first instruction
      // to the current code object or the instruction pointer in a function
      // object.
      ALL_SPACES(kBackref, kFromCode, kFirstInstruction)
      ALL_SPACES(kBackref, kPlain, kFirstInstruction)
      // Find an already deserialized object using its offset from the start
      // and write a pointer to it to the current object.
      ALL_SPACES(kFromStart, kPlain, kStartOfObject)
      ALL_SPACES(kFromStart, kPlain, kFirstInstruction)
      // Find an already deserialized code object using its offset from the
      // start and write a pointer to its first instruction to the current code
      // object.
      ALL_SPACES(kFromStart, kFromCode, kFirstInstruction)
      // Find an already deserialized object at one of the predetermined popular
      // offsets from the start and write a pointer to it in the current object.
      COMMON_REFERENCE_PATTERNS(EMIT_COMMON_REFERENCE_PATTERNS)
      // Find an object in the roots array and write a pointer to it to the
      // current object.
      CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
      CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart)
      // Find an object in the partial snapshot cache and write a pointer to it
      // to the current object.
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
                kStartOfObject,
                0,
                kUnknownOffsetFromStart)
      // Find a code entry in the partial snapshot cache and
      // write a pointer to it to the current object.
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kFirstInstruction, 0)
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
                kFirstInstruction,
                0,
                kUnknownOffsetFromStart)
      // Find an external reference and write a pointer to it to the current
      // object.
      CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
      CASE_BODY(kExternalReference,
                kPlain,
                kStartOfObject,
                0,
                kUnknownOffsetFromStart)
      // Find an external reference and write a pointer to it in the current
      // code object.
      CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
      CASE_BODY(kExternalReference,
                kFromCode,
                kStartOfObject,
                0,
                kUnknownOffsetFromStart)

#undef CASE_STATEMENT
#undef CASE_BODY
#undef ONE_PER_SPACE
#undef ALL_SPACES
#undef EMIT_COMMON_REFERENCE_PATTERNS
#undef ASSIGN_DEST_SPACE

      case kNewPage: {
        int space = source_->Get();
        pages_[space].Add(last_object_address_);
        if (space == CODE_SPACE) {
          CPU::FlushICache(last_object_address_, Page::kPageSize);
        }
        break;
      }

      case kNativesStringResource: {
        int index = source_->Get();
        Vector<const char> source_vector = Natives::GetScriptSource(index);
        NativesExternalStringResource* resource =
            new NativesExternalStringResource(source_vector.start());
        *current++ = reinterpret_cast<Object*>(resource);
        break;
      }

      case kSynchronize: {
        // If we get here then that indicates that you have a mismatch between
        // the number of GC roots when serializing and deserializing.
        UNREACHABLE();
      }

      default:
        UNREACHABLE();
    }
  }
  ASSERT_EQ(current, limit);
}


void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
  const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7;
  for (int shift = max_shift; shift > 0; shift -= 7) {
    if (integer >= static_cast<uintptr_t>(1u) << shift) {
      Put((static_cast<int>((integer >> shift)) & 0x7f) | 0x80, "IntPart");
    }
  }
  PutSection(static_cast<int>(integer & 0x7f), "IntLastPart");
}
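
// For illustration: PutInt emits 7 bits per byte, most significant group
// first, with the 0x80 continuation bit set on every byte except the last.
// E.g. 0x3456 (13398) is written as the two bytes 0xE8, 0x56, since
// ((13398 >> 7) & 0x7f) | 0x80 == 0xE8 and 13398 & 0x7f == 0x56.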

#ifdef DEBUG

void Deserializer::Synchronize(const char* tag) {
  int data = source_->Get();
  // If this assert fails then that indicates that you have a mismatch between
  // the number of GC roots when serializing and deserializing.
  ASSERT_EQ(kSynchronize, data);
  do {
    int character = source_->Get();
    if (character == 0) break;
    if (FLAG_debug_serialization) {
      PrintF("%c", character);
    }
  } while (true);
  if (FLAG_debug_serialization) {
    PrintF("\n");
  }
}


void Serializer::Synchronize(const char* tag) {
  sink_->Put(kSynchronize, tag);
  int character;
  do {
    character = *tag++;
    sink_->PutSection(character, "TagCharacter");
  } while (character != 0);
}

#endif

Serializer::Serializer(SnapshotByteSink* sink)
    : sink_(sink),
      current_root_index_(0),
      external_reference_encoder_(new ExternalReferenceEncoder),
      large_object_total_(0) {
  for (int i = 0; i <= LAST_SPACE; i++) {
    fullness_[i] = 0;
  }
}


Serializer::~Serializer() {
  delete external_reference_encoder_;
}


void StartupSerializer::SerializeStrongReferences() {
  // No active threads.
  CHECK_EQ(NULL, ThreadState::FirstInUse());
  // No active or weak handles.
  CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
  CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
  // We don't support serializing installed extensions.
  for (RegisteredExtension* ext = RegisteredExtension::first_extension();
       ext != NULL;
       ext = ext->next()) {
    CHECK_NE(v8::INSTALLED, ext->state());
  }
  Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
}


void PartialSerializer::Serialize(Object** object) {
  this->VisitPointer(object);

  // After we have done the partial serialization the partial snapshot cache
  // will contain some references needed to decode the partial snapshot. We
  // fill it up with undefineds so it has a predictable length so the
  // deserialization code doesn't need to know the length.
  for (int index = partial_snapshot_cache_length_;
       index < kPartialSnapshotCacheCapacity;
       index++) {
    partial_snapshot_cache_[index] = Heap::undefined_value();
    startup_serializer_->VisitPointer(&partial_snapshot_cache_[index]);
  }
  partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
}


void Serializer::VisitPointers(Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    if ((*current)->IsSmi()) {
      sink_->Put(kRawData, "RawData");
      sink_->PutInt(kPointerSize, "length");
      for (int i = 0; i < kPointerSize; i++) {
        sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
      }
    } else {
      SerializeObject(*current, kPlain, kStartOfObject);
    }
  }
}


Object* SerializerDeserializer::partial_snapshot_cache_[
    kPartialSnapshotCacheCapacity];
int SerializerDeserializer::partial_snapshot_cache_length_ = 0;


// This ensures that the partial snapshot cache keeps things alive during GC and
// tracks their movement. When it is called during serialization of the startup
// snapshot the partial snapshot is empty, so nothing happens. When the partial
// (context) snapshot is created, this array is populated with the pointers that
// the partial snapshot will need. As that happens we emit serialized objects to
// the startup snapshot that correspond to the elements of this cache array. On
// deserialization we therefore need to visit the cache array. This fills it up
// with pointers to deserialized objects.
void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
  visitor->VisitPointers(
      &partial_snapshot_cache_[0],
      &partial_snapshot_cache_[partial_snapshot_cache_length_]);
}


// When deserializing we need to set the size of the snapshot cache. This means
// the root iteration code (above) will iterate over array elements, writing the
// references to deserialized objects in them.
void SerializerDeserializer::SetSnapshotCacheSize(int size) {
  partial_snapshot_cache_length_ = size;
}


int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
  for (int i = 0; i < partial_snapshot_cache_length_; i++) {
    Object* entry = partial_snapshot_cache_[i];
    if (entry == heap_object) return i;
  }

  // We didn't find the object in the cache. So we add it to the cache and
  // then visit the pointer so that it becomes part of the startup snapshot
  // and we can refer to it from the partial snapshot.
  int length = partial_snapshot_cache_length_;
  CHECK(length < kPartialSnapshotCacheCapacity);
  partial_snapshot_cache_[length] = heap_object;
  startup_serializer_->VisitPointer(&partial_snapshot_cache_[length]);
  // We don't recurse from the startup snapshot generator into the partial
  // snapshot generator.
  ASSERT(length == partial_snapshot_cache_length_);
  return partial_snapshot_cache_length_++;
}


int PartialSerializer::RootIndex(HeapObject* heap_object) {
  for (int i = 0; i < Heap::kRootListLength; i++) {
    Object* root = Heap::roots_address()[i];
    if (root == heap_object) return i;
  }
  return kInvalidRootIndex;
}


// Encode the location of an already deserialized object in order to write its
// location into a later object. We can encode the location as an offset from
// the start of the deserialized objects or as an offset backwards from the
// current allocation pointer.
void Serializer::SerializeReferenceToPreviousObject(
    int space,
    int address,
    HowToCode how_to_code,
    WhereToPoint where_to_point) {
  int offset = CurrentAllocationAddress(space) - address;
  bool from_start = true;
  if (SpaceIsPaged(space)) {
    // For paged space it is simple to encode back from current allocation if
    // the object is on the same page as the current allocation pointer.
    if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
        (address >> kPageSizeBits)) {
      from_start = false;
      address = offset;
    }
  } else if (space == NEW_SPACE) {
    // For new space it is always simple to encode back from current allocation.
    if (offset < address) {
      from_start = false;
      address = offset;
    }
  }
  // If we are actually dealing with real offsets (and not a numbering of
  // all objects) then we should shift out the bits that are always 0.
  if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
  if (from_start) {
#define COMMON_REFS_CASE(pseudo_space, actual_space, offset) \
    if (space == actual_space && address == offset && \
        how_to_code == kPlain && where_to_point == kStartOfObject) { \
      sink_->Put(kFromStart + how_to_code + where_to_point + \
                 pseudo_space, "RefSer"); \
    } else  /* NOLINT */
    COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
#undef COMMON_REFS_CASE
    {  /* NOLINT */
      sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
      sink_->PutInt(address, "address");
    }
  } else {
    sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
    sink_->PutInt(address, "address");
  }
}
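

// For illustration: both encodings above drop the low kObjectAlignmentBits,
// which are always zero for aligned object offsets, and the deserializer's
// GetAddressFromStart/GetAddressFromEnd shift them back in:
//   sink side:   address >>= kObjectAlignmentBits;
//   source side: offset <<= kObjectAlignmentBits;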


void StartupSerializer::SerializeObject(
    Object* o,
    HowToCode how_to_code,
    WhereToPoint where_to_point) {
  CHECK(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);

  if (address_mapper_.IsMapped(heap_object)) {
    int space = SpaceOfAlreadySerializedObject(heap_object);
    int address = address_mapper_.MappedTo(heap_object);
    SerializeReferenceToPreviousObject(space,
                                       address,
                                       how_to_code,
                                       where_to_point);
  } else {
    // Object has not yet been serialized. Serialize it here.
    ObjectSerializer object_serializer(this,
                                       heap_object,
                                       sink_,
                                       how_to_code,
                                       where_to_point);
    object_serializer.Serialize();
  }
}


void StartupSerializer::SerializeWeakReferences() {
  for (int i = partial_snapshot_cache_length_;
       i < kPartialSnapshotCacheCapacity;
       i++) {
    sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization");
    sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
  }
  Heap::IterateWeakRoots(this, VISIT_ALL);
}


void PartialSerializer::SerializeObject(
    Object* o,
    HowToCode how_to_code,
    WhereToPoint where_to_point) {
  CHECK(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);

  int root_index;
  if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
    sink_->PutInt(root_index, "root_index");
    return;
  }

  if (ShouldBeInThePartialSnapshotCache(heap_object)) {
    int cache_index = PartialSnapshotCacheIndex(heap_object);
    sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
               "PartialSnapshotCache");
    sink_->PutInt(cache_index, "partial_snapshot_cache_index");
    return;
  }

  // Pointers from the partial snapshot to the objects in the startup snapshot
  // should go through the root array or through the partial snapshot cache.
  // If this is not the case you may have to add something to the root array.
  ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
  // All the symbols that the partial snapshot needs should be either in the
  // root table or in the partial snapshot cache.
  ASSERT(!heap_object->IsSymbol());

  if (address_mapper_.IsMapped(heap_object)) {
    int space = SpaceOfAlreadySerializedObject(heap_object);
    int address = address_mapper_.MappedTo(heap_object);
    SerializeReferenceToPreviousObject(space,
                                       address,
                                       how_to_code,
                                       where_to_point);
  } else {
    // Object has not yet been serialized. Serialize it here.
    ObjectSerializer serializer(this,
                                heap_object,
                                sink_,
                                how_to_code,
                                where_to_point);
    serializer.Serialize();
  }
}
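

// For illustration: on the wire a freshly serialized object produced by
// Serialize() below is a tag byte (kNewObject + representation + space),
// the size in words, optionally a kNewPage marker, then its serialized map
// and body, the mirror image of what Deserializer::ReadObject expects.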


void Serializer::ObjectSerializer::Serialize() {
  int space = Serializer::SpaceOfObject(object_);
  int size = object_->Size();

  sink_->Put(kNewObject + reference_representation_ + space,
             "ObjectSerialization");
  sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");

  LOG(SnapshotPositionEvent(object_->address(), sink_->Position()));

  // Mark this object as already serialized.
  bool start_new_page;
  int offset = serializer_->Allocate(space, size, &start_new_page);
  serializer_->address_mapper()->AddMapping(object_, offset);
  if (start_new_page) {
    sink_->Put(kNewPage, "NewPage");
    sink_->PutSection(space, "NewPageSpace");
  }

  // Serialize the map (first word of the object).
  serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject);

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;
  object_->IterateBody(object_->map()->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}


void Serializer::ObjectSerializer::VisitPointers(Object** start,
                                                 Object** end) {
  Object** current = start;
  while (current < end) {
    while (current < end && (*current)->IsSmi()) current++;
    if (current < end) OutputRawData(reinterpret_cast<Address>(current));

    while (current < end && !(*current)->IsSmi()) {
      serializer_->SerializeObject(*current, kPlain, kStartOfObject);
      bytes_processed_so_far_ += kPointerSize;
      current++;
    }
  }
}


void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
                                                           Address* end) {
  Address references_start = reinterpret_cast<Address>(start);
  OutputRawData(references_start);

  for (Address* current = start; current < end; current++) {
    sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
    int reference_id = serializer_->EncodeExternalReference(*current);
    sink_->PutInt(reference_id, "reference id");
  }
  bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize);
}


void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
  Address target_start = rinfo->target_address_address();
  OutputRawData(target_start);
  Address target = rinfo->target_address();
  uint32_t encoding = serializer_->EncodeExternalReference(target);
  CHECK(target == NULL ? encoding == 0 : encoding != 0);
  int representation;
  // Can't use a ternary operator because of gcc.
  if (rinfo->IsCodedSpecially()) {
    representation = kStartOfObject + kFromCode;
  } else {
    representation = kStartOfObject + kPlain;
  }
  sink_->Put(kExternalReference + representation, "ExternalReference");
  sink_->PutInt(encoding, "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}


void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
  CHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
  Address target_start = rinfo->target_address_address();
  OutputRawData(target_start);
  Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
  serializer_->SerializeObject(target, kFromCode, kFirstInstruction);
  bytes_processed_so_far_ += rinfo->target_address_size();
}


void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
  Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
  OutputRawData(entry_address);
  serializer_->SerializeObject(target, kPlain, kFirstInstruction);
  bytes_processed_so_far_ += kPointerSize;
}


void Serializer::ObjectSerializer::VisitExternalAsciiString(
    v8::String::ExternalAsciiStringResource** resource_pointer) {
  Address references_start = reinterpret_cast<Address>(resource_pointer);
  OutputRawData(references_start);
  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
    Object* source = Heap::natives_source_cache()->get(i);
    if (!source->IsUndefined()) {
      ExternalAsciiString* string = ExternalAsciiString::cast(source);
      typedef v8::String::ExternalAsciiStringResource Resource;
      Resource* resource = string->resource();
      if (resource == *resource_pointer) {
        sink_->Put(kNativesStringResource, "NativesStringResource");
        sink_->PutSection(i, "NativesStringResourceEnd");
        bytes_processed_so_far_ += sizeof(resource);
        return;
      }
    }
  }
  // One of the strings in the natives cache should match the resource. We
  // can't serialize any other kinds of external strings.
  UNREACHABLE();
}


void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
  Address object_start = object_->address();
  int up_to_offset = static_cast<int>(up_to - object_start);
  int skipped = up_to_offset - bytes_processed_so_far_;
  // This assert will fail if the reloc info gives us the target_address_address
  // locations in a non-ascending order. Luckily that doesn't happen.
  ASSERT(skipped >= 0);
  if (skipped != 0) {
    Address base = object_start + bytes_processed_so_far_;
#define RAW_CASE(index, length) \
    if (skipped == length) { \
      sink_->PutSection(kRawData + index, "RawDataFixed"); \
    } else  /* NOLINT */
    COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
    {  /* NOLINT */
      sink_->Put(kRawData, "RawData");
      sink_->PutInt(skipped, "length");
    }
    for (int i = 0; i < skipped; i++) {
      unsigned int data = base[i];
      sink_->PutSection(data, "Byte");
    }
    bytes_processed_so_far_ += skipped;
  }
}


int Serializer::SpaceOfObject(HeapObject* object) {
  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    AllocationSpace s = static_cast<AllocationSpace>(i);
    if (Heap::InSpace(object, s)) {
      if (i == LO_SPACE) {
        if (object->IsCode()) {
          return kLargeCode;
        } else if (object->IsFixedArray()) {
          return kLargeFixedArray;
        } else {
          return kLargeData;
        }
      }
      return i;
    }
  }
  UNREACHABLE();
  return 0;
}


int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    AllocationSpace s = static_cast<AllocationSpace>(i);
    if (Heap::InSpace(object, s)) {
      return i;
    }
  }
  UNREACHABLE();
  return 0;
}


int Serializer::Allocate(int space, int size, bool* new_page) {
  CHECK(space >= 0 && space < kNumberOfSpaces);
  if (SpaceIsLarge(space)) {
    // In large object space we merely number the objects instead of trying to
    // determine some sort of address.
    *new_page = true;
    large_object_total_ += size;
    return fullness_[LO_SPACE]++;
  }
  *new_page = false;
  if (fullness_[space] == 0) {
    *new_page = true;
  }
  if (SpaceIsPaged(space)) {
    // Paged spaces are a little special. We encode their addresses as if the
    // pages were all contiguous and each page were filled up in the range
    // 0 - Page::kObjectAreaSize. In practice the pages may not be contiguous
    // and allocation does not start at offset 0 in the page, but this scheme
    // means the deserializer can get the page number quickly by shifting the
    // serialized address.
    CHECK(IsPowerOf2(Page::kPageSize));
    int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
    CHECK(size <= Page::kObjectAreaSize);
    if (used_in_this_page + size > Page::kObjectAreaSize) {
      *new_page = true;
      fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
    }
  }
  int allocation_address = fullness_[space];
  fullness_[space] = allocation_address + size;
  return allocation_address;
}


} }  // namespace v8::internal