deoptimizer.cc revision 589d6979ff2ef66fca2d8fa51404c369ca5e9250
1// Copyright 2011 the V8 project authors. All rights reserved. 2// Redistribution and use in source and binary forms, with or without 3// modification, are permitted provided that the following conditions are 4// met: 5// 6// * Redistributions of source code must retain the above copyright 7// notice, this list of conditions and the following disclaimer. 8// * Redistributions in binary form must reproduce the above 9// copyright notice, this list of conditions and the following 10// disclaimer in the documentation and/or other materials provided 11// with the distribution. 12// * Neither the name of Google Inc. nor the names of its 13// contributors may be used to endorse or promote products derived 14// from this software without specific prior written permission. 15// 16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 28#include "v8.h" 29 30#include "codegen.h" 31#include "deoptimizer.h" 32#include "disasm.h" 33#include "full-codegen.h" 34#include "global-handles.h" 35#include "macro-assembler.h" 36#include "prettyprinter.h" 37 38 39namespace v8 { 40namespace internal { 41 42DeoptimizerData::DeoptimizerData() { 43 eager_deoptimization_entry_code_ = NULL; 44 lazy_deoptimization_entry_code_ = NULL; 45 current_ = NULL; 46 deoptimizing_code_list_ = NULL; 47#ifdef ENABLE_DEBUGGER_SUPPORT 48 deoptimized_frame_info_ = NULL; 49#endif 50} 51 52 53DeoptimizerData::~DeoptimizerData() { 54 if (eager_deoptimization_entry_code_ != NULL) { 55 eager_deoptimization_entry_code_->Free(EXECUTABLE); 56 eager_deoptimization_entry_code_ = NULL; 57 } 58 if (lazy_deoptimization_entry_code_ != NULL) { 59 lazy_deoptimization_entry_code_->Free(EXECUTABLE); 60 lazy_deoptimization_entry_code_ = NULL; 61 } 62} 63 64 65#ifdef ENABLE_DEBUGGER_SUPPORT 66void DeoptimizerData::Iterate(ObjectVisitor* v) { 67 if (deoptimized_frame_info_ != NULL) { 68 deoptimized_frame_info_->Iterate(v); 69 } 70} 71#endif 72 73 74Deoptimizer* Deoptimizer::New(JSFunction* function, 75 BailoutType type, 76 unsigned bailout_id, 77 Address from, 78 int fp_to_sp_delta, 79 Isolate* isolate) { 80 ASSERT(isolate == Isolate::Current()); 81 Deoptimizer* deoptimizer = new Deoptimizer(isolate, 82 function, 83 type, 84 bailout_id, 85 from, 86 fp_to_sp_delta, 87 NULL); 88 ASSERT(isolate->deoptimizer_data()->current_ == NULL); 89 isolate->deoptimizer_data()->current_ = deoptimizer; 90 return deoptimizer; 91} 92 93 94Deoptimizer* Deoptimizer::Grab(Isolate* isolate) { 95 ASSERT(isolate == Isolate::Current()); 96 Deoptimizer* result = isolate->deoptimizer_data()->current_; 97 ASSERT(result != NULL); 98 result->DeleteFrameDescriptions(); 99 isolate->deoptimizer_data()->current_ = NULL; 100 return result; 101} 102 103#ifdef ENABLE_DEBUGGER_SUPPORT 104DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame( 105 JavaScriptFrame* frame, 106 int 
frame_index, 107 Isolate* isolate) { 108 ASSERT(isolate == Isolate::Current()); 109 ASSERT(frame->is_optimized()); 110 ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL); 111 112 // Get the function and code from the frame. 113 JSFunction* function = JSFunction::cast(frame->function()); 114 Code* code = frame->LookupCode(); 115 Address code_start_address = code->instruction_start(); 116 117 // Locate the deoptimization point in the code. As we are at a call the 118 // return address must be at a place in the code with deoptimization support. 119 int deoptimization_index = Safepoint::kNoDeoptimizationIndex; 120 // Scope this as the safe point constructor will disallow allocation. 121 { 122 SafepointTable table(code); 123 for (unsigned i = 0; i < table.length(); ++i) { 124 Address address = code_start_address + table.GetPcOffset(i); 125 if (address == frame->pc()) { 126 SafepointEntry safepoint_entry = table.GetEntry(i); 127 ASSERT(safepoint_entry.deoptimization_index() != 128 Safepoint::kNoDeoptimizationIndex); 129 deoptimization_index = safepoint_entry.deoptimization_index(); 130 break; 131 } 132 } 133 } 134 ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex); 135 136 // Always use the actual stack slots when calculating the fp to sp 137 // delta adding two for the function and context. 138 unsigned stack_slots = code->stack_slots(); 139 unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize); 140 141 Deoptimizer* deoptimizer = new Deoptimizer(isolate, 142 function, 143 Deoptimizer::DEBUGGER, 144 deoptimization_index, 145 frame->pc(), 146 fp_to_sp_delta, 147 code); 148 Address tos = frame->fp() - fp_to_sp_delta; 149 deoptimizer->FillInputFrame(tos, frame); 150 151 // Calculate the output frames. 152 Deoptimizer::ComputeOutputFrames(deoptimizer); 153 154 // Create the GC safe output frame information and register it for GC 155 // handling. 
156 ASSERT_LT(frame_index, deoptimizer->output_count()); 157 DeoptimizedFrameInfo* info = 158 new DeoptimizedFrameInfo(deoptimizer, frame_index); 159 isolate->deoptimizer_data()->deoptimized_frame_info_ = info; 160 161 // Get the "simulated" top and size for the requested frame. 162 Address top = 163 reinterpret_cast<Address>(deoptimizer->output_[frame_index]->GetTop()); 164 uint32_t size = deoptimizer->output_[frame_index]->GetFrameSize(); 165 166 // Done with the GC-unsafe frame descriptions. This re-enables allocation. 167 deoptimizer->DeleteFrameDescriptions(); 168 169 // Allocate a heap number for the doubles belonging to this frame. 170 deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame( 171 top, size, info); 172 173 // Finished using the deoptimizer instance. 174 delete deoptimizer; 175 176 return info; 177} 178 179 180void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info, 181 Isolate* isolate) { 182 ASSERT(isolate == Isolate::Current()); 183 ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info); 184 delete info; 185 isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL; 186} 187#endif 188 189void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, 190 int count, 191 BailoutType type) { 192 TableEntryGenerator generator(masm, type, count); 193 generator.Generate(); 194} 195 196 197class DeoptimizingVisitor : public OptimizedFunctionVisitor { 198 public: 199 virtual void EnterContext(Context* context) { 200 if (FLAG_trace_deopt) { 201 PrintF("[deoptimize context: %" V8PRIxPTR "]\n", 202 reinterpret_cast<intptr_t>(context)); 203 } 204 } 205 206 virtual void VisitFunction(JSFunction* function) { 207 Deoptimizer::DeoptimizeFunction(function); 208 } 209 210 virtual void LeaveContext(Context* context) { 211 context->ClearOptimizedFunctions(); 212 } 213}; 214 215 216void Deoptimizer::DeoptimizeAll() { 217 AssertNoAllocation no_allocation; 218 219 if (FLAG_trace_deopt) { 220 PrintF("[deoptimize all 
contexts]\n"); 221 } 222 223 DeoptimizingVisitor visitor; 224 VisitAllOptimizedFunctions(&visitor); 225} 226 227 228void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) { 229 AssertNoAllocation no_allocation; 230 231 DeoptimizingVisitor visitor; 232 VisitAllOptimizedFunctionsForGlobalObject(object, &visitor); 233} 234 235 236void Deoptimizer::VisitAllOptimizedFunctionsForContext( 237 Context* context, OptimizedFunctionVisitor* visitor) { 238 AssertNoAllocation no_allocation; 239 240 ASSERT(context->IsGlobalContext()); 241 242 visitor->EnterContext(context); 243 // Run through the list of optimized functions and deoptimize them. 244 Object* element = context->OptimizedFunctionsListHead(); 245 while (!element->IsUndefined()) { 246 JSFunction* element_function = JSFunction::cast(element); 247 // Get the next link before deoptimizing as deoptimizing will clear the 248 // next link. 249 element = element_function->next_function_link(); 250 visitor->VisitFunction(element_function); 251 } 252 visitor->LeaveContext(context); 253} 254 255 256void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject( 257 JSObject* object, OptimizedFunctionVisitor* visitor) { 258 AssertNoAllocation no_allocation; 259 260 if (object->IsJSGlobalProxy()) { 261 Object* proto = object->GetPrototype(); 262 ASSERT(proto->IsJSGlobalObject()); 263 VisitAllOptimizedFunctionsForContext( 264 GlobalObject::cast(proto)->global_context(), visitor); 265 } else if (object->IsGlobalObject()) { 266 VisitAllOptimizedFunctionsForContext( 267 GlobalObject::cast(object)->global_context(), visitor); 268 } 269} 270 271 272void Deoptimizer::VisitAllOptimizedFunctions( 273 OptimizedFunctionVisitor* visitor) { 274 AssertNoAllocation no_allocation; 275 276 // Run through the list of all global contexts and deoptimize. 
277 Object* global = Isolate::Current()->heap()->global_contexts_list(); 278 while (!global->IsUndefined()) { 279 VisitAllOptimizedFunctionsForGlobalObject(Context::cast(global)->global(), 280 visitor); 281 global = Context::cast(global)->get(Context::NEXT_CONTEXT_LINK); 282 } 283} 284 285 286void Deoptimizer::HandleWeakDeoptimizedCode( 287 v8::Persistent<v8::Value> obj, void* data) { 288 DeoptimizingCodeListNode* node = 289 reinterpret_cast<DeoptimizingCodeListNode*>(data); 290 RemoveDeoptimizingCode(*node->code()); 291#ifdef DEBUG 292 node = Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_; 293 while (node != NULL) { 294 ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data)); 295 node = node->next(); 296 } 297#endif 298} 299 300 301void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) { 302 deoptimizer->DoComputeOutputFrames(); 303} 304 305 306Deoptimizer::Deoptimizer(Isolate* isolate, 307 JSFunction* function, 308 BailoutType type, 309 unsigned bailout_id, 310 Address from, 311 int fp_to_sp_delta, 312 Code* optimized_code) 313 : isolate_(isolate), 314 function_(function), 315 bailout_id_(bailout_id), 316 bailout_type_(type), 317 from_(from), 318 fp_to_sp_delta_(fp_to_sp_delta), 319 input_(NULL), 320 output_count_(0), 321 output_(NULL), 322 deferred_heap_numbers_(0) { 323 if (FLAG_trace_deopt && type != OSR) { 324 if (type == DEBUGGER) { 325 PrintF("**** DEOPT FOR DEBUGGER: "); 326 } else { 327 PrintF("**** DEOPT: "); 328 } 329 function->PrintName(); 330 PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n", 331 bailout_id, 332 reinterpret_cast<intptr_t>(from), 333 fp_to_sp_delta - (2 * kPointerSize)); 334 } else if (FLAG_trace_osr && type == OSR) { 335 PrintF("**** OSR: "); 336 function->PrintName(); 337 PrintF(" at ast id #%u, address 0x%" V8PRIxPTR ", frame size %d\n", 338 bailout_id, 339 reinterpret_cast<intptr_t>(from), 340 fp_to_sp_delta - (2 * kPointerSize)); 341 } 342 // Find the optimized code. 
343 if (type == EAGER) { 344 ASSERT(from == NULL); 345 optimized_code_ = function_->code(); 346 } else if (type == LAZY) { 347 optimized_code_ = FindDeoptimizingCodeFromAddress(from); 348 ASSERT(optimized_code_ != NULL); 349 } else if (type == OSR) { 350 // The function has already been optimized and we're transitioning 351 // from the unoptimized shared version to the optimized one in the 352 // function. The return address (from) points to unoptimized code. 353 optimized_code_ = function_->code(); 354 ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION); 355 ASSERT(!optimized_code_->contains(from)); 356 } else if (type == DEBUGGER) { 357 optimized_code_ = optimized_code; 358 ASSERT(optimized_code_->contains(from)); 359 } 360 ASSERT(HEAP->allow_allocation(false)); 361 unsigned size = ComputeInputFrameSize(); 362 input_ = new(size) FrameDescription(size, function); 363#ifdef DEBUG 364 input_->SetKind(Code::OPTIMIZED_FUNCTION); 365#endif 366} 367 368 369Deoptimizer::~Deoptimizer() { 370 ASSERT(input_ == NULL && output_ == NULL); 371} 372 373 374void Deoptimizer::DeleteFrameDescriptions() { 375 delete input_; 376 for (int i = 0; i < output_count_; ++i) { 377 if (output_[i] != input_) delete output_[i]; 378 } 379 delete[] output_; 380 input_ = NULL; 381 output_ = NULL; 382 ASSERT(!HEAP->allow_allocation(true)); 383} 384 385 386Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) { 387 ASSERT(id >= 0); 388 if (id >= kNumberOfEntries) return NULL; 389 LargeObjectChunk* base = NULL; 390 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); 391 if (type == EAGER) { 392 if (data->eager_deoptimization_entry_code_ == NULL) { 393 data->eager_deoptimization_entry_code_ = CreateCode(type); 394 } 395 base = data->eager_deoptimization_entry_code_; 396 } else { 397 if (data->lazy_deoptimization_entry_code_ == NULL) { 398 data->lazy_deoptimization_entry_code_ = CreateCode(type); 399 } 400 base = data->lazy_deoptimization_entry_code_; 401 } 402 
return 403 static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_); 404} 405 406 407int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) { 408 LargeObjectChunk* base = NULL; 409 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); 410 if (type == EAGER) { 411 base = data->eager_deoptimization_entry_code_; 412 } else { 413 base = data->lazy_deoptimization_entry_code_; 414 } 415 if (base == NULL || 416 addr < base->GetStartAddress() || 417 addr >= base->GetStartAddress() + 418 (kNumberOfEntries * table_entry_size_)) { 419 return kNotDeoptimizationEntry; 420 } 421 ASSERT_EQ(0, 422 static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_); 423 return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_; 424} 425 426 427int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data, 428 unsigned id, 429 SharedFunctionInfo* shared) { 430 // TODO(kasperl): For now, we do a simple linear search for the PC 431 // offset associated with the given node id. This should probably be 432 // changed to a binary search. 433 int length = data->DeoptPoints(); 434 Smi* smi_id = Smi::FromInt(id); 435 for (int i = 0; i < length; i++) { 436 if (data->AstId(i) == smi_id) { 437 return data->PcAndState(i)->value(); 438 } 439 } 440 PrintF("[couldn't find pc offset for node=%u]\n", id); 441 PrintF("[method: %s]\n", *shared->DebugName()->ToCString()); 442 // Print the source code if available. 
443 HeapStringAllocator string_allocator; 444 StringStream stream(&string_allocator); 445 shared->SourceCodePrint(&stream, -1); 446 PrintF("[source:\n%s\n]", *stream.ToCString()); 447 448 UNREACHABLE(); 449 return -1; 450} 451 452 453int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) { 454 int length = 0; 455 DeoptimizingCodeListNode* node = 456 isolate->deoptimizer_data()->deoptimizing_code_list_; 457 while (node != NULL) { 458 length++; 459 node = node->next(); 460 } 461 return length; 462} 463 464 465void Deoptimizer::DoComputeOutputFrames() { 466 if (bailout_type_ == OSR) { 467 DoComputeOsrOutputFrame(); 468 return; 469 } 470 471 // Print some helpful diagnostic information. 472 int64_t start = OS::Ticks(); 473 if (FLAG_trace_deopt) { 474 PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ", 475 (bailout_type_ == LAZY ? " (lazy)" : ""), 476 reinterpret_cast<intptr_t>(function_)); 477 function_->PrintName(); 478 PrintF(" @%d]\n", bailout_id_); 479 } 480 481 // Determine basic deoptimization information. The optimized frame is 482 // described by the input data. 483 DeoptimizationInputData* input_data = 484 DeoptimizationInputData::cast(optimized_code_->deoptimization_data()); 485 unsigned node_id = input_data->AstId(bailout_id_)->value(); 486 ByteArray* translations = input_data->TranslationByteArray(); 487 unsigned translation_index = 488 input_data->TranslationIndex(bailout_id_)->value(); 489 490 // Do the input frame to output frame(s) translation. 491 TranslationIterator iterator(translations, translation_index); 492 Translation::Opcode opcode = 493 static_cast<Translation::Opcode>(iterator.Next()); 494 ASSERT(Translation::BEGIN == opcode); 495 USE(opcode); 496 // Read the number of output frames and allocate an array for their 497 // descriptions. 
498 int count = iterator.Next(); 499 ASSERT(output_ == NULL); 500 output_ = new FrameDescription*[count]; 501 for (int i = 0; i < count; ++i) { 502 output_[i] = NULL; 503 } 504 output_count_ = count; 505 506 // Translate each output frame. 507 for (int i = 0; i < count; ++i) { 508 DoComputeFrame(&iterator, i); 509 } 510 511 // Print some helpful diagnostic information. 512 if (FLAG_trace_deopt) { 513 double ms = static_cast<double>(OS::Ticks() - start) / 1000; 514 int index = output_count_ - 1; // Index of the topmost frame. 515 JSFunction* function = output_[index]->GetFunction(); 516 PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ", 517 reinterpret_cast<intptr_t>(function)); 518 function->PrintName(); 519 PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, took %0.3f ms]\n", 520 node_id, 521 output_[index]->GetPc(), 522 FullCodeGenerator::State2String( 523 static_cast<FullCodeGenerator::State>( 524 output_[index]->GetState()->value())), 525 ms); 526 } 527} 528 529 530void Deoptimizer::MaterializeHeapNumbers() { 531 ASSERT_NE(DEBUGGER, bailout_type_); 532 for (int i = 0; i < deferred_heap_numbers_.length(); i++) { 533 HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i]; 534 Handle<Object> num = isolate_->factory()->NewNumber(d.value()); 535 if (FLAG_trace_deopt) { 536 PrintF("Materializing a new heap number %p [%e] in slot %p\n", 537 reinterpret_cast<void*>(*num), 538 d.value(), 539 d.slot_address()); 540 } 541 542 Memory::Object_at(d.slot_address()) = *num; 543 } 544} 545 546 547#ifdef ENABLE_DEBUGGER_SUPPORT 548void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame( 549 Address top, uint32_t size, DeoptimizedFrameInfo* info) { 550 ASSERT_EQ(DEBUGGER, bailout_type_); 551 for (int i = 0; i < deferred_heap_numbers_.length(); i++) { 552 HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i]; 553 554 // Check of the heap number to materialize actually belong to the frame 555 // being extracted. 
556 Address slot = d.slot_address(); 557 if (top <= slot && slot < top + size) { 558 Handle<Object> num = isolate_->factory()->NewNumber(d.value()); 559 // Calculate the index with the botton of the expression stack 560 // at index 0, and the fixed part (including incoming arguments) 561 // at negative indexes. 562 int index = static_cast<int>( 563 info->expression_count_ - (slot - top) / kPointerSize - 1); 564 if (FLAG_trace_deopt) { 565 PrintF("Materializing a new heap number %p [%e] in slot %p" 566 "for stack index %d\n", 567 reinterpret_cast<void*>(*num), 568 d.value(), 569 d.slot_address(), 570 index); 571 } 572 if (index >=0) { 573 info->SetExpression(index, *num); 574 } else { 575 // Calculate parameter index subtracting one for the receiver. 576 int parameter_index = 577 index + 578 static_cast<int>(size) / kPointerSize - 579 info->expression_count_ - 1; 580 info->SetParameter(parameter_index, *num); 581 } 582 } 583 } 584} 585#endif 586 587 588void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, 589 int frame_index, 590 unsigned output_offset) { 591 disasm::NameConverter converter; 592 // A GC-safe temporary placeholder that we can put in the output frame. 593 const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0)); 594 595 // Ignore commands marked as duplicate and act on the first non-duplicate. 
596 Translation::Opcode opcode = 597 static_cast<Translation::Opcode>(iterator->Next()); 598 while (opcode == Translation::DUPLICATE) { 599 opcode = static_cast<Translation::Opcode>(iterator->Next()); 600 iterator->Skip(Translation::NumberOfOperandsFor(opcode)); 601 opcode = static_cast<Translation::Opcode>(iterator->Next()); 602 } 603 604 switch (opcode) { 605 case Translation::BEGIN: 606 case Translation::FRAME: 607 case Translation::DUPLICATE: 608 UNREACHABLE(); 609 return; 610 611 case Translation::REGISTER: { 612 int input_reg = iterator->Next(); 613 intptr_t input_value = input_->GetRegister(input_reg); 614 if (FLAG_trace_deopt) { 615 PrintF( 616 " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n", 617 output_[frame_index]->GetTop() + output_offset, 618 output_offset, 619 input_value, 620 converter.NameOfCPURegister(input_reg)); 621 } 622 output_[frame_index]->SetFrameSlot(output_offset, input_value); 623 return; 624 } 625 626 case Translation::INT32_REGISTER: { 627 int input_reg = iterator->Next(); 628 intptr_t value = input_->GetRegister(input_reg); 629 bool is_smi = Smi::IsValid(value); 630 if (FLAG_trace_deopt) { 631 PrintF( 632 " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n", 633 output_[frame_index]->GetTop() + output_offset, 634 output_offset, 635 value, 636 converter.NameOfCPURegister(input_reg), 637 is_smi ? "smi" : "heap number"); 638 } 639 if (is_smi) { 640 intptr_t tagged_value = 641 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value))); 642 output_[frame_index]->SetFrameSlot(output_offset, tagged_value); 643 } else { 644 // We save the untagged value on the side and store a GC-safe 645 // temporary placeholder in the frame. 
646 AddDoubleValue(output_[frame_index]->GetTop() + output_offset, 647 static_cast<double>(static_cast<int32_t>(value))); 648 output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder); 649 } 650 return; 651 } 652 653 case Translation::DOUBLE_REGISTER: { 654 int input_reg = iterator->Next(); 655 double value = input_->GetDoubleRegister(input_reg); 656 if (FLAG_trace_deopt) { 657 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n", 658 output_[frame_index]->GetTop() + output_offset, 659 output_offset, 660 value, 661 DoubleRegister::AllocationIndexToString(input_reg)); 662 } 663 // We save the untagged value on the side and store a GC-safe 664 // temporary placeholder in the frame. 665 AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value); 666 output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder); 667 return; 668 } 669 670 case Translation::STACK_SLOT: { 671 int input_slot_index = iterator->Next(); 672 unsigned input_offset = 673 input_->GetOffsetFromSlotIndex(this, input_slot_index); 674 intptr_t input_value = input_->GetFrameSlot(input_offset); 675 if (FLAG_trace_deopt) { 676 PrintF(" 0x%08" V8PRIxPTR ": ", 677 output_[frame_index]->GetTop() + output_offset); 678 PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n", 679 output_offset, 680 input_value, 681 input_offset); 682 } 683 output_[frame_index]->SetFrameSlot(output_offset, input_value); 684 return; 685 } 686 687 case Translation::INT32_STACK_SLOT: { 688 int input_slot_index = iterator->Next(); 689 unsigned input_offset = 690 input_->GetOffsetFromSlotIndex(this, input_slot_index); 691 intptr_t value = input_->GetFrameSlot(input_offset); 692 bool is_smi = Smi::IsValid(value); 693 if (FLAG_trace_deopt) { 694 PrintF(" 0x%08" V8PRIxPTR ": ", 695 output_[frame_index]->GetTop() + output_offset); 696 PrintF("[top + %d] <- %" V8PRIdPTR " ; [esp + %d] (%s)\n", 697 output_offset, 698 value, 699 input_offset, 700 is_smi ? 
"smi" : "heap number"); 701 } 702 if (is_smi) { 703 intptr_t tagged_value = 704 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value))); 705 output_[frame_index]->SetFrameSlot(output_offset, tagged_value); 706 } else { 707 // We save the untagged value on the side and store a GC-safe 708 // temporary placeholder in the frame. 709 AddDoubleValue(output_[frame_index]->GetTop() + output_offset, 710 static_cast<double>(static_cast<int32_t>(value))); 711 output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder); 712 } 713 return; 714 } 715 716 case Translation::DOUBLE_STACK_SLOT: { 717 int input_slot_index = iterator->Next(); 718 unsigned input_offset = 719 input_->GetOffsetFromSlotIndex(this, input_slot_index); 720 double value = input_->GetDoubleFrameSlot(input_offset); 721 if (FLAG_trace_deopt) { 722 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n", 723 output_[frame_index]->GetTop() + output_offset, 724 output_offset, 725 value, 726 input_offset); 727 } 728 // We save the untagged value on the side and store a GC-safe 729 // temporary placeholder in the frame. 730 AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value); 731 output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder); 732 return; 733 } 734 735 case Translation::LITERAL: { 736 Object* literal = ComputeLiteral(iterator->Next()); 737 if (FLAG_trace_deopt) { 738 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ", 739 output_[frame_index]->GetTop() + output_offset, 740 output_offset); 741 literal->ShortPrint(); 742 PrintF(" ; literal\n"); 743 } 744 intptr_t value = reinterpret_cast<intptr_t>(literal); 745 output_[frame_index]->SetFrameSlot(output_offset, value); 746 return; 747 } 748 749 case Translation::ARGUMENTS_OBJECT: { 750 // Use the arguments marker value as a sentinel and fill in the arguments 751 // object after the deoptimized frame is built. 752 ASSERT(frame_index == 0); // Only supported for first frame. 
753 if (FLAG_trace_deopt) { 754 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ", 755 output_[frame_index]->GetTop() + output_offset, 756 output_offset); 757 isolate_->heap()->arguments_marker()->ShortPrint(); 758 PrintF(" ; arguments object\n"); 759 } 760 intptr_t value = reinterpret_cast<intptr_t>( 761 isolate_->heap()->arguments_marker()); 762 output_[frame_index]->SetFrameSlot(output_offset, value); 763 return; 764 } 765 } 766} 767 768 769bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator, 770 int* input_offset) { 771 disasm::NameConverter converter; 772 FrameDescription* output = output_[0]; 773 774 // The input values are all part of the unoptimized frame so they 775 // are all tagged pointers. 776 uintptr_t input_value = input_->GetFrameSlot(*input_offset); 777 Object* input_object = reinterpret_cast<Object*>(input_value); 778 779 Translation::Opcode opcode = 780 static_cast<Translation::Opcode>(iterator->Next()); 781 bool duplicate = (opcode == Translation::DUPLICATE); 782 if (duplicate) { 783 opcode = static_cast<Translation::Opcode>(iterator->Next()); 784 } 785 786 switch (opcode) { 787 case Translation::BEGIN: 788 case Translation::FRAME: 789 case Translation::DUPLICATE: 790 UNREACHABLE(); // Malformed input. 791 return false; 792 793 case Translation::REGISTER: { 794 int output_reg = iterator->Next(); 795 if (FLAG_trace_osr) { 796 PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n", 797 converter.NameOfCPURegister(output_reg), 798 input_value, 799 *input_offset); 800 } 801 output->SetRegister(output_reg, input_value); 802 break; 803 } 804 805 case Translation::INT32_REGISTER: { 806 // Abort OSR if we don't have a number. 807 if (!input_object->IsNumber()) return false; 808 809 int output_reg = iterator->Next(); 810 int int32_value = input_object->IsSmi() 811 ? Smi::cast(input_object)->value() 812 : FastD2I(input_object->Number()); 813 // Abort the translation if the conversion lost information. 
814 if (!input_object->IsSmi() && 815 FastI2D(int32_value) != input_object->Number()) { 816 if (FLAG_trace_osr) { 817 PrintF("**** %g could not be converted to int32 ****\n", 818 input_object->Number()); 819 } 820 return false; 821 } 822 if (FLAG_trace_osr) { 823 PrintF(" %s <- %d (int32) ; [sp + %d]\n", 824 converter.NameOfCPURegister(output_reg), 825 int32_value, 826 *input_offset); 827 } 828 output->SetRegister(output_reg, int32_value); 829 break; 830 } 831 832 case Translation::DOUBLE_REGISTER: { 833 // Abort OSR if we don't have a number. 834 if (!input_object->IsNumber()) return false; 835 836 int output_reg = iterator->Next(); 837 double double_value = input_object->Number(); 838 if (FLAG_trace_osr) { 839 PrintF(" %s <- %g (double) ; [sp + %d]\n", 840 DoubleRegister::AllocationIndexToString(output_reg), 841 double_value, 842 *input_offset); 843 } 844 output->SetDoubleRegister(output_reg, double_value); 845 break; 846 } 847 848 case Translation::STACK_SLOT: { 849 int output_index = iterator->Next(); 850 unsigned output_offset = 851 output->GetOffsetFromSlotIndex(this, output_index); 852 if (FLAG_trace_osr) { 853 PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d]\n", 854 output_offset, 855 input_value, 856 *input_offset); 857 } 858 output->SetFrameSlot(output_offset, input_value); 859 break; 860 } 861 862 case Translation::INT32_STACK_SLOT: { 863 // Abort OSR if we don't have a number. 864 if (!input_object->IsNumber()) return false; 865 866 int output_index = iterator->Next(); 867 unsigned output_offset = 868 output->GetOffsetFromSlotIndex(this, output_index); 869 int int32_value = input_object->IsSmi() 870 ? Smi::cast(input_object)->value() 871 : DoubleToInt32(input_object->Number()); 872 // Abort the translation if the conversion lost information. 
873 if (!input_object->IsSmi() && 874 FastI2D(int32_value) != input_object->Number()) { 875 if (FLAG_trace_osr) { 876 PrintF("**** %g could not be converted to int32 ****\n", 877 input_object->Number()); 878 } 879 return false; 880 } 881 if (FLAG_trace_osr) { 882 PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n", 883 output_offset, 884 int32_value, 885 *input_offset); 886 } 887 output->SetFrameSlot(output_offset, int32_value); 888 break; 889 } 890 891 case Translation::DOUBLE_STACK_SLOT: { 892 static const int kLowerOffset = 0 * kPointerSize; 893 static const int kUpperOffset = 1 * kPointerSize; 894 895 // Abort OSR if we don't have a number. 896 if (!input_object->IsNumber()) return false; 897 898 int output_index = iterator->Next(); 899 unsigned output_offset = 900 output->GetOffsetFromSlotIndex(this, output_index); 901 double double_value = input_object->Number(); 902 uint64_t int_value = BitCast<uint64_t, double>(double_value); 903 int32_t lower = static_cast<int32_t>(int_value); 904 int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt); 905 if (FLAG_trace_osr) { 906 PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n", 907 output_offset + kUpperOffset, 908 upper, 909 double_value, 910 *input_offset); 911 PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n", 912 output_offset + kLowerOffset, 913 lower, 914 double_value, 915 *input_offset); 916 } 917 output->SetFrameSlot(output_offset + kLowerOffset, lower); 918 output->SetFrameSlot(output_offset + kUpperOffset, upper); 919 break; 920 } 921 922 case Translation::LITERAL: { 923 // Just ignore non-materialized literals. 924 iterator->Next(); 925 break; 926 } 927 928 case Translation::ARGUMENTS_OBJECT: { 929 // Optimized code assumes that the argument object has not been 930 // materialized and so bypasses it when doing arguments access. 931 // We should have bailed out before starting the frame 932 // translation. 
      // An arguments object here means the optimized code expected it to
      // stay unmaterialized; frame translation should have bailed out
      // before reaching this command, so this point is unreachable.
      UNREACHABLE();
      return false;
    }
  }

  // DUPLICATE commands re-use the previous input value, so only advance
  // the input offset for commands that consumed a fresh input slot.
  if (!duplicate) *input_offset -= kPointerSize;
  return true;
}


void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
                                      Code* check_code,
                                      Code* replacement_code) {
  // Iterate over the stack check table and patch every stack check
  // call to an unconditional call to the replacement code.
  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
  Address stack_check_cursor = unoptimized_code->instruction_start() +
      unoptimized_code->stack_check_table_offset();
  // The table starts with a 32-bit entry count; each entry is two 32-bit
  // words, of which the second is the pc offset of the stack check.
  uint32_t table_length = Memory::uint32_at(stack_check_cursor);
  stack_check_cursor += kIntSize;
  for (uint32_t i = 0; i < table_length; ++i) {
    uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
    Address pc_after = unoptimized_code->instruction_start() + pc_offset;
    PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
    stack_check_cursor += 2 * kIntSize;
  }
}


void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
                                       Code* check_code,
                                       Code* replacement_code) {
  // Iterate over the stack check table and revert the patched
  // stack check calls.  Mirrors PatchStackCheckCode above; the table
  // layout (count, then two-word entries) must stay in sync with it.
  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
  Address stack_check_cursor = unoptimized_code->instruction_start() +
      unoptimized_code->stack_check_table_offset();
  uint32_t table_length = Memory::uint32_at(stack_check_cursor);
  stack_check_cursor += kIntSize;
  for (uint32_t i = 0; i < table_length; ++i) {
    uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
    Address pc_after = unoptimized_code->instruction_start() + pc_offset;
    RevertStackCheckCodeAt(pc_after, check_code, replacement_code);
    stack_check_cursor += 2 * kIntSize;
  }
}


// Computes the size of the optimized (input) frame from the fixed frame
// size plus the recorded fp-to-sp delta of the deoptimizing frame.
unsigned Deoptimizer::ComputeInputFrameSize() const {
  unsigned fixed_size = ComputeFixedSize(function_);
  // The fp-to-sp delta already takes the context and the function
  // into account so we have to avoid double counting them (-2).
  unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
#ifdef DEBUG
  if (bailout_type_ == OSR) {
    // TODO(kasperl): It would be nice if we could verify that the
    // size matches with the stack height we can compute based on the
    // environment at the OSR entry. The code for that is built into
    // the DoComputeOsrOutputFrame function for now.
  } else {
    // For non-OSR bailouts the delta-derived size must agree with the
    // statically known stack slots plus outgoing arguments.
    unsigned stack_slots = optimized_code_->stack_slots();
    unsigned outgoing_size = ComputeOutgoingArgumentSize();
    ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
  }
#endif
  return result;
}


unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
  // The fixed part of the frame consists of the return address, frame
  // pointer, function, context, and all the incoming arguments.
  // Four fixed slots: return address, frame pointer, function, context.
  static const unsigned kFixedSlotSize = 4 * kPointerSize;
  return ComputeIncomingArgumentSize(function) + kFixedSlotSize;
}


unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
  // The incoming arguments are the values for formal parameters and
  // the receiver. Every slot contains a pointer.
  unsigned arguments = function->shared()->formal_parameter_count() + 1;
  return arguments * kPointerSize;
}


unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
  // Size in bytes of the outgoing-argument area recorded in the
  // deoptimization data for this bailout point.
  DeoptimizationInputData* data = DeoptimizationInputData::cast(
      optimized_code_->deoptimization_data());
  unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
  return height * kPointerSize;
}


Object* Deoptimizer::ComputeLiteral(int index) const {
  // Fetches a literal from the optimized code's deoptimization
  // literal array.
  DeoptimizationInputData* data = DeoptimizationInputData::cast(
      optimized_code_->deoptimization_data());
  FixedArray* literals = data->LiteralArray();
  return literals->get(index);
}


void Deoptimizer::AddDoubleValue(intptr_t slot_address,
                                 double value) {
  // Records a double that must later be materialized as a heap number
  // into the frame slot at |slot_address| (deferred because the
  // deoptimizer cannot allocate while building frames).
  HeapNumberMaterializationDescriptor value_desc(
      reinterpret_cast<Address>(slot_address), value);
  deferred_heap_numbers_.Add(value_desc);
}


// Generates the table of deoptimization entry stubs for the given bailout
// type into a fresh executable large-object chunk.
LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
  // We cannot run this if the serializer is enabled because this will
  // cause us to emit relocation information for the external
  // references. This is fine because the deoptimizer's code section
  // isn't meant to be serialized at all.
  ASSERT(!Serializer::enabled());

  MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
  masm.set_emit_debug_code(false);
  GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);

  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
  if (chunk == NULL) {
    V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
  }
  // Copy the generated entries into the executable chunk and flush the
  // instruction cache so stale instructions are not executed.
  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
  return chunk;
}


// Linear search of the isolate's deoptimizing-code list for the code
// object containing |addr|; returns NULL if no listed code contains it.
Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
  DeoptimizingCodeListNode* node =
      Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
  while (node != NULL) {
    if (node->code()->contains(addr)) return *node->code();
    node = node->next();
  }
  return NULL;
}


// Unlinks and deletes the list node holding |code|; aborts (UNREACHABLE
// below) if the code is not in the list.
void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
  ASSERT(data->deoptimizing_code_list_ != NULL);
  // Run through the code objects to find this one and remove it.
  DeoptimizingCodeListNode* prev = NULL;
  DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
  while (current != NULL) {
    if (*current->code() == code) {
      // Unlink from list. If prev is NULL we are looking at the first element.
      if (prev == NULL) {
        data->deoptimizing_code_list_ = current->next();
      } else {
        prev->set_next(current->next());
      }
      delete current;
      return;
    }
    // Move to next in list.
    prev = current;
    current = current->next();
  }
  // Deoptimizing code is removed through weak callback. Each object is expected
  // to be removed once and only once.
  UNREACHABLE();
}


FrameDescription::FrameDescription(uint32_t frame_size,
                                   JSFunction* function)
    : frame_size_(frame_size),
      function_(function),
      top_(kZapUint32),
      pc_(kZapUint32),
      fp_(kZapUint32) {
  // Zap all the registers.
  for (int r = 0; r < Register::kNumRegisters; r++) {
    SetRegister(r, kZapUint32);
  }

  // Zap all the slots.
  for (unsigned o = 0; o < frame_size; o += kPointerSize) {
    SetFrameSlot(o, kZapUint32);
  }
}


// Maps a translation slot index to a byte offset within this frame.
// Non-negative indices address locals/spills below the fixed frame part;
// negative indices address incoming parameters above it.
unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
                                                  int slot_index) {
  if (slot_index >= 0) {
    // Local or spill slots. Skip the fixed part of the frame
    // including all arguments.
    unsigned base =
        GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
    return base - ((slot_index + 1) * kPointerSize);
  } else {
    // Incoming parameter.
    unsigned base = GetFrameSize() -
        deoptimizer->ComputeIncomingArgumentSize(GetFunction());
    return base - ((slot_index + 1) * kPointerSize);
  }
}


int FrameDescription::ComputeParametersCount() {
  return function_->shared()->formal_parameter_count();
}


Object* FrameDescription::GetParameter(Deoptimizer* deoptimizer, int index) {
  ASSERT_EQ(Code::FUNCTION, kind_);
  ASSERT(index >= 0);
  ASSERT(index < ComputeParametersCount());
  // The slot indexes for incoming arguments are negative.
  unsigned offset = GetOffsetFromSlotIndex(deoptimizer,
                                           index - ComputeParametersCount());
  return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
}


// Number of expression-stack slots: everything in the frame beyond the
// fixed part (return address, fp, function, context, arguments).
unsigned FrameDescription::GetExpressionCount(Deoptimizer* deoptimizer) {
  ASSERT_EQ(Code::FUNCTION, kind_);
  unsigned size = GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
  return size / kPointerSize;
}


Object* FrameDescription::GetExpression(Deoptimizer* deoptimizer, int index) {
  ASSERT_EQ(Code::FUNCTION, kind_);
  unsigned offset = GetOffsetFromSlotIndex(deoptimizer, index);
  return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
}


// Appends a signed value using a zig-zag-style variable-length encoding:
// sign in the lowest bit, then base-128 groups with a continuation bit.
// Decoded by TranslationIterator::Next below.
void TranslationBuffer::Add(int32_t value) {
  // Encode the sign bit in the least significant bit.
  // NOTE(review): -value overflows for kMinInt (signed-overflow UB);
  // callers presumably never emit kMinInt here -- confirm.
  bool is_negative = (value < 0);
  uint32_t bits = ((is_negative ? -value : value) << 1) |
      static_cast<int32_t>(is_negative);
  // Encode the individual bytes using the least significant bit of
  // each byte to indicate whether or not more bytes follow.
  do {
    uint32_t next = bits >> 7;
    contents_.Add(((bits << 1) & 0xFF) | (next != 0));
    bits = next;
  } while (bits != 0);
}


// Decodes one value written by TranslationBuffer::Add.
int32_t TranslationIterator::Next() {
  // Run through the bytes until we reach one with a least significant
  // bit of zero (marks the end).
  uint32_t bits = 0;
  for (int i = 0; true; i += 7) {
    ASSERT(HasNext());
    uint8_t next = buffer_->get(index_++);
    bits |= (next >> 1) << i;
    if ((next & 1) == 0) break;
  }
  // The bits encode the sign in the least significant bit.
  bool is_negative = (bits & 1) == 1;
  int32_t result = bits >> 1;
  return is_negative ? -result : result;
}


// Copies the accumulated translation bytes into a tenured ByteArray.
Handle<ByteArray> TranslationBuffer::CreateByteArray() {
  int length = contents_.length();
  Handle<ByteArray> result =
      Isolate::Current()->factory()->NewByteArray(length, TENURED);
  memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
  return result;
}


// The Translation emitters below each write one opcode followed by its
// operands; operand counts must match NumberOfOperandsFor.
void Translation::BeginFrame(int node_id, int literal_id, unsigned height) {
  buffer_->Add(FRAME);
  buffer_->Add(node_id);
  buffer_->Add(literal_id);
  buffer_->Add(height);
}


void Translation::StoreRegister(Register reg) {
  buffer_->Add(REGISTER);
  buffer_->Add(reg.code());
}


void Translation::StoreInt32Register(Register reg) {
  buffer_->Add(INT32_REGISTER);
  buffer_->Add(reg.code());
}


void Translation::StoreDoubleRegister(DoubleRegister reg) {
  buffer_->Add(DOUBLE_REGISTER);
  buffer_->Add(DoubleRegister::ToAllocationIndex(reg));
}


void Translation::StoreStackSlot(int index) {
  buffer_->Add(STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreInt32StackSlot(int index) {
  buffer_->Add(INT32_STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreDoubleStackSlot(int index) {
  buffer_->Add(DOUBLE_STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreLiteral(int literal_id) {
  buffer_->Add(LITERAL);
  buffer_->Add(literal_id);
}


void Translation::StoreArgumentsObject() {
  buffer_->Add(ARGUMENTS_OBJECT);
}


void Translation::MarkDuplicate() {
  buffer_->Add(DUPLICATE);
}


// Number of operand values following each opcode in the translation
// stream; used by readers to skip commands they do not care about.
int Translation::NumberOfOperandsFor(Opcode opcode) {
  switch (opcode) {
    case ARGUMENTS_OBJECT:
    case DUPLICATE:
      return 0;
    case BEGIN:
    case REGISTER:
    case INT32_REGISTER:
    case DOUBLE_REGISTER:
    case STACK_SLOT:
    case INT32_STACK_SLOT:
    case DOUBLE_STACK_SLOT:
    case LITERAL:
      return 1;
    case FRAME:
      return 3;
  }
  UNREACHABLE();
  return -1;
}


#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)

// Human-readable opcode names for printing/disassembly output.
const char* Translation::StringFor(Opcode opcode) {
  switch (opcode) {
    case BEGIN:
      return "BEGIN";
    case FRAME:
      return "FRAME";
    case REGISTER:
      return "REGISTER";
    case INT32_REGISTER:
      return "INT32_REGISTER";
    case DOUBLE_REGISTER:
      return "DOUBLE_REGISTER";
    case STACK_SLOT:
      return "STACK_SLOT";
    case INT32_STACK_SLOT:
      return "INT32_STACK_SLOT";
    case DOUBLE_STACK_SLOT:
      return "DOUBLE_STACK_SLOT";
    case LITERAL:
      return "LITERAL";
    case ARGUMENTS_OBJECT:
      return "ARGUMENTS_OBJECT";
    case DUPLICATE:
      return "DUPLICATE";
  }
  UNREACHABLE();
  return "";
}

#endif


DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
  GlobalHandles* global_handles = Isolate::Current()->global_handles();
  // Globalize the code object and make it weak.
  code_ = Handle<Code>::cast(global_handles->Create(code));
  global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
                           this,
                           Deoptimizer::HandleWeakDeoptimizedCode);
}


DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
  // Release the global handle created in the constructor.
  GlobalHandles* global_handles = Isolate::Current()->global_handles();
  global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
}


// We can't intermix stack decoding and allocations because
// deoptimization infrastructure is not GC safe.
// Thus we build a temporary structure in malloced space.
// Decodes the next translation command and returns a SlotRef describing
// where the corresponding argument value lives in the physical frame.
SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
                                            DeoptimizationInputData* data,
                                            JavaScriptFrame* frame) {
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());

  switch (opcode) {
    case Translation::BEGIN:
    case Translation::FRAME:
      // Peeled off before getting here.
      break;

    case Translation::ARGUMENTS_OBJECT:
      // This can be only emitted for local slots not for argument slots.
      break;

    case Translation::REGISTER:
    case Translation::INT32_REGISTER:
    case Translation::DOUBLE_REGISTER:
    case Translation::DUPLICATE:
      // We are at safepoint which corresponds to call.  All registers are
      // saved by caller so there would be no live registers at this
      // point. Thus these translation commands should not be used.
      break;

    case Translation::STACK_SLOT: {
      int slot_index = iterator->Next();
      Address slot_addr = SlotAddress(frame, slot_index);
      return SlotRef(slot_addr, SlotRef::TAGGED);
    }

    case Translation::INT32_STACK_SLOT: {
      int slot_index = iterator->Next();
      Address slot_addr = SlotAddress(frame, slot_index);
      return SlotRef(slot_addr, SlotRef::INT32);
    }

    case Translation::DOUBLE_STACK_SLOT: {
      int slot_index = iterator->Next();
      Address slot_addr = SlotAddress(frame, slot_index);
      return SlotRef(slot_addr, SlotRef::DOUBLE);
    }

    case Translation::LITERAL: {
      int literal_index = iterator->Next();
      return SlotRef(data->LiteralArray()->get(literal_index));
    }
  }

  // The break cases above represent commands that should never describe
  // an argument slot.
  UNREACHABLE();
  return SlotRef();
}


// Fills |args_slots| with SlotRefs for the arguments of the inlined frame
// at |inlined_frame_index| by walking the frame's translation data.
void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
                                             int inlined_frame_index,
                                             Vector<SlotRef>* args_slots) {
  // Stack decoding must not be intermixed with allocation (see comment
  // above ComputeSlotForNextArgument).
  AssertNoAllocation no_gc;
  int deopt_index = AstNode::kNoNumber;
  DeoptimizationInputData* data =
      static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
  TranslationIterator it(data->TranslationByteArray(),
                         data->TranslationIndex(deopt_index)->value());
  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
  ASSERT(opcode == Translation::BEGIN);
  int frame_count = it.Next();
  USE(frame_count);
  ASSERT(frame_count > inlined_frame_index);
  int frames_to_skip = inlined_frame_index;
  while (true) {
    opcode = static_cast<Translation::Opcode>(it.Next());
    // Skip over operands to advance to the next opcode.
    it.Skip(Translation::NumberOfOperandsFor(opcode));
    if (opcode == Translation::FRAME) {
      if (frames_to_skip == 0) {
        // We reached the frame corresponding to the inlined function
        // in question.  Process the translation commands for the
        // arguments.
        //
        // Skip the translation command for the receiver.
        it.Skip(Translation::NumberOfOperandsFor(
            static_cast<Translation::Opcode>(it.Next())));
        // Compute slots for arguments.
        for (int i = 0; i < args_slots->length(); ++i) {
          (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
        }
        return;
      }
      frames_to_skip--;
    }
  }

  UNREACHABLE();
}

#ifdef ENABLE_DEBUGGER_SUPPORT

// Snapshots the function, parameters, and expression stack of the given
// output frame into heap arrays that outlive the deoptimizer, so the
// debugger can inspect the deoptimized frame later.
DeoptimizedFrameInfo::DeoptimizedFrameInfo(
    Deoptimizer* deoptimizer, int frame_index) {
  FrameDescription* output_frame = deoptimizer->output_[frame_index];
  SetFunction(output_frame->GetFunction());
  expression_count_ = output_frame->GetExpressionCount(deoptimizer);
  parameters_count_ = output_frame->ComputeParametersCount();
  parameters_ = new Object*[parameters_count_];
  for (int i = 0; i < parameters_count_; i++) {
    SetParameter(i, output_frame->GetParameter(deoptimizer, i));
  }
  expression_stack_ = new Object*[expression_count_];
  for (int i = 0; i < expression_count_; i++) {
    SetExpression(i, output_frame->GetExpression(deoptimizer, i));
  }
}


DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
  delete[] expression_stack_;
  delete[] parameters_;
}

// Visits all object pointers held by this info so the GC can update them
// (registered via DeoptimizerData::Iterate).
void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
  v->VisitPointer(BitCast<Object**>(&function_));
  v->VisitPointers(parameters_, parameters_ + parameters_count_);
  v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
}

#endif  // ENABLE_DEBUGGER_SUPPORT

} }  // namespace v8::internal