lithium-codegen-ia32.cc revision 69a99ed0b2b2ef69d393c371b03db3a98aaf880e
1// Copyright 2011 the V8 project authors. All rights reserved. 2// Redistribution and use in source and binary forms, with or without 3// modification, are permitted provided that the following conditions are 4// met: 5// 6// * Redistributions of source code must retain the above copyright 7// notice, this list of conditions and the following disclaimer. 8// * Redistributions in binary form must reproduce the above 9// copyright notice, this list of conditions and the following 10// disclaimer in the documentation and/or other materials provided 11// with the distribution. 12// * Neither the name of Google Inc. nor the names of its 13// contributors may be used to endorse or promote products derived 14// from this software without specific prior written permission. 15// 16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 28#include "v8.h" 29 30#if defined(V8_TARGET_ARCH_IA32) 31 32#include "ia32/lithium-codegen-ia32.h" 33#include "code-stubs.h" 34#include "deoptimizer.h" 35#include "stub-cache.h" 36 37namespace v8 { 38namespace internal { 39 40 41// When invoking builtins, we need to record the safepoint in the middle of 42// the invoke instruction sequence generated by the macro assembler. 43class SafepointGenerator : public CallWrapper { 44 public: 45 SafepointGenerator(LCodeGen* codegen, 46 LPointerMap* pointers, 47 int deoptimization_index) 48 : codegen_(codegen), 49 pointers_(pointers), 50 deoptimization_index_(deoptimization_index) {} 51 virtual ~SafepointGenerator() { } 52 53 virtual void BeforeCall(int call_size) const {} 54 55 virtual void AfterCall() const { 56 codegen_->RecordSafepoint(pointers_, deoptimization_index_); 57 } 58 59 private: 60 LCodeGen* codegen_; 61 LPointerMap* pointers_; 62 int deoptimization_index_; 63}; 64 65 66#define __ masm()-> 67 68bool LCodeGen::GenerateCode() { 69 HPhase phase("Code generation", chunk()); 70 ASSERT(is_unused()); 71 status_ = GENERATING; 72 CpuFeatures::Scope scope(SSE2); 73 return GeneratePrologue() && 74 GenerateBody() && 75 GenerateDeferredCode() && 76 GenerateSafepointTable(); 77} 78 79 80void LCodeGen::FinishCode(Handle<Code> code) { 81 ASSERT(is_done()); 82 code->set_stack_slots(GetStackSlotCount()); 83 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); 84 PopulateDeoptimizationData(code); 85 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); 86} 87 88 89void LCodeGen::Abort(const char* format, ...) 
{ 90 if (FLAG_trace_bailout) { 91 SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString()); 92 PrintF("Aborting LCodeGen in @\"%s\": ", *name); 93 va_list arguments; 94 va_start(arguments, format); 95 OS::VPrint(format, arguments); 96 va_end(arguments); 97 PrintF("\n"); 98 } 99 status_ = ABORTED; 100} 101 102 103void LCodeGen::Comment(const char* format, ...) { 104 if (!FLAG_code_comments) return; 105 char buffer[4 * KB]; 106 StringBuilder builder(buffer, ARRAY_SIZE(buffer)); 107 va_list arguments; 108 va_start(arguments, format); 109 builder.AddFormattedList(format, arguments); 110 va_end(arguments); 111 112 // Copy the string before recording it in the assembler to avoid 113 // issues when the stack allocated buffer goes out of scope. 114 size_t length = builder.position(); 115 Vector<char> copy = Vector<char>::New(length + 1); 116 memcpy(copy.start(), builder.Finalize(), copy.length()); 117 masm()->RecordComment(copy.start()); 118} 119 120 121bool LCodeGen::GeneratePrologue() { 122 ASSERT(is_generating()); 123 124#ifdef DEBUG 125 if (strlen(FLAG_stop_at) > 0 && 126 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { 127 __ int3(); 128 } 129#endif 130 131 // Strict mode functions and builtins need to replace the receiver 132 // with undefined when called as functions (without an explicit 133 // receiver object). ecx is zero for method calls and non-zero for 134 // function calls. 135 if (info_->is_strict_mode() || info_->is_native()) { 136 Label ok; 137 __ test(ecx, Operand(ecx)); 138 __ j(zero, &ok, Label::kNear); 139 // +1 for return address. 140 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; 141 __ mov(Operand(esp, receiver_offset), 142 Immediate(isolate()->factory()->undefined_value())); 143 __ bind(&ok); 144 } 145 146 __ push(ebp); // Caller's frame pointer. 147 __ mov(ebp, esp); 148 __ push(esi); // Callee's context. 149 __ push(edi); // Callee's JS function. 150 151 // Reserve space for the stack slots needed by the code. 152 int slots = GetStackSlotCount(); 153 if (slots > 0) { 154 if (FLAG_debug_code) { 155 __ mov(Operand(eax), Immediate(slots)); 156 Label loop; 157 __ bind(&loop); 158 __ push(Immediate(kSlotsZapValue)); 159 __ dec(eax); 160 __ j(not_zero, &loop); 161 } else { 162 __ sub(Operand(esp), Immediate(slots * kPointerSize)); 163#ifdef _MSC_VER 164 // On windows, you may not access the stack more than one page below 165 // the most recently mapped page. To make the allocated area randomly 166 // accessible, we write to each page in turn (the value is irrelevant). 167 const int kPageSize = 4 * KB; 168 for (int offset = slots * kPointerSize - kPageSize; 169 offset > 0; 170 offset -= kPageSize) { 171 __ mov(Operand(esp, offset), eax); 172 } 173#endif 174 } 175 } 176 177 // Possibly allocate a local context. 178 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 179 if (heap_slots > 0) { 180 Comment(";;; Allocate local context"); 181 // Argument to NewContext is the function, which is still in edi. 182 __ push(edi); 183 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 184 FastNewContextStub stub(heap_slots); 185 __ CallStub(&stub); 186 } else { 187 __ CallRuntime(Runtime::kNewFunctionContext, 1); 188 } 189 RecordSafepoint(Safepoint::kNoDeoptimizationIndex); 190 // Context is returned in both eax and esi. It replaces the context 191 // passed to us. It's saved in the stack and kept live in esi. 
192 __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi); 193 194 // Copy parameters into context if necessary. 195 int num_parameters = scope()->num_parameters(); 196 for (int i = 0; i < num_parameters; i++) { 197 Slot* slot = scope()->parameter(i)->AsSlot(); 198 if (slot != NULL && slot->type() == Slot::CONTEXT) { 199 int parameter_offset = StandardFrameConstants::kCallerSPOffset + 200 (num_parameters - 1 - i) * kPointerSize; 201 // Load parameter from stack. 202 __ mov(eax, Operand(ebp, parameter_offset)); 203 // Store it in the context. 204 int context_offset = Context::SlotOffset(slot->index()); 205 __ mov(Operand(esi, context_offset), eax); 206 // Update the write barrier. This clobbers all involved 207 // registers, so we have to use a third register to avoid 208 // clobbering esi. 209 __ mov(ecx, esi); 210 __ RecordWrite(ecx, context_offset, eax, ebx); 211 } 212 } 213 Comment(";;; End allocate local context"); 214 } 215 216 // Trace the call. 217 if (FLAG_trace) { 218 // We have not executed any compiled code yet, so esi still holds the 219 // incoming context. 220 __ CallRuntime(Runtime::kTraceEnter, 0); 221 } 222 return !is_aborted(); 223} 224 225 226bool LCodeGen::GenerateBody() { 227 ASSERT(is_generating()); 228 bool emit_instructions = true; 229 for (current_instruction_ = 0; 230 !is_aborted() && current_instruction_ < instructions_->length(); 231 current_instruction_++) { 232 LInstruction* instr = instructions_->at(current_instruction_); 233 if (instr->IsLabel()) { 234 LLabel* label = LLabel::cast(instr); 235 emit_instructions = !label->HasReplacement(); 236 } 237 238 if (emit_instructions) { 239 Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); 240 instr->CompileToNative(this); 241 } 242 } 243 return !is_aborted(); 244} 245 246 247LInstruction* LCodeGen::GetNextInstruction() { 248 if (current_instruction_ < instructions_->length() - 1) { 249 return instructions_->at(current_instruction_ + 1); 250 } else { 251 return NULL; 252 } 253} 254 255 256bool LCodeGen::GenerateDeferredCode() { 257 ASSERT(is_generating()); 258 if (deferred_.length() > 0) { 259 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { 260 LDeferredCode* code = deferred_[i]; 261 __ bind(code->entry()); 262 code->Generate(); 263 __ jmp(code->exit()); 264 } 265 266 // Pad code to ensure that the last piece of deferred code have 267 // room for lazy bailout. 268 while ((masm()->pc_offset() - LastSafepointEnd()) 269 < Deoptimizer::patch_size()) { 270 __ nop(); 271 } 272 } 273 274 // Deferred code is the last part of the instruction sequence. Mark 275 // the generated code as done unless we bailed out. 
276 if (!is_aborted()) status_ = DONE; 277 return !is_aborted(); 278} 279 280 281bool LCodeGen::GenerateSafepointTable() { 282 ASSERT(is_done()); 283 safepoints_.Emit(masm(), GetStackSlotCount()); 284 return !is_aborted(); 285} 286 287 288Register LCodeGen::ToRegister(int index) const { 289 return Register::FromAllocationIndex(index); 290} 291 292 293XMMRegister LCodeGen::ToDoubleRegister(int index) const { 294 return XMMRegister::FromAllocationIndex(index); 295} 296 297 298Register LCodeGen::ToRegister(LOperand* op) const { 299 ASSERT(op->IsRegister()); 300 return ToRegister(op->index()); 301} 302 303 304XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { 305 ASSERT(op->IsDoubleRegister()); 306 return ToDoubleRegister(op->index()); 307} 308 309 310int LCodeGen::ToInteger32(LConstantOperand* op) const { 311 Handle<Object> value = chunk_->LookupLiteral(op); 312 ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32()); 313 ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) == 314 value->Number()); 315 return static_cast<int32_t>(value->Number()); 316} 317 318 319Immediate LCodeGen::ToImmediate(LOperand* op) { 320 LConstantOperand* const_op = LConstantOperand::cast(op); 321 Handle<Object> literal = chunk_->LookupLiteral(const_op); 322 Representation r = chunk_->LookupLiteralRepresentation(const_op); 323 if (r.IsInteger32()) { 324 ASSERT(literal->IsNumber()); 325 return Immediate(static_cast<int32_t>(literal->Number())); 326 } else if (r.IsDouble()) { 327 Abort("unsupported double immediate"); 328 } 329 ASSERT(r.IsTagged()); 330 return Immediate(literal); 331} 332 333 334Operand LCodeGen::ToOperand(LOperand* op) const { 335 if (op->IsRegister()) return Operand(ToRegister(op)); 336 if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op)); 337 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); 338 int index = op->index(); 339 if (index >= 0) { 340 // Local or spill slot. Skip the frame pointer, function, and 341 // context in the fixed part of the frame. 342 return Operand(ebp, -(index + 3) * kPointerSize); 343 } else { 344 // Incoming parameter. Skip the return address. 345 return Operand(ebp, -(index - 1) * kPointerSize); 346 } 347} 348 349 350Operand LCodeGen::HighOperand(LOperand* op) { 351 ASSERT(op->IsDoubleStackSlot()); 352 int index = op->index(); 353 int offset = (index >= 0) ? index + 3 : index - 1; 354 return Operand(ebp, -offset * kPointerSize); 355} 356 357 358void LCodeGen::WriteTranslation(LEnvironment* environment, 359 Translation* translation) { 360 if (environment == NULL) return; 361 362 // The translation includes one command per value in the environment. 363 int translation_size = environment->values()->length(); 364 // The output frame height does not include the parameters. 365 int height = translation_size - environment->parameter_count(); 366 367 WriteTranslation(environment->outer(), translation); 368 int closure_id = DefineDeoptimizationLiteral(environment->closure()); 369 translation->BeginFrame(environment->ast_id(), closure_id, height); 370 for (int i = 0; i < translation_size; ++i) { 371 LOperand* value = environment->values()->at(i); 372 // spilled_registers_ and spilled_double_registers_ are either 373 // both NULL or both set. 
374 if (environment->spilled_registers() != NULL && value != NULL) { 375 if (value->IsRegister() && 376 environment->spilled_registers()[value->index()] != NULL) { 377 translation->MarkDuplicate(); 378 AddToTranslation(translation, 379 environment->spilled_registers()[value->index()], 380 environment->HasTaggedValueAt(i)); 381 } else if ( 382 value->IsDoubleRegister() && 383 environment->spilled_double_registers()[value->index()] != NULL) { 384 translation->MarkDuplicate(); 385 AddToTranslation( 386 translation, 387 environment->spilled_double_registers()[value->index()], 388 false); 389 } 390 } 391 392 AddToTranslation(translation, value, environment->HasTaggedValueAt(i)); 393 } 394} 395 396 397void LCodeGen::AddToTranslation(Translation* translation, 398 LOperand* op, 399 bool is_tagged) { 400 if (op == NULL) { 401 // TODO(twuerthinger): Introduce marker operands to indicate that this value 402 // is not present and must be reconstructed from the deoptimizer. Currently 403 // this is only used for the arguments object. 404 translation->StoreArgumentsObject(); 405 } else if (op->IsStackSlot()) { 406 if (is_tagged) { 407 translation->StoreStackSlot(op->index()); 408 } else { 409 translation->StoreInt32StackSlot(op->index()); 410 } 411 } else if (op->IsDoubleStackSlot()) { 412 translation->StoreDoubleStackSlot(op->index()); 413 } else if (op->IsArgument()) { 414 ASSERT(is_tagged); 415 int src_index = GetStackSlotCount() + op->index(); 416 translation->StoreStackSlot(src_index); 417 } else if (op->IsRegister()) { 418 Register reg = ToRegister(op); 419 if (is_tagged) { 420 translation->StoreRegister(reg); 421 } else { 422 translation->StoreInt32Register(reg); 423 } 424 } else if (op->IsDoubleRegister()) { 425 XMMRegister reg = ToDoubleRegister(op); 426 translation->StoreDoubleRegister(reg); 427 } else if (op->IsConstantOperand()) { 428 Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op)); 429 int src_index = DefineDeoptimizationLiteral(literal); 430 translation->StoreLiteral(src_index); 431 } else { 432 UNREACHABLE(); 433 } 434} 435 436 437void LCodeGen::CallCodeGeneric(Handle<Code> code, 438 RelocInfo::Mode mode, 439 LInstruction* instr, 440 SafepointMode safepoint_mode) { 441 ASSERT(instr != NULL); 442 LPointerMap* pointers = instr->pointer_map(); 443 RecordPosition(pointers->position()); 444 445 __ call(code, mode); 446 447 RegisterLazyDeoptimization(instr, safepoint_mode); 448 449 // Signal that we don't inline smi code before these stubs in the 450 // optimizing code generator. 
451 if (code->kind() == Code::BINARY_OP_IC || 452 code->kind() == Code::COMPARE_IC) { 453 __ nop(); 454 } 455} 456 457 458void LCodeGen::CallCode(Handle<Code> code, 459 RelocInfo::Mode mode, 460 LInstruction* instr) { 461 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); 462} 463 464 465void LCodeGen::CallRuntime(const Runtime::Function* fun, 466 int argc, 467 LInstruction* instr) { 468 ASSERT(instr != NULL); 469 ASSERT(instr->HasPointerMap()); 470 LPointerMap* pointers = instr->pointer_map(); 471 RecordPosition(pointers->position()); 472 473 __ CallRuntime(fun, argc); 474 475 RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT); 476} 477 478 479void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, 480 int argc, 481 LInstruction* instr, 482 LOperand* context) { 483 ASSERT(context->IsRegister() || context->IsStackSlot()); 484 if (context->IsRegister()) { 485 if (!ToRegister(context).is(esi)) { 486 __ mov(esi, ToRegister(context)); 487 } 488 } else { 489 // Context is stack slot. 490 __ mov(esi, ToOperand(context)); 491 } 492 493 __ CallRuntimeSaveDoubles(id); 494 RecordSafepointWithRegisters( 495 instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex); 496} 497 498 499void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr, 500 SafepointMode safepoint_mode) { 501 // Create the environment to bailout to. If the call has side effects 502 // execution has to continue after the call otherwise execution can continue 503 // from a previous bailout point repeating the call. 504 LEnvironment* deoptimization_environment; 505 if (instr->HasDeoptimizationEnvironment()) { 506 deoptimization_environment = instr->deoptimization_environment(); 507 } else { 508 deoptimization_environment = instr->environment(); 509 } 510 511 RegisterEnvironmentForDeoptimization(deoptimization_environment); 512 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { 513 RecordSafepoint(instr->pointer_map(), 514 deoptimization_environment->deoptimization_index()); 515 } else { 516 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 517 RecordSafepointWithRegisters( 518 instr->pointer_map(), 519 0, 520 deoptimization_environment->deoptimization_index()); 521 } 522} 523 524 525void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) { 526 if (!environment->HasBeenRegistered()) { 527 // Physical stack frame layout: 528 // -x ............. -4 0 ..................................... y 529 // [incoming arguments] [spill slots] [pushed outgoing arguments] 530 531 // Layout of the environment: 532 // 0 ..................................................... size-1 533 // [parameters] [locals] [expression stack including arguments] 534 535 // Layout of the translation: 536 // 0 ........................................................ 
size - 1 + 4 537 // [expression stack including arguments] [locals] [4 words] [parameters] 538 // |>------------ translation_size ------------<| 539 540 int frame_count = 0; 541 for (LEnvironment* e = environment; e != NULL; e = e->outer()) { 542 ++frame_count; 543 } 544 Translation translation(&translations_, frame_count); 545 WriteTranslation(environment, &translation); 546 int deoptimization_index = deoptimizations_.length(); 547 environment->Register(deoptimization_index, translation.index()); 548 deoptimizations_.Add(environment); 549 } 550} 551 552 553void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { 554 RegisterEnvironmentForDeoptimization(environment); 555 ASSERT(environment->HasBeenRegistered()); 556 int id = environment->deoptimization_index(); 557 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); 558 ASSERT(entry != NULL); 559 if (entry == NULL) { 560 Abort("bailout was not prepared"); 561 return; 562 } 563 564 if (FLAG_deopt_every_n_times != 0) { 565 Handle<SharedFunctionInfo> shared(info_->shared_info()); 566 Label no_deopt; 567 __ pushfd(); 568 __ push(eax); 569 __ push(ebx); 570 __ mov(ebx, shared); 571 __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset)); 572 __ sub(Operand(eax), Immediate(Smi::FromInt(1))); 573 __ j(not_zero, &no_deopt, Label::kNear); 574 if (FLAG_trap_on_deopt) __ int3(); 575 __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times))); 576 __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax); 577 __ pop(ebx); 578 __ pop(eax); 579 __ popfd(); 580 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); 581 582 __ bind(&no_deopt); 583 __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax); 584 __ pop(ebx); 585 __ pop(eax); 586 __ popfd(); 587 } 588 589 if (cc == no_condition) { 590 if (FLAG_trap_on_deopt) __ int3(); 591 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); 592 } else { 593 if (FLAG_trap_on_deopt) { 594 Label done; 595 __ j(NegateCondition(cc), &done, Label::kNear); 596 __ int3(); 597 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); 598 __ bind(&done); 599 } else { 600 __ j(cc, entry, RelocInfo::RUNTIME_ENTRY); 601 } 602 } 603} 604 605 606void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { 607 int length = deoptimizations_.length(); 608 if (length == 0) return; 609 ASSERT(FLAG_deopt); 610 Handle<DeoptimizationInputData> data = 611 factory()->NewDeoptimizationInputData(length, TENURED); 612 613 Handle<ByteArray> translations = translations_.CreateByteArray(); 614 data->SetTranslationByteArray(*translations); 615 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); 616 617 Handle<FixedArray> literals = 618 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); 619 for (int i = 0; i < deoptimization_literals_.length(); i++) { 620 literals->set(i, *deoptimization_literals_[i]); 621 } 622 data->SetLiteralArray(*literals); 623 624 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id())); 625 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); 626 627 // Populate the deoptimization entries. 
628 for (int i = 0; i < length; i++) { 629 LEnvironment* env = deoptimizations_[i]; 630 data->SetAstId(i, Smi::FromInt(env->ast_id())); 631 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index())); 632 data->SetArgumentsStackHeight(i, 633 Smi::FromInt(env->arguments_stack_height())); 634 } 635 code->set_deoptimization_data(*data); 636} 637 638 639int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) { 640 int result = deoptimization_literals_.length(); 641 for (int i = 0; i < deoptimization_literals_.length(); ++i) { 642 if (deoptimization_literals_[i].is_identical_to(literal)) return i; 643 } 644 deoptimization_literals_.Add(literal); 645 return result; 646} 647 648 649void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { 650 ASSERT(deoptimization_literals_.length() == 0); 651 652 const ZoneList<Handle<JSFunction> >* inlined_closures = 653 chunk()->inlined_closures(); 654 655 for (int i = 0, length = inlined_closures->length(); 656 i < length; 657 i++) { 658 DefineDeoptimizationLiteral(inlined_closures->at(i)); 659 } 660 661 inlined_function_count_ = deoptimization_literals_.length(); 662} 663 664 665void LCodeGen::RecordSafepoint( 666 LPointerMap* pointers, 667 Safepoint::Kind kind, 668 int arguments, 669 int deoptimization_index) { 670 ASSERT(kind == expected_safepoint_kind_); 671 const ZoneList<LOperand*>* operands = pointers->operands(); 672 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), 673 kind, arguments, deoptimization_index); 674 for (int i = 0; i < operands->length(); i++) { 675 LOperand* pointer = operands->at(i); 676 if (pointer->IsStackSlot()) { 677 safepoint.DefinePointerSlot(pointer->index()); 678 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { 679 safepoint.DefinePointerRegister(ToRegister(pointer)); 680 } 681 } 682} 683 684 685void LCodeGen::RecordSafepoint(LPointerMap* pointers, 686 int deoptimization_index) { 687 RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index); 688} 689 690 691void LCodeGen::RecordSafepoint(int deoptimization_index) { 692 LPointerMap empty_pointers(RelocInfo::kNoPosition); 693 RecordSafepoint(&empty_pointers, deoptimization_index); 694} 695 696 697void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, 698 int arguments, 699 int deoptimization_index) { 700 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, 701 deoptimization_index); 702} 703 704 705void LCodeGen::RecordPosition(int position) { 706 if (position == RelocInfo::kNoPosition) return; 707 masm()->positions_recorder()->RecordPosition(position); 708} 709 710 711void LCodeGen::DoLabel(LLabel* label) { 712 if (label->is_loop_header()) { 713 Comment(";;; B%d - LOOP entry", label->block_id()); 714 } else { 715 Comment(";;; B%d", label->block_id()); 716 } 717 __ bind(label->label()); 718 current_block_ = label->block_id(); 719 DoGap(label); 720} 721 722 723void LCodeGen::DoParallelMove(LParallelMove* move) { 724 resolver_.Resolve(move); 725} 726 727 728void LCodeGen::DoGap(LGap* gap) { 729 for (int i = LGap::FIRST_INNER_POSITION; 730 i <= LGap::LAST_INNER_POSITION; 731 i++) { 732 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); 733 LParallelMove* move = gap->GetParallelMove(inner_pos); 734 if (move != NULL) DoParallelMove(move); 735 } 736 737 LInstruction* next = GetNextInstruction(); 738 if (next != NULL && next->IsLazyBailout()) { 739 int pc = masm()->pc_offset(); 740 safepoints_.SetPcAfterGap(pc); 741 } 742} 743 744 745void 
LCodeGen::DoInstructionGap(LInstructionGap* instr) { 746 DoGap(instr); 747} 748 749 750void LCodeGen::DoParameter(LParameter* instr) { 751 // Nothing to do. 752} 753 754 755void LCodeGen::DoCallStub(LCallStub* instr) { 756 ASSERT(ToRegister(instr->context()).is(esi)); 757 ASSERT(ToRegister(instr->result()).is(eax)); 758 switch (instr->hydrogen()->major_key()) { 759 case CodeStub::RegExpConstructResult: { 760 RegExpConstructResultStub stub; 761 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 762 break; 763 } 764 case CodeStub::RegExpExec: { 765 RegExpExecStub stub; 766 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 767 break; 768 } 769 case CodeStub::SubString: { 770 SubStringStub stub; 771 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 772 break; 773 } 774 case CodeStub::NumberToString: { 775 NumberToStringStub stub; 776 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 777 break; 778 } 779 case CodeStub::StringAdd: { 780 StringAddStub stub(NO_STRING_ADD_FLAGS); 781 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 782 break; 783 } 784 case CodeStub::StringCompare: { 785 StringCompareStub stub; 786 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 787 break; 788 } 789 case CodeStub::TranscendentalCache: { 790 TranscendentalCacheStub stub(instr->transcendental_type(), 791 TranscendentalCacheStub::TAGGED); 792 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 793 break; 794 } 795 default: 796 UNREACHABLE(); 797 } 798} 799 800 801void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { 802 // Nothing to do. 803} 804 805 806void LCodeGen::DoModI(LModI* instr) { 807 if (instr->hydrogen()->HasPowerOf2Divisor()) { 808 Register dividend = ToRegister(instr->InputAt(0)); 809 810 int32_t divisor = 811 HConstant::cast(instr->hydrogen()->right())->Integer32Value(); 812 813 if (divisor < 0) divisor = -divisor; 814 815 Label positive_dividend, done; 816 __ test(dividend, Operand(dividend)); 817 __ j(not_sign, &positive_dividend, Label::kNear); 818 __ neg(dividend); 819 __ and_(dividend, divisor - 1); 820 __ neg(dividend); 821 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 822 __ j(not_zero, &done, Label::kNear); 823 DeoptimizeIf(no_condition, instr->environment()); 824 } else { 825 __ jmp(&done, Label::kNear); 826 } 827 __ bind(&positive_dividend); 828 __ and_(dividend, divisor - 1); 829 __ bind(&done); 830 } else { 831 Label done, remainder_eq_dividend, slow, do_subtraction, both_positive; 832 Register left_reg = ToRegister(instr->InputAt(0)); 833 Register right_reg = ToRegister(instr->InputAt(1)); 834 Register result_reg = ToRegister(instr->result()); 835 836 ASSERT(left_reg.is(eax)); 837 ASSERT(result_reg.is(edx)); 838 ASSERT(!right_reg.is(eax)); 839 ASSERT(!right_reg.is(edx)); 840 841 // Check for x % 0. 842 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { 843 __ test(right_reg, Operand(right_reg)); 844 DeoptimizeIf(zero, instr->environment()); 845 } 846 847 __ test(left_reg, Operand(left_reg)); 848 __ j(zero, &remainder_eq_dividend, Label::kNear); 849 __ j(sign, &slow, Label::kNear); 850 851 __ test(right_reg, Operand(right_reg)); 852 __ j(not_sign, &both_positive, Label::kNear); 853 // The sign of the divisor doesn't matter. 854 __ neg(right_reg); 855 856 __ bind(&both_positive); 857 // If the dividend is smaller than the nonnegative 858 // divisor, the dividend is the result. 
859 __ cmp(left_reg, Operand(right_reg)); 860 __ j(less, &remainder_eq_dividend, Label::kNear); 861 862 // Check if the divisor is a PowerOfTwo integer. 863 Register scratch = ToRegister(instr->TempAt(0)); 864 __ mov(scratch, right_reg); 865 __ sub(Operand(scratch), Immediate(1)); 866 __ test(scratch, Operand(right_reg)); 867 __ j(not_zero, &do_subtraction, Label::kNear); 868 __ and_(left_reg, Operand(scratch)); 869 __ jmp(&remainder_eq_dividend, Label::kNear); 870 871 __ bind(&do_subtraction); 872 const int kUnfolds = 3; 873 // Try a few subtractions of the dividend. 874 __ mov(scratch, left_reg); 875 for (int i = 0; i < kUnfolds; i++) { 876 // Reduce the dividend by the divisor. 877 __ sub(left_reg, Operand(right_reg)); 878 // Check if the dividend is less than the divisor. 879 __ cmp(left_reg, Operand(right_reg)); 880 __ j(less, &remainder_eq_dividend, Label::kNear); 881 } 882 __ mov(left_reg, scratch); 883 884 // Slow case, using idiv instruction. 885 __ bind(&slow); 886 // Sign extend to edx. 887 __ cdq(); 888 889 // Check for (0 % -x) that will produce negative zero. 890 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 891 Label positive_left; 892 Label done; 893 __ test(left_reg, Operand(left_reg)); 894 __ j(not_sign, &positive_left, Label::kNear); 895 __ idiv(right_reg); 896 897 // Test the remainder for 0, because then the result would be -0. 898 __ test(result_reg, Operand(result_reg)); 899 __ j(not_zero, &done, Label::kNear); 900 901 DeoptimizeIf(no_condition, instr->environment()); 902 __ bind(&positive_left); 903 __ idiv(right_reg); 904 __ bind(&done); 905 } else { 906 __ idiv(right_reg); 907 } 908 __ jmp(&done, Label::kNear); 909 910 __ bind(&remainder_eq_dividend); 911 __ mov(result_reg, left_reg); 912 913 __ bind(&done); 914 } 915} 916 917 918void LCodeGen::DoDivI(LDivI* instr) { 919 LOperand* right = instr->InputAt(1); 920 ASSERT(ToRegister(instr->result()).is(eax)); 921 ASSERT(ToRegister(instr->InputAt(0)).is(eax)); 922 ASSERT(!ToRegister(instr->InputAt(1)).is(eax)); 923 ASSERT(!ToRegister(instr->InputAt(1)).is(edx)); 924 925 Register left_reg = eax; 926 927 // Check for x / 0. 928 Register right_reg = ToRegister(right); 929 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { 930 __ test(right_reg, ToOperand(right)); 931 DeoptimizeIf(zero, instr->environment()); 932 } 933 934 // Check for (0 / -x) that will produce negative zero. 935 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 936 Label left_not_zero; 937 __ test(left_reg, Operand(left_reg)); 938 __ j(not_zero, &left_not_zero, Label::kNear); 939 __ test(right_reg, ToOperand(right)); 940 DeoptimizeIf(sign, instr->environment()); 941 __ bind(&left_not_zero); 942 } 943 944 // Check for (-kMinInt / -1). 945 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 946 Label left_not_min_int; 947 __ cmp(left_reg, kMinInt); 948 __ j(not_zero, &left_not_min_int, Label::kNear); 949 __ cmp(right_reg, -1); 950 DeoptimizeIf(zero, instr->environment()); 951 __ bind(&left_not_min_int); 952 } 953 954 // Sign extend to edx. 955 __ cdq(); 956 __ idiv(right_reg); 957 958 // Deoptimize if remainder is not 0. 
959 __ test(edx, Operand(edx)); 960 DeoptimizeIf(not_zero, instr->environment()); 961} 962 963 964void LCodeGen::DoMulI(LMulI* instr) { 965 Register left = ToRegister(instr->InputAt(0)); 966 LOperand* right = instr->InputAt(1); 967 968 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 969 __ mov(ToRegister(instr->TempAt(0)), left); 970 } 971 972 if (right->IsConstantOperand()) { 973 // Try strength reductions on the multiplication. 974 // All replacement instructions are at most as long as the imul 975 // and have better latency. 976 int constant = ToInteger32(LConstantOperand::cast(right)); 977 if (constant == -1) { 978 __ neg(left); 979 } else if (constant == 0) { 980 __ xor_(left, Operand(left)); 981 } else if (constant == 2) { 982 __ add(left, Operand(left)); 983 } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 984 // If we know that the multiplication can't overflow, it's safe to 985 // use instructions that don't set the overflow flag for the 986 // multiplication. 987 switch (constant) { 988 case 1: 989 // Do nothing. 990 break; 991 case 3: 992 __ lea(left, Operand(left, left, times_2, 0)); 993 break; 994 case 4: 995 __ shl(left, 2); 996 break; 997 case 5: 998 __ lea(left, Operand(left, left, times_4, 0)); 999 break; 1000 case 8: 1001 __ shl(left, 3); 1002 break; 1003 case 9: 1004 __ lea(left, Operand(left, left, times_8, 0)); 1005 break; 1006 case 16: 1007 __ shl(left, 4); 1008 break; 1009 default: 1010 __ imul(left, left, constant); 1011 break; 1012 } 1013 } else { 1014 __ imul(left, left, constant); 1015 } 1016 } else { 1017 __ imul(left, ToOperand(right)); 1018 } 1019 1020 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1021 DeoptimizeIf(overflow, instr->environment()); 1022 } 1023 1024 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1025 // Bail out if the result is supposed to be negative zero. 1026 Label done; 1027 __ test(left, Operand(left)); 1028 __ j(not_zero, &done, Label::kNear); 1029 if (right->IsConstantOperand()) { 1030 if (ToInteger32(LConstantOperand::cast(right)) <= 0) { 1031 DeoptimizeIf(no_condition, instr->environment()); 1032 } 1033 } else { 1034 // Test the non-zero operand for negative sign. 
1035 __ or_(ToRegister(instr->TempAt(0)), ToOperand(right)); 1036 DeoptimizeIf(sign, instr->environment()); 1037 } 1038 __ bind(&done); 1039 } 1040} 1041 1042 1043void LCodeGen::DoBitI(LBitI* instr) { 1044 LOperand* left = instr->InputAt(0); 1045 LOperand* right = instr->InputAt(1); 1046 ASSERT(left->Equals(instr->result())); 1047 ASSERT(left->IsRegister()); 1048 1049 if (right->IsConstantOperand()) { 1050 int right_operand = ToInteger32(LConstantOperand::cast(right)); 1051 switch (instr->op()) { 1052 case Token::BIT_AND: 1053 __ and_(ToRegister(left), right_operand); 1054 break; 1055 case Token::BIT_OR: 1056 __ or_(ToRegister(left), right_operand); 1057 break; 1058 case Token::BIT_XOR: 1059 __ xor_(ToRegister(left), right_operand); 1060 break; 1061 default: 1062 UNREACHABLE(); 1063 break; 1064 } 1065 } else { 1066 switch (instr->op()) { 1067 case Token::BIT_AND: 1068 __ and_(ToRegister(left), ToOperand(right)); 1069 break; 1070 case Token::BIT_OR: 1071 __ or_(ToRegister(left), ToOperand(right)); 1072 break; 1073 case Token::BIT_XOR: 1074 __ xor_(ToRegister(left), ToOperand(right)); 1075 break; 1076 default: 1077 UNREACHABLE(); 1078 break; 1079 } 1080 } 1081} 1082 1083 1084void LCodeGen::DoShiftI(LShiftI* instr) { 1085 LOperand* left = instr->InputAt(0); 1086 LOperand* right = instr->InputAt(1); 1087 ASSERT(left->Equals(instr->result())); 1088 ASSERT(left->IsRegister()); 1089 if (right->IsRegister()) { 1090 ASSERT(ToRegister(right).is(ecx)); 1091 1092 switch (instr->op()) { 1093 case Token::SAR: 1094 __ sar_cl(ToRegister(left)); 1095 break; 1096 case Token::SHR: 1097 __ shr_cl(ToRegister(left)); 1098 if (instr->can_deopt()) { 1099 __ test(ToRegister(left), Immediate(0x80000000)); 1100 DeoptimizeIf(not_zero, instr->environment()); 1101 } 1102 break; 1103 case Token::SHL: 1104 __ shl_cl(ToRegister(left)); 1105 break; 1106 default: 1107 UNREACHABLE(); 1108 break; 1109 } 1110 } else { 1111 int value = ToInteger32(LConstantOperand::cast(right)); 1112 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); 1113 switch (instr->op()) { 1114 case Token::SAR: 1115 if (shift_count != 0) { 1116 __ sar(ToRegister(left), shift_count); 1117 } 1118 break; 1119 case Token::SHR: 1120 if (shift_count == 0 && instr->can_deopt()) { 1121 __ test(ToRegister(left), Immediate(0x80000000)); 1122 DeoptimizeIf(not_zero, instr->environment()); 1123 } else { 1124 __ shr(ToRegister(left), shift_count); 1125 } 1126 break; 1127 case Token::SHL: 1128 if (shift_count != 0) { 1129 __ shl(ToRegister(left), shift_count); 1130 } 1131 break; 1132 default: 1133 UNREACHABLE(); 1134 break; 1135 } 1136 } 1137} 1138 1139 1140void LCodeGen::DoSubI(LSubI* instr) { 1141 LOperand* left = instr->InputAt(0); 1142 LOperand* right = instr->InputAt(1); 1143 ASSERT(left->Equals(instr->result())); 1144 1145 if (right->IsConstantOperand()) { 1146 __ sub(ToOperand(left), ToImmediate(right)); 1147 } else { 1148 __ sub(ToRegister(left), ToOperand(right)); 1149 } 1150 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1151 DeoptimizeIf(overflow, instr->environment()); 1152 } 1153} 1154 1155 1156void LCodeGen::DoConstantI(LConstantI* instr) { 1157 ASSERT(instr->result()->IsRegister()); 1158 __ Set(ToRegister(instr->result()), Immediate(instr->value())); 1159} 1160 1161 1162void LCodeGen::DoConstantD(LConstantD* instr) { 1163 ASSERT(instr->result()->IsDoubleRegister()); 1164 XMMRegister res = ToDoubleRegister(instr->result()); 1165 double v = instr->value(); 1166 // Use xor to produce +0.0 in a fast and compact way, but avoid to 1167 // do so 
if the constant is -0.0. 1168 if (BitCast<uint64_t, double>(v) == 0) { 1169 __ xorps(res, res); 1170 } else { 1171 Register temp = ToRegister(instr->TempAt(0)); 1172 uint64_t int_val = BitCast<uint64_t, double>(v); 1173 int32_t lower = static_cast<int32_t>(int_val); 1174 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); 1175 if (CpuFeatures::IsSupported(SSE4_1)) { 1176 CpuFeatures::Scope scope(SSE4_1); 1177 if (lower != 0) { 1178 __ Set(temp, Immediate(lower)); 1179 __ movd(res, Operand(temp)); 1180 __ Set(temp, Immediate(upper)); 1181 __ pinsrd(res, Operand(temp), 1); 1182 } else { 1183 __ xorps(res, res); 1184 __ Set(temp, Immediate(upper)); 1185 __ pinsrd(res, Operand(temp), 1); 1186 } 1187 } else { 1188 __ Set(temp, Immediate(upper)); 1189 __ movd(res, Operand(temp)); 1190 __ psllq(res, 32); 1191 if (lower != 0) { 1192 __ Set(temp, Immediate(lower)); 1193 __ movd(xmm0, Operand(temp)); 1194 __ por(res, xmm0); 1195 } 1196 } 1197 } 1198} 1199 1200 1201void LCodeGen::DoConstantT(LConstantT* instr) { 1202 ASSERT(instr->result()->IsRegister()); 1203 __ Set(ToRegister(instr->result()), Immediate(instr->value())); 1204} 1205 1206 1207void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) { 1208 Register result = ToRegister(instr->result()); 1209 Register array = ToRegister(instr->InputAt(0)); 1210 __ mov(result, FieldOperand(array, JSArray::kLengthOffset)); 1211} 1212 1213 1214void LCodeGen::DoFixedArrayBaseLength( 1215 LFixedArrayBaseLength* instr) { 1216 Register result = ToRegister(instr->result()); 1217 Register array = ToRegister(instr->InputAt(0)); 1218 __ mov(result, FieldOperand(array, FixedArrayBase::kLengthOffset)); 1219} 1220 1221 1222void LCodeGen::DoElementsKind(LElementsKind* instr) { 1223 Register result = ToRegister(instr->result()); 1224 Register input = ToRegister(instr->InputAt(0)); 1225 1226 // Load map into |result|. 1227 __ mov(result, FieldOperand(input, HeapObject::kMapOffset)); 1228 // Load the map's "bit field 2" into |result|. We only need the first byte, 1229 // but the following masking takes care of that anyway. 1230 __ mov(result, FieldOperand(result, Map::kBitField2Offset)); 1231 // Retrieve elements_kind from bit field 2. 1232 __ and_(result, Map::kElementsKindMask); 1233 __ shr(result, Map::kElementsKindShift); 1234} 1235 1236 1237void LCodeGen::DoValueOf(LValueOf* instr) { 1238 Register input = ToRegister(instr->InputAt(0)); 1239 Register result = ToRegister(instr->result()); 1240 Register map = ToRegister(instr->TempAt(0)); 1241 ASSERT(input.is(result)); 1242 Label done; 1243 // If the object is a smi return the object. 1244 __ JumpIfSmi(input, &done, Label::kNear); 1245 1246 // If the object is not a value type, return the object. 
1247 __ CmpObjectType(input, JS_VALUE_TYPE, map); 1248 __ j(not_equal, &done, Label::kNear); 1249 __ mov(result, FieldOperand(input, JSValue::kValueOffset)); 1250 1251 __ bind(&done); 1252} 1253 1254 1255void LCodeGen::DoBitNotI(LBitNotI* instr) { 1256 LOperand* input = instr->InputAt(0); 1257 ASSERT(input->Equals(instr->result())); 1258 __ not_(ToRegister(input)); 1259} 1260 1261 1262void LCodeGen::DoThrow(LThrow* instr) { 1263 __ push(ToOperand(instr->value())); 1264 ASSERT(ToRegister(instr->context()).is(esi)); 1265 CallRuntime(Runtime::kThrow, 1, instr); 1266 1267 if (FLAG_debug_code) { 1268 Comment("Unreachable code."); 1269 __ int3(); 1270 } 1271} 1272 1273 1274void LCodeGen::DoAddI(LAddI* instr) { 1275 LOperand* left = instr->InputAt(0); 1276 LOperand* right = instr->InputAt(1); 1277 ASSERT(left->Equals(instr->result())); 1278 1279 if (right->IsConstantOperand()) { 1280 __ add(ToOperand(left), ToImmediate(right)); 1281 } else { 1282 __ add(ToRegister(left), ToOperand(right)); 1283 } 1284 1285 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1286 DeoptimizeIf(overflow, instr->environment()); 1287 } 1288} 1289 1290 1291void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 1292 XMMRegister left = ToDoubleRegister(instr->InputAt(0)); 1293 XMMRegister right = ToDoubleRegister(instr->InputAt(1)); 1294 XMMRegister result = ToDoubleRegister(instr->result()); 1295 // Modulo uses a fixed result register. 1296 ASSERT(instr->op() == Token::MOD || left.is(result)); 1297 switch (instr->op()) { 1298 case Token::ADD: 1299 __ addsd(left, right); 1300 break; 1301 case Token::SUB: 1302 __ subsd(left, right); 1303 break; 1304 case Token::MUL: 1305 __ mulsd(left, right); 1306 break; 1307 case Token::DIV: 1308 __ divsd(left, right); 1309 break; 1310 case Token::MOD: { 1311 // Pass two doubles as arguments on the stack. 1312 __ PrepareCallCFunction(4, eax); 1313 __ movdbl(Operand(esp, 0 * kDoubleSize), left); 1314 __ movdbl(Operand(esp, 1 * kDoubleSize), right); 1315 __ CallCFunction( 1316 ExternalReference::double_fp_operation(Token::MOD, isolate()), 1317 4); 1318 1319 // Return value is in st(0) on ia32. 1320 // Store it into the (fixed) result register. 1321 __ sub(Operand(esp), Immediate(kDoubleSize)); 1322 __ fstp_d(Operand(esp, 0)); 1323 __ movdbl(result, Operand(esp, 0)); 1324 __ add(Operand(esp), Immediate(kDoubleSize)); 1325 break; 1326 } 1327 default: 1328 UNREACHABLE(); 1329 break; 1330 } 1331} 1332 1333 1334void LCodeGen::DoArithmeticT(LArithmeticT* instr) { 1335 ASSERT(ToRegister(instr->context()).is(esi)); 1336 ASSERT(ToRegister(instr->left()).is(edx)); 1337 ASSERT(ToRegister(instr->right()).is(eax)); 1338 ASSERT(ToRegister(instr->result()).is(eax)); 1339 1340 BinaryOpStub stub(instr->op(), NO_OVERWRITE); 1341 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 1342 __ nop(); // Signals no inlined code. 
1343} 1344 1345 1346int LCodeGen::GetNextEmittedBlock(int block) { 1347 for (int i = block + 1; i < graph()->blocks()->length(); ++i) { 1348 LLabel* label = chunk_->GetLabel(i); 1349 if (!label->HasReplacement()) return i; 1350 } 1351 return -1; 1352} 1353 1354 1355void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) { 1356 int next_block = GetNextEmittedBlock(current_block_); 1357 right_block = chunk_->LookupDestination(right_block); 1358 left_block = chunk_->LookupDestination(left_block); 1359 1360 if (right_block == left_block) { 1361 EmitGoto(left_block); 1362 } else if (left_block == next_block) { 1363 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block)); 1364 } else if (right_block == next_block) { 1365 __ j(cc, chunk_->GetAssemblyLabel(left_block)); 1366 } else { 1367 __ j(cc, chunk_->GetAssemblyLabel(left_block)); 1368 __ jmp(chunk_->GetAssemblyLabel(right_block)); 1369 } 1370} 1371 1372 1373void LCodeGen::DoBranch(LBranch* instr) { 1374 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1375 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1376 1377 Representation r = instr->hydrogen()->value()->representation(); 1378 if (r.IsInteger32()) { 1379 Register reg = ToRegister(instr->InputAt(0)); 1380 __ test(reg, Operand(reg)); 1381 EmitBranch(true_block, false_block, not_zero); 1382 } else if (r.IsDouble()) { 1383 XMMRegister reg = ToDoubleRegister(instr->InputAt(0)); 1384 __ xorps(xmm0, xmm0); 1385 __ ucomisd(reg, xmm0); 1386 EmitBranch(true_block, false_block, not_equal); 1387 } else { 1388 ASSERT(r.IsTagged()); 1389 Register reg = ToRegister(instr->InputAt(0)); 1390 HType type = instr->hydrogen()->value()->type(); 1391 if (type.IsBoolean()) { 1392 __ cmp(reg, factory()->true_value()); 1393 EmitBranch(true_block, false_block, equal); 1394 } else if (type.IsSmi()) { 1395 __ test(reg, Operand(reg)); 1396 EmitBranch(true_block, false_block, not_equal); 1397 } else { 1398 Label* true_label = chunk_->GetAssemblyLabel(true_block); 1399 Label* false_label = chunk_->GetAssemblyLabel(false_block); 1400 1401 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); 1402 // Avoid deopts in the case where we've never executed this path before. 1403 if (expected.IsEmpty()) expected = ToBooleanStub::all_types(); 1404 1405 if (expected.Contains(ToBooleanStub::UNDEFINED)) { 1406 // undefined -> false. 1407 __ cmp(reg, factory()->undefined_value()); 1408 __ j(equal, false_label); 1409 } 1410 if (expected.Contains(ToBooleanStub::BOOLEAN)) { 1411 // true -> true. 1412 __ cmp(reg, factory()->true_value()); 1413 __ j(equal, true_label); 1414 // false -> false. 1415 __ cmp(reg, factory()->false_value()); 1416 __ j(equal, false_label); 1417 } 1418 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { 1419 // 'null' -> false. 1420 __ cmp(reg, factory()->null_value()); 1421 __ j(equal, false_label); 1422 } 1423 1424 if (expected.Contains(ToBooleanStub::SMI)) { 1425 // Smis: 0 -> false, all other -> true. 1426 __ test(reg, Operand(reg)); 1427 __ j(equal, false_label); 1428 __ JumpIfSmi(reg, true_label); 1429 } else if (expected.NeedsMap()) { 1430 // If we need a map later and have a Smi -> deopt. 1431 __ test(reg, Immediate(kSmiTagMask)); 1432 DeoptimizeIf(zero, instr->environment()); 1433 } 1434 1435 Register map = no_reg; // Keep the compiler happy. 
1436 if (expected.NeedsMap()) { 1437 map = ToRegister(instr->TempAt(0)); 1438 ASSERT(!map.is(reg)); 1439 __ mov(map, FieldOperand(reg, HeapObject::kMapOffset)); 1440 1441 if (expected.CanBeUndetectable()) { 1442 // Undetectable -> false. 1443 __ test_b(FieldOperand(map, Map::kBitFieldOffset), 1444 1 << Map::kIsUndetectable); 1445 __ j(not_zero, false_label); 1446 } 1447 } 1448 1449 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { 1450 // spec object -> true. 1451 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE); 1452 __ j(above_equal, true_label); 1453 } 1454 1455 if (expected.Contains(ToBooleanStub::STRING)) { 1456 // String value -> false iff empty. 1457 Label not_string; 1458 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE); 1459 __ j(above_equal, ¬_string, Label::kNear); 1460 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); 1461 __ j(not_zero, true_label); 1462 __ jmp(false_label); 1463 __ bind(¬_string); 1464 } 1465 1466 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { 1467 // heap number -> false iff +0, -0, or NaN. 1468 Label not_heap_number; 1469 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), 1470 factory()->heap_number_map()); 1471 __ j(not_equal, ¬_heap_number, Label::kNear); 1472 __ fldz(); 1473 __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset)); 1474 __ FCmp(); 1475 __ j(zero, false_label); 1476 __ jmp(true_label); 1477 __ bind(¬_heap_number); 1478 } 1479 1480 // We've seen something for the first time -> deopt. 1481 DeoptimizeIf(no_condition, instr->environment()); 1482 } 1483 } 1484} 1485 1486 1487void LCodeGen::EmitGoto(int block) { 1488 block = chunk_->LookupDestination(block); 1489 int next_block = GetNextEmittedBlock(current_block_); 1490 if (block != next_block) { 1491 __ jmp(chunk_->GetAssemblyLabel(block)); 1492 } 1493} 1494 1495 1496void LCodeGen::DoGoto(LGoto* instr) { 1497 EmitGoto(instr->block_id()); 1498} 1499 1500 1501Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { 1502 Condition cond = no_condition; 1503 switch (op) { 1504 case Token::EQ: 1505 case Token::EQ_STRICT: 1506 cond = equal; 1507 break; 1508 case Token::LT: 1509 cond = is_unsigned ? below : less; 1510 break; 1511 case Token::GT: 1512 cond = is_unsigned ? above : greater; 1513 break; 1514 case Token::LTE: 1515 cond = is_unsigned ? below_equal : less_equal; 1516 break; 1517 case Token::GTE: 1518 cond = is_unsigned ? above_equal : greater_equal; 1519 break; 1520 case Token::IN: 1521 case Token::INSTANCEOF: 1522 default: 1523 UNREACHABLE(); 1524 } 1525 return cond; 1526} 1527 1528 1529void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) { 1530 if (right->IsConstantOperand()) { 1531 __ cmp(ToOperand(left), ToImmediate(right)); 1532 } else { 1533 __ cmp(ToRegister(left), ToOperand(right)); 1534 } 1535} 1536 1537 1538void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { 1539 LOperand* left = instr->InputAt(0); 1540 LOperand* right = instr->InputAt(1); 1541 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1542 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1543 1544 if (instr->is_double()) { 1545 // Don't base result on EFLAGS when a NaN is involved. Instead 1546 // jump to the false block. 
1547 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); 1548 __ j(parity_even, chunk_->GetAssemblyLabel(false_block)); 1549 } else { 1550 EmitCmpI(left, right); 1551 } 1552 1553 Condition cc = TokenToCondition(instr->op(), instr->is_double()); 1554 EmitBranch(true_block, false_block, cc); 1555} 1556 1557 1558void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { 1559 Register left = ToRegister(instr->InputAt(0)); 1560 Operand right = ToOperand(instr->InputAt(1)); 1561 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1562 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1563 1564 __ cmp(left, Operand(right)); 1565 EmitBranch(true_block, false_block, equal); 1566} 1567 1568 1569void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { 1570 Register left = ToRegister(instr->InputAt(0)); 1571 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1572 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1573 1574 __ cmp(left, instr->hydrogen()->right()); 1575 EmitBranch(true_block, false_block, equal); 1576} 1577 1578 1579void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { 1580 Register reg = ToRegister(instr->InputAt(0)); 1581 1582 // TODO(fsc): If the expression is known to be a smi, then it's 1583 // definitely not null. Jump to the false block. 1584 1585 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1586 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1587 1588 __ cmp(reg, factory()->null_value()); 1589 if (instr->is_strict()) { 1590 EmitBranch(true_block, false_block, equal); 1591 } else { 1592 Label* true_label = chunk_->GetAssemblyLabel(true_block); 1593 Label* false_label = chunk_->GetAssemblyLabel(false_block); 1594 __ j(equal, true_label); 1595 __ cmp(reg, factory()->undefined_value()); 1596 __ j(equal, true_label); 1597 __ JumpIfSmi(reg, false_label); 1598 // Check for undetectable objects by looking in the bit field in 1599 // the map. The object has already been smi checked. 1600 Register scratch = ToRegister(instr->TempAt(0)); 1601 __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset)); 1602 __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset)); 1603 __ test(scratch, Immediate(1 << Map::kIsUndetectable)); 1604 EmitBranch(true_block, false_block, not_zero); 1605 } 1606} 1607 1608 1609Condition LCodeGen::EmitIsObject(Register input, 1610 Register temp1, 1611 Label* is_not_object, 1612 Label* is_object) { 1613 __ JumpIfSmi(input, is_not_object); 1614 1615 __ cmp(input, isolate()->factory()->null_value()); 1616 __ j(equal, is_object); 1617 1618 __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset)); 1619 // Undetectable objects behave like undefined. 
1620 __ test_b(FieldOperand(temp1, Map::kBitFieldOffset), 1621 1 << Map::kIsUndetectable); 1622 __ j(not_zero, is_not_object); 1623 1624 __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset)); 1625 __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); 1626 __ j(below, is_not_object); 1627 __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); 1628 return below_equal; 1629} 1630 1631 1632void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { 1633 Register reg = ToRegister(instr->InputAt(0)); 1634 Register temp = ToRegister(instr->TempAt(0)); 1635 1636 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1637 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1638 Label* true_label = chunk_->GetAssemblyLabel(true_block); 1639 Label* false_label = chunk_->GetAssemblyLabel(false_block); 1640 1641 Condition true_cond = EmitIsObject(reg, temp, false_label, true_label); 1642 1643 EmitBranch(true_block, false_block, true_cond); 1644} 1645 1646 1647void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { 1648 Operand input = ToOperand(instr->InputAt(0)); 1649 1650 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1651 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1652 1653 __ test(input, Immediate(kSmiTagMask)); 1654 EmitBranch(true_block, false_block, zero); 1655} 1656 1657 1658void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { 1659 Register input = ToRegister(instr->InputAt(0)); 1660 Register temp = ToRegister(instr->TempAt(0)); 1661 1662 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1663 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1664 1665 STATIC_ASSERT(kSmiTag == 0); 1666 __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block)); 1667 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); 1668 __ test_b(FieldOperand(temp, Map::kBitFieldOffset), 1669 1 << Map::kIsUndetectable); 1670 EmitBranch(true_block, false_block, not_zero); 1671} 1672 1673 1674static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { 1675 InstanceType from = instr->from(); 1676 InstanceType to = instr->to(); 1677 if (from == FIRST_TYPE) return to; 1678 ASSERT(from == to || to == LAST_TYPE); 1679 return from; 1680} 1681 1682 1683static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { 1684 InstanceType from = instr->from(); 1685 InstanceType to = instr->to(); 1686 if (from == to) return equal; 1687 if (to == LAST_TYPE) return above_equal; 1688 if (from == FIRST_TYPE) return below_equal; 1689 UNREACHABLE(); 1690 return equal; 1691} 1692 1693 1694void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { 1695 Register input = ToRegister(instr->InputAt(0)); 1696 Register temp = ToRegister(instr->TempAt(0)); 1697 1698 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1699 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1700 1701 Label* false_label = chunk_->GetAssemblyLabel(false_block); 1702 1703 __ JumpIfSmi(input, false_label); 1704 1705 __ CmpObjectType(input, TestType(instr->hydrogen()), temp); 1706 EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen())); 1707} 1708 1709 1710void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { 1711 Register input = ToRegister(instr->InputAt(0)); 1712 Register result = ToRegister(instr->result()); 1713 1714 if (FLAG_debug_code) { 1715 __ AbortIfNotString(input); 1716 } 1717 1718 __ mov(result, FieldOperand(input, 
String::kHashFieldOffset)); 1719 __ IndexFromHash(result, result); 1720} 1721 1722 1723void LCodeGen::DoHasCachedArrayIndexAndBranch( 1724 LHasCachedArrayIndexAndBranch* instr) { 1725 Register input = ToRegister(instr->InputAt(0)); 1726 1727 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1728 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1729 1730 __ test(FieldOperand(input, String::kHashFieldOffset), 1731 Immediate(String::kContainsCachedArrayIndexMask)); 1732 EmitBranch(true_block, false_block, equal); 1733} 1734 1735 1736// Branches to a label or falls through with the answer in the z flag. Trashes 1737// the temp registers, but not the input. Only input and temp2 may alias. 1738void LCodeGen::EmitClassOfTest(Label* is_true, 1739 Label* is_false, 1740 Handle<String>class_name, 1741 Register input, 1742 Register temp, 1743 Register temp2) { 1744 ASSERT(!input.is(temp)); 1745 ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register. 1746 __ JumpIfSmi(input, is_false); 1747 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp); 1748 __ j(below, is_false); 1749 1750 // Map is now in temp. 1751 // Functions have class 'Function'. 1752 __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE); 1753 if (class_name->IsEqualTo(CStrVector("Function"))) { 1754 __ j(above_equal, is_true); 1755 } else { 1756 __ j(above_equal, is_false); 1757 } 1758 1759 // Check if the constructor in the map is a function. 1760 __ mov(temp, FieldOperand(temp, Map::kConstructorOffset)); 1761 1762 // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and 1763 // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after 1764 // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. 1765 STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); 1766 STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == 1767 LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); 1768 1769 // Objects with a non-function constructor have class 'Object'. 1770 __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2); 1771 if (class_name->IsEqualTo(CStrVector("Object"))) { 1772 __ j(not_equal, is_true); 1773 } else { 1774 __ j(not_equal, is_false); 1775 } 1776 1777 // temp now contains the constructor function. Grab the 1778 // instance class name from there. 1779 __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset)); 1780 __ mov(temp, FieldOperand(temp, 1781 SharedFunctionInfo::kInstanceClassNameOffset)); 1782 // The class name we are testing against is a symbol because it's a literal. 1783 // The name in the constructor is a symbol because of the way the context is 1784 // booted. This routine isn't expected to work for random API-created 1785 // classes and it doesn't have to because you can't access it with natives 1786 // syntax. Since both sides are symbols it is sufficient to use an identity 1787 // comparison. 1788 __ cmp(temp, class_name); 1789 // End with the answer in the z flag. 1790} 1791 1792 1793void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { 1794 Register input = ToRegister(instr->InputAt(0)); 1795 Register temp = ToRegister(instr->TempAt(0)); 1796 Register temp2 = ToRegister(instr->TempAt(1)); 1797 if (input.is(temp)) { 1798 // Swap. 
1799 Register swapper = temp; 1800 temp = temp2; 1801 temp2 = swapper; 1802 } 1803 Handle<String> class_name = instr->hydrogen()->class_name(); 1804 1805 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1806 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1807 1808 Label* true_label = chunk_->GetAssemblyLabel(true_block); 1809 Label* false_label = chunk_->GetAssemblyLabel(false_block); 1810 1811 EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2); 1812 1813 EmitBranch(true_block, false_block, equal); 1814} 1815 1816 1817void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { 1818 Register reg = ToRegister(instr->InputAt(0)); 1819 int true_block = instr->true_block_id(); 1820 int false_block = instr->false_block_id(); 1821 1822 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map()); 1823 EmitBranch(true_block, false_block, equal); 1824} 1825 1826 1827void LCodeGen::DoInstanceOf(LInstanceOf* instr) { 1828 // Object and function are in fixed registers defined by the stub. 1829 ASSERT(ToRegister(instr->context()).is(esi)); 1830 InstanceofStub stub(InstanceofStub::kArgsInRegisters); 1831 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 1832 1833 Label true_value, done; 1834 __ test(eax, Operand(eax)); 1835 __ j(zero, &true_value, Label::kNear); 1836 __ mov(ToRegister(instr->result()), factory()->false_value()); 1837 __ jmp(&done, Label::kNear); 1838 __ bind(&true_value); 1839 __ mov(ToRegister(instr->result()), factory()->true_value()); 1840 __ bind(&done); 1841} 1842 1843 1844void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { 1845 class DeferredInstanceOfKnownGlobal: public LDeferredCode { 1846 public: 1847 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, 1848 LInstanceOfKnownGlobal* instr) 1849 : LDeferredCode(codegen), instr_(instr) { } 1850 virtual void Generate() { 1851 codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); 1852 } 1853 1854 Label* map_check() { return &map_check_; } 1855 1856 private: 1857 LInstanceOfKnownGlobal* instr_; 1858 Label map_check_; 1859 }; 1860 1861 DeferredInstanceOfKnownGlobal* deferred; 1862 deferred = new DeferredInstanceOfKnownGlobal(this, instr); 1863 1864 Label done, false_result; 1865 Register object = ToRegister(instr->InputAt(1)); 1866 Register temp = ToRegister(instr->TempAt(0)); 1867 1868 // A Smi is not an instance of anything. 1869 __ JumpIfSmi(object, &false_result); 1870 1871 // This is the inlined call site instanceof cache. The two occurences of the 1872 // hole value will be patched to the last map/result pair generated by the 1873 // instanceof stub. 1874 Label cache_miss; 1875 Register map = ToRegister(instr->TempAt(0)); 1876 __ mov(map, FieldOperand(object, HeapObject::kMapOffset)); 1877 __ bind(deferred->map_check()); // Label for calculating code patching. 1878 __ cmp(map, factory()->the_hole_value()); // Patched to cached map. 1879 __ j(not_equal, &cache_miss, Label::kNear); 1880 __ mov(eax, factory()->the_hole_value()); // Patched to either true or false. 1881 __ jmp(&done); 1882 1883 // The inlined call site cache did not match. Check for null and string 1884 // before calling the deferred code. 1885 __ bind(&cache_miss); 1886 // Null is not an instance of anything. 1887 __ cmp(object, factory()->null_value()); 1888 __ j(equal, &false_result); 1889 1890 // String values are not instances of anything. 
1891 Condition is_string = masm_->IsObjectStringType(object, temp, temp); 1892 __ j(is_string, &false_result); 1893 1894 // Go to the deferred code. 1895 __ jmp(deferred->entry()); 1896 1897 __ bind(&false_result); 1898 __ mov(ToRegister(instr->result()), factory()->false_value()); 1899 1900 // Here result has either true or false. Deferred code also produces true or 1901 // false object. 1902 __ bind(deferred->exit()); 1903 __ bind(&done); 1904} 1905 1906 1907void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, 1908 Label* map_check) { 1909 PushSafepointRegistersScope scope(this); 1910 1911 InstanceofStub::Flags flags = InstanceofStub::kNoFlags; 1912 flags = static_cast<InstanceofStub::Flags>( 1913 flags | InstanceofStub::kArgsInRegisters); 1914 flags = static_cast<InstanceofStub::Flags>( 1915 flags | InstanceofStub::kCallSiteInlineCheck); 1916 flags = static_cast<InstanceofStub::Flags>( 1917 flags | InstanceofStub::kReturnTrueFalseObject); 1918 InstanceofStub stub(flags); 1919 1920 // Get the temp register reserved by the instruction. This needs to be a 1921 // register which is pushed last by PushSafepointRegisters as top of the 1922 // stack is used to pass the offset to the location of the map check to 1923 // the stub. 1924 Register temp = ToRegister(instr->TempAt(0)); 1925 ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0); 1926 __ mov(InstanceofStub::right(), Immediate(instr->function())); 1927 static const int kAdditionalDelta = 13; 1928 int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta; 1929 __ mov(temp, Immediate(delta)); 1930 __ StoreToSafepointRegisterSlot(temp, temp); 1931 CallCodeGeneric(stub.GetCode(), 1932 RelocInfo::CODE_TARGET, 1933 instr, 1934 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 1935 // Put the result value into the eax slot and restore all registers. 1936 __ StoreToSafepointRegisterSlot(eax, eax); 1937} 1938 1939 1940static Condition ComputeCompareCondition(Token::Value op) { 1941 switch (op) { 1942 case Token::EQ_STRICT: 1943 case Token::EQ: 1944 return equal; 1945 case Token::LT: 1946 return less; 1947 case Token::GT: 1948 return greater; 1949 case Token::LTE: 1950 return less_equal; 1951 case Token::GTE: 1952 return greater_equal; 1953 default: 1954 UNREACHABLE(); 1955 return no_condition; 1956 } 1957} 1958 1959 1960void LCodeGen::DoCmpT(LCmpT* instr) { 1961 Token::Value op = instr->op(); 1962 1963 Handle<Code> ic = CompareIC::GetUninitialized(op); 1964 CallCode(ic, RelocInfo::CODE_TARGET, instr); 1965 1966 Condition condition = ComputeCompareCondition(op); 1967 if (op == Token::GT || op == Token::LTE) { 1968 condition = ReverseCondition(condition); 1969 } 1970 Label true_value, done; 1971 __ test(eax, Operand(eax)); 1972 __ j(condition, &true_value, Label::kNear); 1973 __ mov(ToRegister(instr->result()), factory()->false_value()); 1974 __ jmp(&done, Label::kNear); 1975 __ bind(&true_value); 1976 __ mov(ToRegister(instr->result()), factory()->true_value()); 1977 __ bind(&done); 1978} 1979 1980 1981void LCodeGen::DoReturn(LReturn* instr) { 1982 if (FLAG_trace) { 1983 // Preserve the return value on the stack and rely on the runtime call 1984 // to return the value in the same register. We're leaving the code 1985 // managed by the register allocator and tearing down the frame, it's 1986 // safe to write to the context register. 
1987 __ push(eax); 1988 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 1989 __ CallRuntime(Runtime::kTraceExit, 1); 1990 } 1991 __ mov(esp, ebp); 1992 __ pop(ebp); 1993 __ Ret((GetParameterCount() + 1) * kPointerSize, ecx); 1994} 1995 1996 1997void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { 1998 Register result = ToRegister(instr->result()); 1999 __ mov(result, Operand::Cell(instr->hydrogen()->cell())); 2000 if (instr->hydrogen()->check_hole_value()) { 2001 __ cmp(result, factory()->the_hole_value()); 2002 DeoptimizeIf(equal, instr->environment()); 2003 } 2004} 2005 2006 2007void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { 2008 ASSERT(ToRegister(instr->context()).is(esi)); 2009 ASSERT(ToRegister(instr->global_object()).is(eax)); 2010 ASSERT(ToRegister(instr->result()).is(eax)); 2011 2012 __ mov(ecx, instr->name()); 2013 RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET : 2014 RelocInfo::CODE_TARGET_CONTEXT; 2015 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); 2016 CallCode(ic, mode, instr); 2017} 2018 2019 2020void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { 2021 Register value = ToRegister(instr->InputAt(0)); 2022 Operand cell_operand = Operand::Cell(instr->hydrogen()->cell()); 2023 2024 // If the cell we are storing to contains the hole it could have 2025 // been deleted from the property dictionary. In that case, we need 2026 // to update the property details in the property dictionary to mark 2027 // it as no longer deleted. We deoptimize in that case. 2028 if (instr->hydrogen()->check_hole_value()) { 2029 __ cmp(cell_operand, factory()->the_hole_value()); 2030 DeoptimizeIf(equal, instr->environment()); 2031 } 2032 2033 // Store the value. 2034 __ mov(cell_operand, value); 2035} 2036 2037 2038void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) { 2039 ASSERT(ToRegister(instr->context()).is(esi)); 2040 ASSERT(ToRegister(instr->global_object()).is(edx)); 2041 ASSERT(ToRegister(instr->value()).is(eax)); 2042 2043 __ mov(ecx, instr->name()); 2044 Handle<Code> ic = instr->strict_mode() 2045 ? 
isolate()->builtins()->StoreIC_Initialize_Strict() 2046 : isolate()->builtins()->StoreIC_Initialize(); 2047 CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr); 2048} 2049 2050 2051void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 2052 Register context = ToRegister(instr->context()); 2053 Register result = ToRegister(instr->result()); 2054 __ mov(result, ContextOperand(context, instr->slot_index())); 2055} 2056 2057 2058void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 2059 Register context = ToRegister(instr->context()); 2060 Register value = ToRegister(instr->value()); 2061 __ mov(ContextOperand(context, instr->slot_index()), value); 2062 if (instr->needs_write_barrier()) { 2063 Register temp = ToRegister(instr->TempAt(0)); 2064 int offset = Context::SlotOffset(instr->slot_index()); 2065 __ RecordWrite(context, offset, value, temp); 2066 } 2067} 2068 2069 2070void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 2071 Register object = ToRegister(instr->object()); 2072 Register result = ToRegister(instr->result()); 2073 if (instr->hydrogen()->is_in_object()) { 2074 __ mov(result, FieldOperand(object, instr->hydrogen()->offset())); 2075 } else { 2076 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); 2077 __ mov(result, FieldOperand(result, instr->hydrogen()->offset())); 2078 } 2079} 2080 2081 2082void LCodeGen::EmitLoadFieldOrConstantFunction(Register result, 2083 Register object, 2084 Handle<Map> type, 2085 Handle<String> name) { 2086 LookupResult lookup; 2087 type->LookupInDescriptors(NULL, *name, &lookup); 2088 ASSERT(lookup.IsProperty() && 2089 (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION)); 2090 if (lookup.type() == FIELD) { 2091 int index = lookup.GetLocalFieldIndexFromMap(*type); 2092 int offset = index * kPointerSize; 2093 if (index < 0) { 2094 // Negative property indices are in-object properties, indexed 2095 // from the end of the fixed part of the object. 2096 __ mov(result, FieldOperand(object, offset + type->instance_size())); 2097 } else { 2098 // Non-negative property indices are in the properties array. 
2099 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); 2100 __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize)); 2101 } 2102 } else { 2103 Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type)); 2104 LoadHeapObject(result, Handle<HeapObject>::cast(function)); 2105 } 2106} 2107 2108 2109void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) { 2110 Register object = ToRegister(instr->object()); 2111 Register result = ToRegister(instr->result()); 2112 2113 int map_count = instr->hydrogen()->types()->length(); 2114 Handle<String> name = instr->hydrogen()->name(); 2115 if (map_count == 0) { 2116 ASSERT(instr->hydrogen()->need_generic()); 2117 __ mov(ecx, name); 2118 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); 2119 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2120 } else { 2121 Label done; 2122 for (int i = 0; i < map_count - 1; ++i) { 2123 Handle<Map> map = instr->hydrogen()->types()->at(i); 2124 Label next; 2125 __ cmp(FieldOperand(object, HeapObject::kMapOffset), map); 2126 __ j(not_equal, &next, Label::kNear); 2127 EmitLoadFieldOrConstantFunction(result, object, map, name); 2128 __ jmp(&done, Label::kNear); 2129 __ bind(&next); 2130 } 2131 Handle<Map> map = instr->hydrogen()->types()->last(); 2132 __ cmp(FieldOperand(object, HeapObject::kMapOffset), map); 2133 if (instr->hydrogen()->need_generic()) { 2134 Label generic; 2135 __ j(not_equal, &generic, Label::kNear); 2136 EmitLoadFieldOrConstantFunction(result, object, map, name); 2137 __ jmp(&done, Label::kNear); 2138 __ bind(&generic); 2139 __ mov(ecx, name); 2140 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); 2141 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2142 } else { 2143 DeoptimizeIf(not_equal, instr->environment()); 2144 EmitLoadFieldOrConstantFunction(result, object, map, name); 2145 } 2146 __ bind(&done); 2147 } 2148} 2149 2150 2151void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { 2152 ASSERT(ToRegister(instr->context()).is(esi)); 2153 ASSERT(ToRegister(instr->object()).is(eax)); 2154 ASSERT(ToRegister(instr->result()).is(eax)); 2155 2156 __ mov(ecx, instr->name()); 2157 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); 2158 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2159} 2160 2161 2162void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { 2163 Register function = ToRegister(instr->function()); 2164 Register temp = ToRegister(instr->TempAt(0)); 2165 Register result = ToRegister(instr->result()); 2166 2167 // Check that the function really is a function. 2168 __ CmpObjectType(function, JS_FUNCTION_TYPE, result); 2169 DeoptimizeIf(not_equal, instr->environment()); 2170 2171 // Check whether the function has an instance prototype. 2172 Label non_instance; 2173 __ test_b(FieldOperand(result, Map::kBitFieldOffset), 2174 1 << Map::kHasNonInstancePrototype); 2175 __ j(not_zero, &non_instance, Label::kNear); 2176 2177 // Get the prototype or initial map from the function. 2178 __ mov(result, 2179 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 2180 2181 // Check that the function has a prototype or an initial map. 2182 __ cmp(Operand(result), Immediate(factory()->the_hole_value())); 2183 DeoptimizeIf(equal, instr->environment()); 2184 2185 // If the function does not have an initial map, we're done. 2186 Label done; 2187 __ CmpObjectType(result, MAP_TYPE, temp); 2188 __ j(not_equal, &done, Label::kNear); 2189 2190 // Get the prototype from the initial map. 
2191 __ mov(result, FieldOperand(result, Map::kPrototypeOffset)); 2192 __ jmp(&done, Label::kNear); 2193 2194 // Non-instance prototype: Fetch prototype from constructor field 2195 // in the function's map. 2196 __ bind(&non_instance); 2197 __ mov(result, FieldOperand(result, Map::kConstructorOffset)); 2198 2199 // All done. 2200 __ bind(&done); 2201} 2202 2203 2204void LCodeGen::DoLoadElements(LLoadElements* instr) { 2205 Register result = ToRegister(instr->result()); 2206 Register input = ToRegister(instr->InputAt(0)); 2207 __ mov(result, FieldOperand(input, JSObject::kElementsOffset)); 2208 if (FLAG_debug_code) { 2209 Label done, ok, fail; 2210 __ cmp(FieldOperand(result, HeapObject::kMapOffset), 2211 Immediate(factory()->fixed_array_map())); 2212 __ j(equal, &done, Label::kNear); 2213 __ cmp(FieldOperand(result, HeapObject::kMapOffset), 2214 Immediate(factory()->fixed_cow_array_map())); 2215 __ j(equal, &done, Label::kNear); 2216 Register temp((result.is(eax)) ? ebx : eax); 2217 __ push(temp); 2218 __ mov(temp, FieldOperand(result, HeapObject::kMapOffset)); 2219 __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset)); 2220 __ and_(temp, Map::kElementsKindMask); 2221 __ shr(temp, Map::kElementsKindShift); 2222 __ cmp(temp, JSObject::FAST_ELEMENTS); 2223 __ j(equal, &ok, Label::kNear); 2224 __ cmp(temp, JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND); 2225 __ j(less, &fail, Label::kNear); 2226 __ cmp(temp, JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND); 2227 __ j(less_equal, &ok, Label::kNear); 2228 __ bind(&fail); 2229 __ Abort("Check for fast or external elements failed."); 2230 __ bind(&ok); 2231 __ pop(temp); 2232 __ bind(&done); 2233 } 2234} 2235 2236 2237void LCodeGen::DoLoadExternalArrayPointer( 2238 LLoadExternalArrayPointer* instr) { 2239 Register result = ToRegister(instr->result()); 2240 Register input = ToRegister(instr->InputAt(0)); 2241 __ mov(result, FieldOperand(input, 2242 ExternalArray::kExternalPointerOffset)); 2243} 2244 2245 2246void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { 2247 Register arguments = ToRegister(instr->arguments()); 2248 Register length = ToRegister(instr->length()); 2249 Operand index = ToOperand(instr->index()); 2250 Register result = ToRegister(instr->result()); 2251 2252 __ sub(length, index); 2253 DeoptimizeIf(below_equal, instr->environment()); 2254 2255 // There are two words between the frame pointer and the last argument. 2256 // Subtracting from length accounts for one of them; add one more. 2257 __ mov(result, Operand(arguments, length, times_4, kPointerSize)); 2258} 2259 2260 2261void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { 2262 Register result = ToRegister(instr->result()); 2263 2264 // Load the result. 2265 __ mov(result, 2266 BuildFastArrayOperand(instr->elements(), instr->key(), 2267 JSObject::FAST_ELEMENTS, 2268 FixedArray::kHeaderSize - kHeapObjectTag)); 2269 2270 // Check for the hole value.
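  // Reading the hole sentinel means the element is absent from the backing
  // store; deoptimize rather than let the hole value escape to JavaScript.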
2271 if (instr->hydrogen()->RequiresHoleCheck()) { 2272 __ cmp(result, factory()->the_hole_value()); 2273 DeoptimizeIf(equal, instr->environment()); 2274 } 2275} 2276 2277 2278void LCodeGen::DoLoadKeyedFastDoubleElement( 2279 LLoadKeyedFastDoubleElement* instr) { 2280 XMMRegister result = ToDoubleRegister(instr->result()); 2281 2282 if (instr->hydrogen()->RequiresHoleCheck()) { 2283 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + 2284 sizeof(kHoleNanLower32); 2285 Operand hole_check_operand = BuildFastArrayOperand( 2286 instr->elements(), instr->key(), 2287 JSObject::FAST_DOUBLE_ELEMENTS, 2288 offset); 2289 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); 2290 DeoptimizeIf(equal, instr->environment()); 2291 } 2292 2293 Operand double_load_operand = BuildFastArrayOperand( 2294 instr->elements(), instr->key(), JSObject::FAST_DOUBLE_ELEMENTS, 2295 FixedDoubleArray::kHeaderSize - kHeapObjectTag); 2296 __ movdbl(result, double_load_operand); 2297} 2298 2299 2300Operand LCodeGen::BuildFastArrayOperand( 2301 LOperand* elements_pointer, 2302 LOperand* key, 2303 JSObject::ElementsKind elements_kind, 2304 uint32_t offset) { 2305 Register elements_pointer_reg = ToRegister(elements_pointer); 2306 int shift_size = ElementsKindToShiftSize(elements_kind); 2307 if (key->IsConstantOperand()) { 2308 int constant_value = ToInteger32(LConstantOperand::cast(key)); 2309 if (constant_value & 0xF0000000) { 2310 Abort("array index constant value too big"); 2311 } 2312 return Operand(elements_pointer_reg, 2313 constant_value * (1 << shift_size) + offset); 2314 } else { 2315 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size); 2316 return Operand(elements_pointer_reg, ToRegister(key), scale_factor, offset); 2317 } 2318} 2319 2320 2321void LCodeGen::DoLoadKeyedSpecializedArrayElement( 2322 LLoadKeyedSpecializedArrayElement* instr) { 2323 JSObject::ElementsKind elements_kind = instr->elements_kind(); 2324 Operand operand(BuildFastArrayOperand(instr->external_pointer(), 2325 instr->key(), elements_kind, 0)); 2326 if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) { 2327 XMMRegister result(ToDoubleRegister(instr->result())); 2328 __ movss(result, operand); 2329 __ cvtss2sd(result, result); 2330 } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) { 2331 __ movdbl(ToDoubleRegister(instr->result()), operand); 2332 } else { 2333 Register result(ToRegister(instr->result())); 2334 switch (elements_kind) { 2335 case JSObject::EXTERNAL_BYTE_ELEMENTS: 2336 __ movsx_b(result, operand); 2337 break; 2338 case JSObject::EXTERNAL_PIXEL_ELEMENTS: 2339 case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: 2340 __ movzx_b(result, operand); 2341 break; 2342 case JSObject::EXTERNAL_SHORT_ELEMENTS: 2343 __ movsx_w(result, operand); 2344 break; 2345 case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: 2346 __ movzx_w(result, operand); 2347 break; 2348 case JSObject::EXTERNAL_INT_ELEMENTS: 2349 __ mov(result, operand); 2350 break; 2351 case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: 2352 __ mov(result, operand); 2353 __ test(result, Operand(result)); 2354 // TODO(danno): we could be more clever here, perhaps having a special 2355 // version of the stub that detects if the overflow case actually 2356 // happens, and generate code that returns a double rather than int. 
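        // A uint32 value with the sign bit set cannot be represented as a
        // signed 32-bit integer, so deoptimize for now (see the TODO above).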
2357 DeoptimizeIf(negative, instr->environment()); 2358 break; 2359 case JSObject::EXTERNAL_FLOAT_ELEMENTS: 2360 case JSObject::EXTERNAL_DOUBLE_ELEMENTS: 2361 case JSObject::FAST_ELEMENTS: 2362 case JSObject::FAST_DOUBLE_ELEMENTS: 2363 case JSObject::DICTIONARY_ELEMENTS: 2364 case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS: 2365 UNREACHABLE(); 2366 break; 2367 } 2368 } 2369} 2370 2371 2372void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 2373 ASSERT(ToRegister(instr->context()).is(esi)); 2374 ASSERT(ToRegister(instr->object()).is(edx)); 2375 ASSERT(ToRegister(instr->key()).is(eax)); 2376 2377 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); 2378 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2379} 2380 2381 2382void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { 2383 Register result = ToRegister(instr->result()); 2384 2385 // Check for arguments adapter frame. 2386 Label done, adapted; 2387 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 2388 __ mov(result, Operand(result, StandardFrameConstants::kContextOffset)); 2389 __ cmp(Operand(result), 2390 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 2391 __ j(equal, &adapted, Label::kNear); 2392 2393 // No arguments adaptor frame. 2394 __ mov(result, Operand(ebp)); 2395 __ jmp(&done, Label::kNear); 2396 2397 // Arguments adaptor frame present. 2398 __ bind(&adapted); 2399 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 2400 2401 // Result is the frame pointer for the frame if not adapted and for the real 2402 // frame below the adaptor frame if adapted. 2403 __ bind(&done); 2404} 2405 2406 2407void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { 2408 Operand elem = ToOperand(instr->InputAt(0)); 2409 Register result = ToRegister(instr->result()); 2410 2411 Label done; 2412 2413 // If no arguments adaptor frame the number of arguments is fixed. 2414 __ cmp(ebp, elem); 2415 __ mov(result, Immediate(scope()->num_parameters())); 2416 __ j(equal, &done, Label::kNear); 2417 2418 // Arguments adaptor frame present. Get argument length from there. 2419 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 2420 __ mov(result, Operand(result, 2421 ArgumentsAdaptorFrameConstants::kLengthOffset)); 2422 __ SmiUntag(result); 2423 2424 // Argument length is in result register. 2425 __ bind(&done); 2426} 2427 2428 2429void LCodeGen::DoApplyArguments(LApplyArguments* instr) { 2430 Register receiver = ToRegister(instr->receiver()); 2431 Register function = ToRegister(instr->function()); 2432 Register length = ToRegister(instr->length()); 2433 Register elements = ToRegister(instr->elements()); 2434 Register scratch = ToRegister(instr->TempAt(0)); 2435 ASSERT(receiver.is(eax)); // Used for parameter count. 2436 ASSERT(function.is(edi)); // Required by InvokeFunction. 2437 ASSERT(ToRegister(instr->result()).is(eax)); 2438 2439 // If the receiver is null or undefined, we have to pass the global 2440 // object as a receiver to normal functions. Values have to be 2441 // passed unchanged to builtins and strict-mode functions. 2442 Label global_object, receiver_ok; 2443 2444 // Do not transform the receiver to object for strict mode 2445 // functions. 
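  // Both the strict-mode flag and the native flag are read from the shared
  // function info, so load it into the scratch register once and test the
  // relevant bytes.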
2446 __ mov(scratch, 2447 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); 2448 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset), 2449 1 << SharedFunctionInfo::kStrictModeBitWithinByte); 2450 __ j(not_equal, &receiver_ok, Label::kNear); 2451 2452 // Do not transform the receiver to object for builtins. 2453 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset), 2454 1 << SharedFunctionInfo::kNativeBitWithinByte); 2455 __ j(not_equal, &receiver_ok, Label::kNear); 2456 2457 // Normal function. Replace undefined or null with global receiver. 2458 __ cmp(receiver, factory()->null_value()); 2459 __ j(equal, &global_object, Label::kNear); 2460 __ cmp(receiver, factory()->undefined_value()); 2461 __ j(equal, &global_object, Label::kNear); 2462 2463 // The receiver should be a JS object. 2464 __ test(receiver, Immediate(kSmiTagMask)); 2465 DeoptimizeIf(equal, instr->environment()); 2466 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch); 2467 DeoptimizeIf(below, instr->environment()); 2468 __ jmp(&receiver_ok, Label::kNear); 2469 2470 __ bind(&global_object); 2471 // TODO(kmillikin): We have a hydrogen value for the global object. See 2472 // if it's better to use it than to explicitly fetch it from the context 2473 // here. 2474 __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset)); 2475 __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX)); 2476 __ mov(receiver, 2477 FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset)); 2478 __ bind(&receiver_ok); 2479 2480 // Copy the arguments to this function possibly from the 2481 // adaptor frame below it. 2482 const uint32_t kArgumentsLimit = 1 * KB; 2483 __ cmp(length, kArgumentsLimit); 2484 DeoptimizeIf(above, instr->environment()); 2485 2486 __ push(receiver); 2487 __ mov(receiver, length); 2488 2489 // Loop through the arguments pushing them onto the execution 2490 // stack. 2491 Label invoke, loop; 2492 // length is a small non-negative integer, due to the test above. 2493 __ test(length, Operand(length)); 2494 __ j(zero, &invoke, Label::kNear); 2495 __ bind(&loop); 2496 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize)); 2497 __ dec(length); 2498 __ j(not_zero, &loop); 2499 2500 // Invoke the function. 
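  // All arguments have been pushed; eax, which was loaded with length above,
  // passes the actual argument count to InvokeFunction.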
2501 __ bind(&invoke); 2502 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); 2503 LPointerMap* pointers = instr->pointer_map(); 2504 LEnvironment* env = instr->deoptimization_environment(); 2505 RecordPosition(pointers->position()); 2506 RegisterEnvironmentForDeoptimization(env); 2507 SafepointGenerator safepoint_generator(this, 2508 pointers, 2509 env->deoptimization_index()); 2510 ParameterCount actual(eax); 2511 __ InvokeFunction(function, actual, CALL_FUNCTION, 2512 safepoint_generator, CALL_AS_METHOD); 2513} 2514 2515 2516void LCodeGen::DoPushArgument(LPushArgument* instr) { 2517 LOperand* argument = instr->InputAt(0); 2518 if (argument->IsConstantOperand()) { 2519 __ push(ToImmediate(argument)); 2520 } else { 2521 __ push(ToOperand(argument)); 2522 } 2523} 2524 2525 2526void LCodeGen::DoThisFunction(LThisFunction* instr) { 2527 Register result = ToRegister(instr->result()); 2528 __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); 2529} 2530 2531 2532void LCodeGen::DoContext(LContext* instr) { 2533 Register result = ToRegister(instr->result()); 2534 __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset)); 2535} 2536 2537 2538void LCodeGen::DoOuterContext(LOuterContext* instr) { 2539 Register context = ToRegister(instr->context()); 2540 Register result = ToRegister(instr->result()); 2541 __ mov(result, 2542 Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX))); 2543} 2544 2545 2546void LCodeGen::DoGlobalObject(LGlobalObject* instr) { 2547 Register context = ToRegister(instr->context()); 2548 Register result = ToRegister(instr->result()); 2549 __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX))); 2550} 2551 2552 2553void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { 2554 Register global = ToRegister(instr->global()); 2555 Register result = ToRegister(instr->result()); 2556 __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset)); 2557} 2558 2559 2560void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 2561 int arity, 2562 LInstruction* instr, 2563 CallKind call_kind) { 2564 // Change context if needed. 2565 bool change_context = 2566 (info()->closure()->context() != function->context()) || 2567 scope()->contains_with() || 2568 (scope()->num_heap_slots() > 0); 2569 if (change_context) { 2570 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); 2571 } else { 2572 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 2573 } 2574 2575 // Set eax to arguments count if adaption is not needed. Assumes that eax 2576 // is available to write to at this point. 2577 if (!function->NeedsArgumentsAdaption()) { 2578 __ mov(eax, arity); 2579 } 2580 2581 LPointerMap* pointers = instr->pointer_map(); 2582 RecordPosition(pointers->position()); 2583 2584 // Invoke function. 2585 __ SetCallKind(ecx, call_kind); 2586 if (*function == *info()->closure()) { 2587 __ CallSelf(); 2588 } else { 2589 __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset)); 2590 } 2591 2592 // Setup deoptimization. 
2593 RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT); 2594} 2595 2596 2597void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { 2598 ASSERT(ToRegister(instr->result()).is(eax)); 2599 __ mov(edi, instr->function()); 2600 CallKnownFunction(instr->function(), 2601 instr->arity(), 2602 instr, 2603 CALL_AS_METHOD); 2604} 2605 2606 2607void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { 2608 Register input_reg = ToRegister(instr->value()); 2609 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 2610 factory()->heap_number_map()); 2611 DeoptimizeIf(not_equal, instr->environment()); 2612 2613 Label done; 2614 Register tmp = input_reg.is(eax) ? ecx : eax; 2615 Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx; 2616 2617 // Preserve the value of all registers. 2618 PushSafepointRegistersScope scope(this); 2619 2620 Label negative; 2621 __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); 2622 // Check the sign of the argument. If the argument is positive, just 2623 // return it. We do not need to patch the stack since |input| and 2624 // |result| are the same register and |input| will be restored 2625 // unchanged by popping safepoint registers. 2626 __ test(tmp, Immediate(HeapNumber::kSignMask)); 2627 __ j(not_zero, &negative); 2628 __ jmp(&done); 2629 2630 __ bind(&negative); 2631 2632 Label allocated, slow; 2633 __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow); 2634 __ jmp(&allocated); 2635 2636 // Slow case: Call the runtime system to do the number allocation. 2637 __ bind(&slow); 2638 2639 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, 2640 instr, instr->context()); 2641 2642 // Set the pointer to the new heap number in tmp. 2643 if (!tmp.is(eax)) __ mov(tmp, eax); 2644 2645 // Restore input_reg after call to runtime. 2646 __ LoadFromSafepointRegisterSlot(input_reg, input_reg); 2647 2648 __ bind(&allocated); 2649 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset)); 2650 __ and_(tmp2, ~HeapNumber::kSignMask); 2651 __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2); 2652 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); 2653 __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2); 2654 __ StoreToSafepointRegisterSlot(input_reg, tmp); 2655 2656 __ bind(&done); 2657} 2658 2659 2660void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { 2661 Register input_reg = ToRegister(instr->value()); 2662 __ test(input_reg, Operand(input_reg)); 2663 Label is_positive; 2664 __ j(not_sign, &is_positive); 2665 __ neg(input_reg); 2666 __ test(input_reg, Operand(input_reg)); 2667 DeoptimizeIf(negative, instr->environment()); 2668 __ bind(&is_positive); 2669} 2670 2671 2672void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { 2673 // Class for deferred case. 
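  // The deferred code handles the uncommon tagged heap-number input: the
  // inline path below jumps to deferred->entry() when the input is not a smi
  // and execution resumes at deferred->exit().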
2674 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { 2675 public: 2676 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, 2677 LUnaryMathOperation* instr) 2678 : LDeferredCode(codegen), instr_(instr) { } 2679 virtual void Generate() { 2680 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 2681 } 2682 private: 2683 LUnaryMathOperation* instr_; 2684 }; 2685 2686 ASSERT(instr->value()->Equals(instr->result())); 2687 Representation r = instr->hydrogen()->value()->representation(); 2688 2689 if (r.IsDouble()) { 2690 XMMRegister scratch = xmm0; 2691 XMMRegister input_reg = ToDoubleRegister(instr->value()); 2692 __ xorps(scratch, scratch); 2693 __ subsd(scratch, input_reg); 2694 __ pand(input_reg, scratch); 2695 } else if (r.IsInteger32()) { 2696 EmitIntegerMathAbs(instr); 2697 } else { // Tagged case. 2698 DeferredMathAbsTaggedHeapNumber* deferred = 2699 new DeferredMathAbsTaggedHeapNumber(this, instr); 2700 Register input_reg = ToRegister(instr->value()); 2701 // Smi check. 2702 __ JumpIfNotSmi(input_reg, deferred->entry()); 2703 EmitIntegerMathAbs(instr); 2704 __ bind(deferred->exit()); 2705 } 2706} 2707 2708 2709void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { 2710 XMMRegister xmm_scratch = xmm0; 2711 Register output_reg = ToRegister(instr->result()); 2712 XMMRegister input_reg = ToDoubleRegister(instr->value()); 2713 2714 if (CpuFeatures::IsSupported(SSE4_1)) { 2715 CpuFeatures::Scope scope(SSE4_1); 2716 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 2717 // Deoptimize on negative zero. 2718 Label non_zero; 2719 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. 2720 __ ucomisd(input_reg, xmm_scratch); 2721 __ j(not_equal, &non_zero, Label::kNear); 2722 __ movmskpd(output_reg, input_reg); 2723 __ test(output_reg, Immediate(1)); 2724 DeoptimizeIf(not_zero, instr->environment()); 2725 __ bind(&non_zero); 2726 } 2727 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); 2728 __ cvttsd2si(output_reg, Operand(xmm_scratch)); 2729 // Overflow is signalled with minint. 2730 __ cmp(output_reg, 0x80000000u); 2731 DeoptimizeIf(equal, instr->environment()); 2732 } else { 2733 Label done; 2734 // Deoptimize on negative numbers. 2735 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. 2736 __ ucomisd(input_reg, xmm_scratch); 2737 DeoptimizeIf(below, instr->environment()); 2738 2739 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 2740 // Check for negative zero. 2741 Label positive_sign; 2742 __ j(above, &positive_sign, Label::kNear); 2743 __ movmskpd(output_reg, input_reg); 2744 __ test(output_reg, Immediate(1)); 2745 DeoptimizeIf(not_zero, instr->environment()); 2746 __ Set(output_reg, Immediate(0)); 2747 __ jmp(&done, Label::kNear); 2748 __ bind(&positive_sign); 2749 } 2750 2751 // Use truncating instruction (OK because input is positive). 2752 __ cvttsd2si(output_reg, Operand(input_reg)); 2753 2754 // Overflow is signalled with minint. 
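    // cvttsd2si produces the "integer indefinite" value 0x80000000 when the
    // truncated result does not fit in a 32-bit integer, so comparing against
    // it detects an overflowed conversion.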
2755 __ cmp(output_reg, 0x80000000u); 2756 DeoptimizeIf(equal, instr->environment()); 2757 __ bind(&done); 2758 } 2759} 2760 2761void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { 2762 XMMRegister xmm_scratch = xmm0; 2763 Register output_reg = ToRegister(instr->result()); 2764 XMMRegister input_reg = ToDoubleRegister(instr->value()); 2765 2766 Label below_half, done; 2767 // xmm_scratch = 0.5 2768 ExternalReference one_half = ExternalReference::address_of_one_half(); 2769 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half)); 2770 __ ucomisd(xmm_scratch, input_reg); 2771 __ j(above, &below_half); 2772 // input = input + 0.5 2773 __ addsd(input_reg, xmm_scratch); 2774 2775 // Compute Math.floor(value + 0.5). 2776 // Use truncating instruction (OK because input is positive). 2777 __ cvttsd2si(output_reg, Operand(input_reg)); 2778 2779 // Overflow is signalled with minint. 2780 __ cmp(output_reg, 0x80000000u); 2781 DeoptimizeIf(equal, instr->environment()); 2782 __ jmp(&done); 2783 2784 __ bind(&below_half); 2785 2786 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if 2787 // we can ignore the difference between a result of -0 and +0. 2788 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 2789 // If the sign is positive, we return +0. 2790 __ movmskpd(output_reg, input_reg); 2791 __ test(output_reg, Immediate(1)); 2792 DeoptimizeIf(not_zero, instr->environment()); 2793 } else { 2794 // If the input is >= -0.5, we return +0. 2795 __ mov(output_reg, Immediate(0xBF000000)); 2796 __ movd(xmm_scratch, Operand(output_reg)); 2797 __ cvtss2sd(xmm_scratch, xmm_scratch); 2798 __ ucomisd(input_reg, xmm_scratch); 2799 DeoptimizeIf(below, instr->environment()); 2800 } 2801 __ Set(output_reg, Immediate(0)); 2802 __ bind(&done); 2803} 2804 2805 2806void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { 2807 XMMRegister input_reg = ToDoubleRegister(instr->value()); 2808 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 2809 __ sqrtsd(input_reg, input_reg); 2810} 2811 2812 2813void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { 2814 XMMRegister xmm_scratch = xmm0; 2815 XMMRegister input_reg = ToDoubleRegister(instr->value()); 2816 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 2817 __ xorps(xmm_scratch, xmm_scratch); 2818 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0. 2819 __ sqrtsd(input_reg, input_reg); 2820} 2821 2822 2823void LCodeGen::DoPower(LPower* instr) { 2824 LOperand* left = instr->InputAt(0); 2825 LOperand* right = instr->InputAt(1); 2826 DoubleRegister result_reg = ToDoubleRegister(instr->result()); 2827 Representation exponent_type = instr->hydrogen()->right()->representation(); 2828 2829 if (exponent_type.IsDouble()) { 2830 // It is safe to use ebx directly since the instruction is marked 2831 // as a call. 2832 __ PrepareCallCFunction(4, ebx); 2833 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left)); 2834 __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right)); 2835 __ CallCFunction(ExternalReference::power_double_double_function(isolate()), 2836 4); 2837 } else if (exponent_type.IsInteger32()) { 2838 // It is safe to use ebx directly since the instruction is marked 2839 // as a call. 
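    // The double base is written to the first stack slot and the untagged
    // int32 exponent to the slot after it before calling the C function.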
2840 ASSERT(!ToRegister(right).is(ebx)); 2841 __ PrepareCallCFunction(4, ebx); 2842 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left)); 2843 __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right)); 2844 __ CallCFunction(ExternalReference::power_double_int_function(isolate()), 2845 4); 2846 } else { 2847 ASSERT(exponent_type.IsTagged()); 2848 CpuFeatures::Scope scope(SSE2); 2849 Register right_reg = ToRegister(right); 2850 2851 Label non_smi, call; 2852 __ JumpIfNotSmi(right_reg, &non_smi); 2853 __ SmiUntag(right_reg); 2854 __ cvtsi2sd(result_reg, Operand(right_reg)); 2855 __ jmp(&call); 2856 2857 __ bind(&non_smi); 2858 // It is safe to use ebx directly since the instruction is marked 2859 // as a call. 2860 ASSERT(!right_reg.is(ebx)); 2861 __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , ebx); 2862 DeoptimizeIf(not_equal, instr->environment()); 2863 __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset)); 2864 2865 __ bind(&call); 2866 __ PrepareCallCFunction(4, ebx); 2867 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left)); 2868 __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg); 2869 __ CallCFunction(ExternalReference::power_double_double_function(isolate()), 2870 4); 2871 } 2872 2873 // Return value is in st(0) on ia32. 2874 // Store it into the (fixed) result register. 2875 __ sub(Operand(esp), Immediate(kDoubleSize)); 2876 __ fstp_d(Operand(esp, 0)); 2877 __ movdbl(result_reg, Operand(esp, 0)); 2878 __ add(Operand(esp), Immediate(kDoubleSize)); 2879} 2880 2881 2882void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { 2883 ASSERT(instr->value()->Equals(instr->result())); 2884 XMMRegister input_reg = ToDoubleRegister(instr->value()); 2885 Label positive, done, zero; 2886 __ xorps(xmm0, xmm0); 2887 __ ucomisd(input_reg, xmm0); 2888 __ j(above, &positive, Label::kNear); 2889 __ j(equal, &zero, Label::kNear); 2890 ExternalReference nan = 2891 ExternalReference::address_of_canonical_non_hole_nan(); 2892 __ movdbl(input_reg, Operand::StaticVariable(nan)); 2893 __ jmp(&done, Label::kNear); 2894 __ bind(&zero); 2895 __ push(Immediate(0xFFF00000)); 2896 __ push(Immediate(0)); 2897 __ movdbl(input_reg, Operand(esp, 0)); 2898 __ add(Operand(esp), Immediate(kDoubleSize)); 2899 __ jmp(&done, Label::kNear); 2900 __ bind(&positive); 2901 __ fldln2(); 2902 __ sub(Operand(esp), Immediate(kDoubleSize)); 2903 __ movdbl(Operand(esp, 0), input_reg); 2904 __ fld_d(Operand(esp, 0)); 2905 __ fyl2x(); 2906 __ fstp_d(Operand(esp, 0)); 2907 __ movdbl(input_reg, Operand(esp, 0)); 2908 __ add(Operand(esp), Immediate(kDoubleSize)); 2909 __ bind(&done); 2910} 2911 2912 2913void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { 2914 ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); 2915 TranscendentalCacheStub stub(TranscendentalCache::COS, 2916 TranscendentalCacheStub::UNTAGGED); 2917 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2918} 2919 2920 2921void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { 2922 ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); 2923 TranscendentalCacheStub stub(TranscendentalCache::SIN, 2924 TranscendentalCacheStub::UNTAGGED); 2925 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2926} 2927 2928 2929void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { 2930 switch (instr->op()) { 2931 case kMathAbs: 2932 DoMathAbs(instr); 2933 break; 2934 case kMathFloor: 2935 DoMathFloor(instr); 2936 break; 2937 case kMathRound: 2938 DoMathRound(instr); 2939 break; 2940 case kMathSqrt: 2941 DoMathSqrt(instr); 2942 break; 
2943 case kMathPowHalf: 2944 DoMathPowHalf(instr); 2945 break; 2946 case kMathCos: 2947 DoMathCos(instr); 2948 break; 2949 case kMathSin: 2950 DoMathSin(instr); 2951 break; 2952 case kMathLog: 2953 DoMathLog(instr); 2954 break; 2955 2956 default: 2957 UNREACHABLE(); 2958 } 2959} 2960 2961 2962void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { 2963 ASSERT(ToRegister(instr->context()).is(esi)); 2964 ASSERT(ToRegister(instr->function()).is(edi)); 2965 ASSERT(instr->HasPointerMap()); 2966 ASSERT(instr->HasDeoptimizationEnvironment()); 2967 LPointerMap* pointers = instr->pointer_map(); 2968 LEnvironment* env = instr->deoptimization_environment(); 2969 RecordPosition(pointers->position()); 2970 RegisterEnvironmentForDeoptimization(env); 2971 SafepointGenerator generator(this, pointers, env->deoptimization_index()); 2972 ParameterCount count(instr->arity()); 2973 __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD); 2974} 2975 2976 2977void LCodeGen::DoCallKeyed(LCallKeyed* instr) { 2978 ASSERT(ToRegister(instr->context()).is(esi)); 2979 ASSERT(ToRegister(instr->key()).is(ecx)); 2980 ASSERT(ToRegister(instr->result()).is(eax)); 2981 2982 int arity = instr->arity(); 2983 Handle<Code> ic = isolate()->stub_cache()-> 2984 ComputeKeyedCallInitialize(arity, NOT_IN_LOOP); 2985 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2986} 2987 2988 2989void LCodeGen::DoCallNamed(LCallNamed* instr) { 2990 ASSERT(ToRegister(instr->context()).is(esi)); 2991 ASSERT(ToRegister(instr->result()).is(eax)); 2992 2993 int arity = instr->arity(); 2994 RelocInfo::Mode mode = RelocInfo::CODE_TARGET; 2995 Handle<Code> ic = 2996 isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode); 2997 __ mov(ecx, instr->name()); 2998 CallCode(ic, mode, instr); 2999} 3000 3001 3002void LCodeGen::DoCallFunction(LCallFunction* instr) { 3003 ASSERT(ToRegister(instr->context()).is(esi)); 3004 ASSERT(ToRegister(instr->result()).is(eax)); 3005 3006 int arity = instr->arity(); 3007 CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT); 3008 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 3009 __ Drop(1); 3010} 3011 3012 3013void LCodeGen::DoCallGlobal(LCallGlobal* instr) { 3014 ASSERT(ToRegister(instr->context()).is(esi)); 3015 ASSERT(ToRegister(instr->result()).is(eax)); 3016 3017 int arity = instr->arity(); 3018 RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT; 3019 Handle<Code> ic = 3020 isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode); 3021 __ mov(ecx, instr->name()); 3022 CallCode(ic, mode, instr); 3023} 3024 3025 3026void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { 3027 ASSERT(ToRegister(instr->result()).is(eax)); 3028 __ mov(edi, instr->target()); 3029 CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION); 3030} 3031 3032 3033void LCodeGen::DoCallNew(LCallNew* instr) { 3034 ASSERT(ToRegister(instr->context()).is(esi)); 3035 ASSERT(ToRegister(instr->constructor()).is(edi)); 3036 ASSERT(ToRegister(instr->result()).is(eax)); 3037 3038 Handle<Code> builtin = isolate()->builtins()->JSConstructCall(); 3039 __ Set(eax, Immediate(instr->arity())); 3040 CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr); 3041} 3042 3043 3044void LCodeGen::DoCallRuntime(LCallRuntime* instr) { 3045 CallRuntime(instr->function(), instr->arity(), instr); 3046} 3047 3048 3049void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { 3050 Register object = ToRegister(instr->object()); 3051 Register value = ToRegister(instr->value()); 3052 int 
offset = instr->offset(); 3053 3054 if (!instr->transition().is_null()) { 3055 __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition()); 3056 } 3057 3058 // Do the store. 3059 if (instr->is_in_object()) { 3060 __ mov(FieldOperand(object, offset), value); 3061 if (instr->needs_write_barrier()) { 3062 Register temp = ToRegister(instr->TempAt(0)); 3063 // Update the write barrier for the object for in-object properties. 3064 __ RecordWrite(object, offset, value, temp); 3065 } 3066 } else { 3067 Register temp = ToRegister(instr->TempAt(0)); 3068 __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset)); 3069 __ mov(FieldOperand(temp, offset), value); 3070 if (instr->needs_write_barrier()) { 3071 // Update the write barrier for the properties array. 3072 // object is used as a scratch register. 3073 __ RecordWrite(temp, offset, value, object); 3074 } 3075 } 3076} 3077 3078 3079void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 3080 ASSERT(ToRegister(instr->context()).is(esi)); 3081 ASSERT(ToRegister(instr->object()).is(edx)); 3082 ASSERT(ToRegister(instr->value()).is(eax)); 3083 3084 __ mov(ecx, instr->name()); 3085 Handle<Code> ic = instr->strict_mode() 3086 ? isolate()->builtins()->StoreIC_Initialize_Strict() 3087 : isolate()->builtins()->StoreIC_Initialize(); 3088 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3089} 3090 3091 3092void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { 3093 if (instr->index()->IsConstantOperand()) { 3094 __ cmp(ToOperand(instr->length()), 3095 ToImmediate(LConstantOperand::cast(instr->index()))); 3096 DeoptimizeIf(below_equal, instr->environment()); 3097 } else { 3098 __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); 3099 DeoptimizeIf(above_equal, instr->environment()); 3100 } 3101} 3102 3103 3104void LCodeGen::DoStoreKeyedSpecializedArrayElement( 3105 LStoreKeyedSpecializedArrayElement* instr) { 3106 JSObject::ElementsKind elements_kind = instr->elements_kind(); 3107 Operand operand(BuildFastArrayOperand(instr->external_pointer(), 3108 instr->key(), elements_kind, 0)); 3109 if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) { 3110 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); 3111 __ movss(operand, xmm0); 3112 } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) { 3113 __ movdbl(operand, ToDoubleRegister(instr->value())); 3114 } else { 3115 Register value = ToRegister(instr->value()); 3116 switch (elements_kind) { 3117 case JSObject::EXTERNAL_PIXEL_ELEMENTS: 3118 case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: 3119 case JSObject::EXTERNAL_BYTE_ELEMENTS: 3120 __ mov_b(operand, value); 3121 break; 3122 case JSObject::EXTERNAL_SHORT_ELEMENTS: 3123 case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: 3124 __ mov_w(operand, value); 3125 break; 3126 case JSObject::EXTERNAL_INT_ELEMENTS: 3127 case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: 3128 __ mov(operand, value); 3129 break; 3130 case JSObject::EXTERNAL_FLOAT_ELEMENTS: 3131 case JSObject::EXTERNAL_DOUBLE_ELEMENTS: 3132 case JSObject::FAST_ELEMENTS: 3133 case JSObject::FAST_DOUBLE_ELEMENTS: 3134 case JSObject::DICTIONARY_ELEMENTS: 3135 case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS: 3136 UNREACHABLE(); 3137 break; 3138 } 3139 } 3140} 3141 3142 3143void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { 3144 Register value = ToRegister(instr->value()); 3145 Register elements = ToRegister(instr->object()); 3146 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; 3147 3148 // Do the store. 
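  // A constant key is folded into the operand's displacement (key *
  // kPointerSize + FixedArray::kHeaderSize); otherwise the key register is
  // used as a scaled (times_pointer_size) index.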
3149 if (instr->key()->IsConstantOperand()) { 3150 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); 3151 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 3152 int offset = 3153 ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize; 3154 __ mov(FieldOperand(elements, offset), value); 3155 } else { 3156 __ mov(FieldOperand(elements, 3157 key, 3158 times_pointer_size, 3159 FixedArray::kHeaderSize), 3160 value); 3161 } 3162 3163 if (instr->hydrogen()->NeedsWriteBarrier()) { 3164 // Compute address of modified element and store it into key register. 3165 __ lea(key, 3166 FieldOperand(elements, 3167 key, 3168 times_pointer_size, 3169 FixedArray::kHeaderSize)); 3170 __ RecordWrite(elements, key, value); 3171 } 3172} 3173 3174 3175void LCodeGen::DoStoreKeyedFastDoubleElement( 3176 LStoreKeyedFastDoubleElement* instr) { 3177 XMMRegister value = ToDoubleRegister(instr->value()); 3178 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; 3179 Label have_value; 3180 3181 __ ucomisd(value, value); 3182 __ j(parity_odd, &have_value); // NaN. 3183 3184 ExternalReference canonical_nan_reference = 3185 ExternalReference::address_of_canonical_non_hole_nan(); 3186 __ movdbl(value, Operand::StaticVariable(canonical_nan_reference)); 3187 __ bind(&have_value); 3188 3189 Operand double_store_operand = BuildFastArrayOperand( 3190 instr->elements(), instr->key(), JSObject::FAST_DOUBLE_ELEMENTS, 3191 FixedDoubleArray::kHeaderSize - kHeapObjectTag); 3192 __ movdbl(double_store_operand, value); 3193} 3194 3195 3196void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { 3197 ASSERT(ToRegister(instr->context()).is(esi)); 3198 ASSERT(ToRegister(instr->object()).is(edx)); 3199 ASSERT(ToRegister(instr->key()).is(ecx)); 3200 ASSERT(ToRegister(instr->value()).is(eax)); 3201 3202 Handle<Code> ic = instr->strict_mode() 3203 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() 3204 : isolate()->builtins()->KeyedStoreIC_Initialize(); 3205 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3206} 3207 3208 3209void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { 3210 class DeferredStringCharCodeAt: public LDeferredCode { 3211 public: 3212 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) 3213 : LDeferredCode(codegen), instr_(instr) { } 3214 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } 3215 private: 3216 LStringCharCodeAt* instr_; 3217 }; 3218 3219 Register string = ToRegister(instr->string()); 3220 Register index = ToRegister(instr->index()); 3221 Register result = ToRegister(instr->result()); 3222 3223 DeferredStringCharCodeAt* deferred = 3224 new DeferredStringCharCodeAt(this, instr); 3225 3226 // Fetch the instance type of the receiver into result register. 3227 __ mov(result, FieldOperand(string, HeapObject::kMapOffset)); 3228 __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset)); 3229 3230 // We need special handling for indirect strings. 3231 Label check_sequential; 3232 __ test(result, Immediate(kIsIndirectStringMask)); 3233 __ j(zero, &check_sequential, Label::kNear); 3234 3235 // Dispatch on the indirect string shape: slice or cons. 3236 Label cons_string; 3237 __ test(result, Immediate(kSlicedNotConsMask)); 3238 __ j(zero, &cons_string, Label::kNear); 3239 3240 // Handle slices. 
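  // A sliced string is a (parent, offset) view of another string: add the
  // slice offset to the index and continue the lookup on the parent string.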
3241 Label indirect_string_loaded; 3242 __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset)); 3243 __ SmiUntag(result); 3244 __ add(index, Operand(result)); 3245 __ mov(string, FieldOperand(string, SlicedString::kParentOffset)); 3246 __ jmp(&indirect_string_loaded, Label::kNear); 3247 3248 // Handle conses. 3249 // Check whether the right hand side is the empty string (i.e. if 3250 // this is really a flat string in a cons string). If that is not 3251 // the case we would rather go to the runtime system now to flatten 3252 // the string. 3253 __ bind(&cons_string); 3254 __ cmp(FieldOperand(string, ConsString::kSecondOffset), 3255 Immediate(factory()->empty_string())); 3256 __ j(not_equal, deferred->entry()); 3257 __ mov(string, FieldOperand(string, ConsString::kFirstOffset)); 3258 3259 __ bind(&indirect_string_loaded); 3260 __ mov(result, FieldOperand(string, HeapObject::kMapOffset)); 3261 __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset)); 3262 3263 // Check whether the string is sequential. The only non-sequential 3264 // shapes we support have just been unwrapped above. 3265 __ bind(&check_sequential); 3266 STATIC_ASSERT(kSeqStringTag == 0); 3267 __ test(result, Immediate(kStringRepresentationMask)); 3268 __ j(not_zero, deferred->entry()); 3269 3270 // Dispatch on the encoding: ASCII or two-byte. 3271 Label ascii_string; 3272 STATIC_ASSERT(kAsciiStringTag != 0); 3273 __ test(result, Immediate(kStringEncodingMask)); 3274 __ j(not_zero, &ascii_string, Label::kNear); 3275 3276 // Two-byte string. 3277 // Load the two-byte character code into the result register. 3278 Label done; 3279 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); 3280 __ movzx_w(result, FieldOperand(string, 3281 index, 3282 times_2, 3283 SeqTwoByteString::kHeaderSize)); 3284 __ jmp(&done, Label::kNear); 3285 3286 // ASCII string. 3287 // Load the byte into the result register. 3288 __ bind(&ascii_string); 3289 __ movzx_b(result, FieldOperand(string, 3290 index, 3291 times_1, 3292 SeqAsciiString::kHeaderSize)); 3293 __ bind(&done); 3294 __ bind(deferred->exit()); 3295} 3296 3297 3298void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { 3299 Register string = ToRegister(instr->string()); 3300 Register result = ToRegister(instr->result()); 3301 3302 // TODO(3095996): Get rid of this. For now, we need to make the 3303 // result register contain a valid pointer because it is already 3304 // contained in the register pointer map. 3305 __ Set(result, Immediate(0)); 3306 3307 PushSafepointRegistersScope scope(this); 3308 __ push(string); 3309 // Push the index as a smi. This is safe because of the checks in 3310 // DoStringCharCodeAt above. 
3311 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); 3312 if (instr->index()->IsConstantOperand()) { 3313 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 3314 __ push(Immediate(Smi::FromInt(const_index))); 3315 } else { 3316 Register index = ToRegister(instr->index()); 3317 __ SmiTag(index); 3318 __ push(index); 3319 } 3320 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, 3321 instr, instr->context()); 3322 if (FLAG_debug_code) { 3323 __ AbortIfNotSmi(eax); 3324 } 3325 __ SmiUntag(eax); 3326 __ StoreToSafepointRegisterSlot(result, eax); 3327} 3328 3329 3330void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { 3331 class DeferredStringCharFromCode: public LDeferredCode { 3332 public: 3333 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) 3334 : LDeferredCode(codegen), instr_(instr) { } 3335 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } 3336 private: 3337 LStringCharFromCode* instr_; 3338 }; 3339 3340 DeferredStringCharFromCode* deferred = 3341 new DeferredStringCharFromCode(this, instr); 3342 3343 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); 3344 Register char_code = ToRegister(instr->char_code()); 3345 Register result = ToRegister(instr->result()); 3346 ASSERT(!char_code.is(result)); 3347 3348 __ cmp(char_code, String::kMaxAsciiCharCode); 3349 __ j(above, deferred->entry()); 3350 __ Set(result, Immediate(factory()->single_character_string_cache())); 3351 __ mov(result, FieldOperand(result, 3352 char_code, times_pointer_size, 3353 FixedArray::kHeaderSize)); 3354 __ cmp(result, factory()->undefined_value()); 3355 __ j(equal, deferred->entry()); 3356 __ bind(deferred->exit()); 3357} 3358 3359 3360void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { 3361 Register char_code = ToRegister(instr->char_code()); 3362 Register result = ToRegister(instr->result()); 3363 3364 // TODO(3095996): Get rid of this. For now, we need to make the 3365 // result register contain a valid pointer because it is already 3366 // contained in the register pointer map. 
3367 __ Set(result, Immediate(0)); 3368 3369 PushSafepointRegistersScope scope(this); 3370 __ SmiTag(char_code); 3371 __ push(char_code); 3372 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); 3373 __ StoreToSafepointRegisterSlot(result, eax); 3374} 3375 3376 3377void LCodeGen::DoStringLength(LStringLength* instr) { 3378 Register string = ToRegister(instr->string()); 3379 Register result = ToRegister(instr->result()); 3380 __ mov(result, FieldOperand(string, String::kLengthOffset)); 3381} 3382 3383 3384void LCodeGen::DoStringAdd(LStringAdd* instr) { 3385 if (instr->left()->IsConstantOperand()) { 3386 __ push(ToImmediate(instr->left())); 3387 } else { 3388 __ push(ToOperand(instr->left())); 3389 } 3390 if (instr->right()->IsConstantOperand()) { 3391 __ push(ToImmediate(instr->right())); 3392 } else { 3393 __ push(ToOperand(instr->right())); 3394 } 3395 StringAddStub stub(NO_STRING_CHECK_IN_STUB); 3396 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 3397} 3398 3399 3400void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 3401 LOperand* input = instr->InputAt(0); 3402 ASSERT(input->IsRegister() || input->IsStackSlot()); 3403 LOperand* output = instr->result(); 3404 ASSERT(output->IsDoubleRegister()); 3405 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); 3406} 3407 3408 3409void LCodeGen::DoNumberTagI(LNumberTagI* instr) { 3410 class DeferredNumberTagI: public LDeferredCode { 3411 public: 3412 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) 3413 : LDeferredCode(codegen), instr_(instr) { } 3414 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } 3415 private: 3416 LNumberTagI* instr_; 3417 }; 3418 3419 LOperand* input = instr->InputAt(0); 3420 ASSERT(input->IsRegister() && input->Equals(instr->result())); 3421 Register reg = ToRegister(input); 3422 3423 DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr); 3424 __ SmiTag(reg); 3425 __ j(overflow, deferred->entry()); 3426 __ bind(deferred->exit()); 3427} 3428 3429 3430void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { 3431 Label slow; 3432 Register reg = ToRegister(instr->InputAt(0)); 3433 Register tmp = reg.is(eax) ? ecx : eax; 3434 3435 // Preserve the value of all registers. 3436 PushSafepointRegistersScope scope(this); 3437 3438 // There was overflow, so bits 30 and 31 of the original integer 3439 // disagree. Try to allocate a heap number in new space and store 3440 // the value in there. If that fails, call the runtime system. 3441 Label done; 3442 __ SmiUntag(reg); 3443 __ xor_(reg, 0x80000000); 3444 __ cvtsi2sd(xmm0, Operand(reg)); 3445 if (FLAG_inline_new) { 3446 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); 3447 __ jmp(&done, Label::kNear); 3448 } 3449 3450 // Slow case: Call the runtime system to do the number allocation. 3451 __ bind(&slow); 3452 3453 // TODO(3095996): Put a valid pointer value in the stack slot where the result 3454 // register is stored, as this register is in the pointer map, but contains an 3455 // integer value. 3456 __ StoreToSafepointRegisterSlot(reg, Immediate(0)); 3457 // NumberTagI and NumberTagD use the context from the frame, rather than 3458 // the environment's HContext or HInlinedContext value. 3459 // They only call Runtime::kAllocateHeapNumber. 3460 // The corresponding HChange instructions are added in a phase that does 3461 // not have easy access to the local context. 
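  // Load the context from the fixed frame slot where the prologue saved it.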
3462 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 3463 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 3464 RecordSafepointWithRegisters( 3465 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); 3466 if (!reg.is(eax)) __ mov(reg, eax); 3467 3468 // Done. Put the value in xmm0 into the value of the allocated heap 3469 // number. 3470 __ bind(&done); 3471 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); 3472 __ StoreToSafepointRegisterSlot(reg, reg); 3473} 3474 3475 3476void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 3477 class DeferredNumberTagD: public LDeferredCode { 3478 public: 3479 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 3480 : LDeferredCode(codegen), instr_(instr) { } 3481 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } 3482 private: 3483 LNumberTagD* instr_; 3484 }; 3485 3486 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); 3487 Register reg = ToRegister(instr->result()); 3488 Register tmp = ToRegister(instr->TempAt(0)); 3489 3490 DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr); 3491 if (FLAG_inline_new) { 3492 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); 3493 } else { 3494 __ jmp(deferred->entry()); 3495 } 3496 __ bind(deferred->exit()); 3497 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); 3498} 3499 3500 3501void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 3502 // TODO(3095996): Get rid of this. For now, we need to make the 3503 // result register contain a valid pointer because it is already 3504 // contained in the register pointer map. 3505 Register reg = ToRegister(instr->result()); 3506 __ Set(reg, Immediate(0)); 3507 3508 PushSafepointRegistersScope scope(this); 3509 // NumberTagI and NumberTagD use the context from the frame, rather than 3510 // the environment's HContext or HInlinedContext value. 3511 // They only call Runtime::kAllocateHeapNumber. 3512 // The corresponding HChange instructions are added in a phase that does 3513 // not have easy access to the local context. 3514 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 3515 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 3516 RecordSafepointWithRegisters(instr->pointer_map(), 0, 3517 Safepoint::kNoDeoptimizationIndex); 3518 __ StoreToSafepointRegisterSlot(reg, eax); 3519} 3520 3521 3522void LCodeGen::DoSmiTag(LSmiTag* instr) { 3523 LOperand* input = instr->InputAt(0); 3524 ASSERT(input->IsRegister() && input->Equals(instr->result())); 3525 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); 3526 __ SmiTag(ToRegister(input)); 3527} 3528 3529 3530void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 3531 LOperand* input = instr->InputAt(0); 3532 ASSERT(input->IsRegister() && input->Equals(instr->result())); 3533 if (instr->needs_check()) { 3534 __ test(ToRegister(input), Immediate(kSmiTagMask)); 3535 DeoptimizeIf(not_zero, instr->environment()); 3536 } 3537 __ SmiUntag(ToRegister(input)); 3538} 3539 3540 3541void LCodeGen::EmitNumberUntagD(Register input_reg, 3542 XMMRegister result_reg, 3543 bool deoptimize_on_undefined, 3544 LEnvironment* env) { 3545 Label load_smi, done; 3546 3547 // Smi check. 3548 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); 3549 3550 // Heap number map check. 
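  // The map word is the first field of every heap object, so a single
  // compare against the canonical heap number map identifies heap numbers.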
3551   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
3552          factory()->heap_number_map());
3553   if (deoptimize_on_undefined) {
3554     DeoptimizeIf(not_equal, env);
3555   } else {
3556     Label heap_number;
3557     __ j(equal, &heap_number, Label::kNear);
3558
3559     __ cmp(input_reg, factory()->undefined_value());
3560     DeoptimizeIf(not_equal, env);
3561
3562     // Convert undefined to NaN.
3563     ExternalReference nan =
3564         ExternalReference::address_of_canonical_non_hole_nan();
3565     __ movdbl(result_reg, Operand::StaticVariable(nan));
3566     __ jmp(&done, Label::kNear);
3567
3568     __ bind(&heap_number);
3569   }
3570   // Heap number to XMM conversion.
3571   __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
3572   __ jmp(&done, Label::kNear);
3573
3574   // Smi to XMM conversion.
3575   __ bind(&load_smi);
3576   __ SmiUntag(input_reg);  // Untag smi before converting to float.
3577   __ cvtsi2sd(result_reg, Operand(input_reg));
3578   __ SmiTag(input_reg);  // Retag smi.
3579   __ bind(&done);
3580 }
3581
3582
3583 class DeferredTaggedToI: public LDeferredCode {
3584  public:
3585   DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
3586       : LDeferredCode(codegen), instr_(instr) { }
3587   virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
3588  private:
3589   LTaggedToI* instr_;
3590 };
3591
3592
3593 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
3594   Label done, heap_number;
3595   Register input_reg = ToRegister(instr->InputAt(0));
3596
3597   // Heap number map check.
3598   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
3599          factory()->heap_number_map());
3600
3601   if (instr->truncating()) {
3602     __ j(equal, &heap_number, Label::kNear);
3603     // Check for undefined. Undefined is converted to zero for truncating
3604     // conversions.
3605     __ cmp(input_reg, factory()->undefined_value());
3606     DeoptimizeIf(not_equal, instr->environment());
3607     __ mov(input_reg, 0);
3608     __ jmp(&done, Label::kNear);
3609
3610     __ bind(&heap_number);
3611     if (CpuFeatures::IsSupported(SSE3)) {
3612       CpuFeatures::Scope scope(SSE3);
3613       Label convert;
3614       // Use more powerful conversion when SSE3 is available.
3615       // Load x87 register with heap number.
3616       __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
3617       // Get exponent alone and check for too-big exponent.
3618       __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3619       __ and_(input_reg, HeapNumber::kExponentMask);
3620       const uint32_t kTooBigExponent =
3621           (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
3622       __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
3623       __ j(less, &convert, Label::kNear);
3624       // Pop FPU stack before deoptimizing.
3625       __ ffree(0);
3626       __ fincstp();
3627       DeoptimizeIf(no_condition, instr->environment());
3628
3629       // Reserve space for the 64-bit answer.
3630       __ bind(&convert);
3631       __ sub(Operand(esp), Immediate(kDoubleSize));
3632       // Do conversion, which cannot fail because we checked the exponent.
3633       __ fisttp_d(Operand(esp, 0));
3634       __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
3635       __ add(Operand(esp), Immediate(kDoubleSize));
3636     } else {
3637       XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
3638       __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3639       __ cvttsd2si(input_reg, Operand(xmm0));
3640       __ cmp(input_reg, 0x80000000u);
3641       __ j(not_equal, &done);
3642       // Check if the input was 0x80000000 (kMinInt).
3643       // If not, we got an overflow and we deoptimize.
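      // cvttsd2si yields 0x80000000 both for kMinInt itself and for any
      // out-of-range input, so compare the original double against kMinInt
      // to tell the two cases apart.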
3644       ExternalReference min_int = ExternalReference::address_of_min_int();
3645       __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
3646       __ ucomisd(xmm_temp, xmm0);
3647       DeoptimizeIf(not_equal, instr->environment());
3648       DeoptimizeIf(parity_even, instr->environment());  // NaN.
3649     }
3650   } else {
3651     // Deoptimize if we don't have a heap number.
3652     DeoptimizeIf(not_equal, instr->environment());
3653
3654     XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
3655     __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3656     __ cvttsd2si(input_reg, Operand(xmm0));
3657     __ cvtsi2sd(xmm_temp, Operand(input_reg));
3658     __ ucomisd(xmm0, xmm_temp);
3659     DeoptimizeIf(not_equal, instr->environment());
3660     DeoptimizeIf(parity_even, instr->environment());  // NaN.
3661     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3662       __ test(input_reg, Operand(input_reg));
3663       __ j(not_zero, &done);
3664       __ movmskpd(input_reg, xmm0);
3665       __ and_(input_reg, 1);
3666       DeoptimizeIf(not_zero, instr->environment());
3667     }
3668   }
3669   __ bind(&done);
3670 }
3671
3672
3673 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
3674   LOperand* input = instr->InputAt(0);
3675   ASSERT(input->IsRegister());
3676   ASSERT(input->Equals(instr->result()));
3677
3678   Register input_reg = ToRegister(input);
3679
3680   DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
3681
3682   // Smi check.
3683   __ JumpIfNotSmi(input_reg, deferred->entry());
3684
3685   // Smi to int32 conversion.
3686   __ SmiUntag(input_reg);  // Untag smi.
3687
3688   __ bind(deferred->exit());
3689 }
3690
3691
3692 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
3693   LOperand* input = instr->InputAt(0);
3694   ASSERT(input->IsRegister());
3695   LOperand* result = instr->result();
3696   ASSERT(result->IsDoubleRegister());
3697
3698   Register input_reg = ToRegister(input);
3699   XMMRegister result_reg = ToDoubleRegister(result);
3700
3701   EmitNumberUntagD(input_reg, result_reg,
3702                    instr->hydrogen()->deoptimize_on_undefined(),
3703                    instr->environment());
3704 }
3705
3706
3707 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
3708   LOperand* input = instr->InputAt(0);
3709   ASSERT(input->IsDoubleRegister());
3710   LOperand* result = instr->result();
3711   ASSERT(result->IsRegister());
3712
3713   XMMRegister input_reg = ToDoubleRegister(input);
3714   Register result_reg = ToRegister(result);
3715
3716   if (instr->truncating()) {
3717     // Performs a truncating conversion of a floating point number as used by
3718     // the JS bitwise operations.
3719     __ cvttsd2si(result_reg, Operand(input_reg));
3720     __ cmp(result_reg, 0x80000000u);
3721     if (CpuFeatures::IsSupported(SSE3)) {
3722       // This will deoptimize if the exponent of the input is out of range.
3723       CpuFeatures::Scope scope(SSE3);
3724       Label convert, done;
3725       __ j(not_equal, &done, Label::kNear);
3726       __ sub(Operand(esp), Immediate(kDoubleSize));
3727       __ movdbl(Operand(esp, 0), input_reg);
3728       // Get exponent alone and check for too-big exponent.
3729       __ mov(result_reg, Operand(esp, sizeof(int32_t)));
3730       __ and_(result_reg, HeapNumber::kExponentMask);
3731       const uint32_t kTooBigExponent =
3732           (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
3733       __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
3734       __ j(less, &convert, Label::kNear);
3735       __ add(Operand(esp), Immediate(kDoubleSize));
3736       DeoptimizeIf(no_condition, instr->environment());
3737       __ bind(&convert);
3738       // Do conversion, which cannot fail because we checked the exponent.
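      // fisttp (SSE3) truncates toward zero regardless of the current FPU
      // rounding mode and pops the x87 stack afterwards.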
3739 __ fld_d(Operand(esp, 0)); 3740 __ fisttp_d(Operand(esp, 0)); 3741 __ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result. 3742 __ add(Operand(esp), Immediate(kDoubleSize)); 3743 __ bind(&done); 3744 } else { 3745 Label done; 3746 Register temp_reg = ToRegister(instr->TempAt(0)); 3747 XMMRegister xmm_scratch = xmm0; 3748 3749 // If cvttsd2si succeeded, we're done. Otherwise, we attempt 3750 // manual conversion. 3751 __ j(not_equal, &done, Label::kNear); 3752 3753 // Get high 32 bits of the input in result_reg and temp_reg. 3754 __ pshufd(xmm_scratch, input_reg, 1); 3755 __ movd(Operand(temp_reg), xmm_scratch); 3756 __ mov(result_reg, temp_reg); 3757 3758 // Prepare negation mask in temp_reg. 3759 __ sar(temp_reg, kBitsPerInt - 1); 3760 3761 // Extract the exponent from result_reg and subtract adjusted 3762 // bias from it. The adjustment is selected in a way such that 3763 // when the difference is zero, the answer is in the low 32 bits 3764 // of the input, otherwise a shift has to be performed. 3765 __ shr(result_reg, HeapNumber::kExponentShift); 3766 __ and_(result_reg, 3767 HeapNumber::kExponentMask >> HeapNumber::kExponentShift); 3768 __ sub(Operand(result_reg), 3769 Immediate(HeapNumber::kExponentBias + 3770 HeapNumber::kExponentBits + 3771 HeapNumber::kMantissaBits)); 3772 // Don't handle big (> kMantissaBits + kExponentBits == 63) or 3773 // special exponents. 3774 DeoptimizeIf(greater, instr->environment()); 3775 3776 // Zero out the sign and the exponent in the input (by shifting 3777 // it to the left) and restore the implicit mantissa bit, 3778 // i.e. convert the input to unsigned int64 shifted left by 3779 // kExponentBits. 3780 ExternalReference minus_zero = ExternalReference::address_of_minus_zero(); 3781 // Minus zero has the most significant bit set and the other 3782 // bits cleared. 3783 __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero)); 3784 __ psllq(input_reg, HeapNumber::kExponentBits); 3785 __ por(input_reg, xmm_scratch); 3786 3787 // Get the amount to shift the input right in xmm_scratch. 3788 __ neg(result_reg); 3789 __ movd(xmm_scratch, Operand(result_reg)); 3790 3791 // Shift the input right and extract low 32 bits. 3792 __ psrlq(input_reg, xmm_scratch); 3793 __ movd(Operand(result_reg), input_reg); 3794 3795 // Use the prepared mask in temp_reg to negate the result if necessary. 3796 __ xor_(result_reg, Operand(temp_reg)); 3797 __ sub(result_reg, Operand(temp_reg)); 3798 __ bind(&done); 3799 } 3800 } else { 3801 Label done; 3802 __ cvttsd2si(result_reg, Operand(input_reg)); 3803 __ cvtsi2sd(xmm0, Operand(result_reg)); 3804 __ ucomisd(xmm0, input_reg); 3805 DeoptimizeIf(not_equal, instr->environment()); 3806 DeoptimizeIf(parity_even, instr->environment()); // NaN. 3807 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3808 // The integer converted back is equal to the original. We 3809 // only have to test if we got -0 as an input. 3810 __ test(result_reg, Operand(result_reg)); 3811 __ j(not_zero, &done, Label::kNear); 3812 __ movmskpd(result_reg, input_reg); 3813 // Bit 0 contains the sign of the double in input_reg. 3814 // If input was positive, we are ok and return 0, otherwise 3815 // deoptimize. 
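      // result_reg is zero here, so the input was +0.0 or -0.0; bit 0 of the
      // sign mask extracted above tells the two apart.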
3816       __ and_(result_reg, 1);
3817       DeoptimizeIf(not_zero, instr->environment());
3818     }
3819     __ bind(&done);
3820   }
3821 }
3822
3823
3824 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
3825   LOperand* input = instr->InputAt(0);
3826   __ test(ToOperand(input), Immediate(kSmiTagMask));
3827   DeoptimizeIf(not_zero, instr->environment());
3828 }
3829
3830
3831 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
3832   LOperand* input = instr->InputAt(0);
3833   __ test(ToOperand(input), Immediate(kSmiTagMask));
3834   DeoptimizeIf(zero, instr->environment());
3835 }
3836
3837
3838 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
3839   Register input = ToRegister(instr->InputAt(0));
3840   Register temp = ToRegister(instr->TempAt(0));
3841
3842   __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
3843
3844   if (instr->hydrogen()->is_interval_check()) {
3845     InstanceType first;
3846     InstanceType last;
3847     instr->hydrogen()->GetCheckInterval(&first, &last);
3848
3849     __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
3850             static_cast<int8_t>(first));
3851
3852     // If there is only one type in the interval, check for equality.
3853     if (first == last) {
3854       DeoptimizeIf(not_equal, instr->environment());
3855     } else {
3856       DeoptimizeIf(below, instr->environment());
3857       // Omit check for the last type.
3858       if (last != LAST_TYPE) {
3859         __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
3860                 static_cast<int8_t>(last));
3861         DeoptimizeIf(above, instr->environment());
3862       }
3863     }
3864   } else {
3865     uint8_t mask;
3866     uint8_t tag;
3867     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
3868
3869     if (IsPowerOf2(mask)) {
3870       ASSERT(tag == 0 || IsPowerOf2(tag));
3871       __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
3872       DeoptimizeIf(tag == 0 ?
not_zero : zero, instr->environment()); 3873 } else { 3874 __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); 3875 __ and_(temp, mask); 3876 __ cmpb(Operand(temp), tag); 3877 DeoptimizeIf(not_equal, instr->environment()); 3878 } 3879 } 3880} 3881 3882 3883void LCodeGen::DoCheckFunction(LCheckFunction* instr) { 3884 ASSERT(instr->InputAt(0)->IsRegister()); 3885 Operand operand = ToOperand(instr->InputAt(0)); 3886 __ cmp(operand, instr->hydrogen()->target()); 3887 DeoptimizeIf(not_equal, instr->environment()); 3888} 3889 3890 3891void LCodeGen::DoCheckMap(LCheckMap* instr) { 3892 LOperand* input = instr->InputAt(0); 3893 ASSERT(input->IsRegister()); 3894 Register reg = ToRegister(input); 3895 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), 3896 instr->hydrogen()->map()); 3897 DeoptimizeIf(not_equal, instr->environment()); 3898} 3899 3900 3901void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 3902 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); 3903 Register result_reg = ToRegister(instr->result()); 3904 __ ClampDoubleToUint8(value_reg, xmm0, result_reg); 3905} 3906 3907 3908void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 3909 ASSERT(instr->unclamped()->Equals(instr->result())); 3910 Register value_reg = ToRegister(instr->result()); 3911 __ ClampUint8(value_reg); 3912} 3913 3914 3915void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { 3916 ASSERT(instr->unclamped()->Equals(instr->result())); 3917 Register input_reg = ToRegister(instr->unclamped()); 3918 Label is_smi, done, heap_number; 3919 3920 __ JumpIfSmi(input_reg, &is_smi); 3921 3922 // Check for heap number 3923 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 3924 factory()->heap_number_map()); 3925 __ j(equal, &heap_number, Label::kNear); 3926 3927 // Check for undefined. Undefined is converted to zero for clamping 3928 // conversions. 3929 __ cmp(input_reg, factory()->undefined_value()); 3930 DeoptimizeIf(not_equal, instr->environment()); 3931 __ mov(input_reg, 0); 3932 __ jmp(&done, Label::kNear); 3933 3934 // Heap number 3935 __ bind(&heap_number); 3936 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); 3937 __ ClampDoubleToUint8(xmm0, xmm1, input_reg); 3938 __ jmp(&done, Label::kNear); 3939 3940 // smi 3941 __ bind(&is_smi); 3942 __ SmiUntag(input_reg); 3943 __ ClampUint8(input_reg); 3944 3945 __ bind(&done); 3946} 3947 3948 3949void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) { 3950 if (isolate()->heap()->InNewSpace(*object)) { 3951 Handle<JSGlobalPropertyCell> cell = 3952 isolate()->factory()->NewJSGlobalPropertyCell(object); 3953 __ mov(result, Operand::Cell(cell)); 3954 } else { 3955 __ mov(result, object); 3956 } 3957} 3958 3959 3960void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { 3961 Register reg = ToRegister(instr->TempAt(0)); 3962 3963 Handle<JSObject> holder = instr->holder(); 3964 Handle<JSObject> current_prototype = instr->prototype(); 3965 3966 // Load prototype object. 3967 LoadHeapObject(reg, current_prototype); 3968 3969 // Check prototype maps up to the holder. 3970 while (!current_prototype.is_identical_to(holder)) { 3971 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), 3972 Handle<Map>(current_prototype->map())); 3973 DeoptimizeIf(not_equal, instr->environment()); 3974 current_prototype = 3975 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); 3976 // Load next prototype object. 3977 LoadHeapObject(reg, current_prototype); 3978 } 3979 3980 // Check the holder map. 
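  // The loop above exits with current_prototype identical to the holder, so
  // this final compare checks the holder's own map.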
3981   __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
3982          Handle<Map>(current_prototype->map()));
3983   DeoptimizeIf(not_equal, instr->environment());
3984 }
3985
3986
3987 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
3988   ASSERT(ToRegister(instr->context()).is(esi));
3989   // Set up the parameters to the stub/runtime call.
3990   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3991   __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
3992   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
3993   __ push(Immediate(instr->hydrogen()->constant_elements()));
3994
3995   // Pick the right runtime function or stub to call.
3996   int length = instr->hydrogen()->length();
3997   if (instr->hydrogen()->IsCopyOnWrite()) {
3998     ASSERT(instr->hydrogen()->depth() == 1);
3999     FastCloneShallowArrayStub::Mode mode =
4000         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4001     FastCloneShallowArrayStub stub(mode, length);
4002     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4003   } else if (instr->hydrogen()->depth() > 1) {
4004     CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4005   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4006     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4007   } else {
4008     FastCloneShallowArrayStub::Mode mode =
4009         FastCloneShallowArrayStub::CLONE_ELEMENTS;
4010     FastCloneShallowArrayStub stub(mode, length);
4011     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4012   }
4013 }
4014
4015
4016 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4017   ASSERT(ToRegister(instr->context()).is(esi));
4018   // Set up the parameters to the stub/runtime call.
4019   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
4020   __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
4021   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
4022   __ push(Immediate(instr->hydrogen()->constant_properties()));
4023   int flags = instr->hydrogen()->fast_elements()
4024       ? ObjectLiteral::kFastElements
4025       : ObjectLiteral::kNoFlags;
4026   flags |= instr->hydrogen()->has_function()
4027       ? ObjectLiteral::kHasFunction
4028       : ObjectLiteral::kNoFlags;
4029   __ push(Immediate(Smi::FromInt(flags)));
4030
4031   // Pick the right runtime function to call.
4032   if (instr->hydrogen()->depth() > 1) {
4033     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
4034   } else {
4035     CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
4036   }
4037 }
4038
4039
4040 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4041   ASSERT(ToRegister(instr->InputAt(0)).is(eax));
4042   __ push(eax);
4043   CallRuntime(Runtime::kToFastProperties, 1, instr);
4044 }
4045
4046
4047 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4048   ASSERT(ToRegister(instr->context()).is(esi));
4049   Label materialized;
4050   // Registers will be used as follows:
4051   // edi = JS function.
4052   // ecx = literals array.
4053   // ebx = regexp literal.
4054   // eax = regexp literal clone.
4055   // esi = context.
4056   __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
4057   __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
4058   int literal_offset = FixedArray::kHeaderSize +
4059       instr->hydrogen()->literal_index() * kPointerSize;
4060   __ mov(ebx, FieldOperand(ecx, literal_offset));
4061   __ cmp(ebx, factory()->undefined_value());
4062   __ j(not_equal, &materialized, Label::kNear);
4063
4064   // Create the regexp literal using the runtime function.
4065   // The result will be in eax.
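  // Runtime::kMaterializeRegExpLiteral takes four arguments: the literals
  // array, the literal index, the pattern and the flags, pushed below.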
4066 __ push(ecx); 4067 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); 4068 __ push(Immediate(instr->hydrogen()->pattern())); 4069 __ push(Immediate(instr->hydrogen()->flags())); 4070 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); 4071 __ mov(ebx, eax); 4072 4073 __ bind(&materialized); 4074 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; 4075 Label allocated, runtime_allocate; 4076 __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT); 4077 __ jmp(&allocated); 4078 4079 __ bind(&runtime_allocate); 4080 __ push(ebx); 4081 __ push(Immediate(Smi::FromInt(size))); 4082 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); 4083 __ pop(ebx); 4084 4085 __ bind(&allocated); 4086 // Copy the content into the newly allocated memory. 4087 // (Unroll copy loop once for better throughput). 4088 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { 4089 __ mov(edx, FieldOperand(ebx, i)); 4090 __ mov(ecx, FieldOperand(ebx, i + kPointerSize)); 4091 __ mov(FieldOperand(eax, i), edx); 4092 __ mov(FieldOperand(eax, i + kPointerSize), ecx); 4093 } 4094 if ((size % (2 * kPointerSize)) != 0) { 4095 __ mov(edx, FieldOperand(ebx, size - kPointerSize)); 4096 __ mov(FieldOperand(eax, size - kPointerSize), edx); 4097 } 4098} 4099 4100 4101void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { 4102 ASSERT(ToRegister(instr->context()).is(esi)); 4103 // Use the fast case closure allocation code that allocates in new 4104 // space for nested functions that don't need literals cloning. 4105 Handle<SharedFunctionInfo> shared_info = instr->shared_info(); 4106 bool pretenure = instr->hydrogen()->pretenure(); 4107 if (!pretenure && shared_info->num_literals() == 0) { 4108 FastNewClosureStub stub( 4109 shared_info->strict_mode() ? kStrictMode : kNonStrictMode); 4110 __ push(Immediate(shared_info)); 4111 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4112 } else { 4113 __ push(Operand(ebp, StandardFrameConstants::kContextOffset)); 4114 __ push(Immediate(shared_info)); 4115 __ push(Immediate(pretenure 4116 ? 
factory()->true_value() 4117 : factory()->false_value())); 4118 CallRuntime(Runtime::kNewClosure, 3, instr); 4119 } 4120} 4121 4122 4123void LCodeGen::DoTypeof(LTypeof* instr) { 4124 LOperand* input = instr->InputAt(1); 4125 if (input->IsConstantOperand()) { 4126 __ push(ToImmediate(input)); 4127 } else { 4128 __ push(ToOperand(input)); 4129 } 4130 CallRuntime(Runtime::kTypeof, 1, instr); 4131} 4132 4133 4134void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { 4135 Register input = ToRegister(instr->InputAt(0)); 4136 int true_block = chunk_->LookupDestination(instr->true_block_id()); 4137 int false_block = chunk_->LookupDestination(instr->false_block_id()); 4138 Label* true_label = chunk_->GetAssemblyLabel(true_block); 4139 Label* false_label = chunk_->GetAssemblyLabel(false_block); 4140 4141 Condition final_branch_condition = EmitTypeofIs(true_label, 4142 false_label, 4143 input, 4144 instr->type_literal()); 4145 4146 EmitBranch(true_block, false_block, final_branch_condition); 4147} 4148 4149 4150Condition LCodeGen::EmitTypeofIs(Label* true_label, 4151 Label* false_label, 4152 Register input, 4153 Handle<String> type_name) { 4154 Condition final_branch_condition = no_condition; 4155 if (type_name->Equals(heap()->number_symbol())) { 4156 __ JumpIfSmi(input, true_label); 4157 __ cmp(FieldOperand(input, HeapObject::kMapOffset), 4158 factory()->heap_number_map()); 4159 final_branch_condition = equal; 4160 4161 } else if (type_name->Equals(heap()->string_symbol())) { 4162 __ JumpIfSmi(input, false_label); 4163 __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input); 4164 __ j(above_equal, false_label); 4165 __ test_b(FieldOperand(input, Map::kBitFieldOffset), 4166 1 << Map::kIsUndetectable); 4167 final_branch_condition = zero; 4168 4169 } else if (type_name->Equals(heap()->boolean_symbol())) { 4170 __ cmp(input, factory()->true_value()); 4171 __ j(equal, true_label); 4172 __ cmp(input, factory()->false_value()); 4173 final_branch_condition = equal; 4174 4175 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) { 4176 __ cmp(input, factory()->null_value()); 4177 final_branch_condition = equal; 4178 4179 } else if (type_name->Equals(heap()->undefined_symbol())) { 4180 __ cmp(input, factory()->undefined_value()); 4181 __ j(equal, true_label); 4182 __ JumpIfSmi(input, false_label); 4183 // Check for undetectable objects => true. 4184 __ mov(input, FieldOperand(input, HeapObject::kMapOffset)); 4185 __ test_b(FieldOperand(input, Map::kBitFieldOffset), 4186 1 << Map::kIsUndetectable); 4187 final_branch_condition = not_zero; 4188 4189 } else if (type_name->Equals(heap()->function_symbol())) { 4190 STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); 4191 __ JumpIfSmi(input, false_label); 4192 __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input); 4193 final_branch_condition = above_equal; 4194 4195 } else if (type_name->Equals(heap()->object_symbol())) { 4196 __ JumpIfSmi(input, false_label); 4197 if (!FLAG_harmony_typeof) { 4198 __ cmp(input, factory()->null_value()); 4199 __ j(equal, true_label); 4200 } 4201 __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input); 4202 __ j(below, false_label); 4203 __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); 4204 __ j(above, false_label); 4205 // Check for undetectable objects => false. 
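    // CmpObjectType above left the input's map in the input register, so the
    // bit field can be tested directly.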
4206 __ test_b(FieldOperand(input, Map::kBitFieldOffset), 4207 1 << Map::kIsUndetectable); 4208 final_branch_condition = zero; 4209 4210 } else { 4211 final_branch_condition = not_equal; 4212 __ jmp(false_label); 4213 // A dead branch instruction will be generated after this point. 4214 } 4215 4216 return final_branch_condition; 4217} 4218 4219 4220void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { 4221 Register temp = ToRegister(instr->TempAt(0)); 4222 int true_block = chunk_->LookupDestination(instr->true_block_id()); 4223 int false_block = chunk_->LookupDestination(instr->false_block_id()); 4224 4225 EmitIsConstructCall(temp); 4226 EmitBranch(true_block, false_block, equal); 4227} 4228 4229 4230void LCodeGen::EmitIsConstructCall(Register temp) { 4231 // Get the frame pointer for the calling frame. 4232 __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 4233 4234 // Skip the arguments adaptor frame if it exists. 4235 Label check_frame_marker; 4236 __ cmp(Operand(temp, StandardFrameConstants::kContextOffset), 4237 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 4238 __ j(not_equal, &check_frame_marker, Label::kNear); 4239 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset)); 4240 4241 // Check the marker in the calling frame. 4242 __ bind(&check_frame_marker); 4243 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset), 4244 Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); 4245} 4246 4247 4248void LCodeGen::DoLazyBailout(LLazyBailout* instr) { 4249 // No code for lazy bailout instruction. Used to capture environment after a 4250 // call for populating the safepoint data with deoptimization data. 4251} 4252 4253 4254void LCodeGen::DoDeoptimize(LDeoptimize* instr) { 4255 DeoptimizeIf(no_condition, instr->environment()); 4256} 4257 4258 4259void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { 4260 LOperand* obj = instr->object(); 4261 LOperand* key = instr->key(); 4262 __ push(ToOperand(obj)); 4263 if (key->IsConstantOperand()) { 4264 __ push(ToImmediate(key)); 4265 } else { 4266 __ push(ToOperand(key)); 4267 } 4268 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); 4269 LPointerMap* pointers = instr->pointer_map(); 4270 LEnvironment* env = instr->deoptimization_environment(); 4271 RecordPosition(pointers->position()); 4272 RegisterEnvironmentForDeoptimization(env); 4273 // Create safepoint generator that will also ensure enough space in the 4274 // reloc info for patching in deoptimization (since this is invoking a 4275 // builtin) 4276 SafepointGenerator safepoint_generator(this, 4277 pointers, 4278 env->deoptimization_index()); 4279 __ push(Immediate(Smi::FromInt(strict_mode_flag()))); 4280 __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator); 4281} 4282 4283 4284void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { 4285 { 4286 PushSafepointRegistersScope scope(this); 4287 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 4288 __ CallRuntimeSaveDoubles(Runtime::kStackGuard); 4289 RegisterLazyDeoptimization( 4290 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 4291 } 4292 4293 // The gap code includes the restoring of the safepoint registers. 
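  // Record the current pc so the safepoint table accounts for the code that
  // restores the registers.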
4294 int pc = masm()->pc_offset(); 4295 safepoints_.SetPcAfterGap(pc); 4296} 4297 4298 4299void LCodeGen::DoStackCheck(LStackCheck* instr) { 4300 class DeferredStackCheck: public LDeferredCode { 4301 public: 4302 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) 4303 : LDeferredCode(codegen), instr_(instr) { } 4304 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } 4305 private: 4306 LStackCheck* instr_; 4307 }; 4308 4309 if (instr->hydrogen()->is_function_entry()) { 4310 // Perform stack overflow check. 4311 Label done; 4312 ExternalReference stack_limit = 4313 ExternalReference::address_of_stack_limit(isolate()); 4314 __ cmp(esp, Operand::StaticVariable(stack_limit)); 4315 __ j(above_equal, &done, Label::kNear); 4316 4317 ASSERT(instr->context()->IsRegister()); 4318 ASSERT(ToRegister(instr->context()).is(esi)); 4319 StackCheckStub stub; 4320 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4321 __ bind(&done); 4322 } else { 4323 ASSERT(instr->hydrogen()->is_backwards_branch()); 4324 // Perform stack overflow check if this goto needs it before jumping. 4325 DeferredStackCheck* deferred_stack_check = 4326 new DeferredStackCheck(this, instr); 4327 ExternalReference stack_limit = 4328 ExternalReference::address_of_stack_limit(isolate()); 4329 __ cmp(esp, Operand::StaticVariable(stack_limit)); 4330 __ j(below, deferred_stack_check->entry()); 4331 __ bind(instr->done_label()); 4332 deferred_stack_check->SetExit(instr->done_label()); 4333 } 4334} 4335 4336 4337void LCodeGen::DoOsrEntry(LOsrEntry* instr) { 4338 // This is a pseudo-instruction that ensures that the environment here is 4339 // properly registered for deoptimization and records the assembler's PC 4340 // offset. 4341 LEnvironment* environment = instr->environment(); 4342 environment->SetSpilledRegisters(instr->SpilledRegisterArray(), 4343 instr->SpilledDoubleRegisterArray()); 4344 4345 // If the environment were already registered, we would have no way of 4346 // backpatching it with the spill slot operands. 4347 ASSERT(!environment->HasBeenRegistered()); 4348 RegisterEnvironmentForDeoptimization(environment); 4349 ASSERT(osr_pc_offset_ == -1); 4350 osr_pc_offset_ = masm()->pc_offset(); 4351} 4352 4353 4354void LCodeGen::DoIn(LIn* instr) { 4355 LOperand* obj = instr->object(); 4356 LOperand* key = instr->key(); 4357 if (key->IsConstantOperand()) { 4358 __ push(ToImmediate(key)); 4359 } else { 4360 __ push(ToOperand(key)); 4361 } 4362 if (obj->IsConstantOperand()) { 4363 __ push(ToImmediate(obj)); 4364 } else { 4365 __ push(ToOperand(obj)); 4366 } 4367 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); 4368 LPointerMap* pointers = instr->pointer_map(); 4369 LEnvironment* env = instr->deoptimization_environment(); 4370 RecordPosition(pointers->position()); 4371 RegisterEnvironmentForDeoptimization(env); 4372 // Create safepoint generator that will also ensure enough space in the 4373 // reloc info for patching in deoptimization (since this is invoking a 4374 // builtin) 4375 SafepointGenerator safepoint_generator(this, 4376 pointers, 4377 env->deoptimization_index()); 4378 __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator); 4379} 4380 4381 4382#undef __ 4383 4384} } // namespace v8::internal 4385 4386#endif // V8_TARGET_ARCH_IA32 4387