// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/arm64/lithium-codegen-arm64.h"

#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

// Emit code to branch if the given condition holds.
// The code generated here doesn't modify the flags, so they must have
// been set by some prior instructions.
//
// The EmitInverted function simply inverts the condition.
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
      : BranchGenerator(codegen),
        cond_(cond) { }

  virtual void Emit(Label* label) const {
    __ B(cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    if (cond_ != al) {
      __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
};


// Emit code to compare lhs and rhs and branch if the condition holds.
// This uses MacroAssembler's CompareAndBranch function so it will handle
// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
//
// EmitInverted still compares the two operands but inverts the condition.
class CompareAndBranch : public BranchGenerator {
 public:
  CompareAndBranch(LCodeGen* codegen,
                   Condition cond,
                   const Register& lhs,
                   const Operand& rhs)
      : BranchGenerator(codegen),
        cond_(cond),
        lhs_(lhs),
        rhs_(rhs) { }

  virtual void Emit(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);
  }

 private:
  Condition cond_;
  const Register& lhs_;
  const Operand& rhs_;
};


// Test the input with the given mask and branch if the condition holds.
// If the condition is 'eq' or 'ne' this will use MacroAssembler's
// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
// conversion to Tbz/Tbnz when possible.
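//
// For example (illustrative), a single-bit test with an 'eq' condition can
// collapse to one instruction:
//   TestAndBranch(codegen, eq, value, 1 << 3);  // Emit() -> Tbz value, #3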
class TestAndBranch : public BranchGenerator {
 public:
  TestAndBranch(LCodeGen* codegen,
                Condition cond,
                const Register& value,
                uint64_t mask)
      : BranchGenerator(codegen),
        cond_(cond),
        value_(value),
        mask_(mask) { }

  virtual void Emit(Label* label) const {
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(cond_, label);
    }
  }

  virtual void EmitInverted(Label* label) const {
    // The inverse of "all clear" is "any set" and vice versa.
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
  const Register& value_;
  uint64_t mask_;
};


// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
 public:
  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
                        const FPRegister& scratch)
      : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    // Compare with 0.0. Because scratch_ is positive, the result can be one of
    // nZCv (equal), nzCv (greater) or nzCV (unordered).
    __ Fcmp(scratch_, 0.0);
    __ B(gt, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);
    __ B(le, label);
  }

 private:
  const FPRegister& value_;
  const FPRegister& scratch_;
};


// Test the input and branch if it is a heap number.
class BranchIfHeapNumber : public BranchGenerator {
 public:
  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
      : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotHeapNumber(value_, label);
  }

 private:
  const Register& value_;
};


// Test the input and branch if it is the specified root value.
class BranchIfRoot : public BranchGenerator {
 public:
  BranchIfRoot(LCodeGen* codegen, const Register& value,
               Heap::RootListIndex index)
      : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotRoot(value_, index_, label);
  }

 private:
  const Register& value_;
  const Heap::RootListIndex index_;
};


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
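  // For example, an environment holding a register value, a stack slot and a
  // constant produces three Store* commands (StoreRegister, StoreStackSlot,
  // StoreLiteral) after the frame description written below.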
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);

  Assembler::BlockPoolsScope scope(masm_);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
    // Signal that we don't inline smi code before these stubs in the
    // optimizing code generator.
    InlineSmiCheckInfo::EmitNotInlined(masm());
  }
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, Operand(instr->arity()));
  __ Mov(x2, instr->hydrogen()->site());

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;

      // We might need to create a holey array; look at the first argument.
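      // The single argument is on top of the stack. Only a zero length
      // (smi 0, whose bit pattern is 0) lets us keep the packed kind; any
      // other length may introduce holes.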
      __ Peek(x10, 0);
      __ Cbz(x10, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ B(&done);
      __ Bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ Bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());

  DCHECK(ToRegister(instr->result()).is(x0));
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Mov(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}

void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to save just the callee-saved doubles? It
    // looks like it's saving all of them.
    FPRegister value = FPRegister::from_code(iterator.Current());
    __ Poke(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to restore just the callee-saved doubles? It
    // looks like it's restoring all of them.
    FPRegister value = FPRegister::from_code(iterator.Current());
    __ Peek(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
  }

  DCHECK(__ StackPointer().Is(jssp));
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(
          StackFrame::STUB,
          GetStackSlotCount() + TypedFrameConstants::kFixedSlotCount);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
      // Reserve space for the stack slots needed by the code.
      int slots = GetStackSlotCount();
      if (slots > 0) {
        __ Claim(slots, kPointerSize);
      }
    }
    frame_is_built_ = true;
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Allocate a local context if needed.
  if (info()->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in x1.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ Mov(x10, Operand(info()->scope()->GetScopeInfo(info()->isolate())));
      __ Push(x1, x10);
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ Push(x1);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);
    // Context is returned in x0. It replaces the context passed to us. It's
    // saved in the stack and kept live in cp.
    __ Mov(cp, x0);
    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        Register value = x0;
        Register scratch = x3;

        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                               (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ Ldr(value, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ Str(value, target);
        // Update the write barrier. This clobbers value and scratch.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, static_cast<int>(target.offset()),
                                    value, scratch, GetLinkRegisterState(),
                                    kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
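  // For example (illustrative), if the optimized frame needs 10 slots and the
  // unoptimized frame already occupies 4 of them, we only claim the 6
  // additional slots here.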
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Claim(slots);
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());

      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ Push(lr, fp);
        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
        __ Push(fp);
        __ Add(fp, __ StackPointer(),
               TypedFrameConstants::kFixedFrameSizeFromFp);
        Comment(";;; Deferred code");
      }

      code->Generate();

      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ Pop(xzr, fp, lr);
        frame_is_built_ = false;
      }

      __ B(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after deferred code because
  // deferred code generation is the last step which generates code. The two
  // following steps will only output data used by Crankshaft.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame, call_deopt_entry;

  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0]->address;

    UseScratchRegisterScope temps(masm());
    Register entry_offset = temps.AcquireX();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ Bind(&table_entry->label);

      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
      __ Mov(entry_offset, entry - base);

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        // Save lr before Bl, fp will be adjusted in the needs_frame code.
        __ Push(lr, fp);
        // Reuse the existing needs_frame code.
        __ Bl(&needs_frame);
      } else {
        // There is nothing special to do, so just continue to the second-level
        // table.
        __ Bl(&call_deopt_entry);
      }

      masm()->CheckConstPool(false, false);
    }

    if (needs_frame.is_linked()) {
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());

      Comment(";;; needs_frame common code");
      UseScratchRegisterScope temps(masm());
      Register stub_marker = temps.AcquireX();
      __ Bind(&needs_frame);
      __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
      __ Push(cp, stub_marker);
      __ Add(fp, __ StackPointer(), 2 * kPointerSize);
    }

    // Generate common code for calling the second-level deopt table.
    __ Bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    Register deopt_entry = temps.AcquireX();
    __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
                                RelocInfo::RUNTIME_ENTRY));
    __ Add(deopt_entry, deopt_entry, entry_offset);
    __ Br(deopt_entry);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  // We do not know how much data will be emitted for the safepoint table, so
  // force emission of the veneer pool.
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::DeoptimizeBranch(
    LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
    BranchType branch_type, Register reg, int bit,
    Deoptimizer::BailoutType* override_bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;
  }

  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);

  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Label not_zero;
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    __ Push(x0, x1, x2);
    __ Mrs(x2, NZCV);
    __ Mov(x0, count);
    __ Ldr(w1, MemOperand(x0));
    __ Subs(x1, x1, 1);
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
    __ Str(w1, MemOperand(x0));
    __ Pop(x2, x1, x0);
    DCHECK(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ Unreachable();

    __ Bind(&not_zero);
    __ Str(w1, MemOperand(x0));
    __ Msr(NZCV, x2);
    __ Pop(x2, x1, x0);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label dont_trap;
    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to build frame, or restore caller doubles.
  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(
            entry, deopt_info, bailout_type, !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ B(&jump_table_.last()->label, branch_type, reg, bit);
  }
}


void LCodeGen::Deoptimize(LInstruction* instr,
                          Deoptimizer::DeoptReason deopt_reason,
                          Deoptimizer::BailoutType* override_bailout_type) {
  DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
                   override_bailout_type);
}


void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
}


void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
                                Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
}


void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
                                   Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
}


void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
                                    Deoptimizer::DeoptReason deopt_reason) {
  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
  DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
                               Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
                                  Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
                                LInstruction* instr,
                                Deoptimizer::DeoptReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
                                   LInstruction* instr,
                                   Deoptimizer::DeoptReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
                                     Deoptimizer::DeoptReason deopt_reason) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
  __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
}


void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
                                  Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
}


void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
                                    Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


Register LCodeGen::ToRegister(LOperand* op) const {
  // TODO(all): support zero register results, as ToRegister32.
  DCHECK((op != NULL) && op->IsRegister());
  return Register::from_code(op->index());
}


Register LCodeGen::ToRegister32(LOperand* op) const {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    // If this is a constant operand, the result must be the zero register.
    DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
    return wzr;
  } else {
    return ToRegister(op).W();
  }
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK((op != NULL) && op->IsDoubleRegister());
  return DoubleRegister::from_code(op->index());
}


Operand LCodeGen::ToOperand(LOperand* op) {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


Operand LCodeGen::ToOperand32(LOperand* op) {
  DCHECK(op != NULL);
  if (op->IsRegister()) {
    return Operand(ToRegister32(op));
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      return Operand(constant->Integer32Value());
    } else {
      // Other constants not implemented.
      Abort(kToOperand32UnsupportedImmediate);
    }
  }
  // Other cases are not implemented.
  UNREACHABLE();
  return Operand(0);
}


static int64_t ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
  DCHECK(op != NULL);
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    int fp_offset = FrameSlotToFPOffset(op->index());
    // Loads and stores have a larger reach with positive offsets than with
    // negative ones. We try to access using jssp (positive offset) first,
    // then fall back to fp (negative offset) if that fails.
    //
    // We can reference a stack slot from jssp only if we know how much we've
    // put on the stack. We don't know this in the following cases:
    // - stack_mode != kCanUseStackPointer: this is the case when deferred
    //   code has saved the registers.
    // - saves_caller_doubles(): some double registers have been pushed, jssp
    //   references the end of the double registers and not the end of the
    //   stack slots.
    // In both of the cases above, we _could_ add the tracking information
    // required so that we can use jssp here, but in practice it isn't worth it.
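    // The conversion below relies on fp == jssp + jssp_offset_to_fp, so
    // (jssp + jssp_offset) and (fp + fp_offset) name the same slot; we use
    // jssp only when the offset fits a scaled-immediate load/store encoding.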
    if ((stack_mode == kCanUseStackPointer) &&
        !info()->saves_caller_doubles()) {
      int jssp_offset_to_fp =
          (pushed_arguments_ + GetTotalFrameSlotCount()) * kPointerSize -
          StandardFrameConstants::kFixedFrameSizeAboveFp;
      int jssp_offset = fp_offset + jssp_offset_to_fp;
      if (masm()->IsImmLSScaled(jssp_offset, LSDoubleWord)) {
        return MemOperand(masm()->StackPointer(), jssp_offset);
      }
    }
    return MemOperand(fp, fp_offset);
  } else {
    // Without an eager frame, retrieve the parameter relative to the
    // stack pointer.
    return MemOperand(masm()->StackPointer(),
                      ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


template <class LI>
Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
  if (shift_info->shift() == NO_SHIFT) {
    return ToOperand32(right);
  } else {
    return Operand(
        ToRegister32(right),
        shift_info->shift(),
        JSShiftAmountFromLConstant(shift_info->shift_amount()));
  }
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = nv;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


template<class InstrType>
void LCodeGen::EmitBranchGeneric(InstrType instr,
                                 const BranchGenerator& branch) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
  } else {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
    if (right_block != next_block) {
      __ B(chunk_->GetAssemblyLabel(right_block));
    }
  }
}


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  DCHECK((condition != al) && (condition != nv));
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitCompareAndBranch(InstrType instr,
                                    Condition condition,
                                    const Register& lhs,
                                    const Operand& rhs) {
  DCHECK((condition != al) && (condition != nv));
  CompareAndBranch branch(this, condition, lhs, rhs);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitTestAndBranch(InstrType instr,
                                 Condition condition,
                                 const Register& value,
                                 uint64_t mask) {
  DCHECK((condition != al) && (condition != nv));
  TestAndBranch branch(this, condition, value, mask);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
                                         const FPRegister& value,
                                         const FPRegister& scratch) {
  BranchIfNonZeroNumber branch(this, value, scratch);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
                                      const Register& value) {
  BranchIfHeapNumber branch(this, value);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfRoot(InstrType instr,
                                const Register& value,
                                Heap::RootListIndex index) {
  BranchIfRoot branch(this, value, index);
  EmitBranchGeneric(instr, branch);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) {
      resolver_.Resolve(move);
    }
  }
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  // The pointer to the arguments array comes from DoArgumentsElements.
  // It does not point directly to the arguments and there is an offset of
  // two words that we must take into account when accessing an argument.
  // Subtracting the index from length accounts for one, so we add one more.
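  // For example (illustrative), with length == 2 and index == 0 the argument
  // lives at arguments + (2 - 0 + 1) * kPointerSize, i.e. three words above
  // the arguments pointer.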
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int length = ToInteger32(LConstantOperand::cast(instr->length()));
    int offset = ((length - index) + 1) * kPointerSize;
    __ Ldr(result, MemOperand(arguments, offset));
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister32(instr->length());
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = index - 1;
    if (loc != 0) {
      __ Sub(result.W(), length, loc);
      __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
    } else {
      __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
    }
  } else {
    Register length = ToRegister32(instr->length());
    Operand index = ToOperand32(instr->index());
    __ Sub(result.W(), length, index);
    __ Add(result.W(), result.W(), 1);
    __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
  }
}


void LCodeGen::DoAddE(LAddE* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = Operand(x0);  // Dummy initialization.
  if (instr->hydrogen()->external_add_type() == AddOfExternalAndTagged) {
    right = Operand(ToRegister(instr->right()));
  } else if (instr->right()->IsConstantOperand()) {
    right = ToInteger32(LConstantOperand::cast(instr->right()));
  } else {
    right = Operand(ToRegister32(instr->right()), SXTW);
  }

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
  __ Add(result, left, right);
}


void LCodeGen::DoAddI(LAddI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Add(result, left, right);
  }
}


void LCodeGen::DoAddS(LAddS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Add(result, left, right);
  }
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate: public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = NO_ALLOCATION_FLAGS;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }

  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
  }
  DCHECK(!instr->hydrogen()->IsAllocationFolded());

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
  } else {
    Register size = ToRegister32(instr->size());
    __ Sxtw(size.X(), size);
    __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
  }

  __ Bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    Register start = temp1;
    Register end = temp2;
    Register filler = ToRegister(instr->temp3());

    __ Sub(start, result, kHeapObjectTag);

    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ Add(end, start, size);
    } else {
      __ Add(end, start, ToRegister(instr->size()));
    }
    __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
    __ InitializeFieldsWithFiller(start, end, filler);
  } else {
    DCHECK(instr->temp3() == NULL);
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  // We're in a SafepointRegistersScope so we can use any scratch registers.
  Register size = x0;
  if (instr->size()->IsConstantOperand()) {
    __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
  } else {
    __ SmiTag(size, ToRegister32(instr->size()).X());
  }
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Mov(x10, Smi::FromInt(flags));
  __ Push(size, x10);

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
    if (instr->hydrogen()->IsOldSpaceAllocation()) {
      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
      allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
    }
    // If the allocation folding dominator allocate triggered a GC, allocation
    // happened in the runtime. We have to reset the top pointer to virtually
    // undo the allocation.
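    // x0 holds the tagged address just returned by the runtime; its untagged
    // start address becomes the new allocation top, so the folded allocations
    // that follow are carved out of this same reservation.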
    ExternalReference allocation_top =
        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
    Register top_address = x10;
    __ Sub(x0, x0, Operand(kHeapObjectTag));
    __ Mov(top_address, Operand(allocation_top));
    __ Str(x0, MemOperand(top_address));
    __ Add(x0, x0, Operand(kHeapObjectTag));
  }
}

void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
  DCHECK(instr->hydrogen()->IsAllocationFolded());
  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
  Register result = ToRegister(instr->result());
  Register scratch1 = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  AllocationFlags flags = ALLOCATION_FOLDED;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  } else {
    Register size = ToRegister(instr->size());
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  }
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister32(instr->length());

  Register elements = ToRegister(instr->elements());
  Register scratch = x5;
  DCHECK(receiver.Is(x0));  // Used for parameter count.
  DCHECK(function.Is(x1));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).Is(x0));
  DCHECK(instr->IsMarkedAsCall());

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ Cmp(length, kArgumentsLimit);
  DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ Push(receiver);
  Register argc = receiver;
  receiver = NoReg;
  __ Sxtw(argc, length);
  // The arguments are at a one pointer size offset from elements.
  __ Add(elements, elements, 1 * kPointerSize);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Cbz(length, &invoke);
  __ Bind(&loop);
  __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
  __ Push(scratch);
  __ Subs(length, length, 1);
  __ B(ne, &loop);

  __ Bind(&invoke);

  InvokeFlag flag = CALL_FUNCTION;
  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
    DCHECK(!info()->saves_caller_doubles());
    // TODO(ishell): drop current frame before pushing arguments to the stack.
    flag = JUMP_FUNCTION;
    ParameterCount actual(x0);
    // It is safe to use x3, x4 and x5 as scratch registers here given that
    // 1) we are not going to return to caller function anyway,
    // 2) x3 (new.target) will be initialized below.
    PrepareForTailCall(actual, x3, x4, x5);
  }

  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in argc (receiver) which is x0, as
  // expected by InvokeFunction.
  ParameterCount actual(argc);
  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // When we are inside an inlined function, the arguments are the last
    // things that have been pushed on the stack. Therefore the arguments
    // array can be accessed directly from jssp.
    // However, in the normal case, it is accessed via fp but there are two
    // words on the stack between fp and the arguments (the saved lr and fp)
    // and the LAccessArgumentsAt implementation takes that into account.
    // In the inlined case we need to subtract the size of 2 words from jssp
    // to get a pointer which will work well with LAccessArgumentsAt.
    DCHECK(masm()->StackPointer().Is(jssp));
    __ Sub(result, jssp, 2 * kPointerSize);
  } else if (instr->hydrogen()->arguments_adaptor()) {
    DCHECK(instr->temp() != NULL);
    Register previous_fp = ToRegister(instr->temp());

    __ Ldr(previous_fp,
           MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ Ldr(result, MemOperand(previous_fp,
                              CommonFrameConstants::kContextOrFrameTypeOffset));
    __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ Csel(result, fp, previous_fp, ne);
  } else {
    __ Mov(result, fp);
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister32(instr->result());
  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ Cmp(fp, elements);
  __ Mov(result, scope()->num_parameters());
  __ B(eq, &done);

  // Arguments adaptor frame present. Get argument length from there.
  __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ Ldr(result,
         UntagSmiMemOperand(result.X(),
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
  __ Bind(&done);
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());

  switch (instr->op()) {
    case Token::ADD: __ Fadd(result, left, right); break;
    case Token::SUB: __ Fsub(result, left, right); break;
    case Token::MUL: __ Fmul(result, left, right); break;
    case Token::DIV: __ Fdiv(result, left, right); break;
    case Token::MOD: {
      // The ECMA-262 remainder operator is the remainder from a truncating
      // (round-towards-zero) division. Note that this differs from IEEE-754.
      //
      // TODO(jbramley): See if it's possible to do this inline, rather than by
      // calling a helper function. With frintz (to produce the intermediate
      // quotient) and fmsub (to calculate the remainder without loss of
      // precision), it should be possible. However, we would need support for
      // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
      // support that yet.
      DCHECK(left.Is(d0));
      DCHECK(right.Is(d1));
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      DCHECK(result.Is(d0));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(x1));
  DCHECK(ToRegister(instr->right()).is(x0));
  DCHECK(ToRegister(instr->result()).is(x0));

  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoBitI(LBitI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR: __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoBitS(LBitS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR: __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
  DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
  if (instr->index()->IsConstantOperand()) {
    Operand index = ToOperand32(instr->index());
    Register length = ToRegister32(instr->length());
    __ Cmp(length, index);
    cond = CommuteCondition(cond);
  } else {
    Register index = ToRegister32(instr->index());
    Operand length = ToOperand32(instr->length());
    __ Cmp(index, length);
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
  } else {
    DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
  }
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);

  if (r.IsInteger32()) {
    DCHECK(!info()->IsStub());
    EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
  } else if (r.IsSmi()) {
    DCHECK(!info()->IsStub());
    STATIC_ASSERT(kSmiTag == 0);
    EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
  } else if (r.IsDouble()) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
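    // (BranchIfNonZeroNumber compares fabs(value) with 0.0 and branches on
    // 'gt', which is false both for +/-0.0 and for NaN, where the comparison
    // is unordered.)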
1756 EmitBranchIfNonZeroNumber(instr, value, double_scratch()); 1757 } else { 1758 DCHECK(r.IsTagged()); 1759 Register value = ToRegister(instr->value()); 1760 HType type = instr->hydrogen()->value()->type(); 1761 1762 if (type.IsBoolean()) { 1763 DCHECK(!info()->IsStub()); 1764 __ CompareRoot(value, Heap::kTrueValueRootIndex); 1765 EmitBranch(instr, eq); 1766 } else if (type.IsSmi()) { 1767 DCHECK(!info()->IsStub()); 1768 EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0)); 1769 } else if (type.IsJSArray()) { 1770 DCHECK(!info()->IsStub()); 1771 EmitGoto(instr->TrueDestination(chunk())); 1772 } else if (type.IsHeapNumber()) { 1773 DCHECK(!info()->IsStub()); 1774 __ Ldr(double_scratch(), FieldMemOperand(value, 1775 HeapNumber::kValueOffset)); 1776 // Test the double value. Zero and NaN are false. 1777 EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch()); 1778 } else if (type.IsString()) { 1779 DCHECK(!info()->IsStub()); 1780 Register temp = ToRegister(instr->temp1()); 1781 __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset)); 1782 EmitCompareAndBranch(instr, ne, temp, 0); 1783 } else { 1784 ToBooleanICStub::Types expected = 1785 instr->hydrogen()->expected_input_types(); 1786 // Avoid deopts in the case where we've never executed this path before. 1787 if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic(); 1788 1789 if (expected.Contains(ToBooleanICStub::UNDEFINED)) { 1790 // undefined -> false. 1791 __ JumpIfRoot( 1792 value, Heap::kUndefinedValueRootIndex, false_label); 1793 } 1794 1795 if (expected.Contains(ToBooleanICStub::BOOLEAN)) { 1796 // Boolean -> its value. 1797 __ JumpIfRoot( 1798 value, Heap::kTrueValueRootIndex, true_label); 1799 __ JumpIfRoot( 1800 value, Heap::kFalseValueRootIndex, false_label); 1801 } 1802 1803 if (expected.Contains(ToBooleanICStub::NULL_TYPE)) { 1804 // 'null' -> false. 1805 __ JumpIfRoot( 1806 value, Heap::kNullValueRootIndex, false_label); 1807 } 1808 1809 if (expected.Contains(ToBooleanICStub::SMI)) { 1810 // Smis: 0 -> false, all others -> true. 1811 DCHECK(Smi::FromInt(0) == 0); 1812 __ Cbz(value, false_label); 1813 __ JumpIfSmi(value, true_label); 1814 } else if (expected.NeedsMap()) { 1815 // If we need a map later and have a smi, deopt. 1816 DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi); 1817 } 1818 1819 Register map = NoReg; 1820 Register scratch = NoReg; 1821 1822 if (expected.NeedsMap()) { 1823 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); 1824 map = ToRegister(instr->temp1()); 1825 scratch = ToRegister(instr->temp2()); 1826 1827 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); 1828 1829 if (expected.CanBeUndetectable()) { 1830 // Undetectable -> false. 1831 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); 1832 __ TestAndBranchIfAnySet( 1833 scratch, 1 << Map::kIsUndetectable, false_label); 1834 } 1835 } 1836 1837 if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) { 1838 // spec object -> true. 1839 __ CompareInstanceType(map, scratch, FIRST_JS_RECEIVER_TYPE); 1840 __ B(ge, true_label); 1841 } 1842 1843 if (expected.Contains(ToBooleanICStub::STRING)) { 1844 // String value -> false iff empty. 1845 Label not_string; 1846 __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE); 1847 __ B(ge, &not_string); 1848 __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset)); 1849 __ Cbz(scratch, false_label); 1850 __ B(true_label); 1851 __ Bind(&not_string); 1852 } 1853 1854 if (expected.Contains(ToBooleanICStub::SYMBOL)) { 1855 // Symbol value -> true.
1856 __ CompareInstanceType(map, scratch, SYMBOL_TYPE); 1857 __ B(eq, true_label); 1858 } 1859 1860 if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) { 1861 // SIMD value -> true. 1862 __ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE); 1863 __ B(eq, true_label); 1864 } 1865 1866 if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) { 1867 Label not_heap_number; 1868 __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number); 1869 1870 __ Ldr(double_scratch(), 1871 FieldMemOperand(value, HeapNumber::kValueOffset)); 1872 __ Fcmp(double_scratch(), 0.0); 1873 // If we got a NaN (overflow bit is set), jump to the false branch. 1874 __ B(vs, false_label); 1875 __ B(eq, false_label); 1876 __ B(true_label); 1877 __ Bind(&not_heap_number); 1878 } 1879 1880 if (!expected.IsGeneric()) { 1881 // We've seen something for the first time -> deopt. 1882 // This can only happen if we are not generic already. 1883 Deoptimize(instr, Deoptimizer::kUnexpectedObject); 1884 } 1885 } 1886 } 1887} 1888 1889void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 1890 int formal_parameter_count, int arity, 1891 bool is_tail_call, LInstruction* instr) { 1892 bool dont_adapt_arguments = 1893 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; 1894 bool can_invoke_directly = 1895 dont_adapt_arguments || formal_parameter_count == arity; 1896 1897 // The function interface relies on the following register assignments. 1898 Register function_reg = x1; 1899 Register arity_reg = x0; 1900 1901 LPointerMap* pointers = instr->pointer_map(); 1902 1903 if (FLAG_debug_code) { 1904 Label is_not_smi; 1905 // Try to confirm that function_reg (x1) is a tagged pointer. 1906 __ JumpIfNotSmi(function_reg, &is_not_smi); 1907 __ Abort(kExpectedFunctionObject); 1908 __ Bind(&is_not_smi); 1909 } 1910 1911 if (can_invoke_directly) { 1912 // Change context. 1913 __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); 1914 1915 // Always initialize new target and number of actual arguments. 1916 __ LoadRoot(x3, Heap::kUndefinedValueRootIndex); 1917 __ Mov(arity_reg, arity); 1918 1919 bool is_self_call = function.is_identical_to(info()->closure()); 1920 1921 // Invoke function. 1922 if (is_self_call) { 1923 Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location())); 1924 if (is_tail_call) { 1925 __ Jump(self, RelocInfo::CODE_TARGET); 1926 } else { 1927 __ Call(self, RelocInfo::CODE_TARGET); 1928 } 1929 } else { 1930 __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset)); 1931 if (is_tail_call) { 1932 __ Jump(x10); 1933 } else { 1934 __ Call(x10); 1935 } 1936 } 1937 1938 if (!is_tail_call) { 1939 // Set up deoptimization. 1940 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 1941 } 1942 } else { 1943 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 1944 ParameterCount actual(arity); 1945 ParameterCount expected(formal_parameter_count); 1946 InvokeFlag flag = is_tail_call ?
JUMP_FUNCTION : CALL_FUNCTION; 1947 __ InvokeFunction(function_reg, expected, actual, flag, generator); 1948 } 1949} 1950 1951void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { 1952 DCHECK(instr->IsMarkedAsCall()); 1953 DCHECK(ToRegister(instr->result()).Is(x0)); 1954 1955 if (instr->hydrogen()->IsTailCall()) { 1956 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL); 1957 1958 if (instr->target()->IsConstantOperand()) { 1959 LConstantOperand* target = LConstantOperand::cast(instr->target()); 1960 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 1961 // TODO(all): on ARM we use a call descriptor to specify a storage mode 1962 // but on ARM64 we only have one storage mode so it isn't necessary. Check 1963 // this understanding is correct. 1964 __ Jump(code, RelocInfo::CODE_TARGET); 1965 } else { 1966 DCHECK(instr->target()->IsRegister()); 1967 Register target = ToRegister(instr->target()); 1968 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag); 1969 __ Br(target); 1970 } 1971 } else { 1972 LPointerMap* pointers = instr->pointer_map(); 1973 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 1974 1975 if (instr->target()->IsConstantOperand()) { 1976 LConstantOperand* target = LConstantOperand::cast(instr->target()); 1977 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 1978 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); 1979 // TODO(all): on ARM we use a call descriptor to specify a storage mode 1980 // but on ARM64 we only have one storage mode so it isn't necessary. Check 1981 // this understanding is correct. 1982 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None()); 1983 } else { 1984 DCHECK(instr->target()->IsRegister()); 1985 Register target = ToRegister(instr->target()); 1986 generator.BeforeCall(__ CallSize(target)); 1987 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag); 1988 __ Call(target); 1989 } 1990 generator.AfterCall(); 1991 } 1992 1993 RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta()); 1994} 1995 1996 1997void LCodeGen::DoCallRuntime(LCallRuntime* instr) { 1998 CallRuntime(instr->function(), instr->arity(), instr); 1999 RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta()); 2000} 2001 2002 2003void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { 2004 GenerateOsrPrologue(); 2005} 2006 2007 2008void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { 2009 Register temp = ToRegister(instr->temp()); 2010 { 2011 PushSafepointRegistersScope scope(this); 2012 __ Push(object); 2013 __ Mov(cp, 0); 2014 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); 2015 RecordSafepointWithRegisters( 2016 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); 2017 __ StoreToSafepointRegisterSlot(x0, temp); 2018 } 2019 DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed); 2020} 2021 2022 2023void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 2024 class DeferredCheckMaps: public LDeferredCode { 2025 public: 2026 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) 2027 : LDeferredCode(codegen), instr_(instr), object_(object) { 2028 SetExit(check_maps()); 2029 } 2030 virtual void Generate() { 2031 codegen()->DoDeferredInstanceMigration(instr_, object_); 2032 } 2033 Label* check_maps() { return &check_maps_; } 2034 virtual LInstruction* instr() { return instr_; } 2035 private: 2036 LCheckMaps* instr_; 2037 Label check_maps_; 2038 Register object_; 2039 }; 2040 2041 if (instr->hydrogen()->IsStabilityCheck()) { 2042 
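// For a stability check no code is emitted; instead we register a
// compilation dependency on each map, and the generated code is deoptimized
// later if any of those maps transitions.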
const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 2043 for (int i = 0; i < maps->size(); ++i) { 2044 AddStabilityDependency(maps->at(i).handle()); 2045 } 2046 return; 2047 } 2048 2049 Register object = ToRegister(instr->value()); 2050 Register map_reg = ToRegister(instr->temp()); 2051 2052 __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset)); 2053 2054 DeferredCheckMaps* deferred = NULL; 2055 if (instr->hydrogen()->HasMigrationTarget()) { 2056 deferred = new(zone()) DeferredCheckMaps(this, instr, object); 2057 __ Bind(deferred->check_maps()); 2058 } 2059 2060 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 2061 Label success; 2062 for (int i = 0; i < maps->size() - 1; i++) { 2063 Handle<Map> map = maps->at(i).handle(); 2064 __ CompareMap(map_reg, map); 2065 __ B(eq, &success); 2066 } 2067 Handle<Map> map = maps->at(maps->size() - 1).handle(); 2068 __ CompareMap(map_reg, map); 2069 2070 // We didn't match a map. 2071 if (instr->hydrogen()->HasMigrationTarget()) { 2072 __ B(ne, deferred->entry()); 2073 } else { 2074 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); 2075 } 2076 2077 __ Bind(&success); 2078} 2079 2080 2081void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 2082 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2083 DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi); 2084 } 2085} 2086 2087 2088void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 2089 Register value = ToRegister(instr->value()); 2090 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value)); 2091 DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi); 2092} 2093 2094 2095void LCodeGen::DoCheckArrayBufferNotNeutered( 2096 LCheckArrayBufferNotNeutered* instr) { 2097 UseScratchRegisterScope temps(masm()); 2098 Register view = ToRegister(instr->view()); 2099 Register scratch = temps.AcquireX(); 2100 2101 __ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); 2102 __ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); 2103 __ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift)); 2104 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds); 2105} 2106 2107 2108void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 2109 Register input = ToRegister(instr->value()); 2110 Register scratch = ToRegister(instr->temp()); 2111 2112 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 2113 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 2114 2115 if (instr->hydrogen()->is_interval_check()) { 2116 InstanceType first, last; 2117 instr->hydrogen()->GetCheckInterval(&first, &last); 2118 2119 __ Cmp(scratch, first); 2120 if (first == last) { 2121 // If there is only one type in the interval check for equality. 2122 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); 2123 } else if (last == LAST_TYPE) { 2124 // We don't need to compare with the higher bound of the interval. 2125 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType); 2126 } else { 2127 // If we are below the lower bound, set the C flag and clear the Z flag 2128 // to force a deopt. 
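// In effect: if 'hs' holds from the Cmp above (scratch >= first), compare
// scratch with last; otherwise force the flags to nzCv (C set, Z clear),
// which satisfies 'hi' as well. Either way 'hi' below means the type lies
// outside [first, last]. Roughly: if (!(first <= type && type <= last)) deopt.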
2129 __ Ccmp(scratch, last, CFlag, hs); 2130 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType); 2131 } 2132 } else { 2133 uint8_t mask; 2134 uint8_t tag; 2135 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 2136 2137 if (base::bits::IsPowerOfTwo32(mask)) { 2138 DCHECK((tag == 0) || (tag == mask)); 2139 if (tag == 0) { 2140 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr, 2141 Deoptimizer::kWrongInstanceType); 2142 } else { 2143 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr, 2144 Deoptimizer::kWrongInstanceType); 2145 } 2146 } else { 2147 if (tag == 0) { 2148 __ Tst(scratch, mask); 2149 } else { 2150 __ And(scratch, scratch, mask); 2151 __ Cmp(scratch, tag); 2152 } 2153 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); 2154 } 2155 } 2156} 2157 2158 2159void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 2160 DoubleRegister input = ToDoubleRegister(instr->unclamped()); 2161 Register result = ToRegister32(instr->result()); 2162 __ ClampDoubleToUint8(result, input, double_scratch()); 2163} 2164 2165 2166void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 2167 Register input = ToRegister32(instr->unclamped()); 2168 Register result = ToRegister32(instr->result()); 2169 __ ClampInt32ToUint8(result, input); 2170} 2171 2172 2173void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { 2174 Register input = ToRegister(instr->unclamped()); 2175 Register result = ToRegister32(instr->result()); 2176 Label done; 2177 2178 // Both smi and heap number cases are handled. 2179 Label is_not_smi; 2180 __ JumpIfNotSmi(input, &is_not_smi); 2181 __ SmiUntag(result.X(), input); 2182 __ ClampInt32ToUint8(result); 2183 __ B(&done); 2184 2185 __ Bind(&is_not_smi); 2186 2187 // Check for heap number. 2188 Label is_heap_number; 2189 __ JumpIfHeapNumber(input, &is_heap_number); 2190 2191 // Check for undefined. Undefined is converted to zero for the clamping conversion. 2192 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, 2193 Deoptimizer::kNotAHeapNumberUndefined); 2194 __ Mov(result, 0); 2195 __ B(&done); 2196 2197 // Heap number case.
2198 __ Bind(&is_heap_number); 2199 DoubleRegister dbl_scratch = double_scratch(); 2200 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1()); 2201 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); 2202 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); 2203 2204 __ Bind(&done); 2205} 2206 2207 2208void LCodeGen::DoDoubleBits(LDoubleBits* instr) { 2209 DoubleRegister value_reg = ToDoubleRegister(instr->value()); 2210 Register result_reg = ToRegister(instr->result()); 2211 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { 2212 __ Fmov(result_reg, value_reg); 2213 __ Lsr(result_reg, result_reg, 32); 2214 } else { 2215 __ Fmov(result_reg.W(), value_reg.S()); 2216 } 2217} 2218 2219 2220void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { 2221 Handle<String> class_name = instr->hydrogen()->class_name(); 2222 Label* true_label = instr->TrueLabel(chunk_); 2223 Label* false_label = instr->FalseLabel(chunk_); 2224 Register input = ToRegister(instr->value()); 2225 Register scratch1 = ToRegister(instr->temp1()); 2226 Register scratch2 = ToRegister(instr->temp2()); 2227 2228 __ JumpIfSmi(input, false_label); 2229 2230 Register map = scratch2; 2231 __ CompareObjectType(input, map, scratch1, FIRST_FUNCTION_TYPE); 2232 STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE); 2233 if (String::Equals(isolate()->factory()->Function_string(), class_name)) { 2234 __ B(hs, true_label); 2235 } else { 2236 __ B(hs, false_label); 2237 } 2238 2239 // Check if the constructor in the map is a function. 2240 { 2241 UseScratchRegisterScope temps(masm()); 2242 Register instance_type = temps.AcquireX(); 2243 __ GetMapConstructor(scratch1, map, scratch2, instance_type); 2244 __ Cmp(instance_type, JS_FUNCTION_TYPE); 2245 } 2246 // Objects with a non-function constructor have class 'Object'. 2247 if (String::Equals(class_name, isolate()->factory()->Object_string())) { 2248 __ B(ne, true_label); 2249 } else { 2250 __ B(ne, false_label); 2251 } 2252 2253 // The constructor function is in scratch1. Get its instance class name. 2254 __ Ldr(scratch1, 2255 FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset)); 2256 __ Ldr(scratch1, 2257 FieldMemOperand(scratch1, 2258 SharedFunctionInfo::kInstanceClassNameOffset)); 2259 2260 // The class name we are testing against is internalized since it's a literal. 2261 // The name in the constructor is internalized because of the way the context 2262 // is booted. This routine isn't expected to work for random API-created 2263 // classes and it doesn't have to because you can't access it with natives 2264 // syntax. Since both sides are internalized it is sufficient to use an 2265 // identity comparison. 2266 EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name)); 2267} 2268 2269 2270void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) { 2271 DCHECK(instr->hydrogen()->representation().IsDouble()); 2272 FPRegister object = ToDoubleRegister(instr->object()); 2273 Register temp = ToRegister(instr->temp()); 2274 2275 // If we don't have a NaN, we don't have the hole, so branch now to avoid the 2276 // (relatively expensive) hole-NaN check. 2277 __ Fcmp(object, object); 2278 __ B(vc, instr->FalseLabel(chunk_)); 2279 2280 // We have a NaN, but is it the hole? 
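// The hole is encoded as one specific NaN bit pattern (kHoleNanInt64), so
// move the raw bits into a core register and compare them as an integer; a
// NaN produced by ordinary arithmetic does not match this pattern.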
2281 __ Fmov(temp, object); 2282 EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64); 2283} 2284 2285 2286void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) { 2287 DCHECK(instr->hydrogen()->representation().IsTagged()); 2288 Register object = ToRegister(instr->object()); 2289 2290 EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex); 2291} 2292 2293 2294void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { 2295 Register value = ToRegister(instr->value()); 2296 Register map = ToRegister(instr->temp()); 2297 2298 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); 2299 EmitCompareAndBranch(instr, eq, map, Operand(instr->map())); 2300} 2301 2302 2303void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { 2304 LOperand* left = instr->left(); 2305 LOperand* right = instr->right(); 2306 bool is_unsigned = 2307 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || 2308 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); 2309 Condition cond = TokenToCondition(instr->op(), is_unsigned); 2310 2311 if (left->IsConstantOperand() && right->IsConstantOperand()) { 2312 // We can statically evaluate the comparison. 2313 double left_val = ToDouble(LConstantOperand::cast(left)); 2314 double right_val = ToDouble(LConstantOperand::cast(right)); 2315 int next_block = Token::EvalComparison(instr->op(), left_val, right_val) 2316 ? instr->TrueDestination(chunk_) 2317 : instr->FalseDestination(chunk_); 2318 EmitGoto(next_block); 2319 } else { 2320 if (instr->is_double()) { 2321 __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right)); 2322 2323 // If a NaN is involved, i.e. the result is unordered (V set), 2324 // jump to false block label. 2325 __ B(vs, instr->FalseLabel(chunk_)); 2326 EmitBranch(instr, cond); 2327 } else { 2328 if (instr->hydrogen_value()->representation().IsInteger32()) { 2329 if (right->IsConstantOperand()) { 2330 EmitCompareAndBranch(instr, cond, ToRegister32(left), 2331 ToOperand32(right)); 2332 } else { 2333 // Commute the operands and the condition. 2334 EmitCompareAndBranch(instr, CommuteCondition(cond), 2335 ToRegister32(right), ToOperand32(left)); 2336 } 2337 } else { 2338 DCHECK(instr->hydrogen_value()->representation().IsSmi()); 2339 if (right->IsConstantOperand()) { 2340 int32_t value = ToInteger32(LConstantOperand::cast(right)); 2341 EmitCompareAndBranch(instr, 2342 cond, 2343 ToRegister(left), 2344 Operand(Smi::FromInt(value))); 2345 } else if (left->IsConstantOperand()) { 2346 // Commute the operands and the condition. 
2347 int32_t value = ToInteger32(LConstantOperand::cast(left)); 2348 EmitCompareAndBranch(instr, 2349 CommuteCondition(cond), 2350 ToRegister(right), 2351 Operand(Smi::FromInt(value))); 2352 } else { 2353 EmitCompareAndBranch(instr, 2354 cond, 2355 ToRegister(left), 2356 ToRegister(right)); 2357 } 2358 } 2359 } 2360 } 2361} 2362 2363 2364void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { 2365 Register left = ToRegister(instr->left()); 2366 Register right = ToRegister(instr->right()); 2367 EmitCompareAndBranch(instr, eq, left, right); 2368} 2369 2370 2371void LCodeGen::DoCmpT(LCmpT* instr) { 2372 DCHECK(ToRegister(instr->context()).is(cp)); 2373 Token::Value op = instr->op(); 2374 Condition cond = TokenToCondition(op, false); 2375 2376 DCHECK(ToRegister(instr->left()).Is(x1)); 2377 DCHECK(ToRegister(instr->right()).Is(x0)); 2378 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code(); 2379 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2380 // Signal that we don't inline smi code before this stub. 2381 InlineSmiCheckInfo::EmitNotInlined(masm()); 2382 2383 // Return true or false depending on CompareIC result. 2384 // This instruction is marked as call. We can clobber any register. 2385 DCHECK(instr->IsMarkedAsCall()); 2386 __ LoadTrueFalseRoots(x1, x2); 2387 __ Cmp(x0, 0); 2388 __ Csel(ToRegister(instr->result()), x1, x2, cond); 2389} 2390 2391 2392void LCodeGen::DoConstantD(LConstantD* instr) { 2393 DCHECK(instr->result()->IsDoubleRegister()); 2394 DoubleRegister result = ToDoubleRegister(instr->result()); 2395 if (instr->value() == 0) { 2396 if (copysign(1.0, instr->value()) == 1.0) { 2397 __ Fmov(result, fp_zero); 2398 } else { 2399 __ Fneg(result, fp_zero); 2400 } 2401 } else { 2402 __ Fmov(result, instr->value()); 2403 } 2404} 2405 2406 2407void LCodeGen::DoConstantE(LConstantE* instr) { 2408 __ Mov(ToRegister(instr->result()), Operand(instr->value())); 2409} 2410 2411 2412void LCodeGen::DoConstantI(LConstantI* instr) { 2413 DCHECK(is_int32(instr->value())); 2414 // Cast the value here to ensure that the value isn't sign extended by the 2415 // implicit Operand constructor. 2416 __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value())); 2417} 2418 2419 2420void LCodeGen::DoConstantS(LConstantS* instr) { 2421 __ Mov(ToRegister(instr->result()), Operand(instr->value())); 2422} 2423 2424 2425void LCodeGen::DoConstantT(LConstantT* instr) { 2426 Handle<Object> object = instr->value(isolate()); 2427 AllowDeferredHandleDereference smi_check; 2428 __ LoadObject(ToRegister(instr->result()), object); 2429} 2430 2431 2432void LCodeGen::DoContext(LContext* instr) { 2433 // If there is a non-return use, the context must be moved to a register. 2434 Register result = ToRegister(instr->result()); 2435 if (info()->IsOptimizing()) { 2436 __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); 2437 } else { 2438 // If there is no frame, the context must be in cp. 
2439 DCHECK(result.is(cp)); 2440 } 2441} 2442 2443 2444void LCodeGen::DoCheckValue(LCheckValue* instr) { 2445 Register reg = ToRegister(instr->value()); 2446 Handle<HeapObject> object = instr->hydrogen()->object().handle(); 2447 AllowDeferredHandleDereference smi_check; 2448 if (isolate()->heap()->InNewSpace(*object)) { 2449 UseScratchRegisterScope temps(masm()); 2450 Register temp = temps.AcquireX(); 2451 Handle<Cell> cell = isolate()->factory()->NewCell(object); 2452 __ Mov(temp, Operand(cell)); 2453 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); 2454 __ Cmp(reg, temp); 2455 } else { 2456 __ Cmp(reg, Operand(object)); 2457 } 2458 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch); 2459} 2460 2461 2462void LCodeGen::DoLazyBailout(LLazyBailout* instr) { 2463 last_lazy_deopt_pc_ = masm()->pc_offset(); 2464 DCHECK(instr->HasEnvironment()); 2465 LEnvironment* env = instr->environment(); 2466 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 2467 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 2468} 2469 2470 2471void LCodeGen::DoDeoptimize(LDeoptimize* instr) { 2472 Deoptimizer::BailoutType type = instr->hydrogen()->type(); 2473 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the 2474 // needed return address), even though the implementation of LAZY and EAGER is 2475 // now identical. When LAZY is eventually completely folded into EAGER, remove 2476 // the special case below. 2477 if (info()->IsStub() && (type == Deoptimizer::EAGER)) { 2478 type = Deoptimizer::LAZY; 2479 } 2480 2481 Deoptimize(instr, instr->hydrogen()->reason(), &type); 2482} 2483 2484 2485void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 2486 Register dividend = ToRegister32(instr->dividend()); 2487 int32_t divisor = instr->divisor(); 2488 Register result = ToRegister32(instr->result()); 2489 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); 2490 DCHECK(!result.is(dividend)); 2491 2492 // Check for (0 / -x) that will produce negative zero. 2493 HDiv* hdiv = instr->hydrogen(); 2494 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 2495 DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero); 2496 } 2497 // Check for (kMinInt / -1). 2498 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 2499 // Test dividend for kMinInt by subtracting one (cmp) and checking for 2500 // overflow. 2501 __ Cmp(dividend, 1); 2502 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 2503 } 2504 // Deoptimize if remainder will not be 0. 2505 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && 2506 divisor != 1 && divisor != -1) { 2507 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 2508 __ Tst(dividend, mask); 2509 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); 2510 } 2511 2512 if (divisor == -1) { // Nice shortcut, not needed for correctness.
2513 __ Neg(result, dividend); 2514 return; 2515 } // An arithmetic shift right rounds towards minus infinity, so for a negative dividend we first add |divisor| - 1 (materialized from the sign bits) to make the quotient round towards zero; e.g. for divisor == 4 this computes (dividend + (dividend < 0 ? 3 : 0)) >> 2. 2516 int32_t shift = WhichPowerOf2Abs(divisor); 2517 if (shift == 0) { 2518 __ Mov(result, dividend); 2519 } else if (shift == 1) { 2520 __ Add(result, dividend, Operand(dividend, LSR, 31)); 2521 } else { 2522 __ Mov(result, Operand(dividend, ASR, 31)); 2523 __ Add(result, dividend, Operand(result, LSR, 32 - shift)); 2524 } 2525 if (shift > 0) __ Mov(result, Operand(result, ASR, shift)); 2526 if (divisor < 0) __ Neg(result, result); 2527} 2528 2529 2530void LCodeGen::DoDivByConstI(LDivByConstI* instr) { 2531 Register dividend = ToRegister32(instr->dividend()); 2532 int32_t divisor = instr->divisor(); 2533 Register result = ToRegister32(instr->result()); 2534 DCHECK(!AreAliased(dividend, result)); 2535 2536 if (divisor == 0) { 2537 Deoptimize(instr, Deoptimizer::kDivisionByZero); 2538 return; 2539 } 2540 2541 // Check for (0 / -x) that will produce negative zero. 2542 HDiv* hdiv = instr->hydrogen(); 2543 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 2544 DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero); 2545 } 2546 2547 __ TruncatingDiv(result, dividend, Abs(divisor)); 2548 if (divisor < 0) __ Neg(result, result); 2549 2550 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 2551 Register temp = ToRegister32(instr->temp()); 2552 DCHECK(!AreAliased(dividend, result, temp)); 2553 __ Sxtw(dividend.X(), dividend); 2554 __ Mov(temp, divisor); 2555 __ Smsubl(temp.X(), result, temp, dividend.X()); 2556 DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision); 2557 } 2558} 2559 2560 2561// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 2562void LCodeGen::DoDivI(LDivI* instr) { 2563 HBinaryOperation* hdiv = instr->hydrogen(); 2564 Register dividend = ToRegister32(instr->dividend()); 2565 Register divisor = ToRegister32(instr->divisor()); 2566 Register result = ToRegister32(instr->result()); 2567 2568 // Issue the division first, and then check for any deopt cases whilst the 2569 // result is computed. 2570 __ Sdiv(result, dividend, divisor); 2571 2572 if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { 2573 DCHECK(!instr->temp()); 2574 return; 2575 } 2576 2577 // Check for x / 0. 2578 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 2579 DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero); 2580 } 2581 2582 // Check for (0 / -x) as that will produce negative zero. 2583 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 2584 __ Cmp(divisor, 0); 2585 2586 // If the divisor < 0 (mi), compare the dividend, and deopt if it is 2587 // zero, i.e. zero dividend with negative divisor deopts. 2588 // If the divisor >= 0 (pl, the opposite of mi) set the flags to 2589 // condition ne, so we don't deopt, i.e. positive divisor doesn't deopt. 2590 __ Ccmp(dividend, 0, NoFlag, mi); 2591 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 2592 } 2593 2594 // Check for (kMinInt / -1). 2595 if (hdiv->CheckFlag(HValue::kCanOverflow)) { 2596 // Test dividend for kMinInt by subtracting one (cmp) and checking for 2597 // overflow. 2598 __ Cmp(dividend, 1); 2599 // If overflow is set, i.e. dividend = kMinInt, compare the divisor with 2600 // -1. If overflow is clear, set the flags for condition ne, as the 2601 // dividend isn't kMinInt, and thus we shouldn't deopt. 2602 __ Ccmp(divisor, -1, NoFlag, vs); 2603 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); 2604 } 2605 2606 // Compute remainder and deopt if it's not zero.
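// (Msub computes dividend - result * divisor in a single instruction, so the
// remainder check adds little on top of the Sdiv already issued above.)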
2607 Register remainder = ToRegister32(instr->temp()); 2608 __ Msub(remainder, result, divisor, dividend); 2609 DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision); 2610} 2611 2612 2613void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) { 2614 DoubleRegister input = ToDoubleRegister(instr->value()); 2615 Register result = ToRegister32(instr->result()); 2616 2617 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 2618 DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero); 2619 } 2620 2621 __ TryRepresentDoubleAsInt32(result, input, double_scratch()); 2622 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); 2623 2624 if (instr->tag_result()) { 2625 __ SmiTag(result.X()); 2626 } 2627} 2628 2629 2630void LCodeGen::DoDrop(LDrop* instr) { 2631 __ Drop(instr->count()); 2632 2633 RecordPushedArgumentsDelta(instr->hydrogen_value()->argument_delta()); 2634} 2635 2636 2637void LCodeGen::DoDummy(LDummy* instr) { 2638 // Nothing to see here, move on! 2639} 2640 2641 2642void LCodeGen::DoDummyUse(LDummyUse* instr) { 2643 // Nothing to see here, move on! 2644} 2645 2646 2647void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { 2648 Register map = ToRegister(instr->map()); 2649 Register result = ToRegister(instr->result()); 2650 Label load_cache, done; 2651 2652 __ EnumLengthUntagged(result, map); 2653 __ Cbnz(result, &load_cache); 2654 2655 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array())); 2656 __ B(&done); 2657 2658 __ Bind(&load_cache); 2659 __ LoadInstanceDescriptors(map, result); 2660 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); 2661 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); 2662 DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache); 2663 2664 __ Bind(&done); 2665} 2666 2667 2668void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { 2669 Register object = ToRegister(instr->object()); 2670 2671 DCHECK(instr->IsMarkedAsCall()); 2672 DCHECK(object.Is(x0)); 2673 2674 Label use_cache, call_runtime; 2675 __ CheckEnumCache(object, x5, x1, x2, x3, x4, &call_runtime); 2676 2677 __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); 2678 __ B(&use_cache); 2679 2680 // Get the set of properties to enumerate. 2681 __ Bind(&call_runtime); 2682 __ Push(object); 2683 CallRuntime(Runtime::kForInEnumerate, instr); 2684 __ Bind(&use_cache); 2685} 2686 2687 2688void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { 2689 Register input = ToRegister(instr->value()); 2690 Register result = ToRegister(instr->result()); 2691 2692 __ AssertString(input); 2693 2694 // Assert that we can use a W register load to get the hash. 2695 DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits); 2696 __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset)); 2697 __ IndexFromHash(result, result); 2698} 2699 2700 2701void LCodeGen::EmitGoto(int block) { 2702 // Do not emit jump if we are emitting a goto to the next block. 2703 if (!IsNextEmittedBlock(block)) { 2704 __ B(chunk_->GetAssemblyLabel(LookupDestination(block))); 2705 } 2706} 2707 2708 2709void LCodeGen::DoGoto(LGoto* instr) { 2710 EmitGoto(instr->block_id()); 2711} 2712 2713 2714void LCodeGen::DoHasCachedArrayIndexAndBranch( 2715 LHasCachedArrayIndexAndBranch* instr) { 2716 Register input = ToRegister(instr->value()); 2717 Register temp = ToRegister32(instr->temp()); 2718 2719 // Assert that the cache status bits fit in a W register. 
2720 DCHECK(is_uint32(String::kContainsCachedArrayIndexMask)); 2721 __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset)); 2722 __ Tst(temp, String::kContainsCachedArrayIndexMask); 2723 EmitBranch(instr, eq); 2724} 2725 2726 2727// The HHasInstanceTypeAndBranch instruction is built with an interval of types 2728// to test but is only used in very restricted ways. The only possible kinds 2729// of intervals are: 2730// - [ FIRST_TYPE, instr->to() ] 2731// - [ instr->from(), LAST_TYPE ] 2732// - instr->from() == instr->to() 2733// 2734// These kinds of intervals can be checked with only one compare instruction 2735// provided the correct value and test condition are used. For example, the 2736// interval [ FIRST_TYPE, instr->to() ] is handled as a single Cmp against 2737// instr->to() followed by a 'ls' branch. 2738// 2739// TestType() will return the value to use in the compare instruction and 2740// BranchCondition() will return the condition to use depending on the kind 2741// of interval actually specified in the instruction. 2742static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { 2743 InstanceType from = instr->from(); 2744 InstanceType to = instr->to(); 2745 if (from == FIRST_TYPE) return to; 2746 DCHECK((from == to) || (to == LAST_TYPE)); 2747 return from; 2748} 2749 2750 2751// See comment above TestType function for what this function does. 2752static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { 2753 InstanceType from = instr->from(); 2754 InstanceType to = instr->to(); 2755 if (from == to) return eq; 2756 if (to == LAST_TYPE) return hs; 2757 if (from == FIRST_TYPE) return ls; 2758 UNREACHABLE(); 2759 return eq; 2760} 2761 2762 2763void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { 2764 Register input = ToRegister(instr->value()); 2765 Register scratch = ToRegister(instr->temp()); 2766 2767 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2768 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2769 } 2770 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); 2771 EmitBranch(instr, BranchCondition(instr->hydrogen())); 2772} 2773 2774 2775void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { 2776 Register result = ToRegister(instr->result()); 2777 Register base = ToRegister(instr->base_object()); 2778 if (instr->offset()->IsConstantOperand()) { 2779 __ Add(result, base, ToOperand32(instr->offset())); 2780 } else { 2781 __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW)); 2782 } 2783} 2784 2785 2786void LCodeGen::DoHasInPrototypeChainAndBranch( 2787 LHasInPrototypeChainAndBranch* instr) { 2788 Register const object = ToRegister(instr->object()); 2789 Register const object_map = ToRegister(instr->scratch1()); 2790 Register const object_instance_type = ToRegister(instr->scratch2()); 2791 Register const object_prototype = object_map; 2792 Register const prototype = ToRegister(instr->prototype()); 2793 2794 // The {object} must be a spec object. It's sufficient to know that {object} 2795 // is not a smi, since all other non-spec objects have {null} prototypes and 2796 // will be ruled out below. 2797 if (instr->hydrogen()->ObjectNeedsSmiCheck()) { 2798 __ JumpIfSmi(object, instr->FalseLabel(chunk_)); 2799 } 2800 2801 // Loop through the {object}'s prototype chain looking for the {prototype}. 2802 __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); 2803 Label loop; 2804 __ Bind(&loop); 2805 2806 // Deoptimize if the object needs to be access checked.
2805 __ Ldrb(object_instance_type, 2806 FieldMemOperand(object_map, Map::kBitFieldOffset)); 2807 __ Tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded)); 2808 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck); 2809 // Deoptimize for proxies. 2810 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); 2811 DeoptimizeIf(eq, instr, Deoptimizer::kProxy); 2812 2813 __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); 2814 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); 2815 __ B(eq, instr->FalseLabel(chunk_)); 2816 __ Cmp(object_prototype, prototype); 2817 __ B(eq, instr->TrueLabel(chunk_)); 2818 __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset)); 2819 __ B(&loop); 2820} 2821 2822 2823void LCodeGen::DoInstructionGap(LInstructionGap* instr) { 2824 DoGap(instr); 2825} 2826 2827 2828void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 2829 Register value = ToRegister32(instr->value()); 2830 DoubleRegister result = ToDoubleRegister(instr->result()); 2831 __ Scvtf(result, value); 2832} 2833 2834void LCodeGen::PrepareForTailCall(const ParameterCount& actual, 2835 Register scratch1, Register scratch2, 2836 Register scratch3) { 2837#if DEBUG 2838 if (actual.is_reg()) { 2839 DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3)); 2840 } else { 2841 DCHECK(!AreAliased(scratch1, scratch2, scratch3)); 2842 } 2843#endif 2844 if (FLAG_code_comments) { 2845 if (actual.is_reg()) { 2846 Comment(";;; PrepareForTailCall, actual: %s {", 2847 RegisterConfiguration::Crankshaft()->GetGeneralRegisterName( 2848 actual.reg().code())); 2849 } else { 2850 Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate()); 2851 } 2852 } 2853 2854 // Check if next frame is an arguments adaptor frame. 2855 Register caller_args_count_reg = scratch1; 2856 Label no_arguments_adaptor, formal_parameter_count_loaded; 2857 __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 2858 __ Ldr(scratch3, 2859 MemOperand(scratch2, StandardFrameConstants::kContextOffset)); 2860 __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 2861 __ B(ne, &no_arguments_adaptor); 2862 2863 // Drop current frame and load arguments count from arguments adaptor frame. 2864 __ mov(fp, scratch2); 2865 __ Ldr(caller_args_count_reg, 2866 MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); 2867 __ SmiUntag(caller_args_count_reg); 2868 __ B(&formal_parameter_count_loaded); 2869 2870 __ bind(&no_arguments_adaptor); 2871 // Load caller's formal parameter count 2872 __ Mov(caller_args_count_reg, 2873 Immediate(info()->literal()->parameter_count())); 2874 2875 __ bind(&formal_parameter_count_loaded); 2876 __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3); 2877 2878 Comment(";;; }"); 2879} 2880 2881void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { 2882 HInvokeFunction* hinstr = instr->hydrogen(); 2883 DCHECK(ToRegister(instr->context()).is(cp)); 2884 // The function is required to be in x1. 2885 DCHECK(ToRegister(instr->function()).is(x1)); 2886 DCHECK(instr->HasPointerMap()); 2887 2888 bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow; 2889 2890 if (is_tail_call) { 2891 DCHECK(!info()->saves_caller_doubles()); 2892 ParameterCount actual(instr->arity()); 2893 // It is safe to use x3, x4 and x5 as scratch registers here given that 2894 // 1) we are not going to return to caller function anyway, 2895 // 2) x3 (new.target) will be initialized below. 
2896 PrepareForTailCall(actual, x3, x4, x5); 2897 } 2898 2899 Handle<JSFunction> known_function = hinstr->known_function(); 2900 if (known_function.is_null()) { 2901 LPointerMap* pointers = instr->pointer_map(); 2902 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 2903 ParameterCount actual(instr->arity()); 2904 InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION; 2905 __ InvokeFunction(x1, no_reg, actual, flag, generator); 2906 } else { 2907 CallKnownFunction(known_function, hinstr->formal_parameter_count(), 2908 instr->arity(), is_tail_call, instr); 2909 } 2910 RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta()); 2911} 2912 2913 2914Condition LCodeGen::EmitIsString(Register input, 2915 Register temp1, 2916 Label* is_not_string, 2917 SmiCheck check_needed = INLINE_SMI_CHECK) { 2918 if (check_needed == INLINE_SMI_CHECK) { 2919 __ JumpIfSmi(input, is_not_string); 2920 } 2921 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE); 2922 2923 return lt; 2924} 2925 2926 2927void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { 2928 Register val = ToRegister(instr->value()); 2929 Register scratch = ToRegister(instr->temp()); 2930 2931 SmiCheck check_needed = 2932 instr->hydrogen()->value()->type().IsHeapObject() 2933 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 2934 Condition true_cond = 2935 EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed); 2936 2937 EmitBranch(instr, true_cond); 2938} 2939 2940 2941void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { 2942 Register value = ToRegister(instr->value()); 2943 STATIC_ASSERT(kSmiTag == 0); 2944 EmitTestAndBranch(instr, eq, value, kSmiTagMask); 2945} 2946 2947 2948void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { 2949 Register input = ToRegister(instr->value()); 2950 Register temp = ToRegister(instr->temp()); 2951 2952 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2953 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2954 } 2955 __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); 2956 __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); 2957 2958 EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable); 2959} 2960 2961 2962static const char* LabelType(LLabel* label) { 2963 if (label->is_loop_header()) return " (loop header)"; 2964 if (label->is_osr_entry()) return " (OSR entry)"; 2965 return ""; 2966} 2967 2968 2969void LCodeGen::DoLabel(LLabel* label) { 2970 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", 2971 current_instruction_, 2972 label->hydrogen_value()->id(), 2973 label->block_id(), 2974 LabelType(label)); 2975 2976 // Inherit pushed_arguments_ from the predecessor's argument count. 
2977 if (label->block()->HasPredecessor()) { 2978 pushed_arguments_ = label->block()->predecessors()->at(0)->argument_count(); 2979#ifdef DEBUG 2980 for (auto p : *label->block()->predecessors()) { 2981 DCHECK_EQ(p->argument_count(), pushed_arguments_); 2982 } 2983#endif 2984 } 2985 2986 __ Bind(label->label()); 2987 current_block_ = label->block_id(); 2988 DoGap(label); 2989} 2990 2991 2992void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 2993 Register context = ToRegister(instr->context()); 2994 Register result = ToRegister(instr->result()); 2995 __ Ldr(result, ContextMemOperand(context, instr->slot_index())); 2996 if (instr->hydrogen()->RequiresHoleCheck()) { 2997 if (instr->hydrogen()->DeoptimizesOnHole()) { 2998 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, 2999 Deoptimizer::kHole); 3000 } else { 3001 Label not_the_hole; 3002 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole); 3003 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); 3004 __ Bind(&not_the_hole); 3005 } 3006 } 3007} 3008 3009 3010void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { 3011 Register function = ToRegister(instr->function()); 3012 Register result = ToRegister(instr->result()); 3013 Register temp = ToRegister(instr->temp()); 3014 3015 // Get the prototype or initial map from the function. 3016 __ Ldr(result, FieldMemOperand(function, 3017 JSFunction::kPrototypeOrInitialMapOffset)); 3018 3019 // Check that the function has a prototype or an initial map. 3020 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, 3021 Deoptimizer::kHole); 3022 3023 // If the function does not have an initial map, we're done. 3024 Label done; 3025 __ CompareObjectType(result, temp, temp, MAP_TYPE); 3026 __ B(ne, &done); 3027 3028 // Get the prototype from the initial map. 3029 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); 3030 3031 // All done. 3032 __ Bind(&done); 3033} 3034 3035 3036template <class T> 3037void LCodeGen::EmitVectorLoadICRegisters(T* instr) { 3038 Register vector_register = ToRegister(instr->temp_vector()); 3039 Register slot_register = LoadWithVectorDescriptor::SlotRegister(); 3040 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister())); 3041 DCHECK(slot_register.is(x0)); 3042 3043 AllowDeferredHandleDereference vector_structure_check; 3044 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); 3045 __ Mov(vector_register, vector); 3046 // No need to allocate this register.
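// The slot register is fixed by the LoadWithVectorDescriptor calling
// convention; its value is the smi-tagged index of the slot in the
// feedback vector.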
3047 FeedbackVectorSlot slot = instr->hydrogen()->slot(); 3048 int index = vector->GetIndex(slot); 3049 __ Mov(slot_register, Smi::FromInt(index)); 3050} 3051 3052 3053template <class T> 3054void LCodeGen::EmitVectorStoreICRegisters(T* instr) { 3055 Register vector_register = ToRegister(instr->temp_vector()); 3056 Register slot_register = ToRegister(instr->temp_slot()); 3057 3058 AllowDeferredHandleDereference vector_structure_check; 3059 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); 3060 __ Mov(vector_register, vector); 3061 FeedbackVectorSlot slot = instr->hydrogen()->slot(); 3062 int index = vector->GetIndex(slot); 3063 __ Mov(slot_register, Smi::FromInt(index)); 3064} 3065 3066 3067void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { 3068 DCHECK(ToRegister(instr->context()).is(cp)); 3069 DCHECK(ToRegister(instr->result()).Is(x0)); 3070 3071 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr); 3072 Handle<Code> ic = 3073 CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode()) 3074 .code(); 3075 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3076} 3077 3078 3079MemOperand LCodeGen::PrepareKeyedExternalArrayOperand( 3080 Register key, 3081 Register base, 3082 Register scratch, 3083 bool key_is_smi, 3084 bool key_is_constant, 3085 int constant_key, 3086 ElementsKind elements_kind, 3087 int base_offset) { 3088 int element_size_shift = ElementsKindToShiftSize(elements_kind); 3089 3090 if (key_is_constant) { 3091 int key_offset = constant_key << element_size_shift; 3092 return MemOperand(base, key_offset + base_offset); 3093 } 3094 3095 if (key_is_smi) { 3096 __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift)); 3097 return MemOperand(scratch, base_offset); 3098 } 3099 3100 if (base_offset == 0) { 3101 return MemOperand(base, key, SXTW, element_size_shift); 3102 } 3103 3104 DCHECK(!AreAliased(scratch, key)); 3105 __ Add(scratch, base, base_offset); 3106 return MemOperand(scratch, key, SXTW, element_size_shift); 3107} 3108 3109 3110void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) { 3111 Register ext_ptr = ToRegister(instr->elements()); 3112 Register scratch; 3113 ElementsKind elements_kind = instr->elements_kind(); 3114 3115 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); 3116 bool key_is_constant = instr->key()->IsConstantOperand(); 3117 Register key = no_reg; 3118 int constant_key = 0; 3119 if (key_is_constant) { 3120 DCHECK(instr->temp() == NULL); 3121 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3122 if (constant_key & 0xf0000000) { 3123 Abort(kArrayIndexConstantValueTooBig); 3124 } 3125 } else { 3126 scratch = ToRegister(instr->temp()); 3127 key = ToRegister(instr->key()); 3128 } 3129 3130 MemOperand mem_op = 3131 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi, 3132 key_is_constant, constant_key, 3133 elements_kind, 3134 instr->base_offset()); 3135 3136 if (elements_kind == FLOAT32_ELEMENTS) { 3137 DoubleRegister result = ToDoubleRegister(instr->result()); 3138 __ Ldr(result.S(), mem_op); 3139 __ Fcvt(result, result.S()); 3140 } else if (elements_kind == FLOAT64_ELEMENTS) { 3141 DoubleRegister result = ToDoubleRegister(instr->result()); 3142 __ Ldr(result, mem_op); 3143 } else { 3144 Register result = ToRegister(instr->result()); 3145 3146 switch (elements_kind) { 3147 case INT8_ELEMENTS: 3148 __ Ldrsb(result, mem_op); 3149 break; 3150 case UINT8_ELEMENTS: 3151 case UINT8_CLAMPED_ELEMENTS: 3152 __ Ldrb(result, mem_op); 3153 break; 3154 
case INT16_ELEMENTS: 3155 __ Ldrsh(result, mem_op); 3156 break; 3157 case UINT16_ELEMENTS: 3158 __ Ldrh(result, mem_op); 3159 break; 3160 case INT32_ELEMENTS: 3161 __ Ldrsw(result, mem_op); 3162 break; 3163 case UINT32_ELEMENTS: 3164 __ Ldr(result.W(), mem_op); 3165 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 3166 // Deopt if value > 0x80000000. 3167 __ Tst(result, 0xFFFFFFFF80000000); 3168 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue); 3169 } 3170 break; 3171 case FLOAT32_ELEMENTS: 3172 case FLOAT64_ELEMENTS: 3173 case FAST_HOLEY_DOUBLE_ELEMENTS: 3174 case FAST_HOLEY_ELEMENTS: 3175 case FAST_HOLEY_SMI_ELEMENTS: 3176 case FAST_DOUBLE_ELEMENTS: 3177 case FAST_ELEMENTS: 3178 case FAST_SMI_ELEMENTS: 3179 case DICTIONARY_ELEMENTS: 3180 case FAST_SLOPPY_ARGUMENTS_ELEMENTS: 3181 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: 3182 case FAST_STRING_WRAPPER_ELEMENTS: 3183 case SLOW_STRING_WRAPPER_ELEMENTS: 3184 case NO_ELEMENTS: 3185 UNREACHABLE(); 3186 break; 3187 } 3188 } 3189} 3190 3191 3192MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base, 3193 Register elements, 3194 Register key, 3195 bool key_is_tagged, 3196 ElementsKind elements_kind, 3197 Representation representation, 3198 int base_offset) { 3199 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits); 3200 STATIC_ASSERT(kSmiTag == 0); 3201 int element_size_shift = ElementsKindToShiftSize(elements_kind); 3202 3203 // Even though the HLoad/StoreKeyed instructions force the input 3204 // representation for the key to be an integer, the input gets replaced during 3205 // bounds check elimination with the index argument to the bounds check, which 3206 // can be tagged, so that case must be handled here, too. 3207 if (key_is_tagged) { 3208 __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift)); 3209 if (representation.IsInteger32()) { 3210 DCHECK(elements_kind == FAST_SMI_ELEMENTS); 3211 // Read or write only the smi payload in the case of fast smi arrays. 3212 return UntagSmiMemOperand(base, base_offset); 3213 } else { 3214 return MemOperand(base, base_offset); 3215 } 3216 } else { 3217 // Sign extend key because it could be a 32-bit negative value or contain 3218 // garbage in the top 32-bits. The address computation happens in 64-bit. 3219 DCHECK((element_size_shift >= 0) && (element_size_shift <= 4)); 3220 if (representation.IsInteger32()) { 3221 DCHECK(elements_kind == FAST_SMI_ELEMENTS); 3222 // Read or write only the smi payload in the case of fast smi arrays. 
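// Reading just the payload works because a 64-bit smi keeps its 32-bit
// value in the upper word (kSmiShift == 32), so UntagSmiMemOperand can
// address that word directly with a W-sized access.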
3223 __ Add(base, elements, Operand(key, SXTW, element_size_shift)); 3224 return UntagSmiMemOperand(base, base_offset); 3225 } else { 3226 __ Add(base, elements, base_offset); 3227 return MemOperand(base, key, SXTW, element_size_shift); 3228 } 3229 } 3230} 3231 3232 3233void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) { 3234 Register elements = ToRegister(instr->elements()); 3235 DoubleRegister result = ToDoubleRegister(instr->result()); 3236 MemOperand mem_op; 3237 3238 if (instr->key()->IsConstantOperand()) { 3239 DCHECK(instr->hydrogen()->RequiresHoleCheck() || 3240 (instr->temp() == NULL)); 3241 3242 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3243 if (constant_key & 0xf0000000) { 3244 Abort(kArrayIndexConstantValueTooBig); 3245 } 3246 int offset = instr->base_offset() + constant_key * kDoubleSize; 3247 mem_op = MemOperand(elements, offset); 3248 } else { 3249 Register load_base = ToRegister(instr->temp()); 3250 Register key = ToRegister(instr->key()); 3251 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); 3252 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, 3253 instr->hydrogen()->elements_kind(), 3254 instr->hydrogen()->representation(), 3255 instr->base_offset()); 3256 } 3257 3258 __ Ldr(result, mem_op); 3259 3260 if (instr->hydrogen()->RequiresHoleCheck()) { 3261 Register scratch = ToRegister(instr->temp()); 3262 __ Fmov(scratch, result); 3263 __ Eor(scratch, scratch, kHoleNanInt64); 3264 DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole); 3265 } 3266} 3267 3268 3269void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { 3270 Register elements = ToRegister(instr->elements()); 3271 Register result = ToRegister(instr->result()); 3272 MemOperand mem_op; 3273 3274 Representation representation = instr->hydrogen()->representation(); 3275 if (instr->key()->IsConstantOperand()) { 3276 DCHECK(instr->temp() == NULL); 3277 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 3278 int offset = instr->base_offset() + 3279 ToInteger32(const_operand) * kPointerSize; 3280 if (representation.IsInteger32()) { 3281 DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); 3282 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits); 3283 STATIC_ASSERT(kSmiTag == 0); 3284 mem_op = UntagSmiMemOperand(elements, offset); 3285 } else { 3286 mem_op = MemOperand(elements, offset); 3287 } 3288 } else { 3289 Register load_base = ToRegister(instr->temp()); 3290 Register key = ToRegister(instr->key()); 3291 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); 3292 3293 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, 3294 instr->hydrogen()->elements_kind(), 3295 representation, instr->base_offset()); 3296 } 3297 3298 __ Load(result, mem_op, representation); 3299 3300 if (instr->hydrogen()->RequiresHoleCheck()) { 3301 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { 3302 DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi); 3303 } else { 3304 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, 3305 Deoptimizer::kHole); 3306 } 3307 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { 3308 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); 3309 Label done; 3310 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); 3311 __ B(ne, &done); 3312 if (info()->IsStub()) { 3313 // A stub can safely convert the hole to undefined only if the array 3314 // protector cell 
contains (Smi) Isolate::kArrayProtectorValid. Otherwise 3315 // it needs to bail out. 3316 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); 3317 __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset)); 3318 __ Cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid))); 3319 DeoptimizeIf(ne, instr, Deoptimizer::kHole); 3320 } 3321 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); 3322 __ Bind(&done); 3323 } 3324} 3325 3326 3327void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 3328 DCHECK(ToRegister(instr->context()).is(cp)); 3329 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); 3330 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); 3331 3332 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr); 3333 3334 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code(); 3335 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3336 3337 DCHECK(ToRegister(instr->result()).Is(x0)); 3338} 3339 3340 3341void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 3342 HObjectAccess access = instr->hydrogen()->access(); 3343 int offset = access.offset(); 3344 Register object = ToRegister(instr->object()); 3345 3346 if (access.IsExternalMemory()) { 3347 Register result = ToRegister(instr->result()); 3348 __ Load(result, MemOperand(object, offset), access.representation()); 3349 return; 3350 } 3351 3352 if (instr->hydrogen()->representation().IsDouble()) { 3353 DCHECK(access.IsInobject()); 3354 FPRegister result = ToDoubleRegister(instr->result()); 3355 __ Ldr(result, FieldMemOperand(object, offset)); 3356 return; 3357 } 3358 3359 Register result = ToRegister(instr->result()); 3360 Register source; 3361 if (access.IsInobject()) { 3362 source = object; 3363 } else { 3364 // Load the properties array, using result as a scratch register. 3365 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 3366 source = result; 3367 } 3368 3369 if (access.representation().IsSmi() && 3370 instr->hydrogen()->representation().IsInteger32()) { 3371 // Read int value directly from upper half of the smi. 3372 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits); 3373 STATIC_ASSERT(kSmiTag == 0); 3374 __ Load(result, UntagSmiFieldMemOperand(source, offset), 3375 Representation::Integer32()); 3376 } else { 3377 __ Load(result, FieldMemOperand(source, offset), access.representation()); 3378 } 3379} 3380 3381 3382void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { 3383 DCHECK(ToRegister(instr->context()).is(cp)); 3384 // LoadIC expects name and receiver in registers. 3385 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); 3386 __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name())); 3387 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr); 3388 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code(); 3389 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3390 3391 DCHECK(ToRegister(instr->result()).is(x0)); 3392} 3393 3394 3395void LCodeGen::DoLoadRoot(LLoadRoot* instr) { 3396 Register result = ToRegister(instr->result()); 3397 __ LoadRoot(result, instr->index()); 3398} 3399 3400 3401void LCodeGen::DoMathAbs(LMathAbs* instr) { 3402 Representation r = instr->hydrogen()->value()->representation(); 3403 if (r.IsDouble()) { 3404 DoubleRegister input = ToDoubleRegister(instr->value()); 3405 DoubleRegister result = ToDoubleRegister(instr->result()); 3406 __ Fabs(result, input); 3407 } else if (r.IsSmi() || r.IsInteger32()) { 3408 Register input = r.IsSmi() ?
ToRegister(instr->value()) 3409 : ToRegister32(instr->value()); 3410 Register result = r.IsSmi() ? ToRegister(instr->result()) 3411 : ToRegister32(instr->result()); 3412 __ Abs(result, input); 3413 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 3414 } 3415} 3416 3417 3418void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr, 3419 Label* exit, 3420 Label* allocation_entry) { 3421 // Handle the tricky cases of MathAbsTagged: 3422 // - HeapNumber inputs. 3423 // - Negative inputs produce a positive result, so a new HeapNumber is 3424 // allocated to hold it. 3425 // - Positive inputs are returned as-is, since there is no need to allocate 3426 // a new HeapNumber for the result. 3427 // - The (smi) input -0x80000000, produces +0x80000000, which does not fit 3428 // a smi. In this case, the inline code sets the result and jumps directly 3429 // to the allocation_entry label. 3430 DCHECK(instr->context() != NULL); 3431 DCHECK(ToRegister(instr->context()).is(cp)); 3432 Register input = ToRegister(instr->value()); 3433 Register temp1 = ToRegister(instr->temp1()); 3434 Register temp2 = ToRegister(instr->temp2()); 3435 Register result_bits = ToRegister(instr->temp3()); 3436 Register result = ToRegister(instr->result()); 3437 3438 Label runtime_allocation; 3439 3440 // Deoptimize if the input is not a HeapNumber. 3441 DeoptimizeIfNotHeapNumber(input, instr); 3442 3443 // If the argument is positive, we can return it as-is, without any need to 3444 // allocate a new HeapNumber for the result. We have to do this in integer 3445 // registers (rather than with fabs) because we need to be able to distinguish 3446 // the two zeroes. 3447 __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset)); 3448 __ Mov(result, input); 3449 __ Tbz(result_bits, kXSignBit, exit); 3450 3451 // Calculate abs(input) by clearing the sign bit. 3452 __ Bic(result_bits, result_bits, kXSignMask); 3453 3454 // Allocate a new HeapNumber to hold the result. 3455 // result_bits The bit representation of the (double) result. 3456 __ Bind(allocation_entry); 3457 __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2); 3458 // The inline (non-deferred) code will store result_bits into result. 3459 __ B(exit); 3460 3461 __ Bind(&runtime_allocation); 3462 if (FLAG_debug_code) { 3463 // Because result is in the pointer map, we need to make sure it has a valid 3464 // tagged value before we call the runtime. We speculatively set it to the 3465 // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already 3466 // be valid. 3467 Label result_ok; 3468 Register input = ToRegister(instr->value()); 3469 __ JumpIfSmi(result, &result_ok); 3470 __ Cmp(input, result); 3471 __ Assert(eq, kUnexpectedValue); 3472 __ Bind(&result_ok); 3473 } 3474 3475 { PushSafepointRegistersScope scope(this); 3476 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, 3477 instr->context()); 3478 __ StoreToSafepointRegisterSlot(x0, result); 3479 } 3480 // The inline (non-deferred) code will store result_bits into result. 3481} 3482 3483 3484void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) { 3485 // Class for deferred case. 
3486 class DeferredMathAbsTagged: public LDeferredCode { 3487 public: 3488 DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr) 3489 : LDeferredCode(codegen), instr_(instr) { } 3490 virtual void Generate() { 3491 codegen()->DoDeferredMathAbsTagged(instr_, exit(), 3492 allocation_entry()); 3493 } 3494 virtual LInstruction* instr() { return instr_; } 3495 Label* allocation_entry() { return &allocation; } 3496 private: 3497 LMathAbsTagged* instr_; 3498 Label allocation; 3499 }; 3500 3501 // TODO(jbramley): The early-exit mechanism would skip the new frame handling 3502 // in GenerateDeferredCode. Tidy this up. 3503 DCHECK(!NeedsDeferredFrame()); 3504 3505 DeferredMathAbsTagged* deferred = 3506 new(zone()) DeferredMathAbsTagged(this, instr); 3507 3508 DCHECK(instr->hydrogen()->value()->representation().IsTagged() || 3509 instr->hydrogen()->value()->representation().IsSmi()); 3510 Register input = ToRegister(instr->value()); 3511 Register result_bits = ToRegister(instr->temp3()); 3512 Register result = ToRegister(instr->result()); 3513 Label done; 3514 3515 // Handle smis inline. 3516 // We can treat smis as 64-bit integers, since the (low-order) tag bits will 3517 // never get set by the negation. This is therefore the same as the Integer32 3518 // case in DoMathAbs, except that it operates on 64-bit values. 3519 STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0)); 3520 3521 __ JumpIfNotSmi(input, deferred->entry()); 3522 3523 __ Abs(result, input, NULL, &done); 3524 3525 // The result is the magnitude (abs) of the smallest value a smi can 3526 // represent, encoded as a double. 3527 __ Mov(result_bits, double_to_rawbits(0x80000000)); 3528 __ B(deferred->allocation_entry()); 3529 3530 __ Bind(deferred->exit()); 3531 __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset)); 3532 3533 __ Bind(&done); 3534} 3535 3536void LCodeGen::DoMathCos(LMathCos* instr) { 3537 DCHECK(instr->IsMarkedAsCall()); 3538 DCHECK(ToDoubleRegister(instr->value()).is(d0)); 3539 __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1); 3540 DCHECK(ToDoubleRegister(instr->result()).Is(d0)); 3541} 3542 3543void LCodeGen::DoMathSin(LMathSin* instr) { 3544 DCHECK(instr->IsMarkedAsCall()); 3545 DCHECK(ToDoubleRegister(instr->value()).is(d0)); 3546 __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1); 3547 DCHECK(ToDoubleRegister(instr->result()).Is(d0)); 3548} 3549 3550void LCodeGen::DoMathExp(LMathExp* instr) { 3551 DCHECK(instr->IsMarkedAsCall()); 3552 DCHECK(ToDoubleRegister(instr->value()).is(d0)); 3553 __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1); 3554 DCHECK(ToDoubleRegister(instr->result()).Is(d0)); 3555} 3556 3557 3558void LCodeGen::DoMathFloorD(LMathFloorD* instr) { 3559 DoubleRegister input = ToDoubleRegister(instr->value()); 3560 DoubleRegister result = ToDoubleRegister(instr->result()); 3561 3562 __ Frintm(result, input); 3563} 3564 3565 3566void LCodeGen::DoMathFloorI(LMathFloorI* instr) { 3567 DoubleRegister input = ToDoubleRegister(instr->value()); 3568 Register result = ToRegister(instr->result()); 3569 3570 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3571 DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero); 3572 } 3573 3574 __ Fcvtms(result, input); 3575 3576 // Check that the result fits into a 32-bit integer. 3577 // - The result did not overflow. 3578 __ Cmp(result, Operand(result, SXTW)); 3579 // - The input was not NaN. 
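  //   (Flag gymnastics: if the preceding Cmp set eq, Fccmp performs
  //   Fcmp(input, input), which yields ne only for a NaN input; if the Cmp
  //   set ne, Fccmp writes NoFlag, which also leaves ne set. The single
  //   DeoptimizeIf(ne, ...) below therefore catches both lost precision and
  //   NaN.)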
  __ Fccmp(input, input, NoFlag, eq);
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register result = ToRegister32(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Mov(result, dividend, kDiscardForSameWReg);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ Negs(result, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK. Note that we
  // shift the negated dividend held in result, not the original dividend.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ Mov(result, Operand(result, ASR, shift));
    return;
  }

  __ Asr(result, result, shift);
  __ Csel(result, result, kMinInt / divisor, vc);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(!AreAliased(dividend, result));

  if (divisor == 0) {
    Deoptimize(instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Neg(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister32(instr->temp());
  DCHECK(!AreAliased(temp, dividend, result));
  Label needs_adjustment, done;
  __ Cmp(dividend, 0);
  __ B(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ B(&done);
  __ Bind(&needs_adjustment);
  __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ Sub(result, result, Operand(1));
  __ Bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
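// Background for the correction below: flooring division rounds the quotient
// towards -infinity, whereas Sdiv truncates towards zero. The two differ only
// when the operands have opposite signs and the division is inexact; for
// example, -7 / 2 is -3 with Sdiv but floors to -4, so in that case the
// truncated quotient has to be decremented by one.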
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register remainder = ToRegister32(instr->temp());
  Register result = ToRegister32(instr->result());

  // Integer division can't cause an exception on ARM, so we can execute the
  // division speculatively, before the divisor has been checked.
  __ Sdiv(result, dividend, divisor);

  // Check for x / 0.
  DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    // The V flag will be set iff dividend == kMinInt.
    __ Cmp(dividend, 1);
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);
    __ Ccmp(dividend, 0, ZFlag, mi);
    // "divisor" can't be zero here because the code would have already been
    // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
    // In this case we need to deoptimize to produce a -0.
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  Label done;
  // If both operands have the same sign then we are done.
  __ Eor(remainder, dividend, divisor);
  __ Tbz(remainder, kWSignBit, &done);

  // Check if the result needs to be corrected.
  __ Msub(remainder, result, divisor, dividend);
  __ Cbz(remainder, &done);
  __ Sub(result, result, 1);

  __ Bind(&done);
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToDoubleRegister(instr->value()).is(d0));
  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
  DCHECK(ToDoubleRegister(instr->result()).Is(d0));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister32(instr->value());
  Register result = ToRegister32(instr->result());
  __ Clz(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  Label done;

  // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
  //  Math.pow(-Infinity, 0.5) == +Infinity
  //  Math.pow(-0.0, 0.5) == +0.0

  // Catch -infinity inputs first.
  // TODO(jbramley): A constant infinity register would be helpful here.
  __ Fmov(double_scratch(), kFP64NegativeInfinity);
  __ Fcmp(double_scratch(), input);
  __ Fabs(result, input);
  __ B(&done, eq);

  // Add +0.0 to convert -0.0 to +0.0.
  __ Fadd(double_scratch(), input, fp_zero);
  __ Fsqrt(result, double_scratch());

  __ Bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
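  // The DCHECKs that follow pin down the convention: the base arrives in d0
  // and the result is returned in d0; a double exponent arrives in d1, while
  // tagged and integer exponents use the registers named by the MathPow
  // descriptors.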
3763 Register tagged_exponent = MathPowTaggedDescriptor::exponent(); 3764 Register integer_exponent = MathPowIntegerDescriptor::exponent(); 3765 DCHECK(!instr->right()->IsDoubleRegister() || 3766 ToDoubleRegister(instr->right()).is(d1)); 3767 DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() || 3768 ToRegister(instr->right()).is(tagged_exponent)); 3769 DCHECK(!exponent_type.IsInteger32() || 3770 ToRegister(instr->right()).is(integer_exponent)); 3771 DCHECK(ToDoubleRegister(instr->left()).is(d0)); 3772 DCHECK(ToDoubleRegister(instr->result()).is(d0)); 3773 3774 if (exponent_type.IsSmi()) { 3775 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3776 __ CallStub(&stub); 3777 } else if (exponent_type.IsTagged()) { 3778 Label no_deopt; 3779 __ JumpIfSmi(tagged_exponent, &no_deopt); 3780 DeoptimizeIfNotHeapNumber(tagged_exponent, instr); 3781 __ Bind(&no_deopt); 3782 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3783 __ CallStub(&stub); 3784 } else if (exponent_type.IsInteger32()) { 3785 // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub 3786 // supports large integer exponents. 3787 __ Sxtw(integer_exponent, integer_exponent); 3788 MathPowStub stub(isolate(), MathPowStub::INTEGER); 3789 __ CallStub(&stub); 3790 } else { 3791 DCHECK(exponent_type.IsDouble()); 3792 MathPowStub stub(isolate(), MathPowStub::DOUBLE); 3793 __ CallStub(&stub); 3794 } 3795} 3796 3797 3798void LCodeGen::DoMathRoundD(LMathRoundD* instr) { 3799 DoubleRegister input = ToDoubleRegister(instr->value()); 3800 DoubleRegister result = ToDoubleRegister(instr->result()); 3801 DoubleRegister scratch_d = double_scratch(); 3802 3803 DCHECK(!AreAliased(input, result, scratch_d)); 3804 3805 Label done; 3806 3807 __ Frinta(result, input); 3808 __ Fcmp(input, 0.0); 3809 __ Fccmp(result, input, ZFlag, lt); 3810 // The result is correct if the input was in [-0, +infinity], or was a 3811 // negative integral value. 3812 __ B(eq, &done); 3813 3814 // Here the input is negative, non integral, with an exponent lower than 52. 3815 // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff) 3816 // case. So we can safely add 0.5. 3817 __ Fmov(scratch_d, 0.5); 3818 __ Fadd(result, input, scratch_d); 3819 __ Frintm(result, result); 3820 // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative. 3821 __ Fabs(result, result); 3822 __ Fneg(result, result); 3823 3824 __ Bind(&done); 3825} 3826 3827 3828void LCodeGen::DoMathRoundI(LMathRoundI* instr) { 3829 DoubleRegister input = ToDoubleRegister(instr->value()); 3830 DoubleRegister temp = ToDoubleRegister(instr->temp1()); 3831 DoubleRegister dot_five = double_scratch(); 3832 Register result = ToRegister(instr->result()); 3833 Label done; 3834 3835 // Math.round() rounds to the nearest integer, with ties going towards 3836 // +infinity. This does not match any IEEE-754 rounding mode. 3837 // - Infinities and NaNs are propagated unchanged, but cause deopts because 3838 // they can't be represented as integers. 3839 // - The sign of the result is the same as the sign of the input. This means 3840 // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a 3841 // result of -0.0. 3842 3843 // Add 0.5 and round towards -infinity. 3844 __ Fmov(dot_five, 0.5); 3845 __ Fadd(temp, input, dot_five); 3846 __ Fcvtms(result, temp); 3847 3848 // The result is correct if: 3849 // result is not 0, as the input could be NaN or [-0.5, -0.0]. 3850 // result is not 1, as 0.499...94 will wrongly map to 1. 3851 // result fits in 32 bits. 
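  //   (All three conditions are folded into one sequence: Cmp sets eq iff the
  //   X-sized result equals its own sign-extended low word, i.e. it fits in
  //   32 bits. If it fits, Ccmp compares the result with 1; otherwise it
  //   forces the Z flag. B(hi) is then taken only for a result that fits and
  //   is unsigned-greater than 1, which also covers negative 32-bit results,
  //   whose sign-extended X-register pattern is unsigned-large.)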
3852 __ Cmp(result, Operand(result.W(), SXTW)); 3853 __ Ccmp(result, 1, ZFlag, eq); 3854 __ B(hi, &done); 3855 3856 // At this point, we have to handle possible inputs of NaN or numbers in the 3857 // range [-0.5, 1.5[, or numbers larger than 32 bits. 3858 3859 // Deoptimize if the result > 1, as it must be larger than 32 bits. 3860 __ Cmp(result, 1); 3861 DeoptimizeIf(hi, instr, Deoptimizer::kOverflow); 3862 3863 // Deoptimize for negative inputs, which at this point are only numbers in 3864 // the range [-0.5, -0.0] 3865 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3866 __ Fmov(result, input); 3867 DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero); 3868 } 3869 3870 // Deoptimize if the input was NaN. 3871 __ Fcmp(input, dot_five); 3872 DeoptimizeIf(vs, instr, Deoptimizer::kNaN); 3873 3874 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[ 3875 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1, 3876 // else 0; we avoid dealing with 0.499...94 directly. 3877 __ Cset(result, ge); 3878 __ Bind(&done); 3879} 3880 3881 3882void LCodeGen::DoMathFround(LMathFround* instr) { 3883 DoubleRegister input = ToDoubleRegister(instr->value()); 3884 DoubleRegister result = ToDoubleRegister(instr->result()); 3885 __ Fcvt(result.S(), input); 3886 __ Fcvt(result, result.S()); 3887} 3888 3889 3890void LCodeGen::DoMathSqrt(LMathSqrt* instr) { 3891 DoubleRegister input = ToDoubleRegister(instr->value()); 3892 DoubleRegister result = ToDoubleRegister(instr->result()); 3893 __ Fsqrt(result, input); 3894} 3895 3896 3897void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 3898 HMathMinMax::Operation op = instr->hydrogen()->operation(); 3899 if (instr->hydrogen()->representation().IsInteger32()) { 3900 Register result = ToRegister32(instr->result()); 3901 Register left = ToRegister32(instr->left()); 3902 Operand right = ToOperand32(instr->right()); 3903 3904 __ Cmp(left, right); 3905 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le); 3906 } else if (instr->hydrogen()->representation().IsSmi()) { 3907 Register result = ToRegister(instr->result()); 3908 Register left = ToRegister(instr->left()); 3909 Operand right = ToOperand(instr->right()); 3910 3911 __ Cmp(left, right); 3912 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le); 3913 } else { 3914 DCHECK(instr->hydrogen()->representation().IsDouble()); 3915 DoubleRegister result = ToDoubleRegister(instr->result()); 3916 DoubleRegister left = ToDoubleRegister(instr->left()); 3917 DoubleRegister right = ToDoubleRegister(instr->right()); 3918 3919 if (op == HMathMinMax::kMathMax) { 3920 __ Fmax(result, left, right); 3921 } else { 3922 DCHECK(op == HMathMinMax::kMathMin); 3923 __ Fmin(result, left, right); 3924 } 3925 } 3926} 3927 3928 3929void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { 3930 Register dividend = ToRegister32(instr->dividend()); 3931 int32_t divisor = instr->divisor(); 3932 DCHECK(dividend.is(ToRegister32(instr->result()))); 3933 3934 // Theoretically, a variation of the branch-free code for integer division by 3935 // a power of 2 (calculating the remainder via an additional multiplication 3936 // (which gets simplified to an 'and') and subtraction) should be faster, and 3937 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to 3938 // indicate that positive dividends are heavily favored, so the branching 3939 // version performs better. 3940 HMod* hmod = instr->hydrogen(); 3941 int32_t mask = divisor < 0 ? 
      -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
    // Note that this is correct even for kMinInt operands.
    __ Neg(dividend, dividend);
    __ And(dividend, dividend, mask);
    __ Negs(dividend, dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }
    __ B(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, mask);
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  Register temp = ToRegister32(instr->temp());
  DCHECK(!AreAliased(dividend, result, temp));

  if (divisor == 0) {
    Deoptimize(instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Sxtw(dividend.X(), dividend);
  __ Mov(temp, Abs(divisor));
  __ Smsubl(result.X(), result, temp, dividend.X());

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Cbnz(result, &remainder_not_zero);
    DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  Register dividend = ToRegister32(instr->left());
  Register divisor = ToRegister32(instr->right());
  Register result = ToRegister32(instr->result());

  Label done;
  // modulo = dividend - quotient * divisor
  __ Sdiv(result, dividend, divisor);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
  }
  __ Msub(result, result, divisor, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cbnz(result, &done);
    DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
  }
  __ Bind(&done);
}


void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
  DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
  bool is_smi = instr->hydrogen()->representation().IsSmi();
  Register result =
      is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
  Register left =
      is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
  int32_t right = ToInteger32(instr->right());
  DCHECK((right > -kMaxInt) && (right < kMaxInt));

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero) {
    if (right < 0) {
      // The result is -0 if right is negative and left is zero.
      DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
    } else if (right == 0) {
      // The result is -0 if the right is zero and the left is negative.
      DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
    }
  }

  switch (right) {
    // Cases which can detect overflow.
    case -1:
      if (can_overflow) {
        // Only 0x80000000 can overflow here.
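        // (Negs sets the V flag when negating kMinInt, because -kMinInt wraps
        // back to kMinInt in two's complement; every other input negates
        // cleanly.)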
4038 __ Negs(result, left); 4039 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 4040 } else { 4041 __ Neg(result, left); 4042 } 4043 break; 4044 case 0: 4045 // This case can never overflow. 4046 __ Mov(result, 0); 4047 break; 4048 case 1: 4049 // This case can never overflow. 4050 __ Mov(result, left, kDiscardForSameWReg); 4051 break; 4052 case 2: 4053 if (can_overflow) { 4054 __ Adds(result, left, left); 4055 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 4056 } else { 4057 __ Add(result, left, left); 4058 } 4059 break; 4060 4061 default: 4062 // Multiplication by constant powers of two (and some related values) 4063 // can be done efficiently with shifted operands. 4064 int32_t right_abs = Abs(right); 4065 4066 if (base::bits::IsPowerOfTwo32(right_abs)) { 4067 int right_log2 = WhichPowerOf2(right_abs); 4068 4069 if (can_overflow) { 4070 Register scratch = result; 4071 DCHECK(!AreAliased(scratch, left)); 4072 __ Cls(scratch, left); 4073 __ Cmp(scratch, right_log2); 4074 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow); 4075 } 4076 4077 if (right >= 0) { 4078 // result = left << log2(right) 4079 __ Lsl(result, left, right_log2); 4080 } else { 4081 // result = -left << log2(-right) 4082 if (can_overflow) { 4083 __ Negs(result, Operand(left, LSL, right_log2)); 4084 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 4085 } else { 4086 __ Neg(result, Operand(left, LSL, right_log2)); 4087 } 4088 } 4089 return; 4090 } 4091 4092 4093 // For the following cases, we could perform a conservative overflow check 4094 // with CLS as above. However the few cycles saved are likely not worth 4095 // the risk of deoptimizing more often than required. 4096 DCHECK(!can_overflow); 4097 4098 if (right >= 0) { 4099 if (base::bits::IsPowerOfTwo32(right - 1)) { 4100 // result = left + left << log2(right - 1) 4101 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1))); 4102 } else if (base::bits::IsPowerOfTwo32(right + 1)) { 4103 // result = -left + left << log2(right + 1) 4104 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1))); 4105 __ Neg(result, result); 4106 } else { 4107 UNREACHABLE(); 4108 } 4109 } else { 4110 if (base::bits::IsPowerOfTwo32(-right + 1)) { 4111 // result = left - left << log2(-right + 1) 4112 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1))); 4113 } else if (base::bits::IsPowerOfTwo32(-right - 1)) { 4114 // result = -left - left << log2(-right - 1) 4115 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1))); 4116 __ Neg(result, result); 4117 } else { 4118 UNREACHABLE(); 4119 } 4120 } 4121 } 4122} 4123 4124 4125void LCodeGen::DoMulI(LMulI* instr) { 4126 Register result = ToRegister32(instr->result()); 4127 Register left = ToRegister32(instr->left()); 4128 Register right = ToRegister32(instr->right()); 4129 4130 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 4131 bool bailout_on_minus_zero = 4132 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 4133 4134 if (bailout_on_minus_zero && !left.Is(right)) { 4135 // If one operand is zero and the other is negative, the result is -0. 4136 // - Set Z (eq) if either left or right, or both, are 0. 4137 __ Cmp(left, 0); 4138 __ Ccmp(right, 0, ZFlag, ne); 4139 // - If so (eq), set N (mi) if left + right is negative. 4140 // - Otherwise, clear N. 
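    //   (When eq holds, one operand is known to be zero, so the sum computed
    //   by Ccmn equals the other operand and N mirrors that operand's sign;
    //   when eq does not hold, NoFlag leaves N clear, so mi cannot trigger.)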
4141 __ Ccmn(left, right, NoFlag, eq); 4142 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); 4143 } 4144 4145 if (can_overflow) { 4146 __ Smull(result.X(), left, right); 4147 __ Cmp(result.X(), Operand(result, SXTW)); 4148 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 4149 } else { 4150 __ Mul(result, left, right); 4151 } 4152} 4153 4154 4155void LCodeGen::DoMulS(LMulS* instr) { 4156 Register result = ToRegister(instr->result()); 4157 Register left = ToRegister(instr->left()); 4158 Register right = ToRegister(instr->right()); 4159 4160 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 4161 bool bailout_on_minus_zero = 4162 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 4163 4164 if (bailout_on_minus_zero && !left.Is(right)) { 4165 // If one operand is zero and the other is negative, the result is -0. 4166 // - Set Z (eq) if either left or right, or both, are 0. 4167 __ Cmp(left, 0); 4168 __ Ccmp(right, 0, ZFlag, ne); 4169 // - If so (eq), set N (mi) if left + right is negative. 4170 // - Otherwise, clear N. 4171 __ Ccmn(left, right, NoFlag, eq); 4172 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); 4173 } 4174 4175 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); 4176 if (can_overflow) { 4177 __ Smulh(result, left, right); 4178 __ Cmp(result, Operand(result.W(), SXTW)); 4179 __ SmiTag(result); 4180 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 4181 } else { 4182 if (AreAliased(result, left, right)) { 4183 // All three registers are the same: half untag the input and then 4184 // multiply, giving a tagged result. 4185 STATIC_ASSERT((kSmiShift % 2) == 0); 4186 __ Asr(result, left, kSmiShift / 2); 4187 __ Mul(result, result, result); 4188 } else if (result.Is(left) && !left.Is(right)) { 4189 // Registers result and left alias, right is distinct: untag left into 4190 // result, and then multiply by right, giving a tagged result. 4191 __ SmiUntag(result, left); 4192 __ Mul(result, result, right); 4193 } else { 4194 DCHECK(!left.Is(result)); 4195 // Registers result and right alias, left is distinct, or all registers 4196 // are distinct: untag right into result, and then multiply by left, 4197 // giving a tagged result. 4198 __ SmiUntag(result, right); 4199 __ Mul(result, left, result); 4200 } 4201 } 4202} 4203 4204 4205void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 4206 // TODO(3095996): Get rid of this. For now, we need to make the 4207 // result register contain a valid pointer because it is already 4208 // contained in the register pointer map. 4209 Register result = ToRegister(instr->result()); 4210 __ Mov(result, 0); 4211 4212 PushSafepointRegistersScope scope(this); 4213 // NumberTagU and NumberTagD use the context from the frame, rather than 4214 // the environment's HContext or HInlinedContext value. 4215 // They only call Runtime::kAllocateHeapNumber. 4216 // The corresponding HChange instructions are added in a phase that does 4217 // not have easy access to the local context. 
4218 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4219 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4220 RecordSafepointWithRegisters( 4221 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 4222 __ StoreToSafepointRegisterSlot(x0, result); 4223} 4224 4225 4226void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4227 class DeferredNumberTagD: public LDeferredCode { 4228 public: 4229 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 4230 : LDeferredCode(codegen), instr_(instr) { } 4231 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } 4232 virtual LInstruction* instr() { return instr_; } 4233 private: 4234 LNumberTagD* instr_; 4235 }; 4236 4237 DoubleRegister input = ToDoubleRegister(instr->value()); 4238 Register result = ToRegister(instr->result()); 4239 Register temp1 = ToRegister(instr->temp1()); 4240 Register temp2 = ToRegister(instr->temp2()); 4241 4242 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); 4243 if (FLAG_inline_new) { 4244 __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2); 4245 } else { 4246 __ B(deferred->entry()); 4247 } 4248 4249 __ Bind(deferred->exit()); 4250 __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset)); 4251} 4252 4253 4254void LCodeGen::DoDeferredNumberTagU(LInstruction* instr, 4255 LOperand* value, 4256 LOperand* temp1, 4257 LOperand* temp2) { 4258 Label slow, convert_and_store; 4259 Register src = ToRegister32(value); 4260 Register dst = ToRegister(instr->result()); 4261 Register scratch1 = ToRegister(temp1); 4262 4263 if (FLAG_inline_new) { 4264 Register scratch2 = ToRegister(temp2); 4265 __ AllocateHeapNumber(dst, &slow, scratch1, scratch2); 4266 __ B(&convert_and_store); 4267 } 4268 4269 // Slow case: call the runtime system to do the number allocation. 4270 __ Bind(&slow); 4271 // TODO(3095996): Put a valid pointer value in the stack slot where the result 4272 // register is stored, as this register is in the pointer map, but contains an 4273 // integer value. 4274 __ Mov(dst, 0); 4275 { 4276 // Preserve the value of all registers. 4277 PushSafepointRegistersScope scope(this); 4278 4279 // NumberTagU and NumberTagD use the context from the frame, rather than 4280 // the environment's HContext or HInlinedContext value. 4281 // They only call Runtime::kAllocateHeapNumber. 4282 // The corresponding HChange instructions are added in a phase that does 4283 // not have easy access to the local context. 4284 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4285 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4286 RecordSafepointWithRegisters( 4287 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 4288 __ StoreToSafepointRegisterSlot(x0, dst); 4289 } 4290 4291 // Convert number to floating point and store in the newly allocated heap 4292 // number. 
4293 __ Bind(&convert_and_store); 4294 DoubleRegister dbl_scratch = double_scratch(); 4295 __ Ucvtf(dbl_scratch, src); 4296 __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); 4297} 4298 4299 4300void LCodeGen::DoNumberTagU(LNumberTagU* instr) { 4301 class DeferredNumberTagU: public LDeferredCode { 4302 public: 4303 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) 4304 : LDeferredCode(codegen), instr_(instr) { } 4305 virtual void Generate() { 4306 codegen()->DoDeferredNumberTagU(instr_, 4307 instr_->value(), 4308 instr_->temp1(), 4309 instr_->temp2()); 4310 } 4311 virtual LInstruction* instr() { return instr_; } 4312 private: 4313 LNumberTagU* instr_; 4314 }; 4315 4316 Register value = ToRegister32(instr->value()); 4317 Register result = ToRegister(instr->result()); 4318 4319 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); 4320 __ Cmp(value, Smi::kMaxValue); 4321 __ B(hi, deferred->entry()); 4322 __ SmiTag(result, value.X()); 4323 __ Bind(deferred->exit()); 4324} 4325 4326 4327void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 4328 Register input = ToRegister(instr->value()); 4329 Register scratch = ToRegister(instr->temp()); 4330 DoubleRegister result = ToDoubleRegister(instr->result()); 4331 bool can_convert_undefined_to_nan = 4332 instr->hydrogen()->can_convert_undefined_to_nan(); 4333 4334 Label done, load_smi; 4335 4336 // Work out what untag mode we're working with. 4337 HValue* value = instr->hydrogen()->value(); 4338 NumberUntagDMode mode = value->representation().IsSmi() 4339 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; 4340 4341 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 4342 __ JumpIfSmi(input, &load_smi); 4343 4344 Label convert_undefined; 4345 4346 // Heap number map check. 4347 if (can_convert_undefined_to_nan) { 4348 __ JumpIfNotHeapNumber(input, &convert_undefined); 4349 } else { 4350 DeoptimizeIfNotHeapNumber(input, instr); 4351 } 4352 4353 // Load heap number. 4354 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); 4355 if (instr->hydrogen()->deoptimize_on_minus_zero()) { 4356 DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero); 4357 } 4358 __ B(&done); 4359 4360 if (can_convert_undefined_to_nan) { 4361 __ Bind(&convert_undefined); 4362 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, 4363 Deoptimizer::kNotAHeapNumberUndefined); 4364 4365 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 4366 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); 4367 __ B(&done); 4368 } 4369 4370 } else { 4371 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 4372 // Fall through to load_smi. 4373 } 4374 4375 // Smi to double register conversion. 4376 __ Bind(&load_smi); 4377 __ SmiUntagToDouble(result, input); 4378 4379 __ Bind(&done); 4380} 4381 4382 4383void LCodeGen::DoOsrEntry(LOsrEntry* instr) { 4384 // This is a pseudo-instruction that ensures that the environment here is 4385 // properly registered for deoptimization and records the assembler's PC 4386 // offset. 4387 LEnvironment* environment = instr->environment(); 4388 4389 // If the environment were already registered, we would have no way of 4390 // backpatching it with the spill slot operands. 4391 DCHECK(!environment->HasBeenRegistered()); 4392 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 4393 4394 GenerateOsrPrologue(); 4395} 4396 4397 4398void LCodeGen::DoParameter(LParameter* instr) { 4399 // Nothing to do. 
}


void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
  __ PushPreamble(instr->argc(), kPointerSize);
}


void LCodeGen::DoPushArguments(LPushArguments* instr) {
  MacroAssembler::PushPopQueue args(masm());

  for (int i = 0; i < instr->ArgumentCount(); ++i) {
    LOperand* arg = instr->argument(i);
    if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
      Abort(kDoPushArgumentNotImplementedForDoubleType);
      return;
    }
    args.Queue(ToRegister(arg));
  }

  // The preamble was done by LPreparePushArguments.
  args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);

  RecordPushedArgumentsDelta(instr->ArgumentCount());
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in x0. Since we're leaving the
    // code managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ Push(x0);
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit);
  }

  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }

  if (NeedsEagerFrame()) {
    Register stack_pointer = masm()->StackPointer();
    __ Mov(stack_pointer, fp);
    __ Pop(fp, lr);
  }

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    __ Drop(parameter_count + 1);
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register parameter_count = ToRegister(instr->parameter_count());
    __ DropBySMI(parameter_count);
  }
  __ Ret();
}


MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           Register temp,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }

  __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
  if (encoding == String::ONE_BYTE_ENCODING) {
    return MemOperand(temp, ToRegister32(index), SXTW);
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    return MemOperand(temp, ToRegister32(index), SXTW, 1);
  }
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    // Even though this lithium instruction comes with a temp register, we
    // can't use it here because we want to use "AtStart" constraints on the
    // inputs and the debug code here needs a scratch register.
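    // The check masks the instance type down to its representation and
    // encoding bits, so the comparison verifies both that the string is
    // sequential and that its encoding matches this instruction's encoding.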
4493 UseScratchRegisterScope temps(masm()); 4494 Register dbg_temp = temps.AcquireX(); 4495 4496 __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset)); 4497 __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset)); 4498 4499 __ And(dbg_temp, dbg_temp, 4500 Operand(kStringRepresentationMask | kStringEncodingMask)); 4501 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 4502 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 4503 __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING 4504 ? one_byte_seq_type : two_byte_seq_type)); 4505 __ Check(eq, kUnexpectedStringType); 4506 } 4507 4508 MemOperand operand = 4509 BuildSeqStringOperand(string, temp, instr->index(), encoding); 4510 if (encoding == String::ONE_BYTE_ENCODING) { 4511 __ Ldrb(result, operand); 4512 } else { 4513 __ Ldrh(result, operand); 4514 } 4515} 4516 4517 4518void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { 4519 String::Encoding encoding = instr->hydrogen()->encoding(); 4520 Register string = ToRegister(instr->string()); 4521 Register value = ToRegister(instr->value()); 4522 Register temp = ToRegister(instr->temp()); 4523 4524 if (FLAG_debug_code) { 4525 DCHECK(ToRegister(instr->context()).is(cp)); 4526 Register index = ToRegister(instr->index()); 4527 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 4528 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 4529 int encoding_mask = 4530 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING 4531 ? one_byte_seq_type : two_byte_seq_type; 4532 __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp, 4533 encoding_mask); 4534 } 4535 MemOperand operand = 4536 BuildSeqStringOperand(string, temp, instr->index(), encoding); 4537 if (encoding == String::ONE_BYTE_ENCODING) { 4538 __ Strb(value, operand); 4539 } else { 4540 __ Strh(value, operand); 4541 } 4542} 4543 4544 4545void LCodeGen::DoSmiTag(LSmiTag* instr) { 4546 HChange* hchange = instr->hydrogen(); 4547 Register input = ToRegister(instr->value()); 4548 Register output = ToRegister(instr->result()); 4549 if (hchange->CheckFlag(HValue::kCanOverflow) && 4550 hchange->value()->CheckFlag(HValue::kUint32)) { 4551 DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow); 4552 } 4553 __ SmiTag(output, input); 4554} 4555 4556 4557void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4558 Register input = ToRegister(instr->value()); 4559 Register result = ToRegister(instr->result()); 4560 Label done, untag; 4561 4562 if (instr->needs_check()) { 4563 DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi); 4564 } 4565 4566 __ Bind(&untag); 4567 __ SmiUntag(result, input); 4568 __ Bind(&done); 4569} 4570 4571 4572void LCodeGen::DoShiftI(LShiftI* instr) { 4573 LOperand* right_op = instr->right(); 4574 Register left = ToRegister32(instr->left()); 4575 Register result = ToRegister32(instr->result()); 4576 4577 if (right_op->IsRegister()) { 4578 Register right = ToRegister32(instr->right()); 4579 switch (instr->op()) { 4580 case Token::ROR: __ Ror(result, left, right); break; 4581 case Token::SAR: __ Asr(result, left, right); break; 4582 case Token::SHL: __ Lsl(result, left, right); break; 4583 case Token::SHR: 4584 __ Lsr(result, left, right); 4585 if (instr->can_deopt()) { 4586 // If `left >>> right` >= 0x80000000, the result is not representable 4587 // in a signed 32-bit smi. 
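          // For example, -1 >>> 0 is 0xffffffff (4294967295 in JS), whose
          // sign bit is set when reinterpreted as a signed 32-bit value.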
4588 DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue); 4589 } 4590 break; 4591 default: UNREACHABLE(); 4592 } 4593 } else { 4594 DCHECK(right_op->IsConstantOperand()); 4595 int shift_count = JSShiftAmountFromLConstant(right_op); 4596 if (shift_count == 0) { 4597 if ((instr->op() == Token::SHR) && instr->can_deopt()) { 4598 DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue); 4599 } 4600 __ Mov(result, left, kDiscardForSameWReg); 4601 } else { 4602 switch (instr->op()) { 4603 case Token::ROR: __ Ror(result, left, shift_count); break; 4604 case Token::SAR: __ Asr(result, left, shift_count); break; 4605 case Token::SHL: __ Lsl(result, left, shift_count); break; 4606 case Token::SHR: __ Lsr(result, left, shift_count); break; 4607 default: UNREACHABLE(); 4608 } 4609 } 4610 } 4611} 4612 4613 4614void LCodeGen::DoShiftS(LShiftS* instr) { 4615 LOperand* right_op = instr->right(); 4616 Register left = ToRegister(instr->left()); 4617 Register result = ToRegister(instr->result()); 4618 4619 if (right_op->IsRegister()) { 4620 Register right = ToRegister(instr->right()); 4621 4622 // JavaScript shifts only look at the bottom 5 bits of the 'right' operand. 4623 // Since we're handling smis in X registers, we have to extract these bits 4624 // explicitly. 4625 __ Ubfx(result, right, kSmiShift, 5); 4626 4627 switch (instr->op()) { 4628 case Token::ROR: { 4629 // This is the only case that needs a scratch register. To keep things 4630 // simple for the other cases, borrow a MacroAssembler scratch register. 4631 UseScratchRegisterScope temps(masm()); 4632 Register temp = temps.AcquireW(); 4633 __ SmiUntag(temp, left); 4634 __ Ror(result.W(), temp.W(), result.W()); 4635 __ SmiTag(result); 4636 break; 4637 } 4638 case Token::SAR: 4639 __ Asr(result, left, result); 4640 __ Bic(result, result, kSmiShiftMask); 4641 break; 4642 case Token::SHL: 4643 __ Lsl(result, left, result); 4644 break; 4645 case Token::SHR: 4646 __ Lsr(result, left, result); 4647 __ Bic(result, result, kSmiShiftMask); 4648 if (instr->can_deopt()) { 4649 // If `left >>> right` >= 0x80000000, the result is not representable 4650 // in a signed 32-bit smi. 
4651 DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue); 4652 } 4653 break; 4654 default: UNREACHABLE(); 4655 } 4656 } else { 4657 DCHECK(right_op->IsConstantOperand()); 4658 int shift_count = JSShiftAmountFromLConstant(right_op); 4659 if (shift_count == 0) { 4660 if ((instr->op() == Token::SHR) && instr->can_deopt()) { 4661 DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue); 4662 } 4663 __ Mov(result, left); 4664 } else { 4665 switch (instr->op()) { 4666 case Token::ROR: 4667 __ SmiUntag(result, left); 4668 __ Ror(result.W(), result.W(), shift_count); 4669 __ SmiTag(result); 4670 break; 4671 case Token::SAR: 4672 __ Asr(result, left, shift_count); 4673 __ Bic(result, result, kSmiShiftMask); 4674 break; 4675 case Token::SHL: 4676 __ Lsl(result, left, shift_count); 4677 break; 4678 case Token::SHR: 4679 __ Lsr(result, left, shift_count); 4680 __ Bic(result, result, kSmiShiftMask); 4681 break; 4682 default: UNREACHABLE(); 4683 } 4684 } 4685 } 4686} 4687 4688 4689void LCodeGen::DoDebugBreak(LDebugBreak* instr) { 4690 __ Debug("LDebugBreak", 0, BREAK); 4691} 4692 4693 4694void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { 4695 DCHECK(ToRegister(instr->context()).is(cp)); 4696 Register scratch1 = x5; 4697 Register scratch2 = x6; 4698 DCHECK(instr->IsMarkedAsCall()); 4699 4700 // TODO(all): if Mov could handle object in new space then it could be used 4701 // here. 4702 __ LoadHeapObject(scratch1, instr->hydrogen()->pairs()); 4703 __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags())); 4704 __ Push(scratch1, scratch2); 4705 CallRuntime(Runtime::kDeclareGlobals, instr); 4706} 4707 4708 4709void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { 4710 PushSafepointRegistersScope scope(this); 4711 LoadContextFromDeferred(instr->context()); 4712 __ CallRuntimeSaveDoubles(Runtime::kStackGuard); 4713 RecordSafepointWithLazyDeopt( 4714 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 4715 DCHECK(instr->HasEnvironment()); 4716 LEnvironment* env = instr->environment(); 4717 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 4718} 4719 4720 4721void LCodeGen::DoStackCheck(LStackCheck* instr) { 4722 class DeferredStackCheck: public LDeferredCode { 4723 public: 4724 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) 4725 : LDeferredCode(codegen), instr_(instr) { } 4726 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } 4727 virtual LInstruction* instr() { return instr_; } 4728 private: 4729 LStackCheck* instr_; 4730 }; 4731 4732 DCHECK(instr->HasEnvironment()); 4733 LEnvironment* env = instr->environment(); 4734 // There is no LLazyBailout instruction for stack-checks. We have to 4735 // prepare for lazy deoptimization explicitly here. 4736 if (instr->hydrogen()->is_function_entry()) { 4737 // Perform stack overflow check. 4738 Label done; 4739 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex); 4740 __ B(hs, &done); 4741 4742 PredictableCodeSizeScope predictable(masm_, 4743 Assembler::kCallSizeWithRelocation); 4744 DCHECK(instr->context()->IsRegister()); 4745 DCHECK(ToRegister(instr->context()).is(cp)); 4746 CallCode(isolate()->builtins()->StackCheck(), 4747 RelocInfo::CODE_TARGET, 4748 instr); 4749 __ Bind(&done); 4750 } else { 4751 DCHECK(instr->hydrogen()->is_backwards_branch()); 4752 // Perform stack overflow check if this goto needs it before jumping. 
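    // The comparison below checks the stack pointer against the stack-limit
    // root and enters the deferred code, which calls Runtime::kStackGuard,
    // only when the stack pointer has dropped below the limit (lo).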
4753 DeferredStackCheck* deferred_stack_check = 4754 new(zone()) DeferredStackCheck(this, instr); 4755 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex); 4756 __ B(lo, deferred_stack_check->entry()); 4757 4758 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 4759 __ Bind(instr->done_label()); 4760 deferred_stack_check->SetExit(instr->done_label()); 4761 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 4762 // Don't record a deoptimization index for the safepoint here. 4763 // This will be done explicitly when emitting call and the safepoint in 4764 // the deferred code. 4765 } 4766} 4767 4768 4769void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { 4770 Register function = ToRegister(instr->function()); 4771 Register code_object = ToRegister(instr->code_object()); 4772 Register temp = ToRegister(instr->temp()); 4773 __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag); 4774 __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); 4775} 4776 4777 4778void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 4779 Register context = ToRegister(instr->context()); 4780 Register value = ToRegister(instr->value()); 4781 Register scratch = ToRegister(instr->temp()); 4782 MemOperand target = ContextMemOperand(context, instr->slot_index()); 4783 4784 Label skip_assignment; 4785 4786 if (instr->hydrogen()->RequiresHoleCheck()) { 4787 __ Ldr(scratch, target); 4788 if (instr->hydrogen()->DeoptimizesOnHole()) { 4789 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, 4790 Deoptimizer::kHole); 4791 } else { 4792 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment); 4793 } 4794 } 4795 4796 __ Str(value, target); 4797 if (instr->hydrogen()->NeedsWriteBarrier()) { 4798 SmiCheck check_needed = 4799 instr->hydrogen()->value()->type().IsHeapObject() 4800 ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; 4801 __ RecordWriteContextSlot(context, static_cast<int>(target.offset()), value, 4802 scratch, GetLinkRegisterState(), kSaveFPRegs, 4803 EMIT_REMEMBERED_SET, check_needed); 4804 } 4805 __ Bind(&skip_assignment); 4806} 4807 4808 4809void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) { 4810 Register ext_ptr = ToRegister(instr->elements()); 4811 Register key = no_reg; 4812 Register scratch; 4813 ElementsKind elements_kind = instr->elements_kind(); 4814 4815 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); 4816 bool key_is_constant = instr->key()->IsConstantOperand(); 4817 int constant_key = 0; 4818 if (key_is_constant) { 4819 DCHECK(instr->temp() == NULL); 4820 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4821 if (constant_key & 0xf0000000) { 4822 Abort(kArrayIndexConstantValueTooBig); 4823 } 4824 } else { 4825 key = ToRegister(instr->key()); 4826 scratch = ToRegister(instr->temp()); 4827 } 4828 4829 MemOperand dst = 4830 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi, 4831 key_is_constant, constant_key, 4832 elements_kind, 4833 instr->base_offset()); 4834 4835 if (elements_kind == FLOAT32_ELEMENTS) { 4836 DoubleRegister value = ToDoubleRegister(instr->value()); 4837 DoubleRegister dbl_scratch = double_scratch(); 4838 __ Fcvt(dbl_scratch.S(), value); 4839 __ Str(dbl_scratch.S(), dst); 4840 } else if (elements_kind == FLOAT64_ELEMENTS) { 4841 DoubleRegister value = ToDoubleRegister(instr->value()); 4842 __ Str(value, dst); 4843 } else { 4844 Register value = ToRegister(instr->value()); 4845 4846 switch (elements_kind) { 4847 case UINT8_ELEMENTS: 4848 case UINT8_CLAMPED_ELEMENTS: 4849 case INT8_ELEMENTS: 4850 __ Strb(value, dst); 4851 break; 4852 case INT16_ELEMENTS: 4853 case UINT16_ELEMENTS: 4854 __ Strh(value, dst); 4855 break; 4856 case INT32_ELEMENTS: 4857 case UINT32_ELEMENTS: 4858 __ Str(value.W(), dst); 4859 break; 4860 case FLOAT32_ELEMENTS: 4861 case FLOAT64_ELEMENTS: 4862 case FAST_DOUBLE_ELEMENTS: 4863 case FAST_ELEMENTS: 4864 case FAST_SMI_ELEMENTS: 4865 case FAST_HOLEY_DOUBLE_ELEMENTS: 4866 case FAST_HOLEY_ELEMENTS: 4867 case FAST_HOLEY_SMI_ELEMENTS: 4868 case DICTIONARY_ELEMENTS: 4869 case FAST_SLOPPY_ARGUMENTS_ELEMENTS: 4870 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: 4871 case FAST_STRING_WRAPPER_ELEMENTS: 4872 case SLOW_STRING_WRAPPER_ELEMENTS: 4873 case NO_ELEMENTS: 4874 UNREACHABLE(); 4875 break; 4876 } 4877 } 4878} 4879 4880 4881void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) { 4882 Register elements = ToRegister(instr->elements()); 4883 DoubleRegister value = ToDoubleRegister(instr->value()); 4884 MemOperand mem_op; 4885 4886 if (instr->key()->IsConstantOperand()) { 4887 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4888 if (constant_key & 0xf0000000) { 4889 Abort(kArrayIndexConstantValueTooBig); 4890 } 4891 int offset = instr->base_offset() + constant_key * kDoubleSize; 4892 mem_op = MemOperand(elements, offset); 4893 } else { 4894 Register store_base = ToRegister(instr->temp()); 4895 Register key = ToRegister(instr->key()); 4896 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); 4897 mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged, 4898 instr->hydrogen()->elements_kind(), 4899 instr->hydrogen()->representation(), 4900 instr->base_offset()); 4901 } 4902 4903 if (instr->NeedsCanonicalization()) { 4904 __ CanonicalizeNaN(double_scratch(), value); 4905 __ Str(double_scratch(), 
           mem_op);
  } else {
    __ Str(value, mem_op);
  }
}


void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = no_reg;
  Register store_base = no_reg;
  Register key = no_reg;
  MemOperand mem_op;

  if (!instr->key()->IsConstantOperand() ||
      instr->hydrogen()->NeedsWriteBarrier()) {
    scratch = ToRegister(instr->temp());
  }

  Representation representation = instr->hydrogen()->value()->representation();
  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset = instr->base_offset() +
        ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
    if (representation.IsInteger32()) {
      DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
      DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
      STATIC_ASSERT(kSmiTag == 0);
      mem_op = UntagSmiMemOperand(store_base, offset);
    } else {
      mem_op = MemOperand(store_base, offset);
    }
  } else {
    store_base = scratch;
    key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();

    mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      representation, instr->base_offset());
  }

  __ Store(value, mem_op, representation);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    DCHECK(representation.IsTagged());
    // This assignment may cause element_addr to alias store_base.
    Register element_addr = scratch;
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute the address of the modified element and store it in
    // element_addr (the scratch register).
4960 __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand()); 4961 __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(), 4962 kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed, 4963 instr->hydrogen()->PointersToHereCheckForValue()); 4964 } 4965} 4966 4967 4968void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { 4969 DCHECK(ToRegister(instr->context()).is(cp)); 4970 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); 4971 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister())); 4972 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); 4973 4974 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr); 4975 4976 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode( 4977 isolate(), instr->language_mode()) 4978 .code(); 4979 CallCode(ic, RelocInfo::CODE_TARGET, instr); 4980} 4981 4982 4983void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { 4984 class DeferredMaybeGrowElements final : public LDeferredCode { 4985 public: 4986 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr) 4987 : LDeferredCode(codegen), instr_(instr) {} 4988 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } 4989 LInstruction* instr() override { return instr_; } 4990 4991 private: 4992 LMaybeGrowElements* instr_; 4993 }; 4994 4995 Register result = x0; 4996 DeferredMaybeGrowElements* deferred = 4997 new (zone()) DeferredMaybeGrowElements(this, instr); 4998 LOperand* key = instr->key(); 4999 LOperand* current_capacity = instr->current_capacity(); 5000 5001 DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); 5002 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); 5003 DCHECK(key->IsConstantOperand() || key->IsRegister()); 5004 DCHECK(current_capacity->IsConstantOperand() || 5005 current_capacity->IsRegister()); 5006 5007 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { 5008 int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); 5009 int32_t constant_capacity = 5010 ToInteger32(LConstantOperand::cast(current_capacity)); 5011 if (constant_key >= constant_capacity) { 5012 // Deferred case. 5013 __ B(deferred->entry()); 5014 } 5015 } else if (key->IsConstantOperand()) { 5016 int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); 5017 __ Cmp(ToRegister(current_capacity), Operand(constant_key)); 5018 __ B(le, deferred->entry()); 5019 } else if (current_capacity->IsConstantOperand()) { 5020 int32_t constant_capacity = 5021 ToInteger32(LConstantOperand::cast(current_capacity)); 5022 __ Cmp(ToRegister(key), Operand(constant_capacity)); 5023 __ B(ge, deferred->entry()); 5024 } else { 5025 __ Cmp(ToRegister(key), ToRegister(current_capacity)); 5026 __ B(ge, deferred->entry()); 5027 } 5028 5029 __ Mov(result, ToRegister(instr->elements())); 5030 5031 __ Bind(deferred->exit()); 5032} 5033 5034 5035void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { 5036 // TODO(3095996): Get rid of this. For now, we need to make the 5037 // result register contain a valid pointer because it is already 5038 // contained in the register pointer map. 5039 Register result = x0; 5040 __ Mov(result, 0); 5041 5042 // We have to call a stub. 
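  // The moves below set up the stub's inputs: the object in x0 (aliasing
  // result) and the key, as a smi, in x3. On return, x0 holds the new
  // elements array, or a smi if the elements could not be grown in place,
  // which is why a smi result deopts after the scope.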
void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = x0;
  __ Mov(result, 0);

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    __ Move(result, ToRegister(instr->object()));

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      __ Mov(x3, Operand(ToSmi(LConstantOperand::cast(key))));
    } else {
      __ Mov(x3, ToRegister(key));
      __ SmiTag(x3);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  DeoptimizeIfSmi(result, instr, Deoptimizer::kSmi);
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    Register value = ToRegister(instr->value());
    __ Store(value, MemOperand(object, offset), representation);
    return;
  }

  __ AssertNotSmi(object);

  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    FPRegister value = ToDoubleRegister(instr->value());
    __ Str(value, FieldMemOperand(object, offset));
    return;
  }

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsInteger32Constant(LConstantOperand::cast(instr->value())));

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    // Store the new map value.
    Register new_map_value = ToRegister(instr->temp0());
    __ Mov(new_map_value, Operand(transition));
    __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           new_map_value,
                           ToRegister(instr->temp1()),
                           GetLinkRegisterState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register destination;
  if (access.IsInobject()) {
    destination = object;
  } else {
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
    destination = temp0;
  }
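  // Illustrative note: an in-object field is stored directly into the
  // JSObject, while an out-of-object field lives in the separate properties
  // FixedArray just loaded into temp0; only the base register differs below.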

  if (FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    FPRegister value = ToDoubleRegister(instr->value());
    __ Str(value, FieldMemOperand(object, offset));
  } else if (representation.IsSmi() &&
             instr->hydrogen()->value()->representation().IsInteger32()) {
    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
#ifdef DEBUG
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(destination, offset));
    __ AssertSmi(temp0);
    // If destination aliased temp0, restore it to the address calculated
    // earlier.
    if (destination.Is(temp0)) {
      DCHECK(!access.IsInobject());
      __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
    }
#endif
    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
    STATIC_ASSERT(kSmiTag == 0);
    Register value = ToRegister(instr->value());
    __ Store(value, UntagSmiFieldMemOperand(destination, offset),
             Representation::Integer32());
  } else {
    Register value = ToRegister(instr->value());
    __ Store(value, FieldMemOperand(destination, offset), representation);
  }
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    __ RecordWriteField(destination,
                        offset,
                        value,                       // Clobbered.
                        ToRegister(instr->temp1()),  // Clobbered.
                        GetLinkRegisterState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);

  __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic =
      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
          .code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).Is(x1));
  DCHECK(ToRegister(instr->right()).Is(x0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister32(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ Bind(deferred->exit());
}

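// Rough division of labor (illustrative): StringCharLoadGenerator loads the
// character inline for the string shapes it can handle and jumps to
// deferred->entry() otherwise; the runtime call below covers the rest.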
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  Register index = ToRegister(instr->index());
  __ SmiTagAndPush(index);

  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(x0);
  __ SmiUntag(x0);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister32(instr->char_code());
  Register result = ToRegister(instr->result());

  __ Cmp(char_code, String::kMaxOneByteCharCode);
  __ B(hi, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ B(eq, deferred->entry());
  __ Bind(deferred->exit());
}

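// Worked example (illustrative): for char_code 65 ('A') the fast path above
// indexes slot 65 of the single-character string cache and yields the cached
// one-byte string "A". An undefined slot means the string has not been
// cached yet, so the deferred runtime call below creates it.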
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ SmiTagAndPush(char_code);
  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(x1));
  DCHECK(ToRegister(instr->right()).is(x0));

  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  __ CompareRoot(x0, Heap::kTrueValueRootIndex);
  EmitBranch(instr, eq);
}


void LCodeGen::DoSubI(LSubI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoSubS(LSubS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
                                   LOperand* value,
                                   LOperand* temp1,
                                   LOperand* temp2) {
  Register input = ToRegister(value);
  Register scratch1 = ToRegister(temp1);
  DoubleRegister dbl_scratch1 = double_scratch();

  Label done;

  if (instr->truncating()) {
    Register output = ToRegister(instr->result());
    Label check_bools;

    // If it's not a heap number, jump to undefined check.
    __ JumpIfNotHeapNumber(input, &check_bools);

    // A heap number: load value and convert to int32 using truncating function.
    __ TruncateHeapNumberToI(output, input);
    __ B(&done);

    __ Bind(&check_bools);

    Register true_root = output;
    Register false_root = scratch1;
    __ LoadTrueFalseRoots(true_root, false_root);
    __ Cmp(input, true_root);
    __ Cset(output, eq);
    __ Ccmp(input, false_root, ZFlag, ne);
    __ B(eq, &done);

    // Output contains zero; undefined is converted to zero for truncating
    // conversions.
    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
                        Deoptimizer::kNotAHeapNumberUndefinedBoolean);
  } else {
    Register output = ToRegister32(instr->result());
    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);

    DeoptimizeIfNotHeapNumber(input, instr);
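    // Illustrative note on the check below: TryRepresentDoubleAsInt32
    // converts the double to int32 and back, comparing against the original;
    // any value that does not round-trip exactly (e.g. 1.5, NaN, or 2^31)
    // leaves the flags as ne and deoptimizes.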
    // A heap number: load value and convert to int32 using non-truncating
    // function. If the result is out of range, branch to deoptimize.
    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
    __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Cmp(output, 0);
      __ B(ne, &done);
      __ Fmov(scratch1, dbl_scratch1);
      DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
    }
  }
  __ Bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
                                     instr_->temp2());
    }

    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(output, input);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    __ JumpIfNotSmi(input, deferred->entry());
    __ SmiUntag(output, input);
    __ Bind(deferred->exit());
  }
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}

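// Illustrative note: a "simple" elements transition (e.g. FAST_SMI_ELEMENTS
// to FAST_ELEMENTS) only has to rewrite the map word, so it is inlined below;
// transitions that must copy or convert the backing store go through
// TransitionElementsKindStub instead.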
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register temp1 = ToRegister(instr->temp1());
    Register new_map = ToRegister(instr->temp2());
    __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
    __ Mov(new_map, Operand(to_map));
    __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
                         kDontSaveFPRegs);
  } else {
    {
      UseScratchRegisterScope temps(masm());
      // Use the temp register only in a restricted scope - the codegen checks
      // that we do not use any register across a call.
      __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
                  DONT_DO_SMI_CHECK);
    }
    DCHECK(object.is(x0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ Mov(x1, Operand(to_map));
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ Bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
  __ Bind(&no_memento_found);
}


void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ TruncateDoubleToI(result, input);
  if (instr->tag_result()) {
    __ SmiTag(result, result);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(x3));
  DCHECK(ToRegister(instr->result()).is(x0));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ Mov(x0, Immediate(isolate()->factory()->number_string()));
  __ B(&end);
  __ Bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ Bind(&end);
}

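// Illustrative note: typeof never throws for an already-evaluated value, so
// each case below can branch straight to the true/false block for the literal
// being tested. E.g. `typeof x == "number"` holds for both smis and heap
// numbers, which is why the number_string case checks the smi tag first.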
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Handle<String> type_name = instr->type_literal();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());

  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(value, true_label);

    int true_block = instr->TrueDestination(chunk_);
    int false_block = instr->FalseDestination(chunk_);
    int next_block = GetNextEmittedBlock();

    if (true_block == false_block) {
      EmitGoto(true_block);
    } else if (true_block == next_block) {
      __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
    } else {
      __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
      if (false_block != next_block) {
        __ B(chunk_->GetAssemblyLabel(false_block));
      }
    }

  } else if (String::Equals(type_name, factory->string_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, FIRST_NONSTRING_TYPE);
    EmitBranch(instr, lt);

  } else if (String::Equals(type_name, factory->symbol_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
    __ CompareRoot(value, Heap::kFalseValueRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->undefined_string())) {
    DCHECK(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfRoot(value, Heap::kNullValueRootIndex, false_label);
    __ JumpIfSmi(value, false_label);
    // Check for undetectable objects and jump to the true branch in this case.
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->function_string())) {
    DCHECK(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfSmi(value, false_label);
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch,
           (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
    EmitCompareAndBranch(instr, eq, scratch, 1 << Map::kIsCallable);

  } else if (String::Equals(type_name, factory->object_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ JumpIfObjectType(value, map, scratch, FIRST_JS_RECEIVER_TYPE,
                        false_label, lt);
    // Check for callable or undetectable objects => false.
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch,
                      (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)       \
  } else if (String::Equals(type_name, factory->type##_string())) { \
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));   \
    Register map = ToRegister(instr->temp1());                      \
                                                                    \
    __ JumpIfSmi(value, false_label);                               \
    __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));    \
    __ CompareRoot(map, Heap::k##Type##MapRootIndex);               \
    EmitBranch(instr, eq);
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
  // clang-format on

  } else {
    __ B(false_label);
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  Register temp = ToRegister(instr->temp());
  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Cmp(map, temp);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}

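// Illustrative example of the receiver fix-up below: in sloppy mode,
// f.call(null) must reach a normal function with the global proxy as its
// receiver, while strict-mode functions and builtins see null unchanged.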
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // If the receiver is null or undefined, we have to pass the global object
  // as a receiver to normal functions. Values have to be passed unchanged to
  // builtins and strict-mode functions.
  Label global_object, done, copy_receiver;

  if (!instr->hydrogen()->known_function()) {
    __ Ldr(result, FieldMemOperand(function,
                                   JSFunction::kSharedFunctionInfoOffset));

    // CompilerHints is an int32 field. See objects.h.
    __ Ldr(result.W(),
           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for strict mode functions.
    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);

    // Do not transform the receiver to object for builtins.
    __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);

  // Deoptimize if the receiver is not a JS object.
  DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
  __ CompareObjectType(receiver, result, result, FIRST_JS_RECEIVER_TYPE);
  __ B(ge, &copy_receiver);
  Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);

  __ Bind(&global_object);
  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
  __ B(&done);

  __ Bind(&copy_receiver);
  __ Mov(result, receiver);
  __ Bind(&done);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ Mov(cp, 0);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  __ AssertSmi(index);

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  __ TestAndBranchIfAnySet(
      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
  __ Mov(index, Operand(index, ASR, 1));

  __ Cmp(index, Smi::FromInt(0));
  __ B(lt, &out_of_object);
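  // Illustrative decoding recap: the incoming smi packs
  // (field_index << 1) | is_double_flag. The test above routed mutable-double
  // fields to the deferred path, the arithmetic shift dropped the flag bit,
  // and a negative remaining index means the field lives in the out-of-object
  // properties array handled below.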

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
  __ B(&done);

  __ Bind(&out_of_object);
  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out-of-object property index plus 1.
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ Bind(deferred->exit());
  __ Bind(&done);
}

}  // namespace internal
}  // namespace v8