// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "arm/lithium-codegen-arm.h"
#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateDeoptJumpTable() &&
         GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (FLAG_weak_embedded_maps_in_optimized_code) {
    RegisterDependentCodeForEmbeddedMaps(code);
  }
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}


void LCodeGen::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // lr: Caller's pc.

    // Strict mode functions and builtins need to replace the receiver
    // with undefined when called as functions (without an explicit
    // receiver object). r5 is zero for method calls and non-zero for
    // function calls.
    if (!info_->is_classic_mode() || info_->is_native()) {
      Label ok;
      __ cmp(r5, Operand::Zero());
      __ b(eq, &ok);
      int receiver_offset = scope()->num_parameters() * kPointerSize;
      __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
      __ str(r2, MemOperand(sp, receiver_offset));
      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
      __ Push(Smi::FromInt(StackFrame::STUB));
      // Adjust FP to point to saved FP.
      __ add(fp, sp, Operand(2 * kPointerSize));
    } else {
      PredictableCodeSizeScope predictable_code_size_scope(
          masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
      // The following three instructions must remain together and unmodified
      // for code aging to work properly.
      __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
      __ nop(ip.code());
      // Adjust FP to point to saved FP.
      __ add(fp, sp, Operand(2 * kPointerSize));
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }
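
  // A sketch of the frame just built (standard frame layout): fp + 4 holds
  // the saved lr, fp + 0 the saved fp, fp - 4 the context, and fp - 8 the
  // JS function (or the STUB marker for stub frames).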

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ sub(sp, sp, Operand(slots * kPointerSize));
      __ push(r0);
      __ push(r1);
      __ add(r0, sp, Operand(slots * kPointerSize));
      __ mov(r1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ sub(r0, r0, Operand(kPointerSize));
      __ str(r1, MemOperand(r0, 2 * kPointerSize));
      __ cmp(r0, sp);
      __ b(ne, &loop);
      __ pop(r1);
      __ pop(r0);
    } else {
      __ sub(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    Comment(";;; Save clobbered callee double registers");
    int count = 0;
    BitVector* doubles = chunk()->allocated_double_registers();
    BitVector::Iterator save_iterator(doubles);
    while (!save_iterator.Done()) {
      __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
              MemOperand(sp, count * kDoubleSize));
      save_iterator.Advance();
      count++;
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in r1.
    __ push(r1);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both r0 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ldr(r0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ str(r0, target);
        // Update the write barrier. This clobbers r3 and r0.
        __ RecordWriteContextSlot(
            cp,
            target.offset(),
            r0,
            r3,
            GetLinkRegisterState(),
            kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);

    // Don't emit code for basic blocks with a replacement.
    if (instr->IsLabel()) {
      emit_instructions = !LLabel::cast(instr)->HasReplacement();
    }
    if (!emit_instructions) continue;

    if (FLAG_code_comments && instr->HasInterestingComment(this)) {
      Comment(";;; <@%d,#%d> %s",
              current_instruction_,
              instr->hydrogen_value()->id(),
              instr->Mnemonic());
    }

    RecordAndUpdatePosition(instr->position());

    instr->CompileToNative(this);
  }
  EnsureSpaceForLazyDeopt();
  last_lazy_deopt_pc_ = masm()->pc_offset();
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      int pos = instructions_->at(code->instruction_index())->position();
      RecordAndUpdatePosition(pos);

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ pop(ip);
        __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}


bool LCodeGen::GenerateDeoptJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32bit data after it.
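  // (The factor 7 below is a conservative per-entry size budget: an entry
  // that has to build a frame expands to several instructions plus a
  // constant-pool literal, not just the single instruction mentioned above.)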
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
      deopt_jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ bind(&deopt_jump_table_[i].label);
    Address entry = deopt_jump_table_[i].address;
    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (deopt_jump_table_[i].needs_frame) {
      __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ b(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(2 * kPointerSize));
        __ mov(lr, Operand(pc), LeaveCC, al);
        __ mov(pc, ip);
      }
    } else {
      __ mov(lr, Operand(pc), LeaveCC, al);
      __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
    }
    masm()->CheckConstPool(false, false);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
  return DwVfpRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle();
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      ASSERT(r.IsTagged());
      __ LoadObject(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ ldr(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                               SwVfpRegister flt_scratch,
                                               DwVfpRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle();
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
      __ vmov(flt_scratch, ip);
      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    // TODO(regis): Why is vldr not taking a MemOperand?
    // __ vldr(dbl_scratch, ToMemOperand(op));
    MemOperand mem_op = ToMemOperand(op);
    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle());
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  return MemOperand(fp, StackSlotOffset(op->index()));
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle());
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr,
                        TargetAddressStorageMode storage_mode) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               TargetAddressStorageMode storage_mode) {
  EnsureSpaceForLazyDeopt();
  ASSERT(instr != NULL);
  // Block literal pool emission to ensure nop indicating no inlined smi code
  // is in the correct position.
  Assembler::BlockConstPoolScope block_const_pool(masm());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  ASSERT(pointers != NULL);
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------ translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on ARM.
  if (FLAG_deopt_every_n_times == 1 &&
      !info()->IsStub() &&
      info()->opt_count() == id) {
    ASSERT(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    return;
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", condition);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  if (condition == al && frame_is_built_) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry) ||
        (deopt_jump_table_.last().bailout_type != bailout_type) ||
        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ b(condition, &deopt_jump_table_.last().label);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, environment, bailout_type);
}


void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
  ZoneList<Handle<Map> > maps(1, zone());
  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
  for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (mode == RelocInfo::EMBEDDED_OBJECT &&
        it.rinfo()->target_object()->IsMap()) {
      Handle<Map> map(Map::cast(it.rinfo()->target_object()));
      if (map->CanTransition()) {
        maps.Add(map, zone());
      }
    }
  }
#ifdef VERIFY_HEAP
  // This disables verification of weak embedded maps after full GC.
  // AddDependentCode can cause a GC, which would observe the state where
  // this code is not yet in the depended code lists of the embedded maps.
  NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
#endif
  for (int i = 0; i < maps.length(); i++) {
    maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
  }
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
  if (kind & Safepoint::kWithRegisters) {
    // Register cp always contains a pointer to the context.
    safepoint.DefinePointerRegister(cp, zone());
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}


void LCodeGen::RecordPosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::RecordAndUpdatePosition(int position) {
  if (position >= 0 && position != old_position_) {
    masm()->positions_recorder()->RecordPosition(position);
    old_position_ = position;
  }
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      __ ldr(r0, MemOperand(sp, 0));
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Record the address of the first unknown OSR value as the place to enter.
  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->HasPowerOf2Divisor()) {
    // TODO(svenpanne) We should really do the strength reduction on the
    // Hydrogen level.
    Register left_reg = ToRegister(instr->left());
    Register result_reg = ToRegister(instr->result());

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());
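
    // Worked example for the code below: with left = -13 and divisor = 8 the
    // dividend is negated (13), masked with divisor - 1 (13 & 7 = 5), and
    // negated again, yielding -13 % 8 = -5. The final rsb sets the condition
    // flags, so a zero result from a negative dividend can bail out on -0.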

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ cmp(left_reg, Operand::Zero());
      __ b(pl, &left_is_not_negative);
      __ rsb(result_reg, left_reg, Operand::Zero());
      __ and_(result_reg, result_reg, Operand(divisor - 1));
      __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment());
      }
      __ b(&done);
    }

    __ bind(&left_is_not_negative);
    __ and_(result_reg, left_reg, Operand(divisor - 1));
    __ bind(&done);

  } else if (hmod->fixed_right_arg().has_value) {
    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());

    int32_t divisor = hmod->fixed_right_arg().value;
    ASSERT(IsPowerOf2(divisor));

    // Check if our assumption of a fixed right operand still holds.
    __ cmp(right_reg, Operand(divisor));
    DeoptimizeIf(ne, instr->environment());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ cmp(left_reg, Operand::Zero());
      __ b(pl, &left_is_not_negative);
      __ rsb(result_reg, left_reg, Operand::Zero());
      __ and_(result_reg, result_reg, Operand(divisor - 1));
      __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment());
      }
      __ b(&done);
    }

    __ bind(&left_is_not_negative);
    __ and_(result_reg, left_reg, Operand(divisor - 1));
    __ bind(&done);

  } else if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);

    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());

    Label done;
    // Check for x % 0, sdiv might signal an exception. We have to deopt in
    // this case because we can't return a NaN.
    if (right->CanBeZero()) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
    // want. We have to deopt if we care about -0, because we can't return
    // that.
    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
      Label no_overflow_possible;
      __ cmp(left_reg, Operand(kMinInt));
      __ b(ne, &no_overflow_possible);
      __ cmp(right_reg, Operand(-1));
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment());
      } else {
        __ b(ne, &no_overflow_possible);
        __ mov(result_reg, Operand::Zero());
        __ jmp(&done);
      }
      __ bind(&no_overflow_possible);
    }

    // For 'r3 = r1 % r2' we can have the following ARM code:
    //   sdiv r3, r1, r2
    //   mls r3, r3, r2, r1

    __ sdiv(result_reg, left_reg, right_reg);
    __ mls(result_reg, result_reg, right_reg, left_reg);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (left->CanBeNegative() &&
        hmod->CanBeZero() &&
        hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(lt, instr->environment());
    }
    __ bind(&done);

  } else {
    // General case, without any SDIV support.
    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());
    Register scratch = scratch0();
    ASSERT(!scratch.is(left_reg));
    ASSERT(!scratch.is(right_reg));
    ASSERT(!scratch.is(result_reg));
    DwVfpRegister dividend = ToDoubleRegister(instr->temp());
    DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
    ASSERT(!divisor.is(dividend));
    LowDwVfpRegister quotient = double_scratch0();
    ASSERT(!quotient.is(dividend));
    ASSERT(!quotient.is(divisor));

    Label done;
    // Check for x % 0, we have to deopt in this case because we can't return
    // a NaN.
    if (right->CanBeZero()) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    __ Move(result_reg, left_reg);
    // Load the arguments in VFP registers. The divisor value is preloaded
    // before. Be careful that 'right_reg' is only live on entry.
    // TODO(svenpanne) The last comment seems to be wrong nowadays.
    __ vmov(double_scratch0().low(), left_reg);
    __ vcvt_f64_s32(dividend, double_scratch0().low());
    __ vmov(double_scratch0().low(), right_reg);
    __ vcvt_f64_s32(divisor, double_scratch0().low());

    // We do not care about the sign of the divisor. Note that we still handle
    // the kMinInt % -1 case correctly, though.
    __ vabs(divisor, divisor);
    // Compute the quotient and round it to a 32bit integer.
    __ vdiv(quotient, dividend, divisor);
    __ vcvt_s32_f64(quotient.low(), quotient);
    __ vcvt_f64_s32(quotient, quotient.low());

    // Compute the remainder in result.
    __ vmul(double_scratch0(), divisor, quotient);
    __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
    __ vmov(scratch, double_scratch0().low());
    __ sub(result_reg, left_reg, scratch, SetCC);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (left->CanBeNegative() &&
        hmod->CanBeZero() &&
        hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(mi, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::EmitSignedIntegerDivisionByConstant(
    Register result,
    Register dividend,
    int32_t divisor,
    Register remainder,
    Register scratch,
    LEnvironment* environment) {
  ASSERT(!AreAliased(dividend, scratch, ip));
  ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));

  uint32_t divisor_abs = abs(divisor);

  int32_t power_of_2_factor =
      CompilerIntrinsics::CountTrailingZeros(divisor_abs);

  switch (divisor_abs) {
    case 0:
      DeoptimizeIf(al, environment);
      return;

    case 1:
      if (divisor > 0) {
        __ Move(result, dividend);
      } else {
        __ rsb(result, dividend, Operand::Zero(), SetCC);
        DeoptimizeIf(vs, environment);
      }
      // Compute the remainder.
      __ mov(remainder, Operand::Zero());
      return;

    default:
      if (IsPowerOf2(divisor_abs)) {
        // Branch and condition free code for integer division by a power
        // of two.
        int32_t power = WhichPowerOf2(divisor_abs);
        if (power > 1) {
          __ mov(scratch, Operand(dividend, ASR, power - 1));
        }
        __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
        __ mov(result, Operand(scratch, ASR, power));
        // Negate if necessary.
        // We don't need to check for overflow because the case '-1' is
        // handled separately.
        if (divisor < 0) {
          ASSERT(divisor != -1);
          __ rsb(result, result, Operand::Zero());
        }
        // Compute the remainder.
        if (divisor > 0) {
          __ sub(remainder, dividend, Operand(result, LSL, power));
        } else {
          __ add(remainder, dividend, Operand(result, LSL, power));
        }
        return;
      } else {
        // Use magic numbers for a few specific divisors.
        // Details and proofs can be found in:
        // - Hacker's Delight, Henry S. Warren, Jr.
        // - The PowerPC Compiler Writer's Guide
        // and probably many others.
        //
        // We handle
        //   <divisor with magic numbers> * <power of 2>
        // but not
        //   <divisor with magic numbers> * <other divisor with magic numbers>
        DivMagicNumbers magic_numbers =
            DivMagicNumberFor(divisor_abs >> power_of_2_factor);
        // Multiply by the magic number, take the high word, shift right by
        // s, and add the dividend's sign bit to obtain the truncated
        // quotient.
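        // A worked example (constants as in Hacker's Delight): for a divisor
        // of 5 the magic numbers are M = 0x66666667 and s = 1; for
        // dividend = 7 the high word of 7 * M is 2, 2 >> 1 = 1, and adding
        // the sign bit (0) gives 7 / 5 = 1.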
        const int32_t M = magic_numbers.M;
        const int32_t s = magic_numbers.s + power_of_2_factor;

        __ mov(ip, Operand(M));
        __ smull(ip, scratch, dividend, ip);
        if (M < 0) {
          __ add(scratch, scratch, Operand(dividend));
        }
        if (s > 0) {
          __ mov(scratch, Operand(scratch, ASR, s));
        }
        __ add(result, scratch, Operand(dividend, LSR, 31));
        if (divisor < 0) __ rsb(result, result, Operand::Zero());
        // Compute the remainder.
        __ mov(ip, Operand(divisor));
        // This sequence could be replaced with 'mls' when
        // it gets implemented.
        __ mul(scratch, result, ip);
        __ sub(remainder, dividend, scratch);
      }
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->left());
    int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
    int32_t test_value = 0;
    int32_t power = 0;

    if (divisor > 0) {
      test_value = divisor - 1;
      power = WhichPowerOf2(divisor);
    } else {
      // Check for (0 / -x) that will produce negative zero.
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        __ tst(dividend, Operand(dividend));
        DeoptimizeIf(eq, instr->environment());
      }
      // Check for (kMinInt / -1).
      if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        __ cmp(dividend, Operand(kMinInt));
        DeoptimizeIf(eq, instr->environment());
      }
      test_value = -divisor - 1;
      power = WhichPowerOf2(-divisor);
    }

    if (test_value != 0) {
      if (instr->hydrogen()->CheckFlag(
          HInstruction::kAllUsesTruncatingToInt32)) {
        __ cmp(dividend, Operand(0));
        __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
        __ mov(dividend, Operand(dividend, ASR, power));
        if (divisor > 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
        if (divisor < 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, gt);
        return;  // Don't fall through to "__ rsb" below.
      } else {
        // Deoptimize if remainder is not 0.
        __ tst(dividend, Operand(test_value));
        DeoptimizeIf(ne, instr->environment());
        __ mov(dividend, Operand(dividend, ASR, power));
      }
    }
    if (divisor < 0) __ rsb(dividend, dividend, Operand(0));

    return;
  }

  const Register left = ToRegister(instr->left());
  const Register right = ToRegister(instr->right());
  const Register result = ToRegister(instr->result());

  // Check for x / 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(right, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ cmp(left, Operand::Zero());
    __ b(ne, &left_not_zero);
    __ cmp(right, Operand::Zero());
    DeoptimizeIf(mi, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
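  // (kMinInt / -1 is the only overflowing case: the exact quotient, 2^31,
  // is not representable as a 32-bit signed integer.)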
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmp(left, Operand(kMinInt));
    __ b(ne, &left_not_min_int);
    __ cmp(right, Operand(-1));
    DeoptimizeIf(eq, instr->environment());
    __ bind(&left_not_min_int);
  }

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);
    __ sdiv(result, left, right);

    if (!instr->hydrogen()->CheckFlag(
        HInstruction::kAllUsesTruncatingToInt32)) {
      // Compute remainder and deopt if it's not zero.
      const Register remainder = scratch0();
      __ mls(remainder, result, right, left);
      __ cmp(remainder, Operand::Zero());
      DeoptimizeIf(ne, instr->environment());
    }
  } else {
    const DoubleRegister vleft = ToDoubleRegister(instr->temp());
    const DoubleRegister vright = double_scratch0();
    __ vmov(double_scratch0().low(), left);
    __ vcvt_f64_s32(vleft, double_scratch0().low());
    __ vmov(double_scratch0().low(), right);
    __ vcvt_f64_s32(vright, double_scratch0().low());
    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
    __ vcvt_s32_f64(double_scratch0().low(), vleft);
    __ vmov(result, double_scratch0().low());

    if (!instr->hydrogen()->CheckFlag(
        HInstruction::kAllUsesTruncatingToInt32)) {
      // Deopt if exact conversion to integer was not possible.
      // Use vright as scratch register.
      __ vcvt_f64_s32(double_scratch0(), double_scratch0().low());
      __ VFPCompareAndSetFlags(vleft, double_scratch0());
      DeoptimizeIf(ne, instr->environment());
    }
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DwVfpRegister addend = ToDoubleRegister(instr->addend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(addend.is(ToDoubleRegister(instr->result())));

  __ vmla(addend, multiplier, multiplicand);
}


void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(minuend.is(ToDoubleRegister(instr->result())));

  __ vmls(minuend, multiplier, multiplicand);
}


void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  const Register result = ToRegister(instr->result());
  const Register left = ToRegister(instr->left());
  const Register remainder = ToRegister(instr->temp());
  const Register scratch = scratch0();

  if (!CpuFeatures::IsSupported(SUDIV)) {
    // If the CPU doesn't support the sdiv instruction, we only optimize when
    // we have magic numbers for the divisor. The standard integer division
    // routine is usually slower than transitioning to VFP.
    ASSERT(instr->right()->IsConstantOperand());
    int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
    ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
    if (divisor < 0) {
      __ cmp(left, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }
    EmitSignedIntegerDivisionByConstant(result,
                                        left,
                                        divisor,
                                        remainder,
                                        scratch,
                                        instr->environment());
    // We performed a truncating division. Correct the result if necessary.
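    // E.g. -7 / 2: the truncating quotient is -3 with remainder -1; the
    // remainder is non-zero and its sign differs from the divisor's, so we
    // subtract 1 to get floor(-3.5) = -4.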
    __ cmp(remainder, Operand::Zero());
    __ teq(remainder, Operand(divisor), ne);
    __ sub(result, result, Operand(1), LeaveCC, mi);
  } else {
    CpuFeatureScope scope(masm(), SUDIV);
    const Register right = ToRegister(instr->right());

    // Check for x / 0.
    __ cmp(right, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());

    // Check for (kMinInt / -1).
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      Label left_not_min_int;
      __ cmp(left, Operand(kMinInt));
      __ b(ne, &left_not_min_int);
      __ cmp(right, Operand(-1));
      DeoptimizeIf(eq, instr->environment());
      __ bind(&left_not_min_int);
    }

    // Check for (0 / -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(right, Operand::Zero());
      __ cmp(left, Operand::Zero(), mi);
      // "right" can't be zero because the code would have already been
      // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
      // In this case we need to deoptimize to produce a -0.
      DeoptimizeIf(eq, instr->environment());
    }

    Label done;
    __ sdiv(result, left, right);
    // If both operands have the same sign then we are done.
    __ eor(remainder, left, Operand(right), SetCC);
    __ b(pl, &done);

    // Check if the result needs to be corrected.
    __ mls(remainder, result, right, left);
    __ cmp(remainder, Operand::Zero());
    __ sub(result, result, Operand(1), LeaveCC, ne);

    __ bind(&done);
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (right_op->IsConstantOperand() && !can_overflow) {
    // Use optimized code for specific constants.
    int32_t constant = ToRepresentation(
        LConstantOperand::cast(right_op),
        instr->hydrogen()->right()->representation());

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately.
      // If the constant is negative and left is zero, the result should
      // be -0.
      __ cmp(left, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    switch (constant) {
      case -1:
        __ rsb(result, left, Operand::Zero());
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          __ cmp(left, Operand::Zero());
          DeoptimizeIf(mi, instr->environment());
        }
        __ mov(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
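        // E.g. x * 5 is emitted as add(result, left, Operand(left, LSL, 2))
        // and x * 7 as rsb(result, left, Operand(left, LSL, 3)).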
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (IsPowerOf2(constant_abs) ||
            IsPowerOf2(constant_abs - 1) ||
            IsPowerOf2(constant_abs + 1)) {
          if (IsPowerOf2(constant_abs)) {
            int32_t shift = WhichPowerOf2(constant_abs);
            __ mov(result, Operand(left, LSL, shift));
          } else if (IsPowerOf2(constant_abs - 1)) {
            int32_t shift = WhichPowerOf2(constant_abs - 1);
            __ add(result, left, Operand(left, LSL, shift));
          } else if (IsPowerOf2(constant_abs + 1)) {
            int32_t shift = WhichPowerOf2(constant_abs + 1);
            __ rsb(result, left, Operand(left, LSL, shift));
          }

          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());

        } else {
          // Generate standard code.
          __ mov(ip, Operand(constant));
          __ mul(result, left, ip);
        }
    }

  } else {
    Register right = EmitLoadRegister(right_op, scratch);
    if (bailout_on_minus_zero) {
      __ orr(ToRegister(instr->temp()), left, right);
    }

    if (can_overflow) {
      // scratch:result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ smull(result, scratch, result, right);
      } else {
        __ smull(result, scratch, left, right);
      }
      __ cmp(scratch, Operand(result, ASR, 31));
      DeoptimizeIf(ne, instr->environment());
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mul(result, result, right);
      } else {
        __ mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      // Bail out if the result is supposed to be negative zero.
      Label done;
      __ cmp(result, Operand::Zero());
      __ b(ne, &done);
      __ cmp(ToRegister(instr->temp()), Operand::Zero());
      DeoptimizeIf(mi, instr->environment());
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    right = Operand(EmitLoadRegister(right_op, ip));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ and_(result, left, right);
      break;
    case Token::BIT_OR:
      __ orr(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ mvn(result, Operand(left));
      } else {
        __ eor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
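    // (ECMAScript reduces shift counts modulo 32, so only the low five bits
    // of the right operand matter.)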
1729 __ and_(scratch, ToRegister(right_op), Operand(0x1F)); 1730 switch (instr->op()) { 1731 case Token::ROR: 1732 __ mov(result, Operand(left, ROR, scratch)); 1733 break; 1734 case Token::SAR: 1735 __ mov(result, Operand(left, ASR, scratch)); 1736 break; 1737 case Token::SHR: 1738 if (instr->can_deopt()) { 1739 __ mov(result, Operand(left, LSR, scratch), SetCC); 1740 DeoptimizeIf(mi, instr->environment()); 1741 } else { 1742 __ mov(result, Operand(left, LSR, scratch)); 1743 } 1744 break; 1745 case Token::SHL: 1746 __ mov(result, Operand(left, LSL, scratch)); 1747 break; 1748 default: 1749 UNREACHABLE(); 1750 break; 1751 } 1752 } else { 1753 // Mask the right_op operand. 1754 int value = ToInteger32(LConstantOperand::cast(right_op)); 1755 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); 1756 switch (instr->op()) { 1757 case Token::ROR: 1758 if (shift_count != 0) { 1759 __ mov(result, Operand(left, ROR, shift_count)); 1760 } else { 1761 __ Move(result, left); 1762 } 1763 break; 1764 case Token::SAR: 1765 if (shift_count != 0) { 1766 __ mov(result, Operand(left, ASR, shift_count)); 1767 } else { 1768 __ Move(result, left); 1769 } 1770 break; 1771 case Token::SHR: 1772 if (shift_count != 0) { 1773 __ mov(result, Operand(left, LSR, shift_count)); 1774 } else { 1775 if (instr->can_deopt()) { 1776 __ tst(left, Operand(0x80000000)); 1777 DeoptimizeIf(ne, instr->environment()); 1778 } 1779 __ Move(result, left); 1780 } 1781 break; 1782 case Token::SHL: 1783 if (shift_count != 0) { 1784 if (instr->hydrogen_value()->representation().IsSmi() && 1785 instr->can_deopt()) { 1786 if (shift_count != 1) { 1787 __ mov(result, Operand(left, LSL, shift_count - 1)); 1788 __ SmiTag(result, result, SetCC); 1789 } else { 1790 __ SmiTag(result, left, SetCC); 1791 } 1792 DeoptimizeIf(vs, instr->environment()); 1793 } else { 1794 __ mov(result, Operand(left, LSL, shift_count)); 1795 } 1796 } else { 1797 __ Move(result, left); 1798 } 1799 break; 1800 default: 1801 UNREACHABLE(); 1802 break; 1803 } 1804 } 1805} 1806 1807 1808void LCodeGen::DoSubI(LSubI* instr) { 1809 LOperand* left = instr->left(); 1810 LOperand* right = instr->right(); 1811 LOperand* result = instr->result(); 1812 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1813 SBit set_cond = can_overflow ? SetCC : LeaveCC; 1814 1815 if (right->IsStackSlot() || right->IsArgument()) { 1816 Register right_reg = EmitLoadRegister(right, ip); 1817 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 1818 } else { 1819 ASSERT(right->IsRegister() || right->IsConstantOperand()); 1820 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 1821 } 1822 1823 if (can_overflow) { 1824 DeoptimizeIf(vs, instr->environment()); 1825 } 1826} 1827 1828 1829void LCodeGen::DoRSubI(LRSubI* instr) { 1830 LOperand* left = instr->left(); 1831 LOperand* right = instr->right(); 1832 LOperand* result = instr->result(); 1833 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1834 SBit set_cond = can_overflow ? 
SetCC : LeaveCC; 1835 1836 if (right->IsStackSlot() || right->IsArgument()) { 1837 Register right_reg = EmitLoadRegister(right, ip); 1838 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 1839 } else { 1840 ASSERT(right->IsRegister() || right->IsConstantOperand()); 1841 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 1842 } 1843 1844 if (can_overflow) { 1845 DeoptimizeIf(vs, instr->environment()); 1846 } 1847} 1848 1849 1850void LCodeGen::DoConstantI(LConstantI* instr) { 1851 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1852} 1853 1854 1855void LCodeGen::DoConstantS(LConstantS* instr) { 1856 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1857} 1858 1859 1860void LCodeGen::DoConstantD(LConstantD* instr) { 1861 ASSERT(instr->result()->IsDoubleRegister()); 1862 DwVfpRegister result = ToDoubleRegister(instr->result()); 1863 double v = instr->value(); 1864 __ Vmov(result, v, scratch0()); 1865} 1866 1867 1868void LCodeGen::DoConstantE(LConstantE* instr) { 1869 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1870} 1871 1872 1873void LCodeGen::DoConstantT(LConstantT* instr) { 1874 Handle<Object> value = instr->value(); 1875 AllowDeferredHandleDereference smi_check; 1876 __ LoadObject(ToRegister(instr->result()), value); 1877} 1878 1879 1880void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { 1881 Register result = ToRegister(instr->result()); 1882 Register map = ToRegister(instr->value()); 1883 __ EnumLength(result, map); 1884} 1885 1886 1887void LCodeGen::DoElementsKind(LElementsKind* instr) { 1888 Register result = ToRegister(instr->result()); 1889 Register input = ToRegister(instr->value()); 1890 1891 // Load map into |result|. 1892 __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset)); 1893 // Load the map's "bit field 2" into |result|. We only need the first byte, 1894 // but the following bit field extraction takes care of that anyway. 1895 __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset)); 1896 // Retrieve elements_kind from bit field 2. 1897 __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); 1898} 1899 1900 1901void LCodeGen::DoValueOf(LValueOf* instr) { 1902 Register input = ToRegister(instr->value()); 1903 Register result = ToRegister(instr->result()); 1904 Register map = ToRegister(instr->temp()); 1905 Label done; 1906 1907 if (!instr->hydrogen()->value()->IsHeapObject()) { 1908 // If the object is a smi return the object. 1909 __ SmiTst(input); 1910 __ Move(result, input, eq); 1911 __ b(eq, &done); 1912 } 1913 1914 // If the object is not a value type, return the object. 
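  // Only JSValue wrappers (e.g. the result of 'new Number(1)') keep a
  // primitive payload at JSValue::kValueOffset; any other object is its own
  // value.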
1915 __ CompareObjectType(input, map, map, JS_VALUE_TYPE); 1916 __ Move(result, input, ne); 1917 __ b(ne, &done); 1918 __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset)); 1919 1920 __ bind(&done); 1921} 1922 1923 1924void LCodeGen::DoDateField(LDateField* instr) { 1925 Register object = ToRegister(instr->date()); 1926 Register result = ToRegister(instr->result()); 1927 Register scratch = ToRegister(instr->temp()); 1928 Smi* index = instr->index(); 1929 Label runtime, done; 1930 ASSERT(object.is(result)); 1931 ASSERT(object.is(r0)); 1932 ASSERT(!scratch.is(scratch0())); 1933 ASSERT(!scratch.is(object)); 1934 1935 __ SmiTst(object); 1936 DeoptimizeIf(eq, instr->environment()); 1937 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); 1938 DeoptimizeIf(ne, instr->environment()); 1939 1940 if (index->value() == 0) { 1941 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); 1942 } else { 1943 if (index->value() < JSDate::kFirstUncachedField) { 1944 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); 1945 __ mov(scratch, Operand(stamp)); 1946 __ ldr(scratch, MemOperand(scratch)); 1947 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); 1948 __ cmp(scratch, scratch0()); 1949 __ b(ne, &runtime); 1950 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset + 1951 kPointerSize * index->value())); 1952 __ jmp(&done); 1953 } 1954 __ bind(&runtime); 1955 __ PrepareCallCFunction(2, scratch); 1956 __ mov(r1, Operand(index)); 1957 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); 1958 __ bind(&done); 1959 } 1960} 1961 1962 1963void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { 1964 Register string = ToRegister(instr->string()); 1965 Register index = ToRegister(instr->index()); 1966 Register value = ToRegister(instr->value()); 1967 String::Encoding encoding = instr->encoding(); 1968 1969 if (FLAG_debug_code) { 1970 __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset)); 1971 __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); 1972 1973 __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); 1974 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 1975 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 1976 __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING 1977 ? one_byte_seq_type : two_byte_seq_type)); 1978 __ Check(eq, kUnexpectedStringType); 1979 } 1980 1981 __ add(ip, 1982 string, 1983 Operand(SeqString::kHeaderSize - kHeapObjectTag)); 1984 if (encoding == String::ONE_BYTE_ENCODING) { 1985 __ strb(value, MemOperand(ip, index)); 1986 } else { 1987 // MemOperand with ip as the base register is not allowed for strh, so 1988 // we do the address calculation explicitly. 1989 __ add(ip, ip, Operand(index, LSL, 1)); 1990 __ strh(value, MemOperand(ip)); 1991 } 1992} 1993 1994 1995void LCodeGen::DoThrow(LThrow* instr) { 1996 Register input_reg = EmitLoadRegister(instr->value(), ip); 1997 __ push(input_reg); 1998 CallRuntime(Runtime::kThrow, 1, instr); 1999 2000 if (FLAG_debug_code) { 2001 __ stop("Unreachable code."); 2002 } 2003} 2004 2005 2006void LCodeGen::DoAddI(LAddI* instr) { 2007 LOperand* left = instr->left(); 2008 LOperand* right = instr->right(); 2009 LOperand* result = instr->result(); 2010 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 2011 SBit set_cond = can_overflow ? 
SetCC : LeaveCC; 2012 2013 if (right->IsStackSlot() || right->IsArgument()) { 2014 Register right_reg = EmitLoadRegister(right, ip); 2015 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 2016 } else { 2017 ASSERT(right->IsRegister() || right->IsConstantOperand()); 2018 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 2019 } 2020 2021 if (can_overflow) { 2022 DeoptimizeIf(vs, instr->environment()); 2023 } 2024} 2025 2026 2027void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 2028 LOperand* left = instr->left(); 2029 LOperand* right = instr->right(); 2030 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 2031 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { 2032 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; 2033 Register left_reg = ToRegister(left); 2034 Operand right_op = (right->IsRegister() || right->IsConstantOperand()) 2035 ? ToOperand(right) 2036 : Operand(EmitLoadRegister(right, ip)); 2037 Register result_reg = ToRegister(instr->result()); 2038 __ cmp(left_reg, right_op); 2039 __ Move(result_reg, left_reg, condition); 2040 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); 2041 } else { 2042 ASSERT(instr->hydrogen()->representation().IsDouble()); 2043 DwVfpRegister left_reg = ToDoubleRegister(left); 2044 DwVfpRegister right_reg = ToDoubleRegister(right); 2045 DwVfpRegister result_reg = ToDoubleRegister(instr->result()); 2046 Label result_is_nan, return_left, return_right, check_zero, done; 2047 __ VFPCompareAndSetFlags(left_reg, right_reg); 2048 if (operation == HMathMinMax::kMathMin) { 2049 __ b(mi, &return_left); 2050 __ b(gt, &return_right); 2051 } else { 2052 __ b(mi, &return_right); 2053 __ b(gt, &return_left); 2054 } 2055 __ b(vs, &result_is_nan); 2056 // Left equals right => check for -0. 2057 __ VFPCompareAndSetFlags(left_reg, 0.0); 2058 if (left_reg.is(result_reg) || right_reg.is(result_reg)) { 2059 __ b(ne, &done); // left == right != 0. 2060 } else { 2061 __ b(ne, &return_left); // left == right != 0. 2062 } 2063 // At this point, both left and right are either 0 or -0. 2064 if (operation == HMathMinMax::kMathMin) { 2065 // We could use a single 'vorr' instruction here if we had NEON support. 2066 __ vneg(left_reg, left_reg); 2067 __ vsub(result_reg, left_reg, right_reg); 2068 __ vneg(result_reg, result_reg); 2069 } else { 2070 // Since we operate on +0 and/or -0, vadd and vand have the same effect; 2071 // the decision for vadd is easy because vand is a NEON instruction. 
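      // For +0/-0 inputs IEEE 754 addition yields -0 only when both inputs
      // are -0, which is exactly max(). The min case above computes
      // -((-left) + (-right)) instead, so its result is -0 whenever either
      // input is -0.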
2072 __ vadd(result_reg, left_reg, right_reg); 2073 } 2074 __ b(&done); 2075 2076 __ bind(&result_is_nan); 2077 __ vadd(result_reg, left_reg, right_reg); 2078 __ b(&done); 2079 2080 __ bind(&return_right); 2081 __ Move(result_reg, right_reg); 2082 if (!left_reg.is(result_reg)) { 2083 __ b(&done); 2084 } 2085 2086 __ bind(&return_left); 2087 __ Move(result_reg, left_reg); 2088 2089 __ bind(&done); 2090 } 2091} 2092 2093 2094void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 2095 DwVfpRegister left = ToDoubleRegister(instr->left()); 2096 DwVfpRegister right = ToDoubleRegister(instr->right()); 2097 DwVfpRegister result = ToDoubleRegister(instr->result()); 2098 switch (instr->op()) { 2099 case Token::ADD: 2100 __ vadd(result, left, right); 2101 break; 2102 case Token::SUB: 2103 __ vsub(result, left, right); 2104 break; 2105 case Token::MUL: 2106 __ vmul(result, left, right); 2107 break; 2108 case Token::DIV: 2109 __ vdiv(result, left, right); 2110 break; 2111 case Token::MOD: { 2112 // Save r0-r3 on the stack. 2113 __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); 2114 2115 __ PrepareCallCFunction(0, 2, scratch0()); 2116 __ SetCallCDoubleArguments(left, right); 2117 __ CallCFunction( 2118 ExternalReference::double_fp_operation(Token::MOD, isolate()), 2119 0, 2); 2120 // Move the result in the double result register. 2121 __ GetCFunctionDoubleResult(result); 2122 2123 // Restore r0-r3. 2124 __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); 2125 break; 2126 } 2127 default: 2128 UNREACHABLE(); 2129 break; 2130 } 2131} 2132 2133 2134void LCodeGen::DoArithmeticT(LArithmeticT* instr) { 2135 ASSERT(ToRegister(instr->left()).is(r1)); 2136 ASSERT(ToRegister(instr->right()).is(r0)); 2137 ASSERT(ToRegister(instr->result()).is(r0)); 2138 2139 BinaryOpStub stub(instr->op(), NO_OVERWRITE); 2140 // Block literal pool emission to ensure nop indicating no inlined smi code 2141 // is in the correct position. 2142 Assembler::BlockConstPoolScope block_const_pool(masm()); 2143 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); 2144 __ nop(); // Signals no inlined code. 
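  // The patcher inspects the instruction that follows the call site; finding
  // a nop there tells it that no inlined smi code needs patching.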
2145} 2146 2147 2148int LCodeGen::GetNextEmittedBlock() const { 2149 for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) { 2150 if (!chunk_->GetLabel(i)->HasReplacement()) return i; 2151 } 2152 return -1; 2153} 2154 2155template<class InstrType> 2156void LCodeGen::EmitBranch(InstrType instr, Condition condition) { 2157 int left_block = instr->TrueDestination(chunk_); 2158 int right_block = instr->FalseDestination(chunk_); 2159 2160 int next_block = GetNextEmittedBlock(); 2161 2162 if (right_block == left_block || condition == al) { 2163 EmitGoto(left_block); 2164 } else if (left_block == next_block) { 2165 __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block)); 2166 } else if (right_block == next_block) { 2167 __ b(condition, chunk_->GetAssemblyLabel(left_block)); 2168 } else { 2169 __ b(condition, chunk_->GetAssemblyLabel(left_block)); 2170 __ b(chunk_->GetAssemblyLabel(right_block)); 2171 } 2172} 2173 2174 2175template<class InstrType> 2176void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) { 2177 int false_block = instr->FalseDestination(chunk_); 2178 __ b(condition, chunk_->GetAssemblyLabel(false_block)); 2179} 2180 2181 2182void LCodeGen::DoDebugBreak(LDebugBreak* instr) { 2183 __ stop("LBreak"); 2184} 2185 2186 2187void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) { 2188 Representation r = instr->hydrogen()->value()->representation(); 2189 if (r.IsSmiOrInteger32() || r.IsDouble()) { 2190 EmitBranch(instr, al); 2191 } else { 2192 ASSERT(r.IsTagged()); 2193 Register reg = ToRegister(instr->value()); 2194 HType type = instr->hydrogen()->value()->type(); 2195 if (type.IsTaggedNumber()) { 2196 EmitBranch(instr, al); 2197 } 2198 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); 2199 __ ldr(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset)); 2200 __ CompareRoot(scratch0(), Heap::kHeapNumberMapRootIndex); 2201 EmitBranch(instr, eq); 2202 } 2203} 2204 2205 2206void LCodeGen::DoBranch(LBranch* instr) { 2207 Representation r = instr->hydrogen()->value()->representation(); 2208 if (r.IsInteger32() || r.IsSmi()) { 2209 ASSERT(!info()->IsStub()); 2210 Register reg = ToRegister(instr->value()); 2211 __ cmp(reg, Operand::Zero()); 2212 EmitBranch(instr, ne); 2213 } else if (r.IsDouble()) { 2214 ASSERT(!info()->IsStub()); 2215 DwVfpRegister reg = ToDoubleRegister(instr->value()); 2216 // Test the double value. Zero and NaN are false. 2217 __ VFPCompareAndSetFlags(reg, 0.0); 2218 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false) 2219 EmitBranch(instr, ne); 2220 } else { 2221 ASSERT(r.IsTagged()); 2222 Register reg = ToRegister(instr->value()); 2223 HType type = instr->hydrogen()->value()->type(); 2224 if (type.IsBoolean()) { 2225 ASSERT(!info()->IsStub()); 2226 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 2227 EmitBranch(instr, eq); 2228 } else if (type.IsSmi()) { 2229 ASSERT(!info()->IsStub()); 2230 __ cmp(reg, Operand::Zero()); 2231 EmitBranch(instr, ne); 2232 } else if (type.IsJSArray()) { 2233 ASSERT(!info()->IsStub()); 2234 EmitBranch(instr, al); 2235 } else if (type.IsHeapNumber()) { 2236 ASSERT(!info()->IsStub()); 2237 DwVfpRegister dbl_scratch = double_scratch0(); 2238 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); 2239 // Test the double value. Zero and NaN are false. 2240 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); 2241 __ cmp(r0, r0, vs); // If NaN, set the Z flag. 
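      // 'cmp r0, r0' is only executed when the VFP comparison was unordered
      // (vs); comparing a register with itself sets the Z flag, so a NaN
      // falls through the following 'ne' branch as false.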
      EmitBranch(instr, ne);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
      __ cmp(ip, Operand::Zero());
      EmitBranch(instr, ne);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ b(eq, instr->TrueLabel(chunk_));
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ cmp(reg, Operand::Zero());
        __ b(eq, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ SmiTst(reg);
        DeoptimizeIf(eq, instr->environment());
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
          __ tst(ip, Operand(1 << Map::kIsUndetectable));
          __ b(ne, instr->FalseLabel(chunk_));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
        __ b(ge, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
        __ b(ge, &not_string);
        __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
        __ cmp(ip, Operand::Zero());
        __ b(ne, instr->TrueLabel(chunk_));
        __ b(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
        __ b(eq, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DwVfpRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ b(ne, &not_heap_number);
        __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
        __ cmp(r0, r0, vs);  // NaN -> false.
        __ b(eq, instr->FalseLabel(chunk_));  // +0, -0 -> false.
        __ b(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
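        // Deoptimizing unconditionally is safe: this point is reached only
        // for input types never recorded in 'expected', and the reoptimized
        // code will account for them (or go fully generic).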
2335 DeoptimizeIf(al, instr->environment()); 2336 } 2337 } 2338 } 2339} 2340 2341 2342void LCodeGen::EmitGoto(int block) { 2343 if (!IsNextEmittedBlock(block)) { 2344 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); 2345 } 2346} 2347 2348 2349void LCodeGen::DoGoto(LGoto* instr) { 2350 EmitGoto(instr->block_id()); 2351} 2352 2353 2354Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { 2355 Condition cond = kNoCondition; 2356 switch (op) { 2357 case Token::EQ: 2358 case Token::EQ_STRICT: 2359 cond = eq; 2360 break; 2361 case Token::LT: 2362 cond = is_unsigned ? lo : lt; 2363 break; 2364 case Token::GT: 2365 cond = is_unsigned ? hi : gt; 2366 break; 2367 case Token::LTE: 2368 cond = is_unsigned ? ls : le; 2369 break; 2370 case Token::GTE: 2371 cond = is_unsigned ? hs : ge; 2372 break; 2373 case Token::IN: 2374 case Token::INSTANCEOF: 2375 default: 2376 UNREACHABLE(); 2377 } 2378 return cond; 2379} 2380 2381 2382void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { 2383 LOperand* left = instr->left(); 2384 LOperand* right = instr->right(); 2385 Condition cond = TokenToCondition(instr->op(), false); 2386 2387 if (left->IsConstantOperand() && right->IsConstantOperand()) { 2388 // We can statically evaluate the comparison. 2389 double left_val = ToDouble(LConstantOperand::cast(left)); 2390 double right_val = ToDouble(LConstantOperand::cast(right)); 2391 int next_block = EvalComparison(instr->op(), left_val, right_val) ? 2392 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); 2393 EmitGoto(next_block); 2394 } else { 2395 if (instr->is_double()) { 2396 // Compare left and right operands as doubles and load the 2397 // resulting flags into the normal status register. 2398 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); 2399 // If a NaN is involved, i.e. the result is unordered (V set), 2400 // jump to false block label. 2401 __ b(vs, instr->FalseLabel(chunk_)); 2402 } else { 2403 if (right->IsConstantOperand()) { 2404 int32_t value = ToInteger32(LConstantOperand::cast(right)); 2405 if (instr->hydrogen_value()->representation().IsSmi()) { 2406 __ cmp(ToRegister(left), Operand(Smi::FromInt(value))); 2407 } else { 2408 __ cmp(ToRegister(left), Operand(value)); 2409 } 2410 } else if (left->IsConstantOperand()) { 2411 int32_t value = ToInteger32(LConstantOperand::cast(left)); 2412 if (instr->hydrogen_value()->representation().IsSmi()) { 2413 __ cmp(ToRegister(right), Operand(Smi::FromInt(value))); 2414 } else { 2415 __ cmp(ToRegister(right), Operand(value)); 2416 } 2417 // We transposed the operands. Reverse the condition. 
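        // For example, '1 < x' has been emitted as a compare of x against 1,
        // so lt must become gt (swapping operands reverses the condition, it
        // does not negate it).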
2418 cond = ReverseCondition(cond); 2419 } else { 2420 __ cmp(ToRegister(left), ToRegister(right)); 2421 } 2422 } 2423 EmitBranch(instr, cond); 2424 } 2425} 2426 2427 2428void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { 2429 Register left = ToRegister(instr->left()); 2430 Register right = ToRegister(instr->right()); 2431 2432 __ cmp(left, Operand(right)); 2433 EmitBranch(instr, eq); 2434} 2435 2436 2437void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { 2438 if (instr->hydrogen()->representation().IsTagged()) { 2439 Register input_reg = ToRegister(instr->object()); 2440 __ mov(ip, Operand(factory()->the_hole_value())); 2441 __ cmp(input_reg, ip); 2442 EmitBranch(instr, eq); 2443 return; 2444 } 2445 2446 DwVfpRegister input_reg = ToDoubleRegister(instr->object()); 2447 __ VFPCompareAndSetFlags(input_reg, input_reg); 2448 EmitFalseBranch(instr, vc); 2449 2450 Register scratch = scratch0(); 2451 __ VmovHigh(scratch, input_reg); 2452 __ cmp(scratch, Operand(kHoleNanUpper32)); 2453 EmitBranch(instr, eq); 2454} 2455 2456 2457Condition LCodeGen::EmitIsObject(Register input, 2458 Register temp1, 2459 Label* is_not_object, 2460 Label* is_object) { 2461 Register temp2 = scratch0(); 2462 __ JumpIfSmi(input, is_not_object); 2463 2464 __ LoadRoot(temp2, Heap::kNullValueRootIndex); 2465 __ cmp(input, temp2); 2466 __ b(eq, is_object); 2467 2468 // Load map. 2469 __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); 2470 // Undetectable objects behave like undefined. 2471 __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset)); 2472 __ tst(temp2, Operand(1 << Map::kIsUndetectable)); 2473 __ b(ne, is_not_object); 2474 2475 // Load instance type and check that it is in object type range. 2476 __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset)); 2477 __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2478 __ b(lt, is_not_object); 2479 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2480 return le; 2481} 2482 2483 2484void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { 2485 Register reg = ToRegister(instr->value()); 2486 Register temp1 = ToRegister(instr->temp()); 2487 2488 Condition true_cond = 2489 EmitIsObject(reg, temp1, 2490 instr->FalseLabel(chunk_), instr->TrueLabel(chunk_)); 2491 2492 EmitBranch(instr, true_cond); 2493} 2494 2495 2496Condition LCodeGen::EmitIsString(Register input, 2497 Register temp1, 2498 Label* is_not_string, 2499 SmiCheck check_needed = INLINE_SMI_CHECK) { 2500 if (check_needed == INLINE_SMI_CHECK) { 2501 __ JumpIfSmi(input, is_not_string); 2502 } 2503 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE); 2504 2505 return lt; 2506} 2507 2508 2509void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { 2510 Register reg = ToRegister(instr->value()); 2511 Register temp1 = ToRegister(instr->temp()); 2512 2513 SmiCheck check_needed = 2514 instr->hydrogen()->value()->IsHeapObject() 2515 ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; 2516 Condition true_cond = 2517 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); 2518 2519 EmitBranch(instr, true_cond); 2520} 2521 2522 2523void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { 2524 Register input_reg = EmitLoadRegister(instr->value(), ip); 2525 __ SmiTst(input_reg); 2526 EmitBranch(instr, eq); 2527} 2528 2529 2530void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { 2531 Register input = ToRegister(instr->value()); 2532 Register temp = ToRegister(instr->temp()); 2533 2534 if (!instr->hydrogen()->value()->IsHeapObject()) { 2535 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2536 } 2537 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); 2538 __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); 2539 __ tst(temp, Operand(1 << Map::kIsUndetectable)); 2540 EmitBranch(instr, ne); 2541} 2542 2543 2544static Condition ComputeCompareCondition(Token::Value op) { 2545 switch (op) { 2546 case Token::EQ_STRICT: 2547 case Token::EQ: 2548 return eq; 2549 case Token::LT: 2550 return lt; 2551 case Token::GT: 2552 return gt; 2553 case Token::LTE: 2554 return le; 2555 case Token::GTE: 2556 return ge; 2557 default: 2558 UNREACHABLE(); 2559 return kNoCondition; 2560 } 2561} 2562 2563 2564void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { 2565 Token::Value op = instr->op(); 2566 2567 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); 2568 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2569 // This instruction also signals no smi code inlined. 2570 __ cmp(r0, Operand::Zero()); 2571 2572 Condition condition = ComputeCompareCondition(op); 2573 2574 EmitBranch(instr, condition); 2575} 2576 2577 2578static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { 2579 InstanceType from = instr->from(); 2580 InstanceType to = instr->to(); 2581 if (from == FIRST_TYPE) return to; 2582 ASSERT(from == to || to == LAST_TYPE); 2583 return from; 2584} 2585 2586 2587static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { 2588 InstanceType from = instr->from(); 2589 InstanceType to = instr->to(); 2590 if (from == to) return eq; 2591 if (to == LAST_TYPE) return hs; 2592 if (from == FIRST_TYPE) return ls; 2593 UNREACHABLE(); 2594 return eq; 2595} 2596 2597 2598void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { 2599 Register scratch = scratch0(); 2600 Register input = ToRegister(instr->value()); 2601 2602 if (!instr->hydrogen()->value()->IsHeapObject()) { 2603 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2604 } 2605 2606 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); 2607 EmitBranch(instr, BranchCondition(instr->hydrogen())); 2608} 2609 2610 2611void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { 2612 Register input = ToRegister(instr->value()); 2613 Register result = ToRegister(instr->result()); 2614 2615 __ AssertString(input); 2616 2617 __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset)); 2618 __ IndexFromHash(result, result); 2619} 2620 2621 2622void LCodeGen::DoHasCachedArrayIndexAndBranch( 2623 LHasCachedArrayIndexAndBranch* instr) { 2624 Register input = ToRegister(instr->value()); 2625 Register scratch = scratch0(); 2626 2627 __ ldr(scratch, 2628 FieldMemOperand(input, String::kHashFieldOffset)); 2629 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask)); 2630 EmitBranch(instr, eq); 2631} 2632 2633 2634// Branches to a label or falls through with the answer in flags. 
// Trashes the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
    __ b(lt, is_false);
    __ b(eq, is_true);
    __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
    __ b(eq, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
    __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                          FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ b(gt, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ b(ne, is_true);
  } else {
    __ b(ne, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(temp, FieldMemOperand(temp,
                               SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a
  // literal. The name in the constructor is internalized because of the way
  // the context is booted. This routine isn't expected to work for random
  // API-created classes and it doesn't have to because you can't access it
  // with natives syntax. Since both sides are internalized it is sufficient
  // to use an identity comparison.
  __ cmp(temp, Operand(class_name));
  // End with the answer in flags.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, eq);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  __ cmp(temp, Operand(instr->map()));
  EmitBranch(instr, eq);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  ASSERT(ToRegister(instr->left()).is(r0));   // Object is in r0.
  ASSERT(ToRegister(instr->right()).is(r1));  // Function is in r1.

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

  __ cmp(r0, Operand::Zero());
  __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
  __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  ASSERT(object.is(r0));
  ASSERT(result.is(r0));

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  {
    // Block constant pool emission to ensure the positions of instructions
    // are as expected by the patcher. See InstanceofStub::Generate().
    Assembler::BlockConstPoolScope block_const_pool(masm());
    __ bind(deferred->map_check());  // Label for calculating code patching.
    // We use Factory::the_hole_value() on purpose instead of loading from the
    // root array to force relocation to be able to later patch with
    // the cached map.
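    // The scope below fixes this sequence at five instructions (load the
    // cell, load its value, compare, branch, load the result) so the patcher
    // can find each of them at a known offset from the map-check label.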
    PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
    Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
    __ mov(ip, Operand(Handle<Object>(cell)));
    __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
    __ cmp(map, Operand(ip));
    __ b(ne, &cache_miss);
    // We use Factory::the_hole_value() on purpose instead of loading from the
    // root array to force relocation to be able to later patch
    // with true or false.
    __ mov(result, Operand(factory()->the_hole_value()));
  }
  __ b(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not an instance of anything.
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(object, Operand(ip));
  __ b(eq, &false_result);

  // String values are not instances of anything.
  Condition is_string = masm_->IsObjectStringType(object, temp);
  __ b(is_string, &false_result);

  // Go to the deferred code.
  __ b(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. The deferred code also produces a
  // true or false object.
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  Register result = ToRegister(instr->result());
  ASSERT(result.is(r0));

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  // Get the temp register reserved by the instruction. This needs to be r4 as
  // its slot of the pushing of safepoint registers is used to communicate the
  // offset to the location of the map check.
  Register temp = ToRegister(instr->temp());
  ASSERT(temp.is(r4));
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
  static const int kAdditionalDelta = 5;
  // Make sure that code size is predictable, since we use specific constant
  // offsets in the code to find embedded values.
  PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  __ BlockConstPoolFor(kAdditionalDelta);
  __ mov(temp, Operand(delta * kPointerSize));
  // The mov above can generate one or two instructions. The delta was
  // computed for two instructions, so we need to pad here in case of one
  // instruction.
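  // A 32-bit immediate either fits a single mov/movw or needs a movw/movt
  // pair, hence the possible one-instruction shortfall that the nop below
  // compensates for.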
2852 if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) { 2853 ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta)); 2854 __ nop(); 2855 } 2856 __ StoreToSafepointRegisterSlot(temp, temp); 2857 CallCodeGeneric(stub.GetCode(isolate()), 2858 RelocInfo::CODE_TARGET, 2859 instr, 2860 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 2861 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); 2862 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 2863 // Put the result value into the result register slot and 2864 // restore all registers. 2865 __ StoreToSafepointRegisterSlot(result, result); 2866} 2867 2868 2869void LCodeGen::DoInstanceSize(LInstanceSize* instr) { 2870 Register object = ToRegister(instr->object()); 2871 Register result = ToRegister(instr->result()); 2872 __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset)); 2873 __ ldrb(result, FieldMemOperand(result, Map::kInstanceSizeOffset)); 2874} 2875 2876 2877void LCodeGen::DoCmpT(LCmpT* instr) { 2878 Token::Value op = instr->op(); 2879 2880 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); 2881 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2882 // This instruction also signals no smi code inlined. 2883 __ cmp(r0, Operand::Zero()); 2884 2885 Condition condition = ComputeCompareCondition(op); 2886 __ LoadRoot(ToRegister(instr->result()), 2887 Heap::kTrueValueRootIndex, 2888 condition); 2889 __ LoadRoot(ToRegister(instr->result()), 2890 Heap::kFalseValueRootIndex, 2891 NegateCondition(condition)); 2892} 2893 2894 2895void LCodeGen::DoReturn(LReturn* instr) { 2896 if (FLAG_trace && info()->IsOptimizing()) { 2897 // Push the return value on the stack as the parameter. 2898 // Runtime::TraceExit returns its parameter in r0. 
2899 __ push(r0); 2900 __ CallRuntime(Runtime::kTraceExit, 1); 2901 } 2902 if (info()->saves_caller_doubles()) { 2903 ASSERT(NeedsEagerFrame()); 2904 BitVector* doubles = chunk()->allocated_double_registers(); 2905 BitVector::Iterator save_iterator(doubles); 2906 int count = 0; 2907 while (!save_iterator.Done()) { 2908 __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), 2909 MemOperand(sp, count * kDoubleSize)); 2910 save_iterator.Advance(); 2911 count++; 2912 } 2913 } 2914 int no_frame_start = -1; 2915 if (NeedsEagerFrame()) { 2916 __ mov(sp, fp); 2917 no_frame_start = masm_->pc_offset(); 2918 __ ldm(ia_w, sp, fp.bit() | lr.bit()); 2919 } 2920 if (instr->has_constant_parameter_count()) { 2921 int parameter_count = ToInteger32(instr->constant_parameter_count()); 2922 int32_t sp_delta = (parameter_count + 1) * kPointerSize; 2923 if (sp_delta != 0) { 2924 __ add(sp, sp, Operand(sp_delta)); 2925 } 2926 } else { 2927 Register reg = ToRegister(instr->parameter_count()); 2928 // The argument count parameter is a smi 2929 __ SmiUntag(reg); 2930 __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2)); 2931 } 2932 2933 __ Jump(lr); 2934 2935 if (no_frame_start != -1) { 2936 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); 2937 } 2938} 2939 2940 2941void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { 2942 Register result = ToRegister(instr->result()); 2943 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); 2944 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); 2945 if (instr->hydrogen()->RequiresHoleCheck()) { 2946 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 2947 __ cmp(result, ip); 2948 DeoptimizeIf(eq, instr->environment()); 2949 } 2950} 2951 2952 2953void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { 2954 ASSERT(ToRegister(instr->global_object()).is(r0)); 2955 ASSERT(ToRegister(instr->result()).is(r0)); 2956 2957 __ mov(r2, Operand(instr->name())); 2958 RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET 2959 : RelocInfo::CODE_TARGET_CONTEXT; 2960 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); 2961 CallCode(ic, mode, instr); 2962} 2963 2964 2965void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { 2966 Register value = ToRegister(instr->value()); 2967 Register cell = scratch0(); 2968 2969 // Load the cell. 2970 __ mov(cell, Operand(instr->hydrogen()->cell())); 2971 2972 // If the cell we are storing to contains the hole it could have 2973 // been deleted from the property dictionary. In that case, we need 2974 // to update the property details in the property dictionary to mark 2975 // it as no longer deleted. 2976 if (instr->hydrogen()->RequiresHoleCheck()) { 2977 // We use a temp to check the payload (CompareRoot might clobber ip). 2978 Register payload = ToRegister(instr->temp()); 2979 __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); 2980 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); 2981 DeoptimizeIf(eq, instr->environment()); 2982 } 2983 2984 // Store the value. 2985 __ str(value, FieldMemOperand(cell, Cell::kValueOffset)); 2986 // Cells are always rescanned, so no write barrier here. 2987} 2988 2989 2990void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) { 2991 ASSERT(ToRegister(instr->global_object()).is(r1)); 2992 ASSERT(ToRegister(instr->value()).is(r0)); 2993 2994 __ mov(r2, Operand(instr->name())); 2995 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) 2996 ? 
isolate()->builtins()->StoreIC_Initialize_Strict() 2997 : isolate()->builtins()->StoreIC_Initialize(); 2998 CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr); 2999} 3000 3001 3002void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 3003 Register context = ToRegister(instr->context()); 3004 Register result = ToRegister(instr->result()); 3005 __ ldr(result, ContextOperand(context, instr->slot_index())); 3006 if (instr->hydrogen()->RequiresHoleCheck()) { 3007 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3008 __ cmp(result, ip); 3009 if (instr->hydrogen()->DeoptimizesOnHole()) { 3010 DeoptimizeIf(eq, instr->environment()); 3011 } else { 3012 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); 3013 } 3014 } 3015} 3016 3017 3018void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 3019 Register context = ToRegister(instr->context()); 3020 Register value = ToRegister(instr->value()); 3021 Register scratch = scratch0(); 3022 MemOperand target = ContextOperand(context, instr->slot_index()); 3023 3024 Label skip_assignment; 3025 3026 if (instr->hydrogen()->RequiresHoleCheck()) { 3027 __ ldr(scratch, target); 3028 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3029 __ cmp(scratch, ip); 3030 if (instr->hydrogen()->DeoptimizesOnHole()) { 3031 DeoptimizeIf(eq, instr->environment()); 3032 } else { 3033 __ b(ne, &skip_assignment); 3034 } 3035 } 3036 3037 __ str(value, target); 3038 if (instr->hydrogen()->NeedsWriteBarrier()) { 3039 SmiCheck check_needed = 3040 instr->hydrogen()->value()->IsHeapObject() 3041 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 3042 __ RecordWriteContextSlot(context, 3043 target.offset(), 3044 value, 3045 scratch, 3046 GetLinkRegisterState(), 3047 kSaveFPRegs, 3048 EMIT_REMEMBERED_SET, 3049 check_needed); 3050 } 3051 3052 __ bind(&skip_assignment); 3053} 3054 3055 3056void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 3057 HObjectAccess access = instr->hydrogen()->access(); 3058 int offset = access.offset(); 3059 Register object = ToRegister(instr->object()); 3060 3061 if (access.IsExternalMemory()) { 3062 Register result = ToRegister(instr->result()); 3063 __ ldr(result, MemOperand(object, offset)); 3064 return; 3065 } 3066 3067 if (instr->hydrogen()->representation().IsDouble()) { 3068 DwVfpRegister result = ToDoubleRegister(instr->result()); 3069 __ vldr(result, FieldMemOperand(object, offset)); 3070 return; 3071 } 3072 3073 Register result = ToRegister(instr->result()); 3074 if (access.IsInobject()) { 3075 __ ldr(result, FieldMemOperand(object, offset)); 3076 } else { 3077 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 3078 __ ldr(result, FieldMemOperand(result, offset)); 3079 } 3080} 3081 3082 3083void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { 3084 ASSERT(ToRegister(instr->object()).is(r0)); 3085 ASSERT(ToRegister(instr->result()).is(r0)); 3086 3087 // Name is always in r2. 3088 __ mov(r2, Operand(instr->name())); 3089 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); 3090 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 3091} 3092 3093 3094void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { 3095 Register scratch = scratch0(); 3096 Register function = ToRegister(instr->function()); 3097 Register result = ToRegister(instr->result()); 3098 3099 // Check that the function really is a function. Load map into the 3100 // result register. 
  __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
  DeoptimizeIf(ne, instr->environment());

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ b(ne, &non_instance);

  // Get the prototype or initial map from the function.
  __ ldr(result,
         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(result, ip);
  DeoptimizeIf(eq, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
  __ b(ne, &done);

  // Get the prototype from the initial map.
  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  __ bind(&non_instance);
  __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register to_reg = ToRegister(instr->result());
  Register from_reg = ToRegister(instr->object());
  __ ldr(to_reg, FieldMemOperand(from_reg,
                                 ExternalArray::kExternalPointerOffset));
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    int index = (const_length - const_index) + 1;
    __ ldr(result, MemOperand(arguments, index * kPointerSize));
  } else {
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    // There are two words between the frame pointer and the last argument.
    // Subtracting the index from the length accounts for one of them; add
    // one more to account for the other.
    __ sub(length, length, index);
    __ add(length, length, Operand(1));
    __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
  }
}


void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ?
(element_size_shift - kSmiTagSize) : element_size_shift; 3185 int additional_offset = instr->additional_index() << element_size_shift; 3186 3187 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || 3188 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { 3189 DwVfpRegister result = ToDoubleRegister(instr->result()); 3190 Operand operand = key_is_constant 3191 ? Operand(constant_key << element_size_shift) 3192 : Operand(key, LSL, shift_size); 3193 __ add(scratch0(), external_pointer, operand); 3194 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { 3195 __ vldr(double_scratch0().low(), scratch0(), additional_offset); 3196 __ vcvt_f64_f32(result, double_scratch0().low()); 3197 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS 3198 __ vldr(result, scratch0(), additional_offset); 3199 } 3200 } else { 3201 Register result = ToRegister(instr->result()); 3202 MemOperand mem_operand = PrepareKeyedOperand( 3203 key, external_pointer, key_is_constant, constant_key, 3204 element_size_shift, shift_size, 3205 instr->additional_index(), additional_offset); 3206 switch (elements_kind) { 3207 case EXTERNAL_BYTE_ELEMENTS: 3208 __ ldrsb(result, mem_operand); 3209 break; 3210 case EXTERNAL_PIXEL_ELEMENTS: 3211 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: 3212 __ ldrb(result, mem_operand); 3213 break; 3214 case EXTERNAL_SHORT_ELEMENTS: 3215 __ ldrsh(result, mem_operand); 3216 break; 3217 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: 3218 __ ldrh(result, mem_operand); 3219 break; 3220 case EXTERNAL_INT_ELEMENTS: 3221 __ ldr(result, mem_operand); 3222 break; 3223 case EXTERNAL_UNSIGNED_INT_ELEMENTS: 3224 __ ldr(result, mem_operand); 3225 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 3226 __ cmp(result, Operand(0x80000000)); 3227 DeoptimizeIf(cs, instr->environment()); 3228 } 3229 break; 3230 case EXTERNAL_FLOAT_ELEMENTS: 3231 case EXTERNAL_DOUBLE_ELEMENTS: 3232 case FAST_HOLEY_DOUBLE_ELEMENTS: 3233 case FAST_HOLEY_ELEMENTS: 3234 case FAST_HOLEY_SMI_ELEMENTS: 3235 case FAST_DOUBLE_ELEMENTS: 3236 case FAST_ELEMENTS: 3237 case FAST_SMI_ELEMENTS: 3238 case DICTIONARY_ELEMENTS: 3239 case NON_STRICT_ARGUMENTS_ELEMENTS: 3240 UNREACHABLE(); 3241 break; 3242 } 3243 } 3244} 3245 3246 3247void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { 3248 Register elements = ToRegister(instr->elements()); 3249 bool key_is_constant = instr->key()->IsConstantOperand(); 3250 Register key = no_reg; 3251 DwVfpRegister result = ToDoubleRegister(instr->result()); 3252 Register scratch = scratch0(); 3253 3254 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); 3255 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 3256 ? 
(element_size_shift - kSmiTagSize) : element_size_shift; 3257 int constant_key = 0; 3258 if (key_is_constant) { 3259 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3260 if (constant_key & 0xF0000000) { 3261 Abort(kArrayIndexConstantValueTooBig); 3262 } 3263 } else { 3264 key = ToRegister(instr->key()); 3265 } 3266 3267 int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) + 3268 ((constant_key + instr->additional_index()) << element_size_shift); 3269 if (!key_is_constant) { 3270 __ add(elements, elements, Operand(key, LSL, shift_size)); 3271 } 3272 __ add(elements, elements, Operand(base_offset)); 3273 __ vldr(result, elements, 0); 3274 if (instr->hydrogen()->RequiresHoleCheck()) { 3275 __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); 3276 __ cmp(scratch, Operand(kHoleNanUpper32)); 3277 DeoptimizeIf(eq, instr->environment()); 3278 } 3279} 3280 3281 3282void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3283 Register elements = ToRegister(instr->elements()); 3284 Register result = ToRegister(instr->result()); 3285 Register scratch = scratch0(); 3286 Register store_base = scratch; 3287 int offset = 0; 3288 3289 if (instr->key()->IsConstantOperand()) { 3290 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 3291 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + 3292 instr->additional_index()); 3293 store_base = elements; 3294 } else { 3295 Register key = EmitLoadRegister(instr->key(), scratch0()); 3296 // Even though the HLoadKeyed instruction forces the input 3297 // representation for the key to be an integer, the input gets replaced 3298 // during bound check elimination with the index argument to the bounds 3299 // check, which can be tagged, so that case must be handled here, too. 3300 if (instr->hydrogen()->key()->representation().IsSmi()) { 3301 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); 3302 } else { 3303 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); 3304 } 3305 offset = FixedArray::OffsetOfElementAt(instr->additional_index()); 3306 } 3307 __ ldr(result, FieldMemOperand(store_base, offset)); 3308 3309 // Check for the hole value. 
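  // In fast tagged arrays the hole is the_hole sentinel object; in fast smi
  // arrays any non-smi load must be the hole, so a smi test suffices there.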
3310 if (instr->hydrogen()->RequiresHoleCheck()) { 3311 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { 3312 __ SmiTst(result); 3313 DeoptimizeIf(ne, instr->environment()); 3314 } else { 3315 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 3316 __ cmp(result, scratch); 3317 DeoptimizeIf(eq, instr->environment()); 3318 } 3319 } 3320} 3321 3322 3323void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { 3324 if (instr->is_external()) { 3325 DoLoadKeyedExternalArray(instr); 3326 } else if (instr->hydrogen()->representation().IsDouble()) { 3327 DoLoadKeyedFixedDoubleArray(instr); 3328 } else { 3329 DoLoadKeyedFixedArray(instr); 3330 } 3331} 3332 3333 3334MemOperand LCodeGen::PrepareKeyedOperand(Register key, 3335 Register base, 3336 bool key_is_constant, 3337 int constant_key, 3338 int element_size, 3339 int shift_size, 3340 int additional_index, 3341 int additional_offset) { 3342 if (additional_index != 0 && !key_is_constant) { 3343 additional_index *= 1 << (element_size - shift_size); 3344 __ add(scratch0(), key, Operand(additional_index)); 3345 } 3346 3347 if (key_is_constant) { 3348 return MemOperand(base, 3349 (constant_key << element_size) + additional_offset); 3350 } 3351 3352 if (additional_index == 0) { 3353 if (shift_size >= 0) { 3354 return MemOperand(base, key, LSL, shift_size); 3355 } else { 3356 ASSERT_EQ(-1, shift_size); 3357 return MemOperand(base, key, LSR, 1); 3358 } 3359 } 3360 3361 if (shift_size >= 0) { 3362 return MemOperand(base, scratch0(), LSL, shift_size); 3363 } else { 3364 ASSERT_EQ(-1, shift_size); 3365 return MemOperand(base, scratch0(), LSR, 1); 3366 } 3367} 3368 3369 3370void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 3371 ASSERT(ToRegister(instr->object()).is(r1)); 3372 ASSERT(ToRegister(instr->key()).is(r0)); 3373 3374 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); 3375 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 3376} 3377 3378 3379void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { 3380 Register scratch = scratch0(); 3381 Register result = ToRegister(instr->result()); 3382 3383 if (instr->hydrogen()->from_inlined()) { 3384 __ sub(result, sp, Operand(2 * kPointerSize)); 3385 } else { 3386 // Check if the calling frame is an arguments adaptor frame. 3387 Label done, adapted; 3388 __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3389 __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); 3390 __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 3391 3392 // Result is the frame pointer for the frame if not adapted and for the real 3393 // frame below the adaptor frame if adapted. 3394 __ mov(result, fp, LeaveCC, ne); 3395 __ mov(result, scratch, LeaveCC, eq); 3396 } 3397} 3398 3399 3400void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { 3401 Register elem = ToRegister(instr->elements()); 3402 Register result = ToRegister(instr->result()); 3403 3404 Label done; 3405 3406 // If no arguments adaptor frame the number of arguments is fixed. 3407 __ cmp(fp, elem); 3408 __ mov(result, Operand(scope()->num_parameters())); 3409 __ b(eq, &done); 3410 3411 // Arguments adaptor frame present. Get argument length from there. 3412 __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3413 __ ldr(result, 3414 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3415 __ SmiUntag(result); 3416 3417 // Argument length is in result register. 
3418 __ bind(&done); 3419} 3420 3421 3422void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { 3423 Register receiver = ToRegister(instr->receiver()); 3424 Register function = ToRegister(instr->function()); 3425 Register scratch = scratch0(); 3426 3427 // If the receiver is null or undefined, we have to pass the global 3428 // object as a receiver to normal functions. Values have to be 3429 // passed unchanged to builtins and strict-mode functions. 3430 Label global_object, receiver_ok; 3431 3432 // Do not transform the receiver to object for strict mode 3433 // functions. 3434 __ ldr(scratch, 3435 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); 3436 __ ldr(scratch, 3437 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); 3438 __ tst(scratch, 3439 Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize))); 3440 __ b(ne, &receiver_ok); 3441 3442 // Do not transform the receiver to object for builtins. 3443 __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); 3444 __ b(ne, &receiver_ok); 3445 3446 // Normal function. Replace undefined or null with global receiver. 3447 __ LoadRoot(scratch, Heap::kNullValueRootIndex); 3448 __ cmp(receiver, scratch); 3449 __ b(eq, &global_object); 3450 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); 3451 __ cmp(receiver, scratch); 3452 __ b(eq, &global_object); 3453 3454 // Deoptimize if the receiver is not a JS object. 3455 __ SmiTst(receiver); 3456 DeoptimizeIf(eq, instr->environment()); 3457 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); 3458 DeoptimizeIf(lt, instr->environment()); 3459 __ jmp(&receiver_ok); 3460 3461 __ bind(&global_object); 3462 __ ldr(receiver, GlobalObjectOperand()); 3463 __ ldr(receiver, 3464 FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset)); 3465 __ bind(&receiver_ok); 3466} 3467 3468 3469void LCodeGen::DoApplyArguments(LApplyArguments* instr) { 3470 Register receiver = ToRegister(instr->receiver()); 3471 Register function = ToRegister(instr->function()); 3472 Register length = ToRegister(instr->length()); 3473 Register elements = ToRegister(instr->elements()); 3474 Register scratch = scratch0(); 3475 ASSERT(receiver.is(r0)); // Used for parameter count. 3476 ASSERT(function.is(r1)); // Required by InvokeFunction. 3477 ASSERT(ToRegister(instr->result()).is(r0)); 3478 3479 // Copy the arguments to this function possibly from the 3480 // adaptor frame below it. 3481 const uint32_t kArgumentsLimit = 1 * KB; 3482 __ cmp(length, Operand(kArgumentsLimit)); 3483 DeoptimizeIf(hi, instr->environment()); 3484 3485 // Push the receiver and use the register to keep the original 3486 // number of arguments. 3487 __ push(receiver); 3488 __ mov(receiver, length); 3489 // The arguments are at a one pointer size offset from elements. 3490 __ add(elements, elements, Operand(1 * kPointerSize)); 3491 3492 // Loop through the arguments pushing them onto the execution 3493 // stack. 3494 Label invoke, loop; 3495 // length is a small non-negative integer, due to the test above. 
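  // The copy loop walks the index from length down to 1; the SetCC on the
  // decrement doubles as the loop condition, so no extra compare is needed.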
3496 __ cmp(length, Operand::Zero()); 3497 __ b(eq, &invoke); 3498 __ bind(&loop); 3499 __ ldr(scratch, MemOperand(elements, length, LSL, 2)); 3500 __ push(scratch); 3501 __ sub(length, length, Operand(1), SetCC); 3502 __ b(ne, &loop); 3503 3504 __ bind(&invoke); 3505 ASSERT(instr->HasPointerMap()); 3506 LPointerMap* pointers = instr->pointer_map(); 3507 RecordPosition(pointers->position()); 3508 SafepointGenerator safepoint_generator( 3509 this, pointers, Safepoint::kLazyDeopt); 3510 // The number of arguments is stored in receiver which is r0, as expected 3511 // by InvokeFunction. 3512 ParameterCount actual(receiver); 3513 __ InvokeFunction(function, actual, CALL_FUNCTION, 3514 safepoint_generator, CALL_AS_METHOD); 3515 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 3516} 3517 3518 3519void LCodeGen::DoPushArgument(LPushArgument* instr) { 3520 LOperand* argument = instr->value(); 3521 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { 3522 Abort(kDoPushArgumentNotImplementedForDoubleType); 3523 } else { 3524 Register argument_reg = EmitLoadRegister(argument, ip); 3525 __ push(argument_reg); 3526 } 3527} 3528 3529 3530void LCodeGen::DoDrop(LDrop* instr) { 3531 __ Drop(instr->count()); 3532} 3533 3534 3535void LCodeGen::DoThisFunction(LThisFunction* instr) { 3536 Register result = ToRegister(instr->result()); 3537 __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); 3538} 3539 3540 3541void LCodeGen::DoContext(LContext* instr) { 3542 // If there is a non-return use, the context must be moved to a register. 3543 Register result = ToRegister(instr->result()); 3544 for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) { 3545 if (!it.value()->IsReturn()) { 3546 __ mov(result, cp); 3547 return; 3548 } 3549 } 3550} 3551 3552 3553void LCodeGen::DoOuterContext(LOuterContext* instr) { 3554 Register context = ToRegister(instr->context()); 3555 Register result = ToRegister(instr->result()); 3556 __ ldr(result, 3557 MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX))); 3558} 3559 3560 3561void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { 3562 __ push(cp); // The context is the first argument. 
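  // Runtime::kDeclareGlobals expects three stack arguments: the context,
  // the array of declaration pairs, and the declaration flags as a Smi.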
3563 __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs()); 3564 __ push(scratch0()); 3565 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); 3566 __ push(scratch0()); 3567 CallRuntime(Runtime::kDeclareGlobals, 3, instr); 3568} 3569 3570 3571void LCodeGen::DoGlobalObject(LGlobalObject* instr) { 3572 Register result = ToRegister(instr->result()); 3573 __ ldr(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); 3574} 3575 3576 3577void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { 3578 Register global = ToRegister(instr->global_object()); 3579 Register result = ToRegister(instr->result()); 3580 __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset)); 3581} 3582 3583 3584void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 3585 int formal_parameter_count, 3586 int arity, 3587 LInstruction* instr, 3588 CallKind call_kind, 3589 R1State r1_state) { 3590 bool dont_adapt_arguments = 3591 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; 3592 bool can_invoke_directly = 3593 dont_adapt_arguments || formal_parameter_count == arity; 3594 3595 LPointerMap* pointers = instr->pointer_map(); 3596 RecordPosition(pointers->position()); 3597 3598 if (can_invoke_directly) { 3599 if (r1_state == R1_UNINITIALIZED) { 3600 __ LoadHeapObject(r1, function); 3601 } 3602 3603 // Change context. 3604 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); 3605 3606 // Set r0 to arguments count if adaption is not needed. Assumes that r0 3607 // is available to write to at this point. 3608 if (dont_adapt_arguments) { 3609 __ mov(r0, Operand(arity)); 3610 } 3611 3612 // Invoke function. 3613 __ SetCallKind(r5, call_kind); 3614 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); 3615 __ Call(ip); 3616 3617 // Set up deoptimization. 3618 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 3619 } else { 3620 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3621 ParameterCount count(arity); 3622 ParameterCount expected(formal_parameter_count); 3623 __ InvokeFunction( 3624 function, expected, count, CALL_FUNCTION, generator, call_kind); 3625 } 3626 3627 // Restore context. 3628 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 3629} 3630 3631 3632void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { 3633 ASSERT(ToRegister(instr->result()).is(r0)); 3634 CallKnownFunction(instr->hydrogen()->function(), 3635 instr->hydrogen()->formal_parameter_count(), 3636 instr->arity(), 3637 instr, 3638 CALL_AS_METHOD, 3639 R1_UNINITIALIZED); 3640} 3641 3642 3643void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { 3644 Register input = ToRegister(instr->value()); 3645 Register result = ToRegister(instr->result()); 3646 Register scratch = scratch0(); 3647 3648 // Deoptimize if not a heap number. 3649 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 3650 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 3651 __ cmp(scratch, Operand(ip)); 3652 DeoptimizeIf(ne, instr->environment()); 3653 3654 Label done; 3655 Register exponent = scratch0(); 3656 scratch = no_reg; 3657 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3658 // Check the sign of the argument. If the argument is positive, just 3659 // return it. 3660 __ tst(exponent, Operand(HeapNumber::kSignMask)); 3661 // Move the input to the result if necessary. 3662 __ Move(result, input); 3663 __ b(eq, &done); 3664 3665 // Input is negative. Reverse its sign. 
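  // A HeapNumber may be shared, so its sign bit cannot be flipped in place;
  // instead a fresh HeapNumber is allocated and the value with a cleared
  // sign bit is stored there.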
3666 // Preserve the value of all registers. 3667 { 3668 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 3669 3670 // Registers were saved at the safepoint, so we can use 3671 // many scratch registers. 3672 Register tmp1 = input.is(r1) ? r0 : r1; 3673 Register tmp2 = input.is(r2) ? r0 : r2; 3674 Register tmp3 = input.is(r3) ? r0 : r3; 3675 Register tmp4 = input.is(r4) ? r0 : r4; 3676 3677 // exponent: floating point exponent value. 3678 3679 Label allocated, slow; 3680 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); 3681 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); 3682 __ b(&allocated); 3683 3684 // Slow case: Call the runtime system to do the number allocation. 3685 __ bind(&slow); 3686 3687 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); 3688 // Set the pointer to the new heap number in tmp. 3689 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0)); 3690 // Restore input_reg after call to runtime. 3691 __ LoadFromSafepointRegisterSlot(input, input); 3692 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3693 3694 __ bind(&allocated); 3695 // exponent: floating point exponent value. 3696 // tmp1: allocated heap number. 3697 __ bic(exponent, exponent, Operand(HeapNumber::kSignMask)); 3698 __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); 3699 __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); 3700 __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); 3701 3702 __ StoreToSafepointRegisterSlot(tmp1, result); 3703 } 3704 3705 __ bind(&done); 3706} 3707 3708 3709void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { 3710 Register input = ToRegister(instr->value()); 3711 Register result = ToRegister(instr->result()); 3712 __ cmp(input, Operand::Zero()); 3713 __ Move(result, input, pl); 3714 // We can make rsb conditional because the previous cmp instruction 3715 // will clear the V (overflow) flag and rsb won't set this flag 3716 // if input is positive. 3717 __ rsb(result, input, Operand::Zero(), SetCC, mi); 3718 // Deoptimize on overflow. 3719 DeoptimizeIf(vs, instr->environment()); 3720} 3721 3722 3723void LCodeGen::DoMathAbs(LMathAbs* instr) { 3724 // Class for deferred case. 3725 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { 3726 public: 3727 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) 3728 : LDeferredCode(codegen), instr_(instr) { } 3729 virtual void Generate() { 3730 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3731 } 3732 virtual LInstruction* instr() { return instr_; } 3733 private: 3734 LMathAbs* instr_; 3735 }; 3736 3737 Representation r = instr->hydrogen()->value()->representation(); 3738 if (r.IsDouble()) { 3739 DwVfpRegister input = ToDoubleRegister(instr->value()); 3740 DwVfpRegister result = ToDoubleRegister(instr->result()); 3741 __ vabs(result, input); 3742 } else if (r.IsSmiOrInteger32()) { 3743 EmitIntegerMathAbs(instr); 3744 } else { 3745 // Representation is tagged. 3746 DeferredMathAbsTaggedHeapNumber* deferred = 3747 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); 3748 Register input = ToRegister(instr->value()); 3749 // Smi check. 3750 __ JumpIfNotSmi(input, deferred->entry()); 3751 // If smi, handle it directly. 
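    // Operating on the tagged value directly is fine: a Smi is the integer
    // shifted left by one bit, and negation commutes with that shift.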
3752 EmitIntegerMathAbs(instr); 3753 __ bind(deferred->exit()); 3754 } 3755} 3756 3757 3758void LCodeGen::DoMathFloor(LMathFloor* instr) { 3759 DwVfpRegister input = ToDoubleRegister(instr->value()); 3760 Register result = ToRegister(instr->result()); 3761 Register input_high = scratch0(); 3762 Label done, exact; 3763 3764 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); 3765 DeoptimizeIf(al, instr->environment()); 3766 3767 __ bind(&exact); 3768 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3769 // Test for -0. 3770 __ cmp(result, Operand::Zero()); 3771 __ b(ne, &done); 3772 __ cmp(input_high, Operand::Zero()); 3773 DeoptimizeIf(mi, instr->environment()); 3774 } 3775 __ bind(&done); 3776} 3777 3778 3779void LCodeGen::DoMathRound(LMathRound* instr) { 3780 DwVfpRegister input = ToDoubleRegister(instr->value()); 3781 Register result = ToRegister(instr->result()); 3782 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); 3783 DwVfpRegister input_plus_dot_five = double_scratch1; 3784 Register input_high = scratch0(); 3785 DwVfpRegister dot_five = double_scratch0(); 3786 Label convert, done; 3787 3788 __ Vmov(dot_five, 0.5, scratch0()); 3789 __ vabs(double_scratch1, input); 3790 __ VFPCompareAndSetFlags(double_scratch1, dot_five); 3791 // If input is in [-0.5, -0], the result is -0. 3792 // If input is in [+0, +0.5[, the result is +0. 3793 // If the input is +0.5, the result is 1. 3794 __ b(hi, &convert); // Out of [-0.5, +0.5]. 3795 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3796 __ VmovHigh(input_high, input); 3797 __ cmp(input_high, Operand::Zero()); 3798 DeoptimizeIf(mi, instr->environment()); // [-0.5, -0]. 3799 } 3800 __ VFPCompareAndSetFlags(input, dot_five); 3801 __ mov(result, Operand(1), LeaveCC, eq); // +0.5. 3802 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on 3803 // flag kBailoutOnMinusZero. 3804 __ mov(result, Operand::Zero(), LeaveCC, ne); 3805 __ b(&done); 3806 3807 __ bind(&convert); 3808 __ vadd(input_plus_dot_five, input, dot_five); 3809 // Reuse dot_five (double_scratch0) as we no longer need this value. 3810 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), 3811 &done, &done); 3812 DeoptimizeIf(al, instr->environment()); 3813 __ bind(&done); 3814} 3815 3816 3817void LCodeGen::DoMathSqrt(LMathSqrt* instr) { 3818 DwVfpRegister input = ToDoubleRegister(instr->value()); 3819 DwVfpRegister result = ToDoubleRegister(instr->result()); 3820 __ vsqrt(result, input); 3821} 3822 3823 3824void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { 3825 DwVfpRegister input = ToDoubleRegister(instr->value()); 3826 DwVfpRegister result = ToDoubleRegister(instr->result()); 3827 DwVfpRegister temp = ToDoubleRegister(instr->temp()); 3828 3829 // Note that according to ECMA-262 15.8.2.13: 3830 // Math.pow(-Infinity, 0.5) == Infinity 3831 // Math.sqrt(-Infinity) == NaN 3832 Label done; 3833 __ vmov(temp, -V8_INFINITY, scratch0()); 3834 __ VFPCompareAndSetFlags(input, temp); 3835 __ vneg(result, temp, eq); 3836 __ b(&done, eq); 3837 3838 // Add +0 to convert -0 to +0. 3839 __ vadd(result, input, kDoubleRegZero); 3840 __ vsqrt(result, result); 3841 __ bind(&done); 3842} 3843 3844 3845void LCodeGen::DoPower(LPower* instr) { 3846 Representation exponent_type = instr->hydrogen()->right()->representation(); 3847 // Having marked this as a call, we can use any registers. 3848 // Just make sure that the input/output registers are the expected ones. 
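  // MathPowStub's fixed contract: base in d1, exponent in d2 (or in r2 when
  // not a double), result in d3; the asserts below check that contract.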
3849 ASSERT(!instr->right()->IsDoubleRegister() || 3850 ToDoubleRegister(instr->right()).is(d2)); 3851 ASSERT(!instr->right()->IsRegister() || 3852 ToRegister(instr->right()).is(r2)); 3853 ASSERT(ToDoubleRegister(instr->left()).is(d1)); 3854 ASSERT(ToDoubleRegister(instr->result()).is(d3)); 3855 3856 if (exponent_type.IsSmi()) { 3857 MathPowStub stub(MathPowStub::TAGGED); 3858 __ CallStub(&stub); 3859 } else if (exponent_type.IsTagged()) { 3860 Label no_deopt; 3861 __ JumpIfSmi(r2, &no_deopt); 3862 __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset)); 3863 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 3864 __ cmp(r7, Operand(ip)); 3865 DeoptimizeIf(ne, instr->environment()); 3866 __ bind(&no_deopt); 3867 MathPowStub stub(MathPowStub::TAGGED); 3868 __ CallStub(&stub); 3869 } else if (exponent_type.IsInteger32()) { 3870 MathPowStub stub(MathPowStub::INTEGER); 3871 __ CallStub(&stub); 3872 } else { 3873 ASSERT(exponent_type.IsDouble()); 3874 MathPowStub stub(MathPowStub::DOUBLE); 3875 __ CallStub(&stub); 3876 } 3877} 3878 3879 3880void LCodeGen::DoRandom(LRandom* instr) { 3881 class DeferredDoRandom: public LDeferredCode { 3882 public: 3883 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) 3884 : LDeferredCode(codegen), instr_(instr) { } 3885 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } 3886 virtual LInstruction* instr() { return instr_; } 3887 private: 3888 LRandom* instr_; 3889 }; 3890 3891 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr); 3892 3893 // Having marked this instruction as a call we can use any 3894 // registers. 3895 ASSERT(ToDoubleRegister(instr->result()).is(d7)); 3896 ASSERT(ToRegister(instr->global_object()).is(r0)); 3897 3898 static const int kSeedSize = sizeof(uint32_t); 3899 STATIC_ASSERT(kPointerSize == kSeedSize); 3900 3901 __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset)); 3902 static const int kRandomSeedOffset = 3903 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize; 3904 __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset)); 3905 // r2: FixedArray of the native context's random seeds 3906 3907 // Load state[0]. 3908 __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize)); 3909 __ cmp(r1, Operand::Zero()); 3910 __ b(eq, deferred->entry()); 3911 // Load state[1]. 3912 __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize)); 3913 // r1: state[0]. 3914 // r0: state[1]. 3915 3916 // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16) 3917 __ and_(r3, r1, Operand(0xFFFF)); 3918 __ mov(r4, Operand(18273)); 3919 __ mul(r3, r3, r4); 3920 __ add(r1, r3, Operand(r1, LSR, 16)); 3921 // Save state[0]. 3922 __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize)); 3923 3924 // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16) 3925 __ and_(r3, r0, Operand(0xFFFF)); 3926 __ mov(r4, Operand(36969)); 3927 __ mul(r3, r3, r4); 3928 __ add(r0, r3, Operand(r0, LSR, 16)); 3929 // Save state[1]. 3930 __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize)); 3931 3932 // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF) 3933 __ and_(r0, r0, Operand(0x3FFFF)); 3934 __ add(r0, r0, Operand(r1, LSL, 14)); 3935 3936 __ bind(deferred->exit()); 3937 // 0x41300000 is the top half of 1.0 x 2^20 as a double. 3938 // Create this constant using mov/orr to avoid PC relative load. 3939 __ mov(r1, Operand(0x41000000)); 3940 __ orr(r1, r1, Operand(0x300000)); 3941 // Move 0x41300000xxxxxxxx (x = random bits) to VFP. 
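  // The double with bit pattern 0x41300000_xxxxxxxx has the value
  // 2^20 + x * 2^-32, so subtracting 2^20 below leaves a uniform random
  // value in [0, 1).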
3942 __ vmov(d7, r0, r1); 3943 // Move 0x4130000000000000 to VFP. 3944 __ mov(r0, Operand::Zero()); 3945 __ vmov(d8, r0, r1); 3946 // Subtract and store the result in the heap number. 3947 __ vsub(d7, d7, d8); 3948} 3949 3950 3951void LCodeGen::DoDeferredRandom(LRandom* instr) { 3952 __ PrepareCallCFunction(1, scratch0()); 3953 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); 3954 // Return value is in r0. 3955} 3956 3957 3958void LCodeGen::DoMathExp(LMathExp* instr) { 3959 DwVfpRegister input = ToDoubleRegister(instr->value()); 3960 DwVfpRegister result = ToDoubleRegister(instr->result()); 3961 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); 3962 DwVfpRegister double_scratch2 = double_scratch0(); 3963 Register temp1 = ToRegister(instr->temp1()); 3964 Register temp2 = ToRegister(instr->temp2()); 3965 3966 MathExpGenerator::EmitMathExp( 3967 masm(), input, result, double_scratch1, double_scratch2, 3968 temp1, temp2, scratch0()); 3969} 3970 3971 3972void LCodeGen::DoMathLog(LMathLog* instr) { 3973 ASSERT(ToDoubleRegister(instr->result()).is(d2)); 3974 TranscendentalCacheStub stub(TranscendentalCache::LOG, 3975 TranscendentalCacheStub::UNTAGGED); 3976 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); 3977} 3978 3979 3980void LCodeGen::DoMathTan(LMathTan* instr) { 3981 ASSERT(ToDoubleRegister(instr->result()).is(d2)); 3982 TranscendentalCacheStub stub(TranscendentalCache::TAN, 3983 TranscendentalCacheStub::UNTAGGED); 3984 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); 3985} 3986 3987 3988void LCodeGen::DoMathCos(LMathCos* instr) { 3989 ASSERT(ToDoubleRegister(instr->result()).is(d2)); 3990 TranscendentalCacheStub stub(TranscendentalCache::COS, 3991 TranscendentalCacheStub::UNTAGGED); 3992 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); 3993} 3994 3995 3996void LCodeGen::DoMathSin(LMathSin* instr) { 3997 ASSERT(ToDoubleRegister(instr->result()).is(d2)); 3998 TranscendentalCacheStub stub(TranscendentalCache::SIN, 3999 TranscendentalCacheStub::UNTAGGED); 4000 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); 4001} 4002 4003 4004void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { 4005 ASSERT(ToRegister(instr->function()).is(r1)); 4006 ASSERT(instr->HasPointerMap()); 4007 4008 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); 4009 if (known_function.is_null()) { 4010 LPointerMap* pointers = instr->pointer_map(); 4011 RecordPosition(pointers->position()); 4012 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 4013 ParameterCount count(instr->arity()); 4014 __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD); 4015 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4016 } else { 4017 CallKnownFunction(known_function, 4018 instr->hydrogen()->formal_parameter_count(), 4019 instr->arity(), 4020 instr, 4021 CALL_AS_METHOD, 4022 R1_CONTAINS_TARGET); 4023 } 4024} 4025 4026 4027void LCodeGen::DoCallKeyed(LCallKeyed* instr) { 4028 ASSERT(ToRegister(instr->result()).is(r0)); 4029 4030 int arity = instr->arity(); 4031 Handle<Code> ic = 4032 isolate()->stub_cache()->ComputeKeyedCallInitialize(arity); 4033 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 4034 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4035} 4036 4037 4038void LCodeGen::DoCallNamed(LCallNamed* instr) { 4039 ASSERT(ToRegister(instr->result()).is(r0)); 4040 4041 int arity = instr->arity(); 4042 
RelocInfo::Mode mode = RelocInfo::CODE_TARGET; 4043 Handle<Code> ic = 4044 isolate()->stub_cache()->ComputeCallInitialize(arity, mode); 4045 __ mov(r2, Operand(instr->name())); 4046 CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS); 4047 // Restore context register. 4048 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4049} 4050 4051 4052void LCodeGen::DoCallFunction(LCallFunction* instr) { 4053 ASSERT(ToRegister(instr->function()).is(r1)); 4054 ASSERT(ToRegister(instr->result()).is(r0)); 4055 4056 int arity = instr->arity(); 4057 CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); 4058 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); 4059 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4060} 4061 4062 4063void LCodeGen::DoCallGlobal(LCallGlobal* instr) { 4064 ASSERT(ToRegister(instr->result()).is(r0)); 4065 4066 int arity = instr->arity(); 4067 RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT; 4068 Handle<Code> ic = 4069 isolate()->stub_cache()->ComputeCallInitialize(arity, mode); 4070 __ mov(r2, Operand(instr->name())); 4071 CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS); 4072 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4073} 4074 4075 4076void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { 4077 ASSERT(ToRegister(instr->result()).is(r0)); 4078 CallKnownFunction(instr->hydrogen()->target(), 4079 instr->hydrogen()->formal_parameter_count(), 4080 instr->arity(), 4081 instr, 4082 CALL_AS_FUNCTION, 4083 R1_UNINITIALIZED); 4084} 4085 4086 4087void LCodeGen::DoCallNew(LCallNew* instr) { 4088 ASSERT(ToRegister(instr->constructor()).is(r1)); 4089 ASSERT(ToRegister(instr->result()).is(r0)); 4090 4091 __ mov(r0, Operand(instr->arity())); 4092 // No cell in r2 for construct type feedback in optimized code 4093 Handle<Object> undefined_value(isolate()->factory()->undefined_value()); 4094 __ mov(r2, Operand(undefined_value)); 4095 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); 4096 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); 4097} 4098 4099 4100void LCodeGen::DoCallNewArray(LCallNewArray* instr) { 4101 ASSERT(ToRegister(instr->constructor()).is(r1)); 4102 ASSERT(ToRegister(instr->result()).is(r0)); 4103 4104 __ mov(r0, Operand(instr->arity())); 4105 __ mov(r2, Operand(instr->hydrogen()->property_cell())); 4106 ElementsKind kind = instr->hydrogen()->elements_kind(); 4107 AllocationSiteOverrideMode override_mode = 4108 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) 4109 ? 
DISABLE_ALLOCATION_SITES 4110 : DONT_OVERRIDE; 4111 ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED; 4112 4113 if (instr->arity() == 0) { 4114 ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode); 4115 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); 4116 } else if (instr->arity() == 1) { 4117 Label done; 4118 if (IsFastPackedElementsKind(kind)) { 4119 Label packed_case; 4120 // We might need a change here 4121 // look at the first argument 4122 __ ldr(r5, MemOperand(sp, 0)); 4123 __ cmp(r5, Operand::Zero()); 4124 __ b(eq, &packed_case); 4125 4126 ElementsKind holey_kind = GetHoleyElementsKind(kind); 4127 ArraySingleArgumentConstructorStub stub(holey_kind, context_mode, 4128 override_mode); 4129 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); 4130 __ jmp(&done); 4131 __ bind(&packed_case); 4132 } 4133 4134 ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode); 4135 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); 4136 __ bind(&done); 4137 } else { 4138 ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode); 4139 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); 4140 } 4141} 4142 4143 4144void LCodeGen::DoCallRuntime(LCallRuntime* instr) { 4145 CallRuntime(instr->function(), instr->arity(), instr); 4146} 4147 4148 4149void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { 4150 Register result = ToRegister(instr->result()); 4151 Register base = ToRegister(instr->base_object()); 4152 __ add(result, base, Operand(instr->offset())); 4153} 4154 4155 4156void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { 4157 Representation representation = instr->representation(); 4158 4159 Register object = ToRegister(instr->object()); 4160 Register scratch = scratch0(); 4161 HObjectAccess access = instr->hydrogen()->access(); 4162 int offset = access.offset(); 4163 4164 if (access.IsExternalMemory()) { 4165 Register value = ToRegister(instr->value()); 4166 __ str(value, MemOperand(object, offset)); 4167 return; 4168 } 4169 4170 Handle<Map> transition = instr->transition(); 4171 4172 if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { 4173 Register value = ToRegister(instr->value()); 4174 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 4175 __ SmiTst(value); 4176 DeoptimizeIf(eq, instr->environment()); 4177 } 4178 } else if (FLAG_track_double_fields && representation.IsDouble()) { 4179 ASSERT(transition.is_null()); 4180 ASSERT(access.IsInobject()); 4181 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); 4182 DwVfpRegister value = ToDoubleRegister(instr->value()); 4183 __ vstr(value, FieldMemOperand(object, offset)); 4184 return; 4185 } 4186 4187 if (!transition.is_null()) { 4188 __ mov(scratch, Operand(transition)); 4189 __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 4190 if (instr->hydrogen()->NeedsWriteBarrierForMap()) { 4191 Register temp = ToRegister(instr->temp()); 4192 // Update the write barrier for the map field. 4193 __ RecordWriteField(object, 4194 HeapObject::kMapOffset, 4195 scratch, 4196 temp, 4197 GetLinkRegisterState(), 4198 kSaveFPRegs, 4199 OMIT_REMEMBERED_SET, 4200 OMIT_SMI_CHECK); 4201 } 4202 } 4203 4204 // Do the store. 4205 Register value = ToRegister(instr->value()); 4206 ASSERT(!object.is(value)); 4207 SmiCheck check_needed = 4208 instr->hydrogen()->value()->IsHeapObject() 4209 ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; 4210 if (access.IsInobject()) { 4211 __ str(value, FieldMemOperand(object, offset)); 4212 if (instr->hydrogen()->NeedsWriteBarrier()) { 4213 // Update the write barrier for the object for in-object properties. 4214 __ RecordWriteField(object, 4215 offset, 4216 value, 4217 scratch, 4218 GetLinkRegisterState(), 4219 kSaveFPRegs, 4220 EMIT_REMEMBERED_SET, 4221 check_needed); 4222 } 4223 } else { 4224 __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); 4225 __ str(value, FieldMemOperand(scratch, offset)); 4226 if (instr->hydrogen()->NeedsWriteBarrier()) { 4227 // Update the write barrier for the properties array. 4228 // object is used as a scratch register. 4229 __ RecordWriteField(scratch, 4230 offset, 4231 value, 4232 object, 4233 GetLinkRegisterState(), 4234 kSaveFPRegs, 4235 EMIT_REMEMBERED_SET, 4236 check_needed); 4237 } 4238 } 4239} 4240 4241 4242void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 4243 ASSERT(ToRegister(instr->object()).is(r1)); 4244 ASSERT(ToRegister(instr->value()).is(r0)); 4245 4246 // Name is always in r2. 4247 __ mov(r2, Operand(instr->name())); 4248 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) 4249 ? isolate()->builtins()->StoreIC_Initialize_Strict() 4250 : isolate()->builtins()->StoreIC_Initialize(); 4251 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 4252} 4253 4254 4255void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) { 4256 if (FLAG_debug_code && check->hydrogen()->skip_check()) { 4257 Label done; 4258 __ b(NegateCondition(condition), &done); 4259 __ stop("eliminated bounds check failed"); 4260 __ bind(&done); 4261 } else { 4262 DeoptimizeIf(condition, check->environment()); 4263 } 4264} 4265 4266 4267void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { 4268 if (instr->hydrogen()->skip_check()) return; 4269 4270 if (instr->index()->IsConstantOperand()) { 4271 int constant_index = 4272 ToInteger32(LConstantOperand::cast(instr->index())); 4273 if (instr->hydrogen()->length()->representation().IsSmi()) { 4274 __ mov(ip, Operand(Smi::FromInt(constant_index))); 4275 } else { 4276 __ mov(ip, Operand(constant_index)); 4277 } 4278 __ cmp(ip, ToRegister(instr->length())); 4279 } else { 4280 __ cmp(ToRegister(instr->index()), ToRegister(instr->length())); 4281 } 4282 Condition condition = instr->hydrogen()->allow_equality() ? hi : hs; 4283 ApplyCheckIf(condition, instr); 4284} 4285 4286 4287void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 4288 Register external_pointer = ToRegister(instr->elements()); 4289 Register key = no_reg; 4290 ElementsKind elements_kind = instr->elements_kind(); 4291 bool key_is_constant = instr->key()->IsConstantOperand(); 4292 int constant_key = 0; 4293 if (key_is_constant) { 4294 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4295 if (constant_key & 0xF0000000) { 4296 Abort(kArrayIndexConstantValueTooBig); 4297 } 4298 } else { 4299 key = ToRegister(instr->key()); 4300 } 4301 int element_size_shift = ElementsKindToShiftSize(elements_kind); 4302 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 4303 ? (element_size_shift - kSmiTagSize) : element_size_shift; 4304 int additional_offset = instr->additional_index() << element_size_shift; 4305 4306 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || 4307 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { 4308 DwVfpRegister value(ToDoubleRegister(instr->value())); 4309 Operand operand(key_is_constant 4310 ? 
Operand(constant_key << element_size_shift) 4311 : Operand(key, LSL, shift_size)); 4312 __ add(scratch0(), external_pointer, operand); 4313 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { 4314 __ vcvt_f32_f64(double_scratch0().low(), value); 4315 __ vstr(double_scratch0().low(), scratch0(), additional_offset); 4316 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS 4317 __ vstr(value, scratch0(), additional_offset); 4318 } 4319 } else { 4320 Register value(ToRegister(instr->value())); 4321 MemOperand mem_operand = PrepareKeyedOperand( 4322 key, external_pointer, key_is_constant, constant_key, 4323 element_size_shift, shift_size, 4324 instr->additional_index(), additional_offset); 4325 switch (elements_kind) { 4326 case EXTERNAL_PIXEL_ELEMENTS: 4327 case EXTERNAL_BYTE_ELEMENTS: 4328 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: 4329 __ strb(value, mem_operand); 4330 break; 4331 case EXTERNAL_SHORT_ELEMENTS: 4332 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: 4333 __ strh(value, mem_operand); 4334 break; 4335 case EXTERNAL_INT_ELEMENTS: 4336 case EXTERNAL_UNSIGNED_INT_ELEMENTS: 4337 __ str(value, mem_operand); 4338 break; 4339 case EXTERNAL_FLOAT_ELEMENTS: 4340 case EXTERNAL_DOUBLE_ELEMENTS: 4341 case FAST_DOUBLE_ELEMENTS: 4342 case FAST_ELEMENTS: 4343 case FAST_SMI_ELEMENTS: 4344 case FAST_HOLEY_DOUBLE_ELEMENTS: 4345 case FAST_HOLEY_ELEMENTS: 4346 case FAST_HOLEY_SMI_ELEMENTS: 4347 case DICTIONARY_ELEMENTS: 4348 case NON_STRICT_ARGUMENTS_ELEMENTS: 4349 UNREACHABLE(); 4350 break; 4351 } 4352 } 4353} 4354 4355 4356void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { 4357 DwVfpRegister value = ToDoubleRegister(instr->value()); 4358 Register elements = ToRegister(instr->elements()); 4359 Register key = no_reg; 4360 Register scratch = scratch0(); 4361 bool key_is_constant = instr->key()->IsConstantOperand(); 4362 int constant_key = 0; 4363 4364 // Calculate the effective address of the slot in the array to store the 4365 // double value. 4366 if (key_is_constant) { 4367 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4368 if (constant_key & 0xF0000000) { 4369 Abort(kArrayIndexConstantValueTooBig); 4370 } 4371 } else { 4372 key = ToRegister(instr->key()); 4373 } 4374 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); 4375 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 4376 ? (element_size_shift - kSmiTagSize) : element_size_shift; 4377 Operand operand = key_is_constant 4378 ? Operand((constant_key << element_size_shift) + 4379 FixedDoubleArray::kHeaderSize - kHeapObjectTag) 4380 : Operand(key, LSL, shift_size); 4381 __ add(scratch, elements, operand); 4382 if (!key_is_constant) { 4383 __ add(scratch, scratch, 4384 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); 4385 } 4386 4387 if (instr->NeedsCanonicalization()) { 4388 // Force a canonical NaN. 4389 if (masm()->emit_debug_code()) { 4390 __ vmrs(ip); 4391 __ tst(ip, Operand(kVFPDefaultNaNModeControlBit)); 4392 __ Assert(ne, kDefaultNaNModeNotSet); 4393 } 4394 __ VFPCanonicalizeNaN(value); 4395 } 4396 __ vstr(value, scratch, instr->additional_index() << element_size_shift); 4397} 4398 4399 4400void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { 4401 Register value = ToRegister(instr->value()); 4402 Register elements = ToRegister(instr->elements()); 4403 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) 4404 : no_reg; 4405 Register scratch = scratch0(); 4406 Register store_base = scratch; 4407 int offset = 0; 4408 4409 // Do the store. 
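  // As on the load side, a constant key folds into the immediate offset,
  // while a register key is scaled and added into store_base first.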
4410  if (instr->key()->IsConstantOperand()) {
4411    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4412    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4413    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
4414                                           instr->additional_index());
4415    store_base = elements;
4416  } else {
4417    // Even though the HStoreKeyed instruction forces the input
4418    // representation for the key to be an integer, the input gets replaced
4419    // during bounds check elimination with the index argument to the bounds
4420    // check, which can be tagged, so that case must be handled here, too.
4421    if (instr->hydrogen()->key()->representation().IsSmi()) {
4422      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
4423    } else {
4424      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
4425    }
4426    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
4427  }
4428  __ str(value, FieldMemOperand(store_base, offset));
4429
4430  if (instr->hydrogen()->NeedsWriteBarrier()) {
4431    SmiCheck check_needed =
4432        instr->hydrogen()->value()->IsHeapObject()
4433            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4434    // Compute address of modified element and store it into key register.
4435    __ add(key, store_base, Operand(offset - kHeapObjectTag));
4436    __ RecordWrite(elements,
4437                   key,
4438                   value,
4439                   GetLinkRegisterState(),
4440                   kSaveFPRegs,
4441                   EMIT_REMEMBERED_SET,
4442                   check_needed);
4443  }
4444}
4445
4446
4447void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4448  // Dispatch by case: external, fast double, or fast tagged elements.
4449  if (instr->is_external()) {
4450    DoStoreKeyedExternalArray(instr);
4451  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4452    DoStoreKeyedFixedDoubleArray(instr);
4453  } else {
4454    DoStoreKeyedFixedArray(instr);
4455  }
4456}
4457
4458
4459void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4460  ASSERT(ToRegister(instr->object()).is(r2));
4461  ASSERT(ToRegister(instr->key()).is(r1));
4462  ASSERT(ToRegister(instr->value()).is(r0));
4463
4464  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
4465      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4466      : isolate()->builtins()->KeyedStoreIC_Initialize();
4467  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4468}
4469
4470
4471void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4472  Register object_reg = ToRegister(instr->object());
4473  Register scratch = scratch0();
4474
4475  Handle<Map> from_map = instr->original_map();
4476  Handle<Map> to_map = instr->transitioned_map();
4477  ElementsKind from_kind = instr->from_kind();
4478  ElementsKind to_kind = instr->to_kind();
4479
4480  Label not_applicable;
4481  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4482  __ cmp(scratch, Operand(from_map));
4483  __ b(ne, &not_applicable);
4484
4485  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4486    Register new_map_reg = ToRegister(instr->new_map_temp());
4487    __ mov(new_map_reg, Operand(to_map));
4488    __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4489    // Write barrier.
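    // The incremental marker must learn about the new map pointer, hence
    // a full RecordWriteField even though only the map word changed.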
4490    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
4491                        scratch, GetLinkRegisterState(), kDontSaveFPRegs);
4492  } else {
4493    PushSafepointRegistersScope scope(
4494        this, Safepoint::kWithRegistersAndDoubles);
4495    __ Move(r0, object_reg);
4496    __ Move(r1, to_map);
4497    TransitionElementsKindStub stub(from_kind, to_kind);
4498    __ CallStub(&stub);
4499    RecordSafepointWithRegistersAndDoubles(
4500        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4501  }
4502  __ bind(&not_applicable);
4503}
4504
4505
4506void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4507  Register object = ToRegister(instr->object());
4508  Register temp = ToRegister(instr->temp());
4509  __ TestJSArrayForAllocationMemento(object, temp);
4510  DeoptimizeIf(eq, instr->environment());
4511}
4512
4513
4514void LCodeGen::DoStringAdd(LStringAdd* instr) {
4515  __ push(ToRegister(instr->left()));
4516  __ push(ToRegister(instr->right()));
4517  StringAddStub stub(instr->hydrogen()->flags());
4518  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4519}
4520
4521
4522void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4523  class DeferredStringCharCodeAt: public LDeferredCode {
4524   public:
4525    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4526        : LDeferredCode(codegen), instr_(instr) { }
4527    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
4528    virtual LInstruction* instr() { return instr_; }
4529   private:
4530    LStringCharCodeAt* instr_;
4531  };
4532
4533  DeferredStringCharCodeAt* deferred =
4534      new(zone()) DeferredStringCharCodeAt(this, instr);
4535
4536  StringCharLoadGenerator::Generate(masm(),
4537                                    ToRegister(instr->string()),
4538                                    ToRegister(instr->index()),
4539                                    ToRegister(instr->result()),
4540                                    deferred->entry());
4541  __ bind(deferred->exit());
4542}
4543
4544
4545void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4546  Register string = ToRegister(instr->string());
4547  Register result = ToRegister(instr->result());
4548  Register scratch = scratch0();
4549
4550  // TODO(3095996): Get rid of this. For now, we need to make the
4551  // result register contain a valid pointer because it is already
4552  // contained in the register pointer map.
4553  __ mov(result, Operand::Zero());
4554
4555  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4556  __ push(string);
4557  // Push the index as a smi. This is safe because of the checks in
4558  // DoStringCharCodeAt above.
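  // (String indices are bounded by String::kMaxLength, which always fits
  // in a Smi.)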
4559 if (instr->index()->IsConstantOperand()) { 4560 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 4561 __ mov(scratch, Operand(Smi::FromInt(const_index))); 4562 __ push(scratch); 4563 } else { 4564 Register index = ToRegister(instr->index()); 4565 __ SmiTag(index); 4566 __ push(index); 4567 } 4568 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr); 4569 __ AssertSmi(r0); 4570 __ SmiUntag(r0); 4571 __ StoreToSafepointRegisterSlot(r0, result); 4572} 4573 4574 4575void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { 4576 class DeferredStringCharFromCode: public LDeferredCode { 4577 public: 4578 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) 4579 : LDeferredCode(codegen), instr_(instr) { } 4580 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } 4581 virtual LInstruction* instr() { return instr_; } 4582 private: 4583 LStringCharFromCode* instr_; 4584 }; 4585 4586 DeferredStringCharFromCode* deferred = 4587 new(zone()) DeferredStringCharFromCode(this, instr); 4588 4589 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); 4590 Register char_code = ToRegister(instr->char_code()); 4591 Register result = ToRegister(instr->result()); 4592 ASSERT(!char_code.is(result)); 4593 4594 __ cmp(char_code, Operand(String::kMaxOneByteCharCode)); 4595 __ b(hi, deferred->entry()); 4596 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); 4597 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2)); 4598 __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize)); 4599 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 4600 __ cmp(result, ip); 4601 __ b(eq, deferred->entry()); 4602 __ bind(deferred->exit()); 4603} 4604 4605 4606void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { 4607 Register char_code = ToRegister(instr->char_code()); 4608 Register result = ToRegister(instr->result()); 4609 4610 // TODO(3095996): Get rid of this. For now, we need to make the 4611 // result register contain a valid pointer because it is already 4612 // contained in the register pointer map. 
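  // Smi zero is safe for the GC to scan, unlike whatever garbage the
  // register might otherwise hold across the runtime call.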
4613 __ mov(result, Operand::Zero()); 4614 4615 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 4616 __ SmiTag(char_code); 4617 __ push(char_code); 4618 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr); 4619 __ StoreToSafepointRegisterSlot(r0, result); 4620} 4621 4622 4623void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4624 LOperand* input = instr->value(); 4625 ASSERT(input->IsRegister() || input->IsStackSlot()); 4626 LOperand* output = instr->result(); 4627 ASSERT(output->IsDoubleRegister()); 4628 SwVfpRegister single_scratch = double_scratch0().low(); 4629 if (input->IsStackSlot()) { 4630 Register scratch = scratch0(); 4631 __ ldr(scratch, ToMemOperand(input)); 4632 __ vmov(single_scratch, scratch); 4633 } else { 4634 __ vmov(single_scratch, ToRegister(input)); 4635 } 4636 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch); 4637} 4638 4639 4640void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) { 4641 LOperand* input = instr->value(); 4642 LOperand* output = instr->result(); 4643 __ SmiTag(ToRegister(output), ToRegister(input), SetCC); 4644 if (!instr->hydrogen()->value()->HasRange() || 4645 !instr->hydrogen()->value()->range()->IsInSmiRange()) { 4646 DeoptimizeIf(vs, instr->environment()); 4647 } 4648} 4649 4650 4651void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4652 LOperand* input = instr->value(); 4653 LOperand* output = instr->result(); 4654 4655 SwVfpRegister flt_scratch = double_scratch0().low(); 4656 __ vmov(flt_scratch, ToRegister(input)); 4657 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch); 4658} 4659 4660 4661void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) { 4662 LOperand* input = instr->value(); 4663 LOperand* output = instr->result(); 4664 if (!instr->hydrogen()->value()->HasRange() || 4665 !instr->hydrogen()->value()->range()->IsInSmiRange()) { 4666 __ tst(ToRegister(input), Operand(0xc0000000)); 4667 DeoptimizeIf(ne, instr->environment()); 4668 } 4669 __ SmiTag(ToRegister(output), ToRegister(input)); 4670} 4671 4672 4673void LCodeGen::DoNumberTagI(LNumberTagI* instr) { 4674 class DeferredNumberTagI: public LDeferredCode { 4675 public: 4676 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) 4677 : LDeferredCode(codegen), instr_(instr) { } 4678 virtual void Generate() { 4679 codegen()->DoDeferredNumberTagI(instr_, 4680 instr_->value(), 4681 SIGNED_INT32); 4682 } 4683 virtual LInstruction* instr() { return instr_; } 4684 private: 4685 LNumberTagI* instr_; 4686 }; 4687 4688 Register src = ToRegister(instr->value()); 4689 Register dst = ToRegister(instr->result()); 4690 4691 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); 4692 __ SmiTag(dst, src, SetCC); 4693 __ b(vs, deferred->entry()); 4694 __ bind(deferred->exit()); 4695} 4696 4697 4698void LCodeGen::DoNumberTagU(LNumberTagU* instr) { 4699 class DeferredNumberTagU: public LDeferredCode { 4700 public: 4701 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) 4702 : LDeferredCode(codegen), instr_(instr) { } 4703 virtual void Generate() { 4704 codegen()->DoDeferredNumberTagI(instr_, 4705 instr_->value(), 4706 UNSIGNED_INT32); 4707 } 4708 virtual LInstruction* instr() { return instr_; } 4709 private: 4710 LNumberTagU* instr_; 4711 }; 4712 4713 LOperand* input = instr->value(); 4714 ASSERT(input->IsRegister() && input->Equals(instr->result())); 4715 Register reg = ToRegister(input); 4716 4717 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); 4718 __ cmp(reg, Operand(Smi::kMaxValue)); 4719 
__ b(hi, deferred->entry()); 4720 __ SmiTag(reg, reg); 4721 __ bind(deferred->exit()); 4722} 4723 4724 4725void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, 4726 LOperand* value, 4727 IntegerSignedness signedness) { 4728 Label slow; 4729 Register src = ToRegister(value); 4730 Register dst = ToRegister(instr->result()); 4731 LowDwVfpRegister dbl_scratch = double_scratch0(); 4732 4733 // Preserve the value of all registers. 4734 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 4735 4736 Label done; 4737 if (signedness == SIGNED_INT32) { 4738 // There was overflow, so bits 30 and 31 of the original integer 4739 // disagree. Try to allocate a heap number in new space and store 4740 // the value in there. If that fails, call the runtime system. 4741 if (dst.is(src)) { 4742 __ SmiUntag(src, dst); 4743 __ eor(src, src, Operand(0x80000000)); 4744 } 4745 __ vmov(dbl_scratch.low(), src); 4746 __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low()); 4747 } else { 4748 __ vmov(dbl_scratch.low(), src); 4749 __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low()); 4750 } 4751 4752 if (FLAG_inline_new) { 4753 __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex); 4754 __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT); 4755 __ Move(dst, r5); 4756 __ b(&done); 4757 } 4758 4759 // Slow case: Call the runtime system to do the number allocation. 4760 __ bind(&slow); 4761 4762 // TODO(3095996): Put a valid pointer value in the stack slot where the result 4763 // register is stored, as this register is in the pointer map, but contains an 4764 // integer value. 4765 __ mov(ip, Operand::Zero()); 4766 __ StoreToSafepointRegisterSlot(ip, dst); 4767 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); 4768 __ Move(dst, r0); 4769 __ sub(dst, dst, Operand(kHeapObjectTag)); 4770 4771 // Done. Put the value in dbl_scratch into the value of the allocated heap 4772 // number. 4773 __ bind(&done); 4774 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); 4775 __ add(dst, dst, Operand(kHeapObjectTag)); 4776 __ StoreToSafepointRegisterSlot(dst, dst); 4777} 4778 4779 4780void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4781 class DeferredNumberTagD: public LDeferredCode { 4782 public: 4783 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 4784 : LDeferredCode(codegen), instr_(instr) { } 4785 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } 4786 virtual LInstruction* instr() { return instr_; } 4787 private: 4788 LNumberTagD* instr_; 4789 }; 4790 4791 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); 4792 Register scratch = scratch0(); 4793 Register reg = ToRegister(instr->result()); 4794 Register temp1 = ToRegister(instr->temp()); 4795 Register temp2 = ToRegister(instr->temp2()); 4796 4797 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); 4798 if (FLAG_inline_new) { 4799 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); 4800 // We want the untagged address first for performance 4801 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), 4802 DONT_TAG_RESULT); 4803 } else { 4804 __ jmp(deferred->entry()); 4805 } 4806 __ bind(deferred->exit()); 4807 __ vstr(input_reg, reg, HeapNumber::kValueOffset); 4808 // Now that we have finished with the object's real address tag it 4809 __ add(reg, reg, Operand(kHeapObjectTag)); 4810} 4811 4812 4813void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 4814 // TODO(3095996): Get rid of this. 
For now, we need to make the 4815 // result register contain a valid pointer because it is already 4816 // contained in the register pointer map. 4817 Register reg = ToRegister(instr->result()); 4818 __ mov(reg, Operand::Zero()); 4819 4820 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 4821 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); 4822 __ sub(r0, r0, Operand(kHeapObjectTag)); 4823 __ StoreToSafepointRegisterSlot(r0, reg); 4824} 4825 4826 4827void LCodeGen::DoSmiTag(LSmiTag* instr) { 4828 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); 4829 __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value())); 4830} 4831 4832 4833void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4834 Register input = ToRegister(instr->value()); 4835 Register result = ToRegister(instr->result()); 4836 if (instr->needs_check()) { 4837 STATIC_ASSERT(kHeapObjectTag == 1); 4838 // If the input is a HeapObject, SmiUntag will set the carry flag. 4839 __ SmiUntag(result, input, SetCC); 4840 DeoptimizeIf(cs, instr->environment()); 4841 } else { 4842 __ SmiUntag(result, input); 4843 } 4844} 4845 4846 4847void LCodeGen::EmitNumberUntagD(Register input_reg, 4848 DwVfpRegister result_reg, 4849 bool can_convert_undefined_to_nan, 4850 bool deoptimize_on_minus_zero, 4851 LEnvironment* env, 4852 NumberUntagDMode mode) { 4853 Register scratch = scratch0(); 4854 SwVfpRegister flt_scratch = double_scratch0().low(); 4855 ASSERT(!result_reg.is(double_scratch0())); 4856 4857 Label load_smi, heap_number, done; 4858 4859 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 4860 // Smi check. 4861 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); 4862 4863 // Heap number map check. 4864 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4865 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 4866 __ cmp(scratch, Operand(ip)); 4867 if (!can_convert_undefined_to_nan) { 4868 DeoptimizeIf(ne, env); 4869 } else { 4870 Label heap_number, convert; 4871 __ b(eq, &heap_number); 4872 4873 // Convert undefined (and hole) to NaN. 4874 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 4875 __ cmp(input_reg, Operand(ip)); 4876 DeoptimizeIf(ne, env); 4877 4878 __ bind(&convert); 4879 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 4880 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); 4881 __ jmp(&done); 4882 4883 __ bind(&heap_number); 4884 } 4885 // Heap number to double register conversion. 
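    // The minus-zero test below matches the exact bit pattern of -0.0:
    // a zero low word and a high word equal to the sign bit.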
4886 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); 4887 if (deoptimize_on_minus_zero) { 4888 __ VmovLow(scratch, result_reg); 4889 __ cmp(scratch, Operand::Zero()); 4890 __ b(ne, &done); 4891 __ VmovHigh(scratch, result_reg); 4892 __ cmp(scratch, Operand(HeapNumber::kSignMask)); 4893 DeoptimizeIf(eq, env); 4894 } 4895 __ jmp(&done); 4896 } else { 4897 __ SmiUntag(scratch, input_reg); 4898 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); 4899 } 4900 4901 // Smi to double register conversion 4902 __ bind(&load_smi); 4903 // scratch: untagged value of input_reg 4904 __ vmov(flt_scratch, scratch); 4905 __ vcvt_f64_s32(result_reg, flt_scratch); 4906 __ bind(&done); 4907} 4908 4909 4910void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { 4911 Register input_reg = ToRegister(instr->value()); 4912 Register scratch1 = scratch0(); 4913 Register scratch2 = ToRegister(instr->temp()); 4914 LowDwVfpRegister double_scratch = double_scratch0(); 4915 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3()); 4916 4917 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2)); 4918 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1)); 4919 4920 Label done; 4921 4922 // The input was optimistically untagged; revert it. 4923 // The carry flag is set when we reach this deferred code as we just executed 4924 // SmiUntag(heap_object, SetCC) 4925 STATIC_ASSERT(kHeapObjectTag == 1); 4926 __ adc(input_reg, input_reg, Operand(input_reg)); 4927 4928 // Heap number map check. 4929 __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4930 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 4931 __ cmp(scratch1, Operand(ip)); 4932 4933 if (instr->truncating()) { 4934 Register scratch3 = ToRegister(instr->temp2()); 4935 ASSERT(!scratch3.is(input_reg) && 4936 !scratch3.is(scratch1) && 4937 !scratch3.is(scratch2)); 4938 // Performs a truncating conversion of a floating point number as used by 4939 // the JS bitwise operations. 4940 Label heap_number; 4941 __ b(eq, &heap_number); 4942 // Check for undefined. Undefined is converted to zero for truncating 4943 // conversions. 4944 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 4945 __ cmp(input_reg, Operand(ip)); 4946 DeoptimizeIf(ne, instr->environment()); 4947 __ mov(input_reg, Operand::Zero()); 4948 __ b(&done); 4949 4950 __ bind(&heap_number); 4951 __ sub(scratch1, input_reg, Operand(kHeapObjectTag)); 4952 __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset); 4953 4954 __ ECMAToInt32(input_reg, double_scratch2, 4955 scratch1, scratch2, scratch3, double_scratch); 4956 4957 } else { 4958 // Deoptimize if we don't have a heap number. 
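    // The condition flags still hold the result of the heap number map
    // comparison emitted above; nothing in between touched them.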
4959 DeoptimizeIf(ne, instr->environment()); 4960 4961 __ sub(ip, input_reg, Operand(kHeapObjectTag)); 4962 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); 4963 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); 4964 DeoptimizeIf(ne, instr->environment()); 4965 4966 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4967 __ cmp(input_reg, Operand::Zero()); 4968 __ b(ne, &done); 4969 __ VmovHigh(scratch1, double_scratch2); 4970 __ tst(scratch1, Operand(HeapNumber::kSignMask)); 4971 DeoptimizeIf(ne, instr->environment()); 4972 } 4973 } 4974 __ bind(&done); 4975} 4976 4977 4978void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 4979 class DeferredTaggedToI: public LDeferredCode { 4980 public: 4981 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 4982 : LDeferredCode(codegen), instr_(instr) { } 4983 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } 4984 virtual LInstruction* instr() { return instr_; } 4985 private: 4986 LTaggedToI* instr_; 4987 }; 4988 4989 LOperand* input = instr->value(); 4990 ASSERT(input->IsRegister()); 4991 ASSERT(input->Equals(instr->result())); 4992 4993 Register input_reg = ToRegister(input); 4994 4995 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); 4996 4997 // Optimistically untag the input. 4998 // If the input is a HeapObject, SmiUntag will set the carry flag. 4999 __ SmiUntag(input_reg, SetCC); 5000 // Branch to deferred code if the input was tagged. 5001 // The deferred code will take care of restoring the tag. 5002 __ b(cs, deferred->entry()); 5003 __ bind(deferred->exit()); 5004} 5005 5006 5007void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 5008 LOperand* input = instr->value(); 5009 ASSERT(input->IsRegister()); 5010 LOperand* result = instr->result(); 5011 ASSERT(result->IsDoubleRegister()); 5012 5013 Register input_reg = ToRegister(input); 5014 DwVfpRegister result_reg = ToDoubleRegister(result); 5015 5016 HValue* value = instr->hydrogen()->value(); 5017 NumberUntagDMode mode = value->representation().IsSmi() 5018 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; 5019 5020 EmitNumberUntagD(input_reg, result_reg, 5021 instr->hydrogen()->can_convert_undefined_to_nan(), 5022 instr->hydrogen()->deoptimize_on_minus_zero(), 5023 instr->environment(), 5024 mode); 5025} 5026 5027 5028void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 5029 Register result_reg = ToRegister(instr->result()); 5030 Register scratch1 = scratch0(); 5031 Register scratch2 = ToRegister(instr->temp()); 5032 DwVfpRegister double_input = ToDoubleRegister(instr->value()); 5033 LowDwVfpRegister double_scratch = double_scratch0(); 5034 5035 if (instr->truncating()) { 5036 Register scratch3 = ToRegister(instr->temp2()); 5037 __ ECMAToInt32(result_reg, double_input, 5038 scratch1, scratch2, scratch3, double_scratch); 5039 } else { 5040 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); 5041 // Deoptimize if the input wasn't a int32 (inside a double). 
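    // TryDoubleToInt32Exact converts to int32 and back, then compares
    // against the original, so eq is set only for an exact round trip.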
    DeoptimizeIf(ne, instr->environment());
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    Register scratch3 = ToRegister(instr->temp2());
    __ ECMAToInt32(result_reg, double_input,
                   scratch1, scratch2, scratch3, double_scratch);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr->environment());
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
      __ bind(&done);
    }
  }
  __ SmiTag(result_reg, SetCC);
  // Deoptimize on overflow: the int32 does not fit in a smi.
  DeoptimizeIf(vs, instr->environment());
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input));
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input));
    DeoptimizeIf(eq, instr->environment());
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmp(scratch, Operand(first));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment());
    } else {
      DeoptimizeIf(lo, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmp(scratch, Operand(last));
        DeoptimizeIf(hi, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ tst(scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
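      // (With a power-of-two mask the tag can only be 0 or the mask itself,
      // so a single tst suffices: tag == 0 requires the bit to be clear and
      // deopts on ne, tag == mask requires it set and deopts on eq.)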
    } else {
      __ and_(scratch, scratch, Operand(mask));
      __ cmp(scratch, Operand(tag));
      DeoptimizeIf(ne, instr->environment());
    }
  }
}


void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  Register reg = ToRegister(instr->value());
  Handle<JSFunction> target = instr->hydrogen()->target();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*target)) {
    // Targets in new space may move; compare against the value of a cell
    // that the GC keeps up to date instead of embedding the target directly.
    Handle<Cell> cell = isolate()->factory()->NewCell(target);
    __ mov(ip, Operand(Handle<Object>(cell)));
    __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
    __ cmp(reg, ip);
  } else {
    __ cmp(reg, Operand(target));
  }
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
    __ push(object);
    CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
    __ StoreToSafepointRegisterSlot(r0, scratch0());
  }
  // A smi result means the migration failed; deoptimize in that case.
  __ tst(scratch0(), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment());
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps: public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;
  Register map_reg = scratch0();

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  SmallMapList* map_set = instr->hydrogen()->map_set();
  __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  Label success;
  for (int i = 0; i < map_set->length() - 1; i++) {
    Handle<Map> map = map_set->at(i);
    __ CompareMap(map_reg, map, &success);
    __ b(eq, &success);
  }

  Handle<Map> map = map_set->last();
  __ CompareMap(map_reg, map, &success);
  if (instr->hydrogen()->has_migration_target()) {
    __ b(ne, deferred->entry());
  } else {
    DeoptimizeIf(ne, instr->environment());
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for heap number
  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(factory()->heap_number_map()));
  __ b(eq, &heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, Operand(factory()->undefined_value()));
  DeoptimizeIf(ne, instr->environment());
  __ mov(result_reg, Operand::Zero());
  __ jmp(&done);

  // Heap number
  __ bind(&heap_number);
  __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ jmp(&done);

  // smi
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate: public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size,
                result,
                scratch,
                scratch2,
                deferred->entry(),
                flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(scratch, Operand(size));
    } else {
      scratch = ToRegister(instr->size());
    }
    __ sub(scratch, scratch, Operand(kPointerSize));
    __ sub(result, result, Operand(kHeapObjectTag));
    Label loop;
    __ bind(&loop);
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ str(scratch2, MemOperand(result, scratch));
    __ sub(scratch, scratch, Operand(kPointerSize));
    __ cmp(scratch, Operand(0));
    __ b(ge, &loop);
    __ add(result, result, Operand(kHeapObjectTag));
  }
}
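
// Rough shape of the fast/slow split in DoAllocate above: __ Allocate bumps
// the allocation top inline and jumps to deferred->entry() when that fails;
// the deferred path below allocates through the runtime and rejoins at
// deferred->exit(). The optional filler loop then writes one-pointer filler
// maps from the last word of the object down to its first, keeping the heap
// iterable until the caller initializes the real fields.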

void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
  } else {
    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
  }
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(r0));
  __ push(r0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  Label materialized;
  // Registers will be used as follows:
  // r7 = literals array.
  // r1 = regexp literal.
  // r0 = regexp literal clone.
  // r2 and r4-r6 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadHeapObject(r7, instr->hydrogen()->literals());
  __ ldr(r1, FieldMemOperand(r7, literal_offset));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &materialized);

  // Create regexp literal using runtime function
  // Result will be in r0.
  __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ mov(r5, Operand(instr->hydrogen()->pattern()));
  __ mov(r4, Operand(instr->hydrogen()->flags()));
  __ Push(r7, r6, r5, r4);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(r1, r0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ mov(r0, Operand(Smi::FromInt(size)));
  __ Push(r1, r0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(r1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
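  // Pretenured closures and closures with literals take the slower
  // Runtime::kNewClosure path below, which receives (context, shared_info,
  // pretenure flag) and leaves the new closure in r0.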
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                            instr->hydrogen()->is_generator());
    __ mov(r1, Operand(instr->hydrogen()->shared_info()));
    __ push(r1);
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    __ mov(r1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, r2, r1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(input, Operand(ip));
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
    __ b(ge, false_label);
    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
    __ tst(ip, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, input, scratch, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ b(eq, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ b(eq, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
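    // (Undetectable objects such as document.all answer "undefined" to
    // typeof: tst clears Z when the kIsUndetectable bit is set, so the ne
    // condition below sends them to the true branch.)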
    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
    __ tst(ip, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
    __ b(eq, true_label);
    __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->object_string())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ b(eq, true_label);
    }
    __ CompareObjectType(input, input, scratch,
                         FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ b(lt, false_label);
    __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ b(gt, false_label);
    // Check for undetectable objects => false.
    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
    __ tst(ip, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &check_frame_marker);
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ cmp(tem1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
    Assembler::BlockConstPoolScope block_const_pool(masm());
    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= Assembler::kInstrSize;
    }
  }
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt();
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(al, instr->environment(), type);
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(hs, &done);
    StackCheckStub stub;
    PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
    EnsureSpaceForLazyDeopt();
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
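    // Unlike the function-entry case above, the slow path lives in deferred
    // code: the comparison against the stack limit falls through when sp is
    // above the limit (the common case), and the deferred call rejoins at
    // instr->done_label().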
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(lo, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt();
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  // Normally we record the first unknown OSR value as the entrypoint to the
  // OSR code, but if there were none, record the entrypoint here.
  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  DeoptimizeIf(eq, instr->environment());

  Register null_value = r5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmp(r0, null_value);
  DeoptimizeIf(eq, instr->environment());

  __ SmiTst(r0);
  DeoptimizeIf(eq, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
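  // The runtime call returns either a map (the enum cache can be used) or a
  // fixed array of property names. Checking the result's map against the
  // meta map below distinguishes the two cases; a non-map result deoptimizes
  // to the generic for-in path.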
  __ bind(&call_runtime);
  __ push(r0);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r1, ip);
  DeoptimizeIf(ne, instr->environment());
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Operand(Smi::FromInt(0)));
  __ b(ne, &load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ldr(result,
         FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ldr(result,
         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmp(result, Operand::Zero());
  DeoptimizeIf(eq, instr->environment());

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label out_of_object, done;
  __ cmp(index, Operand::Zero());
  __ b(lt, &out_of_object);

  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to the negated out-of-object property index plus 1.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal